diff --git a/.ci/scripts/auditwheel_wrapper.py b/.ci/scripts/auditwheel_wrapper.py
new file mode 100755
index 0000000000..a33b39314f
--- /dev/null
+++ b/.ci/scripts/auditwheel_wrapper.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Wraps `auditwheel repair` to first check whether we're repairing a potentially
+# abi3-compatible wheel and, if so, rename the wheel before repairing it.
+
+import argparse
+import os
+import subprocess
+from typing import Optional
+from zipfile import ZipFile
+
+from packaging.tags import Tag
+from packaging.utils import parse_wheel_filename
+from packaging.version import Version
+
+
+def check_is_abi3_compatible(wheel_file: str) -> None:
+ """Check the contents of the built wheel for any `.so` files that are *not*
+ abi3 compatible.
+ """
+
+ with ZipFile(wheel_file, "r") as wheel:
+ for file in wheel.namelist():
+ if not file.endswith(".so"):
+ continue
+
+ if not file.endswith(".abi3.so"):
+ raise Exception(f"Found non-abi3 lib: {file}")
+
+
+def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
+ """Replaces the cpython wheel file with a ABI3 compatible wheel"""
+
+ if tag.abi == "abi3":
+ # Nothing to do.
+ return wheel_file
+
+ check_is_abi3_compatible(wheel_file)
+
+ abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
+
+ dirname = os.path.dirname(wheel_file)
+ new_wheel_file = os.path.join(
+ dirname,
+ f"{name}-{version}-{abi3_tag}.whl",
+ )
+
+ os.rename(wheel_file, new_wheel_file)
+
+ print("Renamed wheel to", new_wheel_file)
+
+ return new_wheel_file
+
+
+def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
+ """Entry point"""
+
+ # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
+    # normalizes the package name (e.g. it converts matrix_synapse ->
+ # matrix-synapse), which is not what we want.
+ _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
+ name = os.path.basename(wheel_file).split("-")[0]
+
+ if len(tags) != 1:
+        # We expect the wheel file to have only a single tag.
+ raise Exception(f"Unexpectedly found multiple tags: {tags}")
+
+ tag = next(iter(tags))
+
+ if build:
+ # We don't use build tags in Synapse
+ raise Exception(f"Unexpected build tag: {build}")
+
+ # If the wheel is for cpython then convert it into an abi3 wheel.
+ if tag.interpreter.startswith("cp"):
+ wheel_file = cpython(wheel_file, name, version, tag)
+
+ # Finally, repair the wheel.
+ if archs is not None:
+        # If we are given archs then we are on macOS and need to use
+        # `delocate-wheel` rather than `auditwheel` to repair the wheel.
+ subprocess.run(["delocate-listdeps", wheel_file], check=True)
+ subprocess.run(
+ ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
+ check=True,
+ )
+ else:
+ subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
+
+ parser.add_argument(
+ "--wheel-dir",
+ "-w",
+ metavar="WHEEL_DIR",
+ help="Directory to store delocated wheels",
+ required=True,
+ )
+
+ parser.add_argument(
+ "--require-archs",
+ metavar="archs",
+ default=None,
+ )
+
+ parser.add_argument(
+ "wheel_file",
+ metavar="WHEEL_FILE",
+ )
+
+ args = parser.parse_args()
+
+ wheel_file = args.wheel_file
+ wheel_dir = args.wheel_dir
+ archs = args.require_archs
+
+ main(wheel_file, wheel_dir, archs)
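For context, a minimal sketch (not part of the patch) of the two `packaging` behaviours the wrapper relies on; the wheel filename below is a made-up example:

```python
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

filename = "matrix_synapse-1.72.0rc1-cp37-cp37m-manylinux_2_17_x86_64.whl"

# `parse_wheel_filename` normalizes underscores in the distribution name...
name, version, build, tags = parse_wheel_filename(filename)
print(name)  # matrix-synapse

# ...which is why the wrapper takes the name from the raw filename instead.
print(filename.split("-")[0])  # matrix_synapse

# Swapping the ABI component of the tag yields the renamed abi3 wheel.
tag = next(iter(tags))
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
print(f"{filename.split('-')[0]}-{version}-{abi3_tag}.whl")
# matrix_synapse-1.72.0rc1-cp37-abi3-manylinux_2_17_x86_64.whl
```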
diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py
index b1f604eeb0..0cdc20e19c 100755
--- a/.ci/scripts/calculate_jobs.py
+++ b/.ci/scripts/calculate_jobs.py
@@ -18,6 +18,13 @@
import json
import os
+
+def set_output(key: str, value: str) -> None:
+ # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
+ with open(os.environ["GITHUB_OUTPUT"], "at") as f:
+ print(f"{key}={value}", file=f)
+
+
IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
@@ -39,7 +46,7 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
- for version in ("3.8", "3.9", "3.10")
+ for version in ("3.8", "3.9", "3.10", "3.11")
)
@@ -47,7 +54,7 @@ trial_postgres_tests = [
{
"python-version": "3.7",
"database": "postgres",
- "postgres-version": "10",
+ "postgres-version": "11",
"extras": "all",
}
]
@@ -55,9 +62,9 @@ trial_postgres_tests = [
if not IS_PR:
trial_postgres_tests.append(
{
- "python-version": "3.10",
+ "python-version": "3.11",
"database": "postgres",
- "postgres-version": "14",
+ "postgres-version": "15",
"extras": "all",
}
)
@@ -81,7 +88,7 @@ print("::endgroup::")
test_matrix = json.dumps(
trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
-print(f"::set-output name=trial_test_matrix::{test_matrix}")
+set_output("trial_test_matrix", test_matrix)
# First calculate the various sytest jobs.
@@ -125,4 +132,4 @@ print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")
test_matrix = json.dumps(sytest_tests)
-print(f"::set-output name=sytest_test_matrix::{test_matrix}")
+set_output("sytest_test_matrix", test_matrix)
diff --git a/.ci/scripts/setup_complement_prerequisites.sh b/.ci/scripts/setup_complement_prerequisites.sh
index 4848901cbf..42ef654167 100755
--- a/.ci/scripts/setup_complement_prerequisites.sh
+++ b/.ci/scripts/setup_complement_prerequisites.sh
@@ -21,7 +21,7 @@ endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
- go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
+ go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template
diff --git a/.dockerignore b/.dockerignore
index 1c6905b1bb..0b51345cbd 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -9,6 +9,7 @@
!pyproject.toml
!poetry.lock
!Cargo.lock
+!Cargo.toml
!build_rust.py
rust/target
diff --git a/.flake8 b/.flake8
index acb118c86e..4c6a4d5843 100644
--- a/.flake8
+++ b/.flake8
@@ -8,4 +8,11 @@
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
-ignore=W503,W504,E203,E731,E501
+#
+# flake8-bugbear runs extra checks. Its error codes are described at
+# https://github.com/PyCQA/flake8-bugbear#list-of-warnings
+# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
+# B023: Functions defined inside a loop must not use variables redefined in the loop
+# B024: Abstract base class with no abstract method.
+
+ignore=W503,W504,E203,E731,E501,B019,B023,B024
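For reference, a small sketch of the leak that B019 warns about (suppressed above because Synapse caches methods deliberately in places): `functools.lru_cache` on a method stores bound `self` references in its cache keys, so instances are kept alive for the lifetime of the process.

```python
import functools

class Widget:
    @functools.lru_cache(maxsize=None)  # B019 fires on this line
    def expensive(self, x: int) -> int:
        return x * x

w = Widget()
w.expensive(2)
del w  # the cache still holds a reference to the instance, so it isn't freed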
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..7ce353ed64
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,23 @@
+version: 2
+updates:
+ - # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
+ package-ecosystem: "pip"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+
+ - package-ecosystem: "docker"
+ directory: "/docker"
+ schedule:
+ interval: "weekly"
+
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+
+ - package-ecosystem: "cargo"
+ directory: "/"
+ versioning-strategy: "lockfile-only"
+ schedule:
+ interval: "weekly"
diff --git a/.github/workflows/dependabot_changelog.yml b/.github/workflows/dependabot_changelog.yml
new file mode 100644
index 0000000000..b6a29a5722
--- /dev/null
+++ b/.github/workflows/dependabot_changelog.yml
@@ -0,0 +1,46 @@
+name: Write changelog for dependabot PR
+on:
+ pull_request:
+ types:
+ - opened
+ - reopened # For debugging!
+
+permissions:
+ # Needed to be able to push the commit. See
+ # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
+ # for a similar example
+ contents: write
+
+jobs:
+ add-changelog:
+ runs-on: 'ubuntu-latest'
+ if: ${{ github.actor == 'dependabot[bot]' }}
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ github.event.pull_request.head.ref }}
+ - name: Write, commit and push changelog
+ run: |
+ echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
+ git add changelog.d
+ git config user.email "github-actions[bot]@users.noreply.github.com"
+ git config user.name "GitHub Actions"
+ git commit -m "Changelog"
+ git push
+ shell: bash
+ # The `git push` above does not trigger CI on the dependabot PR.
+ #
+ # By default, workflows can't trigger other workflows when they're just using the
+ # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
+ # recursive workflow loops by accident, because that'll get very expensive very
+ # quickly.) Instead, you have to manually call out to another workflow, or else
+ # make your changes (i.e. the `git push` above) using a personal access token.
+ # See
+ # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
+ #
+ # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
+ # See git commit history for previous attempts. If anyone desperately wants to try
+ # again in the future, make a matrix-bot account and use its access token to git push.
+
+ # THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
+ # are sufficiently locked down to dependabot only as above.
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index d20d30c035..49427ab50d 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -17,19 +17,19 @@ jobs:
steps:
- name: Set up QEMU
id: qemu
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Set up Docker Buildx
id: buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
- name: Inspect builder
run: docker buildx inspect
-
+
- name: Log in to DockerHub
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -48,10 +48,15 @@ jobs:
type=pep440,pattern={{raw}}
- name: Build and push all platforms
- uses: docker/build-push-action@v2
+ uses: docker/build-push-action@v3
with:
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
+
+ # arm64 builds OOM without the git fetch setting. c.f.
+ # https://github.com/rust-lang/cargo/issues/10583
+ build-args: |
+ CARGO_NET_GIT_FETCH_WITH_CLI=true
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
new file mode 100644
index 0000000000..231982f681
--- /dev/null
+++ b/.github/workflows/docs-pr-netlify.yaml
@@ -0,0 +1,34 @@
+name: Deploy documentation PR preview
+
+on:
+ workflow_run:
+ workflows: [ "Prepare documentation PR preview" ]
+ types:
+ - completed
+
+jobs:
+ netlify:
+ if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
+ runs-on: ubuntu-latest
+ steps:
+ # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
+ # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
+ - name: 📥 Download artifact
+ uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
+ with:
+ workflow: docs-pr.yaml
+ run_id: ${{ github.event.workflow_run.id }}
+ name: book
+ path: book
+
+ - name: 📤 Deploy to Netlify
+ uses: matrix-org/netlify-pr-preview@v1
+ with:
+ path: book
+ owner: ${{ github.event.workflow_run.head_repository.owner.login }}
+ branch: ${{ github.event.workflow_run.head_branch }}
+ revision: ${{ github.event.workflow_run.head_sha }}
+ token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
+ site_id: ${{ secrets.NETLIFY_SITE_ID }}
+ desc: Documentation preview
+ deployment_env: PR Documentation Preview
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
new file mode 100644
index 0000000000..cde6cf511e
--- /dev/null
+++ b/.github/workflows/docs-pr.yaml
@@ -0,0 +1,34 @@
+name: Prepare documentation PR preview
+
+on:
+ pull_request:
+ paths:
+ - docs/**
+
+jobs:
+ pages:
+ name: GitHub Pages
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup mdbook
+ uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+ with:
+ mdbook-version: '0.4.17'
+
+ - name: Build the documentation
+ # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
+ # However, we're using docs/README.md for other purposes and need to pick a new page
+ # as the default. Let's opt for the welcome page instead.
+ run: |
+ mdbook build
+ cp book/welcome_and_overview.html book/index.html
+
+ - name: Upload Artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: book
+ path: book
+ # We'll only use this in a workflow_run, then we're done with it
+ retention-days: 1
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index b366eb8667..575412d965 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -17,10 +17,10 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Setup mdbook
- uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
+ uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
@@ -54,11 +54,11 @@ jobs:
esac
# finally, set the 'branch-version' var.
- echo "::set-output name=branch-version::$branch"
+ echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
# Deploy to the target directory.
- name: Deploy to gh pages
- uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
+ uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index 9a708286a4..c6f481cdaa 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -25,12 +25,11 @@ jobs:
mypy:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -59,13 +58,12 @@ jobs:
postgres-version: "14"
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- run: sudo apt-get -qq install xmlsec1
@@ -76,7 +74,7 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- - uses: actions/setup-python@v2
+ - uses: actions/setup-python@v4
with:
python-version: "3.x"
- run: pip install .[all,test]
@@ -133,13 +131,12 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Ensure sytest runs `pip install`
@@ -155,7 +152,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -182,8 +179,8 @@ jobs:
database: Postgres
steps:
- - name: Run actions/checkout@v2 for synapse
- uses: actions/checkout@v2
+ - name: Run actions/checkout@v3 for synapse
+ uses: actions/checkout@v3
with:
path: synapse
@@ -210,7 +207,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 0708d631cd..0601a7dbaf 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -11,6 +11,7 @@ on:
# we do the full build on tags.
tags: ["v*"]
+ workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -24,8 +25,10 @@ jobs:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
@@ -33,7 +36,7 @@ jobs:
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
fi
- echo "::set-output name=distros::$dists"
+ echo "distros=$dists" >> "$GITHUB_OUTPUT"
# map the step outputs to job outputs
outputs:
distros: ${{ steps.set-distros.outputs.distros }}
@@ -49,18 +52,18 @@ jobs:
steps:
- name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
path: src
- name: Set up Docker Buildx
id: buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
with:
install: true
- name: Set up docker layer caching
- uses: actions/cache@v2
+ uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -68,7 +71,9 @@ jobs:
${{ runner.os }}-buildx-
- name: Set up python
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
- name: Build the packages
# see https://github.com/docker/build-push-action/issues/252
@@ -84,38 +89,59 @@ jobs:
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Upload debs as artifacts
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: debs
path: debs/*
build-wheels:
- name: Build wheels on ${{ matrix.os }}
+ name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-20.04, macos-10.15]
+ os: [ubuntu-20.04, macos-11]
+ arch: [x86_64, aarch64]
+ # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
+ # It is not read by the rest of the workflow.
is_pr:
- ${{ startsWith(github.ref, 'refs/pull/') }}
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
- os: "macos-10.15"
+ os: "macos-11"
+ # Don't build aarch64 wheels on mac.
+ - os: "macos-11"
+ arch: aarch64
+ # Don't build aarch64 wheels on PR CI.
+ - is_pr: true
+ arch: aarch64
steps:
- uses: actions/checkout@v3
- - uses: actions/setup-python@v3
+ - uses: actions/setup-python@v4
+ with:
+ # setup-python@v4 doesn't impose a default python version. Need to use 3.x
+ # here, because `python` on osx points to Python 2.7.
+ python-version: "3.x"
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
- # Only build a single wheel in CI.
- - name: Set env vars.
- run: |
- echo "CIBW_BUILD="cp37-manylinux_x86_64"" >> $GITHUB_ENV
+ - name: Set up QEMU to emulate aarch64
+ if: matrix.arch == 'aarch64'
+ uses: docker/setup-qemu-action@v2
+ with:
+ platforms: arm64
+
+ - name: Build aarch64 wheels
+ if: matrix.arch == 'aarch64'
+ run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV
+
+ - name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
+ run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
@@ -123,6 +149,9 @@ jobs:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
+ # Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
+ CARGO_NET_GIT_FETCH_WITH_CLI: true
+ CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v3
with:
@@ -145,7 +174,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- - uses: actions/upload-artifact@v2
+ - uses: actions/upload-artifact@v3
with:
name: Sdist
path: dist/*.tar.gz
@@ -162,7 +191,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v3
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 9fe61930a5..ec5ab79f9c 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -4,6 +4,7 @@ on:
push:
branches: ["develop", "release-*"]
pull_request:
+ workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -30,8 +31,10 @@ jobs:
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
@@ -41,8 +44,10 @@ jobs:
check-schema-delta:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
@@ -54,19 +59,21 @@ jobs:
lint-crlf:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
- if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
+ if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- - uses: actions/setup-python@v2
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
@@ -75,7 +82,7 @@ jobs:
lint-pydantic:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: matrix-org/setup-python-poetry@v1
@@ -89,13 +96,15 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
components: clippy
- uses: Swatinem/rust-cache@v2
@@ -107,14 +116,16 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: 1.58.1
- override: true
- components: rustfmt
+ toolchain: 1.58.1
+ components: rustfmt
- uses: Swatinem/rust-cache@v2
- run: cargo fmt --check
@@ -140,8 +151,10 @@ jobs:
needs: linting-done
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- id: get-matrix
run: .ci/scripts/calculate_jobs.py
outputs:
@@ -157,7 +170,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
@@ -166,6 +179,16 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.job.postgres-version }}
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+ with:
+ toolchain: 1.58.1
+ - uses: Swatinem/rust-cache@v2
+
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.job.python-version }}
@@ -199,13 +222,15 @@ jobs:
needs: linting-done
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
@@ -270,7 +295,7 @@ jobs:
extras: ["all"]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
@@ -313,15 +338,17 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
@@ -331,7 +358,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -361,7 +388,7 @@ jobs:
--health-retries 5
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
@@ -382,10 +409,10 @@ jobs:
matrix:
include:
- python-version: "3.7"
- postgres-version: "10"
+ postgres-version: "11"
- - python-version: "3.10"
- postgres-version: "14"
+ - python-version: "3.11"
+ postgres-version: "15"
services:
postgres:
@@ -402,7 +429,16 @@ jobs:
--health-retries 5
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
+ - name: Add PostgreSQL apt repository
+ # We need a version of pg_dump that can handle the version of
+ # PostgreSQL being tested against. The Ubuntu package repository lags
+        # behind new releases, so we have to use the PostgreSQL apt repository.
+ # Steps taken from https://www.postgresql.org/download/linux/ubuntu/
+ run: |
+ sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
+ wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
+ sudo apt-get update
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
@@ -444,16 +480,18 @@ jobs:
database: Postgres
steps:
- - name: Run actions/checkout@v2 for synapse
- uses: actions/checkout@v2
+ - name: Run actions/checkout@v3 for synapse
+ uses: actions/checkout@v3
with:
path: synapse
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
- name: Prepare Complement's Prerequisites
@@ -473,13 +511,15 @@ jobs:
- changes
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
- run: cargo test
diff --git a/.github/workflows/triage-incoming.yml b/.github/workflows/triage-incoming.yml
index f926bcb759..0f0397cf5b 100644
--- a/.github/workflows/triage-incoming.yml
+++ b/.github/workflows/triage-incoming.yml
@@ -5,24 +5,11 @@ on:
types: [ opened ]
jobs:
- add_new_issues:
- name: Add new issues to the triage board
- runs-on: ubuntu-latest
- steps:
- - uses: octokit/graphql-action@v2.x
- id: add_to_project
- with:
- headers: '{"GraphQL-Features": "projects_next_graphql"}'
- query: |
- mutation add_to_project($projectid:ID!,$contentid:ID!) {
- addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) {
- item {
- id
- }
- }
- }
- projectid: ${{ env.PROJECT_ID }}
- contentid: ${{ github.event.issue.node_id }}
- env:
- PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
- GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ triage:
+ uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
+ with:
+ project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
+ content_id: ${{ github.event.issue.node_id }}
+ secrets:
+ github_access_token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+
diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml
index fbd55de17f..d1ac4357b1 100644
--- a/.github/workflows/triage_labelled.yml
+++ b/.github/workflows/triage_labelled.yml
@@ -11,34 +11,34 @@ jobs:
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- - uses: octokit/graphql-action@v2.x
- id: add_to_project
+ - uses: actions/add-to-project@main
+ id: add_project
with:
- headers: '{"GraphQL-Features": "projects_next_graphql"}'
- query: |
- mutation {
- updateProjectV2ItemFieldValue(
- input: {
- projectId: $projectid
- itemId: $contentid
- fieldId: $fieldid
- value: {
- singleSelectOptionId: "Todo"
+ project-url: "https://github.com/orgs/matrix-org/projects/67"
+ github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ - name: Set status
+ env:
+ GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ run: |
+ gh api graphql -f query='
+ mutation(
+ $project: ID!
+ $item: ID!
+ $fieldid: ID!
+ $columnid: String!
+ ) {
+ updateProjectV2ItemFieldValue(
+ input: {
+ projectId: $project
+ itemId: $item
+ fieldId: $fieldid
+ value: {
+ singleSelectOptionId: $columnid
}
- }
- ) {
- projectV2Item {
- id
- }
+ }
+ ) {
+ projectV2Item {
+ id
}
}
-
- projectid: ${{ env.PROJECT_ID }}
- contentid: ${{ github.event.issue.node_id }}
- fieldid: ${{ env.FIELD_ID }}
- optionid: ${{ env.OPTION_ID }}
- env:
- PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
- GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
- FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
- OPTION_ID: "ba22e43c"
+ }' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index 8fa2fbdea0..6a047193f6 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -15,13 +15,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -40,14 +39,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -81,13 +79,12 @@ jobs:
- ${{ github.workspace }}:/src
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Patch dependencies
@@ -112,7 +109,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -138,8 +135,8 @@ jobs:
database: Postgres
steps:
- - name: Run actions/checkout@v2 for synapse
- uses: actions/checkout@v2
+ - name: Run actions/checkout@v3 for synapse
+ uses: actions/checkout@v3
with:
path: synapse
@@ -151,12 +148,11 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
- pipx install poetry==1.1.14
+ pipx install poetry==1.2.0
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
- # NOT IN 1.1.14 poetry lock --check
working-directory: synapse
- run: |
@@ -177,7 +173,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/CHANGES.md b/CHANGES.md
index 82b5526f94..0fa6f7fab7 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,17 +1,510 @@
-Synapse 1.68.0rc2 (2022-09-23)
+Synapse 1.72.0rc1 (2022-11-16)
==============================
-Please note that Synapse will now refuse to start if configured to use a version of SQLite earlier than 3.27.
+Please note that we now only support PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life; see our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).
-In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
-Those using packages will not be affected. On most platforms, installing with `pip install matrix-synapse` will not be affected.
-See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1670).
+Features
+--------
+
+- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
+- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
+- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
+- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))
Bugfixes
--------
-- Fix building from packaged sdist. Broke in v1.68.0rc1. ([\#13866](https://github.com/matrix-org/synapse/issues/13866))
+- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
+- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
+- Fix a bug introduced in Synapse 1.66 where certain push rules were not sent to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
+- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
+- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
+- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
+- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
+- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
+- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
+- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))
+
+
+Updates to the Docker image
+---------------------------
+
+- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
+- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))
+
+
+Improved Documentation
+----------------------
+
+- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
+- Add an additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust the general TURN server documentation structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
+- Add an example of how to load balance `/sync` requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
+- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
+- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
+- Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
+- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in Complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
+- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
+- Use a maintained Github action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
+- Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
+- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
+- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
+- Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
+- Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
+- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
+- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
+- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
+- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))
+
+
+Synapse 1.71.0 (2022-11-08)
+===========================
+
+Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
+They will be removed altogether in Synapse 1.73.0.
+If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
+
+**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.
+
+No significant changes since 1.71.0rc2.
+
+
+Synapse 1.71.0rc2 (2022-11-04)
+==============================
+
+Improved Documentation
+----------------------
+
+- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))
+
+
+Deprecations and Removals
+-------------------------
+
+- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+
+
+Synapse 1.71.0rc1 (2022-11-01)
+==============================
+
+Features
+--------
+
+- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
+- Allow use of Postgres and SQLite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
+- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), Pushrules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
+- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
+- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
+- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
+- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
+- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
+- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
+- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
+- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
+- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
+- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
+- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
+- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
+- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))
+
+
+Improved Documentation
+----------------------
+
+- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
+- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
+- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110))
+- Update docstrings of `SynapseError` and `FederationError` to better describe what they are used for and what the effects of using them are. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))
+
+
+Internal Changes
+----------------
+
+- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
+- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
+- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
+- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
+- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
+- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
+- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
+- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
+- Build wheels on macOS 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
+- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
+- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
+- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
+- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))
+
+
+Dependency updates
+------------------
+Runtime:
+
+- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278))
+- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271))
+- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274))
+- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331))
+- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270))
+- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330))
+- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277))
+- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279))
+
+Tooling and CI:
+
+- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328))
+- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042))
+- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276))
+- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275))
+- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273))
+- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332))
+- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133))
+- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272))
+
+
+Synapse 1.70.1 (2022-10-28)
+===========================
+
+This release fixes some regressions that were discovered in 1.70.0.
+
+[#14300](https://github.com/matrix-org/synapse/issues/14300)
+was previously reported to be a regression in 1.70.0 as well. However, we have
+since concluded that it was limited to the reporter and thus have not needed
+to include any fix for it in 1.70.1.
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301))
+- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314))
+
+
+Synapse 1.70.0 (2022-10-26)
+===========================
+
+No significant changes since 1.70.0rc2.
+
+
+Synapse 1.70.0rc2 (2022-10-25)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248))
+- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280))
+- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283))
+
+
+Internal Changes
+----------------
+
+- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253))
+- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259))
+
+
+Synapse 1.70.0rc1 (2022-10-19)
+==============================
+
+Features
+--------
+
+- Support for [MSC3856](https://github.com/matrix-org/matrix-spec-proposals/pull/3856): threads list API. ([\#13394](https://github.com/matrix-org/synapse/issues/13394), [\#14171](https://github.com/matrix-org/synapse/issues/14171), [\#14175](https://github.com/matrix-org/synapse/issues/14175))
+- Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)). ([\#13776](https://github.com/matrix-org/synapse/issues/13776), [\#13824](https://github.com/matrix-org/synapse/issues/13824), [\#13877](https://github.com/matrix-org/synapse/issues/13877), [\#13878](https://github.com/matrix-org/synapse/issues/13878), [\#14050](https://github.com/matrix-org/synapse/issues/14050), [\#14140](https://github.com/matrix-org/synapse/issues/14140), [\#14159](https://github.com/matrix-org/synapse/issues/14159), [\#14163](https://github.com/matrix-org/synapse/issues/14163), [\#14174](https://github.com/matrix-org/synapse/issues/14174), [\#14222](https://github.com/matrix-org/synapse/issues/14222))
+- Stop fetching missing `prev_events` after we already know their signature is invalid. ([\#13816](https://github.com/matrix-org/synapse/issues/13816))
+- Send application service access tokens as a header (and query parameter). Implements [MSC2832](https://github.com/matrix-org/matrix-spec-proposals/pull/2832). ([\#13996](https://github.com/matrix-org/synapse/issues/13996))
+- Ignore server ACL changes when generating pushes. Implements [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786). ([\#13997](https://github.com/matrix-org/synapse/issues/13997))
+- Experimental support for redirecting to an implementation of a [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) HTTP rendezvous service. ([\#14018](https://github.com/matrix-org/synapse/issues/14018))
+- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
+- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
+- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehydration](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
+- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
+- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
+- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))
+
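+As a non-normative illustration of the MSC2832 change above: homeserver-to-application-service requests now carry the `hs_token` in an `Authorization` header as well as in the legacy `access_token` query parameter. A minimal sketch, assuming the third-party `requests` library and placeholder URL/token values:
+
+```python
+import requests
+
+HS_TOKEN = "hypothetical_hs_token"  # placeholder, not a real token
+
+# Per MSC2832 the token is sent as a Bearer header; the query parameter
+# is kept for backwards compatibility with older application services.
+resp = requests.put(
+    "https://appservice.example.com/_matrix/app/v1/transactions/1",
+    headers={"Authorization": f"Bearer {HS_TOKEN}"},
+    params={"access_token": HS_TOKEN},
+    json={"events": []},
+)
+resp.raise_for_status()
+```
+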
+
+Bugfixes
+--------
+
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
+- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
+- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
+- Fix a bug introduced in Synapse 1.35.0 where errors parsing a `/send_join` or `/state` response would produce excessive, low-quality Sentry events. ([\#14065](https://github.com/matrix-org/synapse/issues/14065))
+- Fix a long-standing bug where Synapse would error on the optional 'invite_room_state' field not being provided to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14083](https://github.com/matrix-org/synapse/issues/14083))
+- Fix a bug where invalid oEmbed fields would cause the entire response to be discarded. Introduced in Synapse 1.18.0. ([\#14089](https://github.com/matrix-org/synapse/issues/14089))
+- Fix a bug introduced in Synapse 1.37.0 in which an incorrect key name was used for sending and receiving room metadata when knocking on a room. ([\#14102](https://github.com/matrix-org/synapse/issues/14102))
+- Fix a bug introduced in Synapse 1.69.0rc1 where the joined hosts for a given event were not being properly cached. ([\#14125](https://github.com/matrix-org/synapse/issues/14125))
+- Fix a bug introduced in Synapse 1.30.0 where purging and rejoining a room without restarting in-between would result in a broken room. ([\#14161](https://github.com/matrix-org/synapse/issues/14161), [\#14164](https://github.com/matrix-org/synapse/issues/14164))
+- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint returning potentially inaccurate closest events with `outliers` present. See the sketch after this list. ([\#14215](https://github.com/matrix-org/synapse/issues/14215))
+
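+A rough sketch of the MSC3030 endpoint fixed above, assuming the unstable prefix used while the MSC was in development, the third-party `requests` library, and placeholder identifiers:
+
+```python
+from urllib.parse import quote
+
+import requests
+
+ACCESS_TOKEN = "hypothetical_access_token"
+ROOM_ID = "!room:example.com"  # hypothetical room
+
+# Find the closest event at or after the given timestamp (milliseconds),
+# searching forwards (`dir=f`).
+resp = requests.get(
+    "https://synapse.example.com/_matrix/client/unstable/"
+    f"org.matrix.msc3030/rooms/{quote(ROOM_ID, safe='')}/timestamp_to_event",
+    params={"ts": "1666000000000", "dir": "f"},
+    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
+)
+resp.raise_for_status()
+print(resp.json())
+```
+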
+
+Updates to the Docker image
+---------------------------
+
+- Update the version of frozendict in Docker images and Debian packages from 2.3.3 to 2.3.4, which may fix memory leak problems. ([\#13955](https://github.com/matrix-org/synapse/issues/13955))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Prevent a class of database sharding errors when using `Dockerfile-workers` to spawn multiple instances of the same worker. Contributed by Jason Little. ([\#14165](https://github.com/matrix-org/synapse/issues/14165))
+- Set `LD_PRELOAD` to use the jemalloc memory allocator in `Dockerfile-workers`. See the sketch after this list. ([\#14182](https://github.com/matrix-org/synapse/issues/14182))
+- Fix pre-startup logging being lost when using the `Dockerfile-workers` image. ([\#14195](https://github.com/matrix-org/synapse/issues/14195))
+
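+The `LD_PRELOAD` change above amounts to asking the dynamic linker to load jemalloc ahead of glibc's allocator before Synapse starts. A minimal Python sketch of the same idea, assuming a Debian-style jemalloc path (hypothetical; an image may install it elsewhere):
+
+```python
+import os
+import sys
+
+# Hypothetical path: Debian-based images typically ship jemalloc at
+# /usr/lib/<arch>-linux-gnu/libjemalloc.so.2.
+JEMALLOC = "/usr/lib/x86_64-linux-gnu/libjemalloc.so.2"
+
+env = dict(os.environ)
+if os.path.exists(JEMALLOC):
+    # Load jemalloc in place of the default malloc implementation.
+    env["LD_PRELOAD"] = JEMALLOC
+
+# Replace this process with the homeserver, inheriting the tweaked env.
+os.execvpe(sys.executable, [sys.executable, "-m", "synapse.app.homeserver"], env)
+```
+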
+
+Improved Documentation
+----------------------
+
+- Add sample worker files for `pusher` and `federation_sender`. ([\#14077](https://github.com/matrix-org/synapse/issues/14077))
+- Improve the listener example on the metrics documentation. ([\#14078](https://github.com/matrix-org/synapse/issues/14078))
+- Expand Google OpenID Connect example config to map email attribute. Contributed by @ptman. ([\#14081](https://github.com/matrix-org/synapse/issues/14081))
+- Document that ending a changelog entry with a full stop or exclamation mark is not optional. ([\#14087](https://github.com/matrix-org/synapse/issues/14087))
+- Fix links to jemalloc documentation, which were broken in [#13491](https://github.com/matrix-org/synapse/pull/13491). ([\#14093](https://github.com/matrix-org/synapse/issues/14093))
+- Remove the unneeded `replication` listener from the Docker Compose example. ([\#14107](https://github.com/matrix-org/synapse/issues/14107))
+- Fix name of `alias_creation_rules` option in the config manual documentation. ([\#14124](https://github.com/matrix-org/synapse/issues/14124))
+- Clarify comment on event contexts. ([\#14145](https://github.com/matrix-org/synapse/issues/14145))
+- Fix dead link to the [Admin Registration API](https://matrix-org.github.io/synapse/latest/admin_api/register_api.html). ([\#14189](https://github.com/matrix-org/synapse/issues/14189))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the experimental implementation of [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772). ([\#14094](https://github.com/matrix-org/synapse/issues/14094))
+- Remove the unstable identifier for [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#14106](https://github.com/matrix-org/synapse/issues/14106), [\#14146](https://github.com/matrix-org/synapse/issues/14146))
+
+
+Internal Changes
+----------------
+
+- Optimise queries used to get a user's rooms during sync. Contributed by Nick @ Beeper (@fizzadar). ([\#13991](https://github.com/matrix-org/synapse/issues/13991))
+- Update authlib from 0.15.5 to 1.1.0. ([\#14006](https://github.com/matrix-org/synapse/issues/14006))
+- Make `parse_server_name` consistent in handling invalid server names. ([\#14007](https://github.com/matrix-org/synapse/issues/14007))
+- Don't repeatedly wake up the same users for batched events. ([\#14033](https://github.com/matrix-org/synapse/issues/14033))
+- Complement test image: capture logs from nginx. ([\#14063](https://github.com/matrix-org/synapse/issues/14063))
+- Don't create noisy Sentry events when a requester drops connection to the metrics server mid-request. ([\#14072](https://github.com/matrix-org/synapse/issues/14072))
+- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14092](https://github.com/matrix-org/synapse/issues/14092))
+- Add debug logs to figure out why an event was filtered out of the client response. ([\#14095](https://github.com/matrix-org/synapse/issues/14095))
+- Indicate what endpoint came back with a JSON response we were unable to parse. ([\#14097](https://github.com/matrix-org/synapse/issues/14097))
+- Break up calls to fetch rooms for many users. Contributed by Nick @ Beeper (@fizzadar). ([\#14109](https://github.com/matrix-org/synapse/issues/14109))
+- Faster joins: prioritise the server we joined by when restarting a partial join resync. ([\#14126](https://github.com/matrix-org/synapse/issues/14126))
+- Cache the Rust build cache when building Docker images. ([\#14130](https://github.com/matrix-org/synapse/issues/14130))
+- Enable dependabot for Rust dependencies. ([\#14132](https://github.com/matrix-org/synapse/issues/14132))
+- Bump typing-extensions from 4.1.1 to 4.4.0. ([\#14134](https://github.com/matrix-org/synapse/issues/14134))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Remove unused configuration code. ([\#14142](https://github.com/matrix-org/synapse/issues/14142))
+- Prepare for the [`gotestfmt` repository move](https://github.com/GoTestTools/gotestfmt/discussions/46). ([\#14144](https://github.com/matrix-org/synapse/issues/14144))
+- Invalidate rooms for user caches on replicated event, fix sync cache race in synapse workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14155](https://github.com/matrix-org/synapse/issues/14155))
+- Enable URL previews when testing with Complement. ([\#14198](https://github.com/matrix-org/synapse/issues/14198))
+- When authenticating batched events, check for auth events in batch as well as DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214))
+- Update CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224))
+- Update dependency requirements to allow building with poetry-core 1.3.2. ([\#14217](https://github.com/matrix-org/synapse/issues/14217))
+- Rename the `cache_memory` extra to `cache-memory`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. See the sketch after this list. ([\#14221](https://github.com/matrix-org/synapse/issues/14221))
+- Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227))
+
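+For context on the extras rename above: [PEP 685](https://peps.python.org/pep-0685/) normalises extra names the same way package names are normalised, so `cache_memory` and `cache-memory` refer to the same extra under the new rules. A quick sketch using the `packaging` library:
+
+```python
+from packaging.utils import canonicalize_name
+
+# PEP 685 normalisation: lower-case, with runs of "-", "_" and "."
+# collapsed to a single "-".
+assert canonicalize_name("cache_memory") == "cache-memory"
+assert canonicalize_name("url_preview") == "url-preview"
+```
+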
+
+Synapse 1.69.0 (2022-10-17)
+===========================
+
+Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
+Server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
+
+
+No significant changes since 1.69.0rc4.
+
+
+Synapse 1.69.0rc4 (2022-10-14)
+==============================
+
+Bugfixes
+--------
+
+- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
+
+
+Synapse 1.69.0rc3 (2022-10-12)
+==============================
+
+Bugfixes
+--------
+
+- Fix an issue with Docker images causing the Rust dependencies to not be pinned correctly. Introduced in Synapse 1.68.0. ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
+- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request is not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
+- Fix an error in the background update when rotating existing notifications. Introduced in Synapse 1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))
+
+
+Internal Changes
+----------------
+
+- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))
+
+
+Synapse 1.69.0rc2 (2022-10-06)
+==============================
+
+Deprecations and Removals
+-------------------------
+
+- Deprecate the `generate_short_term_login_token` method in favor of an async `create_login_token` method in the Module API. ([\#13842](https://github.com/matrix-org/synapse/issues/13842))
+
+
+Internal Changes
+----------------
+
+- Ensure Synapse 1.69 works with upcoming database changes in Synapse 1.70. ([\#14045](https://github.com/matrix-org/synapse/issues/14045))
+- Fix a bug introduced in Synapse 1.68.0 where messages could not be sent in rooms with a non-integer `notifications` power level. ([\#14073](https://github.com/matrix-org/synapse/issues/14073))
+- Temporarily pin build-system requirements to workaround an incompatibility with poetry-core 1.3.0. This will be reverted before the v1.69.0 release proper, see [\#14079](https://github.com/matrix-org/synapse/issues/14079). ([\#14080](https://github.com/matrix-org/synapse/issues/14080))
+
+
+Synapse 1.69.0rc1 (2022-10-04)
+==============================
+
+Features
+--------
+
+- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in [`PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey), per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk. See the first sketch after this list. ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
+- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
+- Exponentially back off from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
+- Add cache invalidation across workers to module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
+- Experimental implementation of [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722), [\#13868](https://github.com/matrix-org/synapse/issues/13868))
+- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
+- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
+- Keep track of when an event pulled over federation fails its signature check, so that we can intelligently back off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
+- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
+- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
+- Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
+- Ask mail servers receiving emails from Synapse not to send automatic replies (e.g. out-of-office responses). See the second sketch after this list. ([\#13957](https://github.com/matrix-org/synapse/issues/13957))
+
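+A sketch of the new `ts` query parameter described in the first feature above, assuming the third-party `requests` library and placeholder identifiers (`user_id` is the usual application-service masquerading parameter):
+
+```python
+from urllib.parse import quote
+
+import requests
+
+AS_TOKEN = "hypothetical_as_token"  # the application service's token
+ROOM_ID = "!room:example.com"  # hypothetical room
+
+# Backdate the state event: `ts` (in milliseconds) is used as the
+# event's `origin_server_ts`.
+resp = requests.put(
+    "https://synapse.example.com/_matrix/client/v3/rooms/"
+    f"{quote(ROOM_ID, safe='')}/state/m.room.topic/",
+    params={"ts": "1600000000000", "user_id": "@bridged:example.com"},
+    headers={"Authorization": f"Bearer {AS_TOKEN}"},
+    json={"topic": "A backdated topic"},
+)
+resp.raise_for_status()
+```
+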
+
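+And a sketch of the auto-reply suppression mentioned in the last feature above. The standard mechanism is the `Auto-Submitted` header from [RFC 3834](https://www.rfc-editor.org/rfc/rfc3834), with `X-Auto-Response-Suppress` as the common Microsoft Exchange equivalent; whether Synapse sets both headers is an assumption here, not confirmed by the entry:
+
+```python
+from email.message import EmailMessage
+
+msg = EmailMessage()
+msg["From"] = "noreply@synapse.example.com"
+msg["To"] = "user@example.com"
+msg["Subject"] = "Password reset"
+# RFC 3834: mark the mail as automatically generated so that receiving
+# servers suppress out-of-office and similar automatic replies.
+msg["Auto-Submitted"] = "auto-generated"
+# Honoured by Microsoft Exchange instead of Auto-Submitted (assumption).
+msg["X-Auto-Response-Suppress"] = "All"
+msg.set_content("Follow the link below to reset your password.")
+```
+
+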
+Bugfixes
+--------
+
+- Send push notifications for invites received over federation. ([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
+- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
+- Fix a bug introduced in Synapse 1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
+- Fix access tokens being leaked to logs by the proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
+- Fix the `have_seen_event` cache not being invalidated after an event is persisted, which caused inefficiencies such as unnecessary `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
+- Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
+- Fix a bug introduced in 1.66.0 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
+- Fix a bug introduced in Synapse 1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
+- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
+- Fix a performance regression in the `get_users_in_room` database query. Introduced in Synapse 1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
+- Fix a bug introduced in Synapse 1.68.0 where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
+- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse 1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
+- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
+- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))
+
+
+Improved Documentation
+----------------------
+
+- Add `worker_main_http_uri` for the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
+- Update URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
+- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`. ([\#13836](https://github.com/matrix-org/synapse/issues/13836))
+- Fix a cross-link from the registration admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
+- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
+- Emphasize the right reasons for using `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
+- Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
+- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
+- Add some cross references to worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
+- Linkify URLs in the configuration documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
+- Announce that legacy metric names are deprecated, will be turned off by default in Synapse v1.71.0 and removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))
+
+
+Internal Changes
+----------------
+
+- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
+- Port push rules to using Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
+- Optimise the "get rooms for user" calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
+- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
+- Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
+- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
+- Faster remote room joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
+- Carry IdP Session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Raise an issue if Complement fails with the latest dependencies. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
+- Correct the comments in the complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
+- Create a new snapshot of the database schema. ([\#13873](https://github.com/matrix-org/synapse/issues/13873))
+- Faster room joins: Send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
+- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
+- Only pull the relevant backfill points from the database (based on the current depth and limit), instead of all of them, every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
+- Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
+- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
+- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
+- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
+- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
+- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
+- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
+- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
+- Refactor language in user directory `_track_user_joined_room` code to make it more clear that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
+- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
+- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
+- Enable update notifications from GitHub's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
+- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
+- Fix type annotations to be compatible with new annotations in development versions of twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
+- Clear out stale entries in `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
+- Bump versions of GitHub actions. ([\#13978](https://github.com/matrix-org/synapse/issues/13978), [\#13979](https://github.com/matrix-org/synapse/issues/13979), [\#13980](https://github.com/matrix-org/synapse/issues/13980), [\#13982](https://github.com/matrix-org/synapse/issues/13982), [\#14015](https://github.com/matrix-org/synapse/issues/14015), [\#14019](https://github.com/matrix-org/synapse/issues/14019), [\#14022](https://github.com/matrix-org/synapse/issues/14022), [\#14023](https://github.com/matrix-org/synapse/issues/14023))
+
+
+Synapse 1.68.0 (2022-09-27)
+===========================
+
+Please note that Synapse will now refuse to start if configured to use a version of SQLite older than 3.27.
+
+In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
+Those using packages will not be affected. On most platforms, installing with `pip install matrix-synapse` will not be affected.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1680).
+
+Bugfixes
+--------
+
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+
+
+Synapse 1.68.0rc2 (2022-09-23)
+==============================
+
+Bugfixes
+--------
+
+- Fix building from packaged sdist. Broken in v1.68.0rc1. ([\#13866](https://github.com/matrix-org/synapse/issues/13866))
Internal Changes
@@ -19,7 +512,7 @@ Internal Changes
- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
- Lower minimum supported rustc version to 1.58.1. ([\#13857](https://github.com/matrix-org/synapse/issues/13857))
-- Lock Rust dependencies versions. ([\#13858](https://github.com/matrix-org/synapse/issues/13858))
+- Lock Rust dependencies' versions. ([\#13858](https://github.com/matrix-org/synapse/issues/13858))
Synapse 1.68.0rc1 (2022-09-20)
@@ -40,7 +533,7 @@ Features
Bugfixes
--------
-- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
+- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
@@ -54,10 +547,10 @@ Improved Documentation
----------------------
- Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480))
-- Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse v1.22.0. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
+- Fix a mistake in the config manual introduced in Synapse 1.22.0: the `event_cache_size` _is_ scaled by `caches.global_factor`. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727))
- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728))
-- Add docs for common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
+- Add docs for the common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794))
@@ -98,7 +591,7 @@ Internal Changes
- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798))
- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802))
- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808))
-- Fix Docker build when Rust .so has been build locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
+- Fix Docker build when Rust .so has been built locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819))
- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822))
- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827))
@@ -152,7 +645,7 @@ Bugfixes
- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658))
- Fix bug where we wedge media plugins if clients disconnect early. Introduced in v1.22.0. ([\#13660](https://github.com/matrix-org/synapse/issues/13660))
- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. ([\#13683](https://github.com/matrix-org/synapse/issues/13683))
-- Fix a bug introduced in Synapse v1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))
+- Fix a bug introduced in Synapse 1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))
Updates to the Docker image
@@ -179,7 +672,7 @@ Deprecations and Removals
- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241))
- Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569))
-- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse v1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
+- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse 1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692))
@@ -223,7 +716,7 @@ was originally planned for Synapse 1.64, but was later deferred until now. See
the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
Deployments with multiple workers should note that the direct TCP replication
-configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse
+configuration was deprecated in Synapse 1.18.0 and will be removed in Synapse
v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners)
type (not to be confused with the `replication` resource on the `http` listener
type) and the `worker_replication_port` config option will be removed.
@@ -353,7 +846,7 @@ Bugfixes
--------
- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470))
-- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
+- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374))
- Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392))
- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408))
@@ -418,7 +911,7 @@ No significant changes since 1.64.0rc2.
Deprecation Warning
-------------------
-Synapse v1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
+Synapse 1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf.
[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
@@ -427,7 +920,7 @@ If you require your homeserver to verify e-mail addresses or to support password
Synapse 1.64.0rc2 (2022-07-29)
==============================
-This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse v1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
+This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse 1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
Synapse 1.64.0rc1 (2022-07-26)
@@ -676,7 +1169,7 @@ Bugfixes
- Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. ([\#12973](https://github.com/matrix-org/synapse/issues/12973))
- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979))
- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced
- in Synapse v1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
+ in Synapse 1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018))
- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041))
- Fix some inconsistencies in the event authentication code. ([\#13087](https://github.com/matrix-org/synapse/issues/13087), [\#13088](https://github.com/matrix-org/synapse/issues/13088))
@@ -1269,7 +1762,7 @@ If you have already upgraded to Synapse 1.57.0 without problem, then you have no
Updates to the Docker image
---------------------------
-- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse v1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512))
+- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse 1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512))
Synapse 1.57.0 (2022-04-19)
@@ -1521,10 +2014,10 @@ Features
Bugfixes
--------
-- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
+- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse 1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189))
- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157))
-- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
+- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse 1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
@@ -1909,15 +2402,15 @@ Bugfixes
- Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events
received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530))
- Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587))
-- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse v1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
+- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse 1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
- Fix bundled aggregations not being included in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791))
- Fix the `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667))
- Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669))
-- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse v1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
-- Fix a bug introduced in Synapse v1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
+- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse 1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
+- Fix a bug introduced in Synapse 1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
- Make the 'List Rooms' Admin API sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
- Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775))
-- Fix a bug introduced in Synapse v1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
+- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
Improved Documentation
@@ -1997,8 +2490,8 @@ This release candidate fixes a federation-breaking regression introduced in Syna
Bugfixes
--------
-- Fix a bug introduced in Synapse v1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
-- Fix a bug introduced in Synapse v1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))
+- Fix a bug introduced in Synapse 1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
+- Fix a bug introduced in Synapse 1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))
Improved Documentation
diff --git a/Cargo.lock b/Cargo.lock
index b952b6b4c0..8a8099bc6d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.65"
+version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
+checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"
[[package]]
name = "arc-swap"
@@ -37,9 +37,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "blake2"
-version = "0.10.4"
+version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
+checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e"
dependencies = [
"digest",
]
@@ -104,9 +104,9 @@ checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
[[package]]
name = "itoa"
-version = "1.0.3"
+version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754"
+checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
[[package]]
name = "lazy_static"
@@ -116,15 +116,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
-version = "0.2.132"
+version = "0.2.135"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5"
+checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
[[package]]
name = "lock_api"
-version = "0.4.7"
+version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
+checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
@@ -156,9 +156,9 @@ dependencies = [
[[package]]
name = "once_cell"
-version = "1.13.1"
+version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e"
+checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
[[package]]
name = "parking_lot"
@@ -185,18 +185,18 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.43"
+version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab"
+checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12f72538a0230791398a0986a6518ebd88abc3fded89007b506ed072acc831e1"
+checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
dependencies = [
"anyhow",
"cfg-if",
@@ -212,9 +212,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc4cf18c20f4f09995f3554e6bcf9b09bd5e4d6b67c562fdfaafa644526ba479"
+checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
dependencies = [
"once_cell",
"target-lexicon",
@@ -222,9 +222,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a41877f28d8ebd600b6aa21a17b40c3b0fc4dfe73a27b6e81ab3d895e401b0e9"
+checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
dependencies = [
"libc",
"pyo3-build-config",
@@ -243,9 +243,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e81c8d4bcc2f216dc1b665412df35e46d12ee8d3d046b381aad05f1fcf30547"
+checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -255,9 +255,9 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85752a767ee19399a78272cc2ab625cd7d373b2e112b4b13db28de71fa892784"
+checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
"proc-macro2",
"quote",
@@ -294,9 +294,9 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.6.0"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
+checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
dependencies = [
"aho-corasick",
"memchr",
@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.145"
+version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
+checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.145"
+version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
+checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
dependencies = [
"proc-macro2",
"quote",
@@ -343,9 +343,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.85"
+version = "1.0.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44"
+checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45"
dependencies = [
"itoa",
"ryu",
@@ -354,9 +354,9 @@ dependencies = [
[[package]]
name = "smallvec"
-version = "1.9.0"
+version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
+checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
[[package]]
name = "subtle"
@@ -366,9 +366,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
-version = "1.0.99"
+version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13"
+checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
dependencies = [
"proc-macro2",
"quote",
@@ -406,9 +406,9 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "unicode-ident"
-version = "1.0.3"
+version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4f5b37a154999a8f3f98cc23a628d850e154479cd94decf3414696e12e31aaf"
+checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
[[package]]
name = "unindent"
diff --git a/build_rust.py b/build_rust.py
index 5c5e557ee8..662474dcb4 100644
--- a/build_rust.py
+++ b/build_rust.py
@@ -15,6 +15,9 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
path=cargo_toml_path,
binding=Binding.PyO3,
py_limited_api=True,
+ # We force always building in release mode, as we can't tell the
+ # difference between using `poetry` in development vs production.
+ debug=False,
)
setup_kwargs.setdefault("rust_extensions", []).append(extension)
setup_kwargs["zip_safe"] = False
diff --git a/changelog.d/13635.feature b/changelog.d/13635.feature
deleted file mode 100644
index d86bf7ed80..0000000000
--- a/changelog.d/13635.feature
+++ /dev/null
@@ -1 +0,0 @@
-Exponentially backoff from backfilling the same event over and over.
diff --git a/changelog.d/13667.feature b/changelog.d/13667.feature
deleted file mode 100644
index a0b3cfe18c..0000000000
--- a/changelog.d/13667.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add cache invalidation across workers to module API.
diff --git a/changelog.d/13722.feature b/changelog.d/13722.feature
deleted file mode 100644
index 588d143c0f..0000000000
--- a/changelog.d/13722.feature
+++ /dev/null
@@ -1 +0,0 @@
-Experimental implementation of MSC3882 to allow an existing device/session to generate a login token for use on a new device/session.
diff --git a/changelog.d/13768.misc b/changelog.d/13768.misc
deleted file mode 100644
index 28bddb7059..0000000000
--- a/changelog.d/13768.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port push rules to using Rust.
diff --git a/changelog.d/13772.doc b/changelog.d/13772.doc
deleted file mode 100644
index 3398ff3765..0000000000
--- a/changelog.d/13772.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add `worker_main_http_uri` for the worker generator bash script.
diff --git a/changelog.d/13782.feature b/changelog.d/13782.feature
deleted file mode 100644
index d0cb902dff..0000000000
--- a/changelog.d/13782.feature
+++ /dev/null
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
diff --git a/changelog.d/13792.misc b/changelog.d/13792.misc
deleted file mode 100644
index 36ac91400a..0000000000
--- a/changelog.d/13792.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update the script which makes full schema dumps.
diff --git a/changelog.d/13796.misc b/changelog.d/13796.misc
deleted file mode 100644
index 9ed1662394..0000000000
--- a/changelog.d/13796.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/changelog.d/13799.feature b/changelog.d/13799.feature
deleted file mode 100644
index 6c8e5cffe2..0000000000
--- a/changelog.d/13799.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
diff --git a/changelog.d/13809.misc b/changelog.d/13809.misc
deleted file mode 100644
index c2dacca2f2..0000000000
--- a/changelog.d/13809.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve the `synapse.api.auth.Auth` mock used in unit tests.
diff --git a/changelog.d/13818.doc b/changelog.d/13818.doc
deleted file mode 100644
index 16b31f5071..0000000000
--- a/changelog.d/13818.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update URL for the NixOS module for Synapse.
diff --git a/changelog.d/13823.misc b/changelog.d/13823.misc
deleted file mode 100644
index 527d79f4b2..0000000000
--- a/changelog.d/13823.misc
+++ /dev/null
@@ -1 +0,0 @@
-Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server.
\ No newline at end of file
diff --git a/changelog.d/13830.bugfix b/changelog.d/13830.bugfix
deleted file mode 100644
index e6215806cd..0000000000
--- a/changelog.d/13830.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join.
diff --git a/changelog.d/13831.feature b/changelog.d/13831.feature
deleted file mode 100644
index 6c8e5cffe2..0000000000
--- a/changelog.d/13831.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
diff --git a/changelog.d/13832.feature b/changelog.d/13832.feature
deleted file mode 100644
index 1dc1d66efe..0000000000
--- a/changelog.d/13832.feature
+++ /dev/null
@@ -1 +0,0 @@
-Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint.
diff --git a/changelog.d/13836.doc b/changelog.d/13836.doc
deleted file mode 100644
index f2edab00f4..0000000000
--- a/changelog.d/13836.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name` not `displayname`.
diff --git a/changelog.d/13840.bugfix b/changelog.d/13840.bugfix
deleted file mode 100644
index 0f014439a8..0000000000
--- a/changelog.d/13840.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward.
diff --git a/changelog.d/13843.removal b/changelog.d/13843.removal
deleted file mode 100644
index f6caaa8895..0000000000
--- a/changelog.d/13843.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0.
diff --git a/changelog.d/13850.misc b/changelog.d/13850.misc
deleted file mode 100644
index a973118aaf..0000000000
--- a/changelog.d/13850.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix the release script not publishing binary wheels.
\ No newline at end of file
diff --git a/changelog.d/13855.bugfix b/changelog.d/13855.bugfix
deleted file mode 100644
index 5ea8539bd8..0000000000
--- a/changelog.d/13855.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix access token leak to logs from proxy agent.
diff --git a/changelog.d/13859.misc b/changelog.d/13859.misc
deleted file mode 100644
index 2780a4af3c..0000000000
--- a/changelog.d/13859.misc
+++ /dev/null
@@ -1 +0,0 @@
-Raise issue if complement fails with latest deps.
diff --git a/changelog.d/13860.feature b/changelog.d/13860.feature
deleted file mode 100644
index 6c8e5cffe2..0000000000
--- a/changelog.d/13860.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
diff --git a/changelog.d/13870.doc b/changelog.d/13870.doc
deleted file mode 100644
index 2598bc270c..0000000000
--- a/changelog.d/13870.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a cross-link from the register admin API to the `registration_shared_secret` configuration documentation.
diff --git a/changelog.d/13873.misc b/changelog.d/13873.misc
deleted file mode 100644
index f4342482f0..0000000000
--- a/changelog.d/13873.misc
+++ /dev/null
@@ -1 +0,0 @@
-Create a new snapshot of the database schema.
diff --git a/changelog.d/13874.misc b/changelog.d/13874.misc
deleted file mode 100644
index 499e488c35..0000000000
--- a/changelog.d/13874.misc
+++ /dev/null
@@ -1 +0,0 @@
-Faster room joins: Send device list updates to most servers in rooms with partial state.
diff --git a/changelog.d/13876.misc b/changelog.d/13876.misc
deleted file mode 100644
index ef37100115..0000000000
--- a/changelog.d/13876.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console.
\ No newline at end of file
diff --git a/changelog.d/13888.misc b/changelog.d/13888.misc
deleted file mode 100644
index 4ffd9bcede..0000000000
--- a/changelog.d/13888.misc
+++ /dev/null
@@ -1 +0,0 @@
-Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests.
diff --git a/changelog.d/13889.misc b/changelog.d/13889.misc
deleted file mode 100644
index 28bddb7059..0000000000
--- a/changelog.d/13889.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port push rules to using Rust.
diff --git a/changelog.d/13893.feature b/changelog.d/13893.feature
deleted file mode 100644
index d0cb902dff..0000000000
--- a/changelog.d/13893.feature
+++ /dev/null
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
diff --git a/changelog.d/13905.misc b/changelog.d/13905.misc
deleted file mode 100644
index efe3bed5f1..0000000000
--- a/changelog.d/13905.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix mypy errors with canonicaljson 1.6.3.
diff --git a/changelog.d/13909.bugfix b/changelog.d/13909.bugfix
deleted file mode 100644
index 883dd72919..0000000000
--- a/changelog.d/13909.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix packaging to include `Cargo.lock` in `sdist`.
diff --git a/changelog.d/13911.doc b/changelog.d/13911.doc
deleted file mode 100644
index 7cc3206501..0000000000
--- a/changelog.d/13911.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed.
\ No newline at end of file
diff --git a/changelog.d/14376.misc b/changelog.d/14376.misc
new file mode 100644
index 0000000000..2ca326fea6
--- /dev/null
+++ b/changelog.d/14376.misc
@@ -0,0 +1 @@
+Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
diff --git a/changelog.d/14393.bugfix b/changelog.d/14393.bugfix
new file mode 100644
index 0000000000..97177bc62f
--- /dev/null
+++ b/changelog.d/14393.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in 1.58.0 where a user with presence state 'org.matrix.msc3026.busy' would mistakenly be set to 'online' when calling `/sync` or `/events` on a worker process.
\ No newline at end of file
diff --git a/changelog.d/14400.misc b/changelog.d/14400.misc
new file mode 100644
index 0000000000..6e025329c4
--- /dev/null
+++ b/changelog.d/14400.misc
@@ -0,0 +1 @@
+Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.
diff --git a/changelog.d/14403.misc b/changelog.d/14403.misc
new file mode 100644
index 0000000000..ff28a2712a
--- /dev/null
+++ b/changelog.d/14403.misc
@@ -0,0 +1 @@
+Faster joins: do not wait for full state when creating events to send.
diff --git a/changelog.d/14412.misc b/changelog.d/14412.misc
new file mode 100644
index 0000000000..4da061d461
--- /dev/null
+++ b/changelog.d/14412.misc
@@ -0,0 +1 @@
+Remove duplicated type information from type hints.
diff --git a/changelog.d/14449.misc b/changelog.d/14449.misc
new file mode 100644
index 0000000000..320c0b6fae
--- /dev/null
+++ b/changelog.d/14449.misc
@@ -0,0 +1 @@
+Fix type logic in TCP replication code that prevented correctly ignoring blank commands.
\ No newline at end of file
diff --git a/changelog.d/14452.misc b/changelog.d/14452.misc
new file mode 100644
index 0000000000..cb190c0823
--- /dev/null
+++ b/changelog.d/14452.misc
@@ -0,0 +1 @@
+Enable mypy's [`strict_equality` check](https://mypy.readthedocs.io/en/stable/command_line.html#cmdoption-mypy-strict-equality) by default.
\ No newline at end of file
diff --git a/changelog.d/14468.misc b/changelog.d/14468.misc
new file mode 100644
index 0000000000..2ca326fea6
--- /dev/null
+++ b/changelog.d/14468.misc
@@ -0,0 +1 @@
+Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
diff --git a/changelog.d/14476.misc b/changelog.d/14476.misc
new file mode 100644
index 0000000000..6e025329c4
--- /dev/null
+++ b/changelog.d/14476.misc
@@ -0,0 +1 @@
+Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.
diff --git a/changelog.d/14487.misc b/changelog.d/14487.misc
new file mode 100644
index 0000000000..f6b47a1d8e
--- /dev/null
+++ b/changelog.d/14487.misc
@@ -0,0 +1 @@
+Reduce default third party invite rate limit to 216 invites per day.
diff --git a/changelog.d/14490.misc b/changelog.d/14490.misc
new file mode 100644
index 0000000000..c0a4daa885
--- /dev/null
+++ b/changelog.d/14490.misc
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 0.9 where it would fail to fetch server keys whose IDs contain a forward slash.
diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md
index 4dbfee2853..bdd3dd32e0 100644
--- a/contrib/docker_compose_workers/README.md
+++ b/contrib/docker_compose_workers/README.md
@@ -94,20 +94,6 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
```
-### Add Workers to `instance_map`
-
-Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:
-
-```yaml
-instance_map:
- synapse-generic-worker-1: # The worker_name setting in your worker configuration file
- host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
- port: 8034 # The port assigned to the replication listener in your worker config file
- synapse-federation-sender-1:
- host: synapse-federation-sender-1
- port: 8034
-```
-
### Configure Federation Senders
This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
@@ -122,4 +108,4 @@ federation_sender_instances:
## Other Worker types
-Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
\ No newline at end of file
+Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
diff --git a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
index 5ba42a92d2..5b40664d67 100644
--- a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
+++ b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
@@ -5,10 +5,4 @@ worker_name: synapse-federation-sender-1
worker_replication_host: synapse
worker_replication_http_port: 9093
-worker_listeners:
- - type: http
- port: 8034
- resources:
- - names: [replication]
-
worker_log_config: /data/federation_sender.log.config
diff --git a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
index 694584105a..09e55df9f3 100644
--- a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
+++ b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
@@ -6,10 +6,6 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- - type: http
- port: 8034
- resources:
- - names: [replication]
- type: http
port: 8081
x_forwarded: true
diff --git a/debian/changelog b/debian/changelog
index d6c7639775..57d7b18078 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,9 +1,88 @@
-matrix-synapse-py3 (1.69.0~rc1+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.72.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 16 Nov 2022 15:10:59 +0000
+
+matrix-synapse-py3 (1.71.0) stable; urgency=medium
+
+ * New Synapse release 1.71.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Nov 2022 10:38:10 +0000
+
+matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.71.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 04 Nov 2022 12:00:33 +0000
+
+matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.71.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Nov 2022 12:10:17 +0000
+
+matrix-synapse-py3 (1.70.1) stable; urgency=medium
+
+ * New Synapse release 1.70.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 28 Oct 2022 12:10:21 +0100
+
+matrix-synapse-py3 (1.70.0) stable; urgency=medium
+
+ * New Synapse release 1.70.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Oct 2022 11:11:50 +0100
+
+matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.70.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Oct 2022 10:59:47 +0100
+
+matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.70.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 19 Oct 2022 14:11:57 +0100
+
+matrix-synapse-py3 (1.69.0) stable; urgency=medium
+
+ * New Synapse release 1.69.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Oct 2022 11:31:03 +0100
+
+matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium
+
+ * New Synapse release 1.69.0rc4.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 14 Oct 2022 15:04:47 +0100
+
+matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium
+
+ * New Synapse release 1.69.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 12 Oct 2022 13:24:04 +0100
+
+matrix-synapse-py3 (1.69.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.69.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 06 Oct 2022 14:45:00 +0100
+
+matrix-synapse-py3 (1.69.0~rc1) stable; urgency=medium
* The man page for the hash_password script has been updated to reflect
the correct default value of 'bcrypt_rounds'.
+ * New Synapse release 1.69.0rc1.
- -- Synapse Packaging team <packages@matrix.org>  Mon, 26 Sep 2022 18:05:09 +0100
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Oct 2022 11:17:16 +0100
+
+matrix-synapse-py3 (1.68.0) stable; urgency=medium
+
+ * New Synapse release 1.68.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 27 Sep 2022 12:02:09 +0100
matrix-synapse-py3 (1.68.0~rc2) stable; urgency=medium
diff --git a/debian/hash_password.1 b/debian/hash_password.1
index d64b91e7c8..39fa3ffcbf 100644
--- a/debian/hash_password.1
+++ b/debian/hash_password.1
@@ -10,7 +10,7 @@
.P
\fBhash_password\fR takes a password as a parameter, either on the command line or from \fBSTDIN\fR if not supplied\.
.P
-It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
+It accepts a YAML file which can be used to specify parameters such as the number of rounds for bcrypt and a password_config section containing the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB12\fR\.
.P
The hashed password is written on the \fBSTDOUT\fR\.
.SH "FILES"
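For context on what the `bcrypt_rounds` work factor means, below is a minimal sketch using the `bcrypt` Python package. The pepper handling is a simplified version of what `hash_password` actually does, and the password and pepper values are made up for the example.

```python
# Illustrative sketch of a bcrypt work factor of 12 (the new documented
# default). The pepper handling simplifies hash_password's behaviour;
# the password and pepper values here are invented.
import bcrypt

password = "correct horse battery staple"
pepper = ""  # optionally taken from the password_config section

hashed = bcrypt.hashpw(
    (password + pepper).encode("utf-8"),
    bcrypt.gensalt(rounds=12),  # each +1 roughly doubles the hashing cost
)

# The rounds value is embedded in the hash itself, so verification
# needs no extra configuration.
assert bcrypt.checkpw((password + pepper).encode("utf-8"), hashed)
print(hashed.decode("ascii"))
```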
diff --git a/docker/Dockerfile b/docker/Dockerfile
index b20951d4cf..7f8756e8a4 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -106,7 +106,13 @@ ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
-RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
@@ -121,7 +127,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
-COPY pyproject.toml README.rst build_rust.py /synapse/
+COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/
# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
@@ -129,7 +135,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Install the synapse package itself.
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
-RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
+RUN --mount=type=cache,target=/synapse/target,sharing=locked \
+ --mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
+ if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
else \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index ca3a259081..73165f6f85 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -92,7 +92,7 @@ ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
-RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 003a1cc3bf..0c2d4f3047 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -40,7 +40,11 @@ FROM matrixdotorg/synapse:$SYNAPSE_VERSION
COPY --from=deps_base /etc/nginx /etc/nginx
RUN rm /etc/nginx/sites-enabled/default
RUN mkdir /var/log/nginx /var/lib/nginx
- RUN chown www-data /var/log/nginx /var/lib/nginx
+ RUN chown www-data /var/lib/nginx
+
+ # have nginx log to stderr/out
+ RUN ln -sf /dev/stdout /var/log/nginx/access.log
+ RUN ln -sf /dev/stderr /var/log/nginx/error.log
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
diff --git a/docker/README.md b/docker/README.md
index 017f046c58..eda3221c23 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -241,4 +241,4 @@ healthcheck:
Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
-[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).
+[Admin FAQ](https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html#help-synapse-is-slow-and-eats-all-my-ramcpu).
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index 0e13722d1c..c0935c99a8 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -8,19 +8,15 @@
ARG SYNAPSE_VERSION=latest
-# first of all, we create a base image with a postgres server and database,
-# which we can copy into the target image. For repeated rebuilds, this is
-# much faster than apt installing postgres each time.
-#
-# This trick only works because (a) the Synapse image happens to have all the
-# shared libraries that postgres wants, (b) we use a postgres image based on
-# the same debian version as Synapse's docker image (so the versions of the
-# shared libraries match).
-
-# now build the final image, based on the Synapse image.
-
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
- # copy the postgres installation over from the image we built above
+  # First of all, we copy the postgres server over from the official postgres
+  # image, since for repeated rebuilds this is much faster than apt installing
+  # postgres each time.
+
+ # This trick only works because (a) the Synapse image happens to have all the
+ # shared libraries that postgres wants, (b) we use a postgres image based on
+ # the same debian version as Synapse's docker image (so the versions of the
+ # shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
@@ -28,7 +24,7 @@ FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
- # initialise the database cluster in /var/lib/postgresql
+  # We also initialize the database at build time, rather than at runtime, so that it's faster to spin up the image.
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password
# Configure a password and create a database for Synapse
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
index cc6482f763..49d79745b0 100755
--- a/docker/complement/conf/start_for_complement.sh
+++ b/docker/complement/conf/start_for_complement.sh
@@ -45,7 +45,12 @@ esac
if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# Specify the workers to test with
- export SYNAPSE_WORKER_TYPES="\
+ # Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
+ # utilizing WORKERS=1 for backwards compatibility.
+ # -n True if the length of string is non-zero.
+ # -z True if the length of string is zero.
+ if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
+ export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
@@ -57,9 +62,12 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
federation_reader, \
federation_sender, \
synchrotron, \
+ client_reader, \
appservice, \
pusher"
+ fi
+ log "Workers requested: $SYNAPSE_WORKER_TYPES"
# Improve startup times by using a launcher based on fork()
export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 9e554a865e..883a87159c 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -12,6 +12,8 @@ trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
+url_preview_enabled: true
+url_preview_ip_range_blacklist: []
## Registration ##
@@ -90,8 +92,6 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
- # Enable spaces support
- spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
@@ -102,6 +102,8 @@ experimental_features:
{% endif %}
# Enable jump to date endpoint
msc3030_enabled: true
+ # Filtering /messages by relation type.
+ msc3874_enabled: true
server_notices:
system_mxid_localpart: _server
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 51583dc13d..c1e1544536 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -20,7 +20,7 @@
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-# below. Leave empty for no workers, or set to '*' for all possible workers.
+# below. Leave empty for no workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -39,6 +39,7 @@
# continue to work if so.
import os
+import platform
import subprocess
import sys
from pathlib import Path
@@ -49,13 +50,18 @@ from jinja2 import Environment, FileSystemLoader
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
-
+# Workers with exposed endpoints need either a "client", "federation", or "media"
+# entry in listener_resources:
+# serving /_matrix/client needs a "client" listener,
+# serving /_matrix/federation needs a "federation" listener, and
+# serving /_matrix/media and related needs a "media" listener.
+# Stream writers require "client" and "replication" listeners because they
+# are attached to the master process via the instance_map and expose client
+# endpoints.
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"pusher": {
- "app": "synapse.app.pusher",
+ "app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- "shared_extra_conf": {"start_pushers": False},
+ "shared_extra_conf": {},
"worker_extra_conf": "",
},
"user_dir": {
@@ -78,7 +84,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_synapse/admin/v1/media/.*$",
"^/_synapse/admin/v1/quarantine_media/.*$",
],
- "shared_extra_conf": {"enable_media_repo": False},
+ # The first configured media worker will run the media background jobs
+ "shared_extra_conf": {
+ "enable_media_repo": False,
+ "media_instance_running_background_jobs": "media_repository1",
+ },
"worker_extra_conf": "enable_media_repo: true",
},
"appservice": {
@@ -89,10 +99,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"federation_sender": {
- "app": "synapse.app.federation_sender",
+ "app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- "shared_extra_conf": {"send_federation": False},
+ "shared_extra_conf": {},
"worker_extra_conf": "",
},
"synchrotron": {
@@ -107,6 +117,34 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"shared_extra_conf": {},
"worker_extra_conf": "",
},
+ "client_reader": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
+ "^/_matrix/client/v1/rooms/.*/hierarchy$",
+ "^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
+ "^/_matrix/client/v1/rooms/.*/threads$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
+ "^/_matrix/client/versions$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
+ "^/_matrix/client/(r0|v3|unstable)/register$",
+ "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
+ "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
"federation_reader": {
"app": "synapse.app.generic_worker",
"listener_resources": ["federation"],
@@ -171,14 +209,54 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"frontend_proxy": {
- "app": "synapse.app.frontend_proxy",
+ "app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
"shared_extra_conf": {},
- "worker_extra_conf": (
- "worker_main_http_uri: http://127.0.0.1:%d"
- % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
- ),
+ "worker_extra_conf": "",
+ },
+ "account_data": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(r0|v3|unstable)/.*/tags",
+ "^/_matrix/client/(r0|v3|unstable)/.*/account_data",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "presence": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "receipts": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
+ "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "to_device": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "typing": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
},
}
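To make the `endpoint_patterns` mechanics concrete, here is a standalone sketch (the helper function is not part of the script, and only two sample patterns are shown) of checking which pattern, if any, claims a request path; the generated nginx config routes requests with roughly this matching.

```python
# Standalone sketch (not part of configure_workers_and_start.py): find the
# first endpoint_patterns regex that claims a given request path.
import re
from typing import List, Optional

CLIENT_READER_PATTERNS: List[str] = [
    "^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
    "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
]


def claiming_pattern(path: str, patterns: List[str]) -> Optional[str]:
    """Return the first pattern that matches the request path, if any."""
    for pattern in patterns:
        if re.match(pattern, path):
            return pattern
    return None


print(claiming_pattern("/_matrix/client/v3/login", CLIENT_READER_PATTERNS))
# -> ^/_matrix/client/(api/v1|r0|v3|unstable)/login$
```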
@@ -201,24 +279,19 @@ upstream {upstream_worker_type} {{
# Utility functions
def log(txt: str) -> None:
- """Log something to the stdout.
-
- Args:
- txt: The text to log.
- """
print(txt)
def error(txt: str) -> NoReturn:
- """Log something and exit with an error code.
-
- Args:
- txt: The text to log in error.
- """
- log(txt)
+ print(txt, file=sys.stderr)
sys.exit(2)
+def flush_buffers() -> None:
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+
def convert(src: str, dst: str, **template_vars: object) -> None:
"""Generate a file from a template
@@ -247,14 +320,14 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
outfile.write(rendered)
-def add_sharding_to_shared_config(
+def add_worker_roles_to_shared_config(
shared_config: dict,
worker_type: str,
worker_name: str,
worker_port: int,
) -> None:
"""Given a dictionary representing a config file shared across all workers,
- append sharded worker information to it for the current worker_type instance.
+ append appropriate worker information to it for the current worker_type instance.
Args:
shared_config: The config dict that all worker instances share (after being converted to YAML)
@@ -285,9 +358,19 @@ def add_sharding_to_shared_config(
"port": worker_port,
}
- elif worker_type == "media_repository":
- # The first configured media worker will run the media background jobs
- shared_config.setdefault("media_instance_running_background_jobs", worker_name)
+ elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
+ # Update the list of stream writers
+        # It's convenient that the name of the worker type is the same as the stream it writes to
+ shared_config.setdefault("stream_writers", {}).setdefault(
+ worker_type, []
+ ).append(worker_name)
+
+ # Map of stream writer instance names to host/ports combos
+ # For now, all stream writers need http replication ports
+ instance_map[worker_name] = {
+ "host": "localhost",
+ "port": worker_port,
+ }
def generate_base_homeserver_config() -> None:
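For a concrete picture of what `add_worker_roles_to_shared_config` produces for stream writer workers, here is a self-contained sketch. The worker names and ports are invented, and `instance_map` is shown as a local dict rather than the script's shared-config entry.

```python
# Self-contained sketch of the stream-writer bookkeeping above. Worker names
# and ports are invented for the example.
from typing import Any, Dict

shared_config: Dict[str, Any] = {}
instance_map: Dict[str, Any] = {}

for worker_name, worker_type, worker_port in [
    ("typing1", "typing", 18020),
    ("to_device1", "to_device", 18021),
]:
    # The worker type name doubles as the name of the stream it writes to.
    shared_config.setdefault("stream_writers", {}).setdefault(
        worker_type, []
    ).append(worker_name)

    # Every stream writer needs an HTTP replication port in the instance map.
    instance_map[worker_name] = {"host": "localhost", "port": worker_port}

assert shared_config == {
    "stream_writers": {"typing": ["typing1"], "to_device": ["to_device1"]}
}
assert instance_map == {
    "typing1": {"host": "localhost", "port": 18020},
    "to_device1": {"host": "localhost", "port": 18021},
}
```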
@@ -299,7 +382,7 @@ def generate_base_homeserver_config() -> None:
# start.py already does this for us, so just call that.
    # note that this script is also copied in by the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
- subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
+ subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
def generate_worker_files(
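A note on the `subprocess.check_output` to `subprocess.run(..., check=True)` swaps here and in `docker/start.py` below: both raise `CalledProcessError` on a non-zero exit, but `check_output` captures the child's stdout, so its logs never reach the container output. A small sketch of the difference:

```python
# Sketch of the behavioural difference behind the check_output -> run swap.
import subprocess

# run(): the child's output goes straight to our stdout/stderr, so it shows
# up in `docker logs`; check=True still raises CalledProcessError on failure.
subprocess.run(["echo", "visible in the container logs"], check=True)

# check_output(): the child's stdout is captured and returned instead of
# being displayed, which silently hides the output of chatty children.
captured = subprocess.check_output(["echo", "hidden unless printed"])
print(captured.decode("utf-8"), end="")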
@@ -373,8 +456,8 @@ def generate_worker_files(
# No workers, just the main process
worker_types = []
else:
- # Split type names by comma
- worker_types = worker_types_env.split(",")
+ # Split type names by comma, ignoring whitespace.
+ worker_types = [x.strip() for x in worker_types_env.split(",")]
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -393,14 +476,11 @@ def generate_worker_files(
# For each worker type specified by the user, create config values
for worker_type in worker_types:
- worker_type = worker_type.strip()
-
worker_config = WORKERS_CONFIG.get(worker_type)
if worker_config:
worker_config = worker_config.copy()
else:
- log(worker_type + " is an unknown worker type! It will be ignored")
- continue
+ error(worker_type + " is an unknown worker type! Please fix!")
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
@@ -419,11 +499,11 @@ def generate_worker_files(
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
- if worker_type_total_count > 1:
- # Update the shared config with sharding-related options if necessary
- add_sharding_to_shared_config(
- shared_config, worker_type, worker_name, worker_port
- )
+
+ # Update the shared config with sharding-related options if necessary
+ add_worker_roles_to_shared_config(
+ shared_config, worker_type, worker_name, worker_port
+ )
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
@@ -604,14 +684,24 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
with open(mark_filepath, "w") as f:
f.write("")
+ # Lifted right out of start.py
+ jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
+
+ if os.path.isfile(jemallocpath):
+ environ["LD_PRELOAD"] = jemallocpath
+ else:
+ log("Could not find %s, will not use" % (jemallocpath,))
+
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
log("Starting supervisord")
- os.execl(
+ flush_buffers()
+ os.execle(
"/usr/local/bin/supervisord",
"supervisord",
"-c",
"/etc/supervisor/supervisord.conf",
+ environ,
)
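The `os.execl` to `os.execle` change matters because of the `LD_PRELOAD` tweak just above: the `exec*` variants without an `e` suffix hand the current `os.environ` to the new process image, so a modified environment mapping has to be passed explicitly. A sketch of the call shape, with an illustrative preload path:

```python
# Sketch of os.execle's calling convention: the environment mapping is the
# final positional argument, after the program arguments. On success the call
# never returns; the current process image is replaced.
import os

env = dict(os.environ)
env["LD_PRELOAD"] = "/usr/lib/x86_64-linux-gnu/libjemalloc.so.2"  # illustrative

os.execle("/bin/echo", "echo", "running with the modified environment", env)
```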
diff --git a/docker/start.py b/docker/start.py
index 5a98dce551..ebcc599f04 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -13,14 +13,19 @@ import jinja2
# Utility functions
def log(txt: str) -> None:
- print(txt, file=sys.stderr)
+ print(txt)
def error(txt: str) -> NoReturn:
- log(txt)
+ print(txt, file=sys.stderr)
sys.exit(2)
+def flush_buffers() -> None:
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
"""Generate a file from a template
@@ -131,10 +136,10 @@ def generate_config_from_template(
if ownership is not None:
log(f"Setting ownership on /data to {ownership}")
- subprocess.check_output(["chown", "-R", ownership, "/data"])
+ subprocess.run(["chown", "-R", ownership, "/data"], check=True)
args = ["gosu", ownership] + args
- subprocess.check_output(args)
+ subprocess.run(args, check=True)
def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
@@ -158,7 +163,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
log(f"Setting ownership on {data_dir} to {ownership}")
- subprocess.check_output(["chown", ownership, data_dir])
+ subprocess.run(["chown", ownership, data_dir], check=True)
# create a suitable log config from our template
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
@@ -185,6 +190,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
"--open-private-ports",
]
# log("running %s" % (args, ))
+ flush_buffers()
os.execv(sys.executable, args)
@@ -267,8 +273,10 @@ running with 'migrate_config'. See the README for more details.
args = [sys.executable] + args
if ownership is not None:
args = ["gosu", ownership] + args
+ flush_buffers()
os.execve("/usr/sbin/gosu", args, environ)
else:
+ flush_buffers()
os.execve(sys.executable, args, environ)
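The new `flush_buffers()` calls before each `os.exec*` exist because stdout is block-buffered when it is not a TTY (as in a container), and replacing the process image discards anything still sitting in Python's userspace buffers, so a final log line could otherwise vanish. A minimal demonstration:

```python
# Minimal demonstration of why the script flushes before exec: without the
# flush, the print below can be lost when stdout is block-buffered (e.g. when
# piped or running in a container), because exec* never flushes Python's
# userspace buffers.
import os
import sys

print("about to exec; this line can be lost without an explicit flush")
sys.stdout.flush()
sys.stderr.flush()

os.execv("/bin/true", ["true"])  # replaces this process; never returns
```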
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 38849593c6..e398895b36 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -9,6 +9,8 @@
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
+ - [coturn TURN server](setup/turn/coturn.md)
+ - [eturnal TURN server](setup/turn/eturnal.md)
- [Delegation](delegate.md)
# Upgrading
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 3625c7b6c5..880bef4194 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -37,6 +37,7 @@ It returns a JSON body like the following:
"is_guest": 0,
"admin": 0,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"creation_ts": 1560432506,
"appservice_id": null,
@@ -167,6 +168,7 @@ A response body like the following is returned:
"admin": 0,
"user_type": null,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"displayname": "",
"avatar_url": null,
@@ -177,6 +179,7 @@ A response body like the following is returned:
"admin": 1,
"user_type": null,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"displayname": "",
"avatar_url": "",
@@ -247,6 +250,7 @@ The following fields are returned in the JSON response body:
- `user_type` - string - Type of the user. Normal users are type `None`.
This allows user type specific behaviour. There are also types `support` and `bot`.
- `deactivated` - bool - Status if that user has been marked as deactivated.
+ - `erased` - bool - Status if that user has been marked as erased.
- `shadow_banned` - bool - Status if that user has been marked as shadow banned.
- `displayname` - string - The user's display name if they have set one.
- `avatar_url` - string - The user's avatar URL if they have set one.
@@ -1193,3 +1197,42 @@ Returns a `404` HTTP status code if no user was found, with a response body like
```
_Added in Synapse 1.68.0._
+
+
+### Find a user based on their Third Party ID (ThreePID or 3PID)
+
+The API is:
+
+```
+GET /_synapse/admin/v1/threepid/$medium/users/$address
+```
+
+When a user matches the given address for the given medium, an HTTP code `200` with a response body like the following is returned:
+
+```json
+{
+ "user_id": "@hello:example.org"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `medium` - Kind of third-party ID, either `email` or `msisdn`.
+- `address` - Value of the third-party ID.
+
+The `address` may contain characters that are not URL-safe, so it is advised to URL-encode it.
+
+**Errors**
+
+Returns a `404` HTTP status code if no user was found, with a response body like this:
+
+```json
+{
+ "errcode":"M_NOT_FOUND",
+ "error":"User not found"
+}
+```
+
+_Added in Synapse 1.72.0._
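As a hedged illustration of driving this endpoint from Python with `requests`: the homeserver URL, token, and address below are placeholders, and like other Synapse admin APIs the request must be authenticated with a server admin's access token.

```python
# Hypothetical client for the 3PID lookup admin API; the server URL, token,
# and address are placeholders.
import urllib.parse

import requests

HOMESERVER = "https://matrix.example.org"
ADMIN_TOKEN = "REPLACE_WITH_ADMIN_ACCESS_TOKEN"

medium = "email"
# URL-encode the address, as advised above.
address = urllib.parse.quote("hello@example.org", safe="")

resp = requests.get(
    f"{HOMESERVER}/_synapse/admin/v1/threepid/{medium}/users/{address}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
if resp.status_code == 200:
    print(resp.json()["user_id"])  # e.g. "@hello:example.org"
else:
    print(resp.status_code, resp.json())  # 404 -> M_NOT_FOUND
```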
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index cb0d727efa..342bc1d340 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -167,6 +167,12 @@ was broken. They are slower than the linters but will typically catch more error
poetry run trial tests
```
+You can run unit tests in parallel by passing the `-jX` argument to `trial`, where `X` is the number of parallel runners you want. To use 4 CPU cores, you would run them like so:
+
+```sh
+poetry run trial -j4 tests
+```
+
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
@@ -318,6 +324,12 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
+ - If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
+ types you wish to test. This is a simple comma-delimited string containing the worker types
+ defined in the `WORKERS_CONFIG` template
+ [here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
+ A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
+ See the [worker documentation](../workers.md) for additional information on workers.
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
@@ -327,7 +339,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/compleme
### Prettier formatting with `gotestfmt`
If you want to format the output of the tests the same way as it looks in CI,
-install [gotestfmt](https://github.com/haveyoudebuggedit/gotestfmt).
+install [gotestfmt](https://github.com/GoTestTools/gotestfmt).
You can then use this incantation to format the tests appropriately:
@@ -384,7 +396,7 @@ This file will become part of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
release, so the content of the file should be a short description of your
change in the same style as the rest of the changelog. The file can contain Markdown
-formatting, and should end with a full stop (.) or an exclamation mark (!) for
+formatting, and must end with a full stop (.) or an exclamation mark (!) for
consistency.
Adding credits to the changelog is encouraged, we value your
diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md
index e9b925ddd8..29945c264e 100644
--- a/docs/development/database_schema.md
+++ b/docs/development/database_schema.md
@@ -195,23 +195,24 @@ There are three separate aspects to this:
## `event_id` global uniqueness
-In room versions `1` and `2` it's possible to end up with two events with the
-same `event_id` (in the same or different rooms). After room version `3`, that
-can only happen with a hash collision, which we basically hope will never
-happen.
-
-There are several places in Synapse and even Matrix APIs like [`GET
+`event_id`s can be considered globally unique, although there has been a lot of
+debate on this topic in places like
+[MSC2779](https://github.com/matrix-org/matrix-spec-proposals/issues/2779) and
+[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848), neither of which
+has been resolved yet (as of 2022-09-01). There are several places in Synapse
+and even in the Matrix APIs like [`GET
/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
where we assume that event IDs are globally unique.
-But hash collisions are still possible, and by treating event IDs as room
-scoped, we can reduce the possibility of a hash collision. When scoping
-`event_id` in the database schema, it should be also accompanied by `room_id`
-(`PRIMARY KEY (room_id, event_id)`) and lookups should be done through the pair
-`(room_id, event_id)`.
+When scoping `event_id` in a database schema, it is often nice to accompany it
+with `room_id` (`PRIMARY KEY (room_id, event_id)` and a `FOREIGN KEY(room_id)
+REFERENCES rooms(room_id)`), which makes flexible lookups easy. For example, it
+makes it very easy to find and clean up everything in a room when it needs to be
+purged (no need for a sub-`select` query or a join from the `events` table).
+
+A note on collisions: in room versions `1` and `2` it's possible to end up with
+two events with the same `event_id` (in the same or different rooms). After room
+version `3`, that can only happen with a hash collision, which we basically hope
+will never happen (SHA256 has a massive key space).
-There has been a lot of debate on this in places like
-https://github.com/matrix-org/matrix-spec-proposals/issues/2779 and
-[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
-has no resolution yet (as of 2022-09-01).
diff --git a/docs/metrics-howto.md b/docs/metrics-howto.md
index 279303a798..16e4368f35 100644
--- a/docs/metrics-howto.md
+++ b/docs/metrics-howto.md
@@ -16,14 +16,21 @@
There are two methods of enabling the metrics endpoint in Synapse.
The first serves the metrics as a part of the usual web server and
- can be enabled by adding the \"metrics\" resource to the existing
- listener as such:
+ can be enabled by adding the `metrics` resource to the existing
+   listener, as in this example:
```yaml
- resources:
- - names:
- - client
- - metrics
+ listeners:
+ - port: 8008
+ tls: false
+ type: http
+ x_forwarded: true
+ bind_addresses: ['::1', '127.0.0.1']
+
+ resources:
+ # added "metrics" in this line
+ - names: [client, federation, metrics]
+ compress: false
```
This provides a simple way of adding metrics to your Synapse
@@ -37,14 +44,24 @@
to just internal networks easier. The served metrics are available
over HTTP only, and will be available at `/_synapse/metrics`.
- Add a new listener to homeserver.yaml:
+ Add a new listener to homeserver.yaml as in this example:
```yaml
- listeners:
- - type: metrics
- port: 9000
- bind_addresses:
- - '0.0.0.0'
+ listeners:
+ - port: 8008
+ tls: false
+ type: http
+ x_forwarded: true
+ bind_addresses: ['::1', '127.0.0.1']
+
+ resources:
+ - names: [client, federation]
+ compress: false
+
+ # beginning of the new metrics listener
+ - port: 9000
+ type: metrics
+ bind_addresses: ['::1', '127.0.0.1']
```
1. Restart Synapse.
@@ -135,6 +152,8 @@ Synapse 1.2 updates the Prometheus metrics to match the naming
convention of the upstream `prometheus_client`. The old names are
considered deprecated and will be removed in a future version of
Synapse.
+**The old names will be disabled by default in Synapse v1.71.0 and removed
+altogether in Synapse v1.73.0.**
| New Name | Old Name |
| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
@@ -146,6 +165,13 @@ Synapse.
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
+| synapse_util_caches_cache_hits | synapse_util_caches_cache:hits |
+| synapse_util_caches_cache_size | synapse_util_caches_cache:size |
+| synapse_util_caches_cache_evicted_size | synapse_util_caches_cache:evicted_size |
+| synapse_util_caches_cache | synapse_util_caches_cache:total |
+| synapse_util_caches_response_cache_size | synapse_util_caches_response_cache:size |
+| synapse_util_caches_response_cache_hits | synapse_util_caches_response_cache:hits |
+| synapse_util_caches_response_cache_evicted_size | synapse_util_caches_response_cache:evicted_size |
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
@@ -183,6 +209,9 @@ Synapse.
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
+| synapse_admin_mau_current | synapse_admin_mau:current |
+| synapse_admin_mau_max | synapse_admin_mau:max |
+| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
@@ -261,7 +290,7 @@ Standard Metric Names
As of synapse version 0.18.2, the format of the process-wide metrics has
been changed to fit prometheus standard naming conventions. Additionally
-the units have been changed to seconds, from miliseconds.
+the units have been changed to seconds, from milliseconds.
| New name | Old name |
| ---------------------------------------- | --------------------------------- |
diff --git a/docs/openid.md b/docs/openid.md
index ce9b026228..37c5eb244d 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -49,6 +49,13 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.
+## OIDC Back-Channel Logout
+
+Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.
+
+This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user's session.
+This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and by setting the following URL as the destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`.
+
## Sample configs
Here are a few configs for providers that should work with Synapse.
@@ -123,6 +130,9 @@ oidc_providers:
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
+Keycloak supports OIDC Back-Channel Logout, which sends logout notifications to Synapse so that Synapse users are logged out when they log out from Keycloak.
+This can optionally be enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.
+
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
@@ -144,6 +154,8 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
+| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
+| Backchannel Logout Session Required (optional) | `On` |
5. Click `Save`
6. On the Credentials tab, update the fields:
@@ -167,7 +179,9 @@ oidc_providers:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
+ backchannel_logout_enabled: true # Optional
```
+
### Auth0
[Auth0][auth0] is a hosted SaaS IdP solution.
@@ -336,11 +350,12 @@ oidc_providers:
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
- scopes: ["openid", "profile"]
+ scopes: ["openid", "profile", "email"] # email is optional, read below
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
+ email_template: "{{ user.email }}" # needs "email" in scopes above
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/client/oidc/callback`.
@@ -423,7 +438,7 @@ Synapse config:
user_mapping_provider:
config:
display_name_template: "{{ user.name }}"
- email_template: "{{ '{{ user.email }}' }}"
+ email_template: "{{ user.email }}"
```
Relevant documents:
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index 4e7a1d4435..48dbc1c58e 100644
--- a/docs/reverse_proxy.md
+++ b/docs/reverse_proxy.md
@@ -79,6 +79,9 @@ server {
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
+
+ # Synapse responses may be chunked, which is an HTTP/1.1 feature.
+ proxy_http_version 1.1;
}
}
```
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index 3065a0e2d9..6339160d00 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -6,7 +6,7 @@
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
diff --git a/docs/setup/turn/coturn.md b/docs/setup/turn/coturn.md
new file mode 100644
index 0000000000..a1bb1e934c
--- /dev/null
+++ b/docs/setup/turn/coturn.md
@@ -0,0 +1,188 @@
+# coturn TURN server
+
+The following sections describe how to install [coturn](https://github.com/coturn/coturn) (which implements the TURN REST API).
+
+## `coturn` setup
+
+### Initial installation
+
+The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
+
+#### Debian and Ubuntu based distributions
+
+Just install the debian package:
+
+```sh
+sudo apt install coturn
+```
+
+This will install and start a systemd service called `coturn`.
+
+#### Source installation
+
+1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
+
+1. Configure it:
+
+ ```sh
+ ./configure
+ ```
+
+ You may need to install `libevent2`: if so, you should do so in
+ the way recommended by your operating system. You can ignore
+ warnings about lack of database support: a database is unnecessary
+ for this purpose.
+
+1. Build and install it:
+
+ ```sh
+ make
+ sudo make install
+ ```
+
+### Configuration
+
+1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
+ lines, with example values, are:
+
+ ```
+ use-auth-secret
+ static-auth-secret=[your secret key here]
+ realm=turn.myserver.org
+ ```
+
+ See `turnserver.conf` for explanations of the options. One way to generate
+ the `static-auth-secret` is with `pwgen`:
+
+ ```sh
+ pwgen -s 64 1
+ ```
+
+ A `realm` must be specified, but its value is somewhat arbitrary. (It is
+ sent to clients as part of the authentication flow.) It is conventional to
+ set it to be your server name.
+
+1. You will most likely want to configure `coturn` to write logs somewhere. The
+ easiest way is normally to send them to the syslog:
+
+   ```
+ syslog
+ ```
+
+ (in which case, the logs will be available via `journalctl -u coturn` on a
+ systemd system). Alternatively, `coturn` can be configured to write to a
+ logfile - check the example config file supplied with `coturn`.
+
+1. Consider your security settings. TURN lets users request a relay which will
+ connect to arbitrary IP addresses and ports. The following configuration is
+ suggested as a minimum starting point:
+
+ ```
+ # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
+ no-tcp-relay
+
+ # don't let the relay ever try to connect to private IP address ranges within your network (if any)
+ # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
+ denied-peer-ip=10.0.0.0-10.255.255.255
+ denied-peer-ip=192.168.0.0-192.168.255.255
+ denied-peer-ip=172.16.0.0-172.31.255.255
+
+ # recommended additional local peers to block, to mitigate external access to internal services.
+ # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
+ no-multicast-peers
+ denied-peer-ip=0.0.0.0-0.255.255.255
+ denied-peer-ip=100.64.0.0-100.127.255.255
+ denied-peer-ip=127.0.0.0-127.255.255.255
+ denied-peer-ip=169.254.0.0-169.254.255.255
+ denied-peer-ip=192.0.0.0-192.0.0.255
+ denied-peer-ip=192.0.2.0-192.0.2.255
+ denied-peer-ip=192.88.99.0-192.88.99.255
+ denied-peer-ip=198.18.0.0-198.19.255.255
+ denied-peer-ip=198.51.100.0-198.51.100.255
+ denied-peer-ip=203.0.113.0-203.0.113.255
+ denied-peer-ip=240.0.0.0-255.255.255.255
+
+ # special case the turn server itself so that client->TURN->TURN->client flows work
+ # this should be one of the turn server's listening IPs
+ allowed-peer-ip=10.0.0.1
+
+ # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
+ user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
+ total-quota=1200
+ ```
+
+1. Also consider supporting TLS/DTLS. To do this, add the following settings
+ to `turnserver.conf`:
+
+ ```
+ # TLS certificates, including intermediate certs.
+ # For Let's Encrypt certificates, use `fullchain.pem` here.
+ cert=/path/to/fullchain.pem
+
+ # TLS private key file
+ pkey=/path/to/privkey.pem
+
+ # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
+ #no-tls
+ #no-dtls
+ ```
+
+ In this case, replace the `turn:` schemes in the `turn_uris` settings below
+ with `turns:`.
+
+ We recommend that you only try to set up TLS/DTLS once you have set up a
+ basic installation and got it working.
+
+ NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+ not work with any Matrix client that uses Chromium's WebRTC library. This
+ currently includes Element Android & iOS; for more details, see their
+ [respective](https://github.com/vector-im/element-android/issues/1533)
+ [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+ [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+ Consider using a ZeroSSL certificate for your TURN server as a working alternative.
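+
+ For example, with the example `realm` above, the corresponding homeserver
+ `turn_uris` entries might then look like:
+
+ ```yaml
+ turn_uris: [ "turns:turn.myserver.org?transport=udp", "turns:turn.myserver.org?transport=tcp" ]
+ ```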
+
+1. Ensure your firewall allows traffic into the TURN server on the ports
+   you've configured it to listen on (by default 3478 and 5349 for TURN
+   traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP
+   relay).
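+
+ For example, with `ufw` (a sketch assuming the default ports; adapt this to
+ your firewall of choice):
+
+ ```sh
+ sudo ufw allow 3478/tcp
+ sudo ufw allow 3478/udp
+ sudo ufw allow 5349/tcp
+ sudo ufw allow 5349/udp
+ sudo ufw allow 49152:65535/udp
+ ```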
+
+1. If your TURN server is behind NAT, the NAT gateway must have an external,
+ publicly-reachable IP address. You must configure `coturn` to advertise that
+ address to connecting clients:
+
+ ```
+ external-ip=EXTERNAL_NAT_IPv4_ADDRESS
+ ```
+
+ You may optionally limit the TURN server to listen only on the local
+ address that is mapped by NAT to the external address:
+
+ ```
+ listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
+ ```
+
+ If your NAT gateway is reachable over both IPv4 and IPv6, you may
+ configure `coturn` to advertise each available address:
+
+ ```
+ external-ip=EXTERNAL_NAT_IPv4_ADDRESS
+ external-ip=EXTERNAL_NAT_IPv6_ADDRESS
+ ```
+
+ When advertising an external IPv6 address, ensure that the firewall and
+ network settings of the system running your TURN server are configured to
+ accept IPv6 traffic, and that the TURN server is listening on the local
+ IPv6 address that is mapped by NAT to the external IPv6 address.
+
+1. (Re)start the turn server:
+
+ * If you used the Debian package (or have set up a systemd unit yourself):
+ ```sh
+ sudo systemctl restart coturn
+ ```
+
+ * If you built from source:
+
+ ```sh
+ /usr/local/bin/turnserver -o
+ ```
diff --git a/docs/setup/turn/eturnal.md b/docs/setup/turn/eturnal.md
new file mode 100644
index 0000000000..2e5a45673e
--- /dev/null
+++ b/docs/setup/turn/eturnal.md
@@ -0,0 +1,170 @@
+# eturnal TURN server
+
+The following sections describe how to install [eturnal](https://eturnal.net/)
+(which implements the TURN REST API).
+
+## `eturnal` setup
+
+### Initial installation
+
+The `eturnal` TURN server implementation is available from a variety of sources,
+such as native package managers, binary packages, installation from source, or a
+[container image](https://eturnal.net/documentation/code/docker.html). They are
+all described [here](https://github.com/processone/eturnal#installation).
+
+Quick-test instructions for a [Linux shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md)
+or for [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md)
+are available as well.
+
+### Configuration
+
+After installation, `eturnal` usually ships with a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml)
+at `/etc/eturnal.yml` (or, if not found there, at `/opt/eturnal/etc/eturnal.yml`).
+It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML)
+format. The file contains further explanations.
+
+Here are some hints on how to configure `eturnal` on your [host machine](https://github.com/processone/eturnal#configuration)
+or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html).
+You can also dive deeper into the [reference documentation](https://eturnal.net/documentation/).
+
+`eturnal` runs out of the box with the default configuration. To enable TURN and
+to integrate it with your homeserver, some settings in `eturnal`'s default
+configuration file must be adjusted:
+
+1. The homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
+ and `eturnal`'s shared `secret` for authentication
+
+ Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
+ configuration file:
+
+ ```yaml
+ secret: "long-and-cryptic" # Shared secret, CHANGE THIS.
+ ```
+
+ One way to generate a `secret` is with `pwgen`:
+
+ ```sh
+ pwgen -s 64 1
+ ```
+
+1. Public IP address
+
+ If your TURN server is behind NAT, the NAT gateway must have an external,
+ publicly-reachable IP address. `eturnal` tries to autodetect the public IP
+ address; however, it may also be configured explicitly by uncommenting and
+ adjusting this line, so that `eturnal` advertises that address to connecting
+ clients:
+
+ ```yaml
+ relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+ ```
+
+ If your NAT gateway is reachable over both IPv4 and IPv6, you may
+ configure `eturnal` to advertise each available address:
+
+ ```yaml
+ relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+ relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
+ ```
+
+ When advertising an external IPv6 address, ensure that the firewall and
+ network settings of the system running your TURN server are configured to
+ accept IPv6 traffic, and that the TURN server is listening on the local
+ IPv6 address that is mapped by NAT to the external IPv6 address.
+
+1. Logging
+
+ If `eturnal` was started by systemd, log files are written into the
+ `/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
+ instead, the `log_dir` option can be set to `stdout` in the configuration file.
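+
+ That is, in `eturnal.yml`:
+
+ ```yaml
+ log_dir: stdout
+ ```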
+
+1. Security considerations
+
+ Consider your security settings. TURN lets users request a relay which will
+ connect to arbitrary IP addresses and ports. The following configuration is
+ suggested as a minimum starting point, [see also the official documentation](https://eturnal.net/documentation/#blacklist):
+
+ ```yaml
+ ## Reject TURN relaying from/to the following addresses/networks:
+ blacklist: # This is the default blacklist.
+ - "127.0.0.0/8" # IPv4 loopback.
+ - "::1" # IPv6 loopback.
+ - recommended # Expands to a number of networks recommended to be
+ # blocked, but includes private networks. Those
+ # would have to be 'whitelist'ed if eturnal serves
+ # local clients/peers within such networks.
+ ```
+
+ To whitelist IP addresses or specific (private) networks, you need to **add** a
+ `whitelist` section to the configuration file, e.g.:
+
+ ```yaml
+ whitelist:
+ - "192.168.0.0/16"
+ - "203.0.113.113"
+ - "2001:db8::/64"
+ ```
+
+ The more specific, the better.
+
+1. TURNS (TURN via TLS/DTLS)
+
+ Also consider supporting TLS/DTLS. To do this, adjust the following settings
+ in the `eturnal.yml` configuration file (the TLS parts must be uncommented):
+
+ ```yaml
+ listen:
+ - ip: "::"
+ port: 3478
+ transport: udp
+ - ip: "::"
+ port: 3478
+ transport: tcp
+ - ip: "::"
+ port: 5349
+ transport: tls
+
+ ## TLS certificate/key files (must be readable by 'eturnal' user!):
+ tls_crt_file: /etc/eturnal/tls/crt.pem
+ tls_key_file: /etc/eturnal/tls/key.pem
+ ```
+
+ In this case, replace the `turn:` schemes in the homeserver's `turn_uris` settings
+ with `turns:`. This is described in more detail [here](../../usage/configuration/config_documentation.md#turn_uris).
+
+ We recommend that you only try to set up TLS/DTLS once you have set up a
+ basic installation and got it working.
+
+ NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+ not work with any Matrix client that uses Chromium's WebRTC library. This
+ currently includes Element Android & iOS; for more details, see their
+ [respective](https://github.com/vector-im/element-android/issues/1533)
+ [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+ [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+ Consider using a ZeroSSL certificate for your TURN server as a working alternative.
+
+1. Firewall
+
+ Ensure your firewall allows traffic into the TURN server on the ports
+ you've configured it to listen on (by default 3478 and 5349 for TURN
+ traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP
+ relay).
+
+1. Reloading/restarting `eturnal`
+
+ Changes in the configuration file require `eturnal` to reload or restart. This
+ can be achieved by:
+
+ ```sh
+ eturnalctl reload
+ ```
+
+ `eturnal` performs a configuration check before actually reloading/restarting
+ and provides hints if something is not configured correctly.
+
+### eturnalctl operations script
+
+`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
+which can be called e.g. to check whether the service is up, to restart the service,
+to query how many active sessions exist, to change logging behaviour, and so on.
+
+Hint: If `eturnalctl` is not part of your `$PATH`, consider either sym-linking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it from the default `eturnal` directory directly, e.g. `/opt/eturnal/bin/eturnalctl info`.
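+
+A few illustrative calls (the `sessions` subcommand name is taken from the
+upstream operations documentation; the others appear elsewhere in this guide):
+
+```sh
+eturnalctl info              # print details about the running eturnal instance
+eturnalctl sessions          # query the currently active TURN sessions
+eturnalctl loglevel debug    # change the logging behaviour at runtime
+eturnalctl reload            # re-read eturnal.yml and apply any changes
+```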
diff --git a/docs/systemd-with-workers/workers/federation_sender.yaml b/docs/systemd-with-workers/workers/federation_sender.yaml
new file mode 100644
index 0000000000..5c591aec2c
--- /dev/null
+++ b/docs/systemd-with-workers/workers/federation_sender.yaml
@@ -0,0 +1,8 @@
+worker_app: synapse.app.federation_sender
+worker_name: federation_sender1
+
+# The replication listener on the main synapse process.
+worker_replication_host: 127.0.0.1
+worker_replication_http_port: 9093
+
+worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
diff --git a/docs/systemd-with-workers/workers/media_worker.yaml b/docs/systemd-with-workers/workers/media_worker.yaml
new file mode 100644
index 0000000000..eb34d12492
--- /dev/null
+++ b/docs/systemd-with-workers/workers/media_worker.yaml
@@ -0,0 +1,14 @@
+worker_app: synapse.app.media_repository
+worker_name: media_worker
+
+# The replication listener on the main synapse process.
+worker_replication_host: 127.0.0.1
+worker_replication_http_port: 9093
+
+worker_listeners:
+ - type: http
+ port: 8085
+ resources:
+ - names: [media]
+
+worker_log_config: /etc/matrix-synapse/media-worker-log.yaml
diff --git a/docs/systemd-with-workers/workers/pusher_worker.yaml b/docs/systemd-with-workers/workers/pusher_worker.yaml
new file mode 100644
index 0000000000..46e22c6f06
--- /dev/null
+++ b/docs/systemd-with-workers/workers/pusher_worker.yaml
@@ -0,0 +1,8 @@
+worker_app: synapse.app.pusher
+worker_name: pusher_worker1
+
+# The replication listener on the main synapse process.
+worker_replication_host: 127.0.0.1
+worker_replication_http_port: 9093
+
+worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index 37a311ad9c..b466cab40c 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -9,222 +9,28 @@ allows the homeserver to generate credentials that are valid for use on the
TURN server through the use of a secret shared between the homeserver and the
TURN server.
-The following sections describe how to install [coturn]() (which implements the TURN REST API) and integrate it with synapse.
+This documentation provides two TURN server configuration examples:
+
+* [coturn](setup/turn/coturn.md)
+* [eturnal](setup/turn/eturnal.md)
## Requirements
-For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.
+For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.
Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP.
However, even with appropriate configuration, NAT is known to cause issues and often does not work.
-## `coturn` setup
-
-### Initial installation
-
-The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
-
-#### Debian installation
-
-Just install the debian package:
-
-```sh
-apt install coturn
-```
-
-This will install and start a systemd service called `coturn`.
-
-#### Source installation
-
-1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
-
-1. Configure it:
-
- ```sh
- ./configure
- ```
-
- You may need to install `libevent2`: if so, you should do so in
- the way recommended by your operating system. You can ignore
- warnings about lack of database support: a database is unnecessary
- for this purpose.
-
-1. Build and install it:
-
- ```sh
- make
- make install
- ```
-
-### Configuration
-
-1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
- lines, with example values, are:
-
- ```
- use-auth-secret
- static-auth-secret=[your secret key here]
- realm=turn.myserver.org
- ```
-
- See `turnserver.conf` for explanations of the options. One way to generate
- the `static-auth-secret` is with `pwgen`:
-
- ```sh
- pwgen -s 64 1
- ```
-
- A `realm` must be specified, but its value is somewhat arbitrary. (It is
- sent to clients as part of the authentication flow.) It is conventional to
- set it to be your server name.
-
-1. You will most likely want to configure coturn to write logs somewhere. The
- easiest way is normally to send them to the syslog:
-
- ```sh
- syslog
- ```
-
- (in which case, the logs will be available via `journalctl -u coturn` on a
- systemd system). Alternatively, coturn can be configured to write to a
- logfile - check the example config file supplied with coturn.
-
-1. Consider your security settings. TURN lets users request a relay which will
- connect to arbitrary IP addresses and ports. The following configuration is
- suggested as a minimum starting point:
-
- ```
- # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
- no-tcp-relay
-
- # don't let the relay ever try to connect to private IP address ranges within your network (if any)
- # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
- denied-peer-ip=10.0.0.0-10.255.255.255
- denied-peer-ip=192.168.0.0-192.168.255.255
- denied-peer-ip=172.16.0.0-172.31.255.255
-
- # recommended additional local peers to block, to mitigate external access to internal services.
- # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
- no-multicast-peers
- denied-peer-ip=0.0.0.0-0.255.255.255
- denied-peer-ip=100.64.0.0-100.127.255.255
- denied-peer-ip=127.0.0.0-127.255.255.255
- denied-peer-ip=169.254.0.0-169.254.255.255
- denied-peer-ip=192.0.0.0-192.0.0.255
- denied-peer-ip=192.0.2.0-192.0.2.255
- denied-peer-ip=192.88.99.0-192.88.99.255
- denied-peer-ip=198.18.0.0-198.19.255.255
- denied-peer-ip=198.51.100.0-198.51.100.255
- denied-peer-ip=203.0.113.0-203.0.113.255
- denied-peer-ip=240.0.0.0-255.255.255.255
-
- # special case the turn server itself so that client->TURN->TURN->client flows work
- # this should be one of the turn server's listening IPs
- allowed-peer-ip=10.0.0.1
-
- # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
- user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
- total-quota=1200
- ```
-
-1. Also consider supporting TLS/DTLS. To do this, add the following settings
- to `turnserver.conf`:
-
- ```
- # TLS certificates, including intermediate certs.
- # For Let's Encrypt certificates, use `fullchain.pem` here.
- cert=/path/to/fullchain.pem
-
- # TLS private key file
- pkey=/path/to/privkey.pem
-
- # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
- #no-tls
- #no-dtls
- ```
-
- In this case, replace the `turn:` schemes in the `turn_uris` settings below
- with `turns:`.
-
- We recommend that you only try to set up TLS/DTLS once you have set up a
- basic installation and got it working.
-
- NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
- not work with any Matrix client that uses Chromium's WebRTC library. This
- currently includes Element Android & iOS; for more details, see their
- [respective](https://github.com/vector-im/element-android/issues/1533)
- [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
- [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
- Consider using a ZeroSSL certificate for your TURN server as a working alternative.
-
-1. Ensure your firewall allows traffic into the TURN server on the ports
- you've configured it to listen on (By default: 3478 and 5349 for TURN
- traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
- for the UDP relay.)
-
-1. If your TURN server is behind NAT, the NAT gateway must have an external,
- publicly-reachable IP address. You must configure coturn to advertise that
- address to connecting clients:
-
- ```
- external-ip=EXTERNAL_NAT_IPv4_ADDRESS
- ```
-
- You may optionally limit the TURN server to listen only on the local
- address that is mapped by NAT to the external address:
-
- ```
- listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
- ```
-
- If your NAT gateway is reachable over both IPv4 and IPv6, you may
- configure coturn to advertise each available address:
-
- ```
- external-ip=EXTERNAL_NAT_IPv4_ADDRESS
- external-ip=EXTERNAL_NAT_IPv6_ADDRESS
- ```
-
- When advertising an external IPv6 address, ensure that the firewall and
- network settings of the system running your TURN server are configured to
- accept IPv6 traffic, and that the TURN server is listening on the local
- IPv6 address that is mapped by NAT to the external IPv6 address.
-
-1. (Re)start the turn server:
-
- * If you used the Debian package (or have set up a systemd unit yourself):
- ```sh
- systemctl restart coturn
- ```
-
- * If you installed from source:
-
- ```sh
- bin/turnserver -o
- ```
+Afterwards, the homeserver needs some further configuration.
## Synapse setup
Your homeserver configuration file needs the following extra keys:
-1. "`turn_uris`": This needs to be a yaml list of public-facing URIs
- for your TURN server to be given out to your clients. Add separate
- entries for each transport your TURN server supports.
-2. "`turn_shared_secret`": This is the secret shared between your
- homeserver and your TURN server, so you should set it to the same
- string you used in turnserver.conf.
-3. "`turn_user_lifetime`": This is the amount of time credentials
- generated by your homeserver are valid for (in milliseconds).
- Shorter times offer less potential for abuse at the expense of
- increased traffic between web clients and your homeserver to
- refresh credentials. The TURN REST API specification recommends
- one day (86400000).
-4. "`turn_allow_guests`": Whether to allow guest users to use the
- TURN server. This is enabled by default, as otherwise VoIP will
- not work reliably for guests. However, it does introduce a
- security risk as it lets guests connect to arbitrary endpoints
- without having gone through a CAPTCHA or similar to register a
- real account.
+1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
+2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
+3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
+4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)
As an example, here is the relevant section of the config file for `matrix.org`. The
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
@@ -263,7 +69,7 @@ Here are a few things to try:
* Check that you have opened your firewall to allow UDP traffic to the UDP
relay ports (49152-65535 by default).
- * Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted)
+ * Try disabling your TURN server's TLS/DTLS listeners and enabling only its (unencrypted)
TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
voice & video WebRTC traffic is always encrypted.)
@@ -288,12 +94,19 @@ Here are a few things to try:
* ensure that your TURN server uses the NAT gateway as its default route.
- * Enable more verbose logging in coturn via the `verbose` setting:
+ * Enable more verbose logging. In `coturn`, use the `verbose` setting:
```
verbose
```
+ or, with `eturnal`, via the shell command `eturnalctl loglevel debug` or in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for the change to become effective):
+
+ ```yaml
+ ## Logging configuration:
+ log_level: debug
+ ```
+
... and then see if there are any clues in its logs.
* If you are using a browser-based client under Chrome, check
@@ -317,7 +130,7 @@ Here are a few things to try:
matrix client to your homeserver in your browser's network inspector. In
the response you should see `username` and `password`. Or:
- * Use the following shell commands:
+ * Use the following shell commands for `coturn`:
```sh
secret=staticAuthSecretHere
@@ -327,11 +140,16 @@ Here are a few things to try:
echo -e "username: $u\npassword: $p"
```
- Or:
+ or for `eturnal`:
- * Temporarily configure coturn to accept a static username/password. To do
- this, comment out `use-auth-secret` and `static-auth-secret` and add the
- following:
+ ```sh
+ eturnalctl credentials
+ ```
+
+ * Or (**coturn only**): Temporarily configure `coturn` to accept a static
+ username/password. To do this, comment out `use-auth-secret` and
+ `static-auth-secret` and add the following:
```
lt-cred-mech
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 7d4c2392e1..2aa353e496 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -15,9 +15,8 @@ this document.
The website also offers convenient
summaries.
-- If Synapse was installed using [prebuilt
- packages](setup/installation.md#prebuilt-packages), you will need to follow the
- normal process for upgrading those packages.
+- If Synapse was installed using [prebuilt packages](setup/installation.md#prebuilt-packages),
+ you will need to follow the normal process for upgrading those packages.
- If Synapse was installed using pip then upgrade to the latest
version by running:
@@ -89,12 +88,163 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.72.0
+
+## Dropping support for PostgreSQL 10
+
+In line with our [deprecation policy](deprecation_policy.md), we've dropped
+support for PostgreSQL 10, as it is no longer supported upstream.
+
+This release of Synapse requires PostgreSQL 11+.
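+
+You can check which version your database server is running with, for example
+(a sketch; add connection options as needed for your setup):
+
+```sh
+psql -c "SELECT version();"
+```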
+
+
+# Upgrading to v1.71.0
+
+## Removal of the `generate_short_term_login_token` module API method
+
+As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed.
+
+Modules relying on it can instead use the `create_login_token` method.
+
+
+## Changes to the events received by application services (interest)
+
+To align with spec (changed in
+[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now
+only considers local users to be interesting. In other words, the `users` namespace
+regex is only applied against local users of the homeserver.
+
+Please note that this probably doesn't affect the expected behavior of your application
+service: an interesting local user in a room still means that all messages in the room
+(from local or remote users) will be considered interesting. Likewise, matching a room
+with the `rooms` or `aliases` namespace regex will still consider all events sent in the
+room to be interesting to the application service.
+
+If one of your application service's `users` regexes was intended to match a remote user,
+it will no longer match as you expect. The behavioral mismatch between matching all
+local users but only some remote users is why the spec was changed/clarified and this
+caveat is no longer supported.
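+
+For illustration, a `users` namespace such as the following (bridge prefix and
+server name are hypothetical) is now only ever matched against local users:
+
+```yaml
+namespaces:
+ users:
+ - exclusive: true
+ regex: "@_bridge_.*:example\\.org"  # no longer matches remote users
+```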
+
+
+## Legacy Prometheus metric names are now disabled by default
+
+Synapse v1.71.0 disables legacy Prometheus metric names by default.
+For administrators that still rely on them and have not yet had a chance to update their
+uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
+the configuration to re-enable them temporarily.
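+
+For example, in `homeserver.yaml`:
+
+```yaml
+enable_legacy_metrics: true
+```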
+
+Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
+it will no longer be possible to re-enable them.
+
+If you do not use metrics or you have already updated your Grafana dashboard(s),
+Prometheus console(s) and alerting rule(s), there is no action needed.
+
+See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).
+
+
+# Upgrading to v1.69.0
+
+## Changes to the receipts replication streams
+
+Synapse now includes information indicating if a receipt applies to a thread when
+replicating it to other workers. This is a forwards- and backwards-incompatible
+change: v1.68 and older workers cannot process receipts replicated by v1.69 workers, and
+vice versa.
+
+Once all workers are upgraded to v1.69 (or downgraded to v1.68), receipts
+replication will resume as normal.
+
+
+## Deprecation of legacy Prometheus metric names
+
+In current versions of Synapse, some Prometheus metrics are emitted under two different names,
+with one of the names being older but non-compliant with OpenMetrics and Prometheus conventions
+and one of the names being newer but compliant.
+
+Synapse v1.71.0 will turn the old metric names off *by default*.
+For administrators that still rely on them and have not had a chance to update their
+uses of the metrics, it's possible to specify `enable_legacy_metrics: true` in
+the configuration to re-enable them temporarily.
+
+Synapse v1.73.0 will **remove legacy metric names altogether** and it will no longer
+be possible to re-enable them.
+
+The Grafana dashboard, Prometheus recording rules and Prometheus Consoles included
+in the `contrib` directory in the Synapse repository have been updated to no longer
+rely on the legacy names. These can be used on a current version of Synapse
+because current versions of Synapse emit both old and new names.
+
+You may need to update your alerting rules or any other rules that depend on
+the names of Prometheus metrics.
+If you want to test your changes before legacy names are disabled by default,
+you may specify `enable_legacy_metrics: false` in your homeserver configuration.
+
+A list of affected metrics is available on the [Metrics How-to page](https://matrix-org.github.io/synapse/v1.69/metrics-howto.html?highlight=metrics%20deprecated#renaming-of-metrics--deprecation-of-old-names-in-12).
+
+
+## Deprecation of the `generate_short_term_login_token` module API method
+
+The following method of the module API has been deprecated, and is scheduled to
+be removed in v1.71.0:
+
+```python
+def generate_short_term_login_token(
+ self,
+ user_id: str,
+ duration_in_ms: int = (2 * 60 * 1000),
+ auth_provider_id: str = "",
+ auth_provider_session_id: Optional[str] = None,
+) -> str:
+ ...
+```
+
+It has been replaced by an asynchronous equivalent:
+
+```python
+async def create_login_token(
+ self,
+ user_id: str,
+ duration_in_ms: int = (2 * 60 * 1000),
+ auth_provider_id: Optional[str] = None,
+ auth_provider_session_id: Optional[str] = None,
+) -> str:
+ ...
+```
+
+Synapse will log a warning when a module uses the deprecated method, to help
+administrators find modules using it.
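+
+A minimal sketch of the migration, assuming `self.api` holds the module's
+`ModuleApi` instance and the call site is already asynchronous:
+
+```python
+# Before (deprecated, synchronous):
+token = self.api.generate_short_term_login_token(user_id)
+
+# After (asynchronous, must be awaited):
+token = await self.api.create_login_token(user_id)
+```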
+
+
# Upgrading to v1.68.0
-As announced in the upgrade notes for v1.67.0, Synapse now requires a SQLite
-version of 3.27.0 or higher if SQLite is in use and source checkouts of Synapse
-now require a recent Rust compiler.
+Two changes announced in the upgrade notes for v1.67.0 have now landed in v1.68.0.
+## SQLite version requirement
+
+Synapse now requires a SQLite version of 3.27.0 or higher if SQLite is configured as
+Synapse's database.
+
+Installations using
+
+- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
+- Debian packages [from Matrix.org](https://packages.matrix.org/), or
+- a PostgreSQL database
+
+are not affected.
+
+## Rust requirement when building from source
+
+Building from a source checkout of Synapse now requires a recent Rust compiler
+(currently Rust 1.58.1, but see also the
+[Platform Dependency Policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html)).
+
+Installations using
+
+- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
+- Debian packages [from Matrix.org](https://packages.matrix.org/), or
+- PyPI wheels via `pip install matrix-synapse` (on supported platforms and architectures)
+
+will not be affected.
# Upgrading to v1.67.0
@@ -128,12 +278,12 @@ The simplest way of installing Rust is via [rustup.rs](https://rustup.rs/)
## SQLite version requirement in the next release
-From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
+From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
higher. Synapse v1.67.0 will be the last major release supporting SQLite
versions 3.22 to 3.26.
Those using Docker images or Debian packages from Matrix.org will not be
-affected. If you have installed from source, you should check the version of
+affected. If you have installed from source, you should check the version of
SQLite used by Python with:
```shell
diff --git a/docs/usage/administration/monthly_active_users.md b/docs/usage/administration/monthly_active_users.md
index d4e9037284..b1da6f17c2 100644
--- a/docs/usage/administration/monthly_active_users.md
+++ b/docs/usage/administration/monthly_active_users.md
@@ -73,12 +73,12 @@ When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMI
Synapse records several different prometheus metrics for MAU.
-`synapse_admin_mau:current` records the current MAU figure for native (non-application-service) users.
+`synapse_admin_mau_current` records the current MAU figure for native (non-application-service) users.
-`synapse_admin_mau:max` records the maximum MAU as dictated by the `max_mau_value` config value.
+`synapse_admin_mau_max` records the maximum MAU as dictated by the `max_mau_value` config value.
`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native` .
-`synapse_admin_mau:registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
+`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
registered accounts on the homeserver.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 77069f28a7..ed07e2180a 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -100,7 +100,7 @@ modules:
config: {}
```
---
-## Server ##
+## Server
Define your homeserver name and other base options.
@@ -160,7 +160,7 @@ including _matrix/...). This is the same URL a user might enter into the
'Custom Homeserver URL' field on their client. If you use Synapse with a
reverse proxy, this should be the URL to reach Synapse via the proxy.
Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
-'listeners' below).
+['listeners'](#listeners) below).
Defaults to `https://<server_name>/`.
@@ -180,7 +180,7 @@ This will tell other servers to send traffic to port 443 instead.
This option currently defaults to false.
-See https://matrix-org.github.io/synapse/latest/delegate.html for more
+See [Delegation of incoming federation traffic](../../delegate.md) for more
information.
Example configuration:
@@ -571,7 +571,7 @@ Example configuration:
delete_stale_devices_after: 1y
```
-## Homeserver blocking ##
+## Homeserver blocking
Useful options for Synapse admins.
---
@@ -923,7 +923,7 @@ retention:
interval: 1d
```
---
-## TLS ##
+## TLS
Options related to TLS.
@@ -1013,7 +1013,7 @@ federation_custom_ca_list:
- myCA3.pem
```
---
-## Federation ##
+## Federation
Options related to federation.
@@ -1072,7 +1072,7 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
-## Caching ##
+## Caching
Options related to caching.
@@ -1140,7 +1140,7 @@ number of entries that can be stored.
* `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and
`min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory
- usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu)
+ usage and cache entry availability. You must be using [jemalloc](../administration/admin_faq.md#help-synapse-is-slow-and-eats-all-my-ramcpu)
to utilize this option, and all three of the options must be specified for this feature to work. This option
defaults to off, enable it by providing values for the sub-options listed below. Please note that the feature will not work
and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided.
@@ -1186,7 +1186,7 @@ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using
`systemctl reload matrix-synapse`.
---
-## Database ##
+## Database
Config options related to database settings.
---
@@ -1333,20 +1333,21 @@ databases:
cp_max: 10
```
---
-## Logging ##
+## Logging
Config options related to logging.
---
### `log_config`
-This option specifies a yaml python logging config file as described [here](https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema).
+This option specifies a yaml python logging config file as described
+[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
Example configuration:
```yaml
log_config: "CONFDIR/SERVERNAME.log.config"
```
---
-## Ratelimiting ##
+## Ratelimiting
Options related to ratelimiting in Synapse.
Each ratelimiting configuration is made of two parameters:
@@ -1577,7 +1578,7 @@ Example configuration:
federation_rr_transactions_per_room_per_second: 40
```
---
-## Media Store ##
+## Media Store
Config options related to Synapse's media store.
---
@@ -1767,7 +1768,7 @@ url_preview_ip_range_blacklist:
- 'ff00::/8'
- 'fec0::/10'
```
-----
+---
### `url_preview_ip_range_whitelist`
This option sets a list of IP address CIDR ranges that the URL preview spider is allowed
@@ -1861,7 +1862,7 @@ Example configuration:
- 'fr;q=0.8'
- '*;q=0.7'
```
-----
+---
### `oembed`
oEmbed allows for easier embedding content from a website. It can be
@@ -1878,7 +1879,7 @@ oembed:
- oembed/my_providers.json
```
---
-## Captcha ##
+## Captcha
See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.
@@ -1927,7 +1928,7 @@ Example configuration:
recaptcha_siteverify_api: "https://my.recaptcha.site"
```
---
-## TURN ##
+## TURN
Options related to adding a TURN server to Synapse.
---
@@ -1948,7 +1949,7 @@ Example configuration:
```yaml
turn_shared_secret: "YOUR_SHARED_SECRET"
```
-----
+---
### `turn_username` and `turn_password`
The Username and password if the TURN server needs them and does not use a token.
@@ -2089,7 +2090,7 @@ set.
This is primarily intended for use with the `register_new_matrix_user` script
(see [Registering a user](../../setup/installation.md#registering-a-user));
-however, the interface is [documented](../admin_api/register_api.html).
+however, the interface is [documented](../../admin_api/register_api.html).
See also [`registration_shared_secret_path`](#registration_shared_secret_path).
@@ -2230,6 +2231,9 @@ homeserver. If the room already exists, make certain it is a publicly joinable
room, i.e. the join rule of the room must be set to 'public'. You can find more options
relating to auto-joining rooms below.
+As Spaces are just rooms under the hood, Space aliases may also be
+used.
+
Example configuration:
```yaml
auto_join_rooms:
@@ -2241,7 +2245,7 @@ auto_join_rooms:
Where `auto_join_rooms` are specified, setting this flag ensures that
the rooms exist by creating them when the first user on the
-homeserver registers.
+homeserver registers. This option will not create Spaces.
By default the auto-created rooms are publicly joinable from any federated
server. Use the `autocreate_auto_join_rooms_federated` and
@@ -2259,7 +2263,7 @@ autocreate_auto_join_rooms: false
---
### `autocreate_auto_join_rooms_federated`
-Whether the rooms listen in `auto_join_rooms` that are auto-created are available
+Whether the rooms listed in `auto_join_rooms` that are auto-created are available
via federation. Only has an effect if `autocreate_auto_join_rooms` is true.
Note that whether a room is federated cannot be modified after
@@ -2364,7 +2368,7 @@ Example configuration:
```yaml
session_lifetime: 24h
```
-----
+---
### `refresh_access_token_lifetime`
Time that an access token remains valid for, if the session is using refresh tokens.
@@ -2420,7 +2424,7 @@ nonrefreshable_access_token_lifetime: 24h
```
---
-## Metrics ###
+## Metrics
Config options related to metrics.
---
@@ -2434,6 +2438,31 @@ Example configuration:
enable_metrics: true
```
---
+### `enable_legacy_metrics`
+
+Set to `true` to publish both legacy and non-legacy Prometheus metric names,
+or to `false` to only publish non-legacy Prometheus metric names.
+Defaults to `false`. Has no effect if `enable_metrics` is `false`.
+**In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**
+
+Legacy metric names include:
+- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
+- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
+
+These legacy metric names are unconventional and not compliant with OpenMetrics standards.
+They are included for backwards compatibility.
+
+Example configuration:
+```yaml
+enable_legacy_metrics: false
+```
+
+See https://github.com/matrix-org/synapse/issues/11106 for context.
+
+*Since v1.67.0.*
+
+**Will be removed in v1.73.0.**
+---
### `sentry`
Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@@ -2492,7 +2521,7 @@ Example configuration:
report_stats_endpoint: https://example.com/report-usage-stats/push
```
---
-## API Configuration ##
+## API Configuration
Config settings related to the client/server API
---
@@ -2592,7 +2621,7 @@ Example configuration:
form_secret:
```
---
-## Signing Keys ##
+## Signing Keys
Config options relating to signing keys
---
@@ -2653,6 +2682,12 @@ is still supported for backwards-compatibility, but it is deprecated.
warning on start-up. To suppress this warning, set
`suppress_key_server_warning` to true.
+If the use of a trusted key server has to be deactivated, e.g. in a private
+federation or for privacy reasons, this can be achieved by setting an empty
+list (`trusted_key_servers: []`). Synapse will then request the keys directly
+from the server that owns them. If Synapse cannot get the keys directly,
+events from that server will be rejected.
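+
+For example:
+```yaml
+trusted_key_servers: []
+```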
+
Options for each entry in the list include:
* `server_name`: the name of the server. Required.
* `verify_keys`: an optional map from key id to base64-encoded public key.
@@ -2701,7 +2736,7 @@ Example configuration:
key_server_signing_keys_path: "key_server_signing_keys.key"
```
---
-## Single sign-on integration ##
+## Single sign-on integration
The following settings can be used to make Synapse use a single sign-on
provider for authentication, instead of its internal password database.
@@ -2950,7 +2985,7 @@ Options for each entry include:
* `module`: The class name of a custom mapping module. Default is
`synapse.handlers.oidc.JinjaOidcMappingProvider`.
- See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
+ See [OpenID Mapping Providers](../../sso_mapping_providers.md#openid-mapping-providers)
for information on implementing a custom mapping provider.
* `config`: Configuration for the mapping provider module. This section will
@@ -2987,6 +3022,15 @@ Options for each entry include:
which is set to the claims returned by the UserInfo Endpoint and/or
in the ID Token.
+* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
+ Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
+ Defaults to `false`.
+
+* `backchannel_logout_ignore_sub`: by default, the OIDC Back-Channel Logout feature checks that the
+ `sub` claim matches the subject claim received during login. This check can be disabled by setting
+ this to `true`. Defaults to `false`.
+
+ You might want to disable this if the `subject_claim` returned by the mapping provider is not `sub`.
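+
+For example, a sketch of a provider entry with back-channel logout enabled
+(issuer and client details are placeholders):
+
+```yaml
+oidc_providers:
+ - idp_id: my_idp
+ idp_name: "My IdP"
+ issuer: "https://idp.example.com/"
+ client_id: "synapse"
+ client_secret: "copy-me"
+ backchannel_logout_enabled: true
+```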
It is possible to configure Synapse to only allow logins if certain attributes
match particular values in the OIDC userinfo. The requirements can be listed under
@@ -3321,7 +3365,7 @@ email:
email_validation: "[%(server_name)s] Validate your email"
```
---
-## Push ##
+## Push
Configuration settings related to push notifications
---
@@ -3354,11 +3398,11 @@ push:
group_unread_count_by_room: false
```
---
-## Rooms ##
+## Rooms
Config options relating to rooms.
---
-### `encryption_enabled_by_default`
+### `encryption_enabled_by_default_for_room_type`
Controls whether locally-created rooms should be end-to-end encrypted by
default.
@@ -3391,13 +3435,15 @@ This option has the following sub-options:
the user directory. If false, search results will only contain users
visible in public rooms and users sharing a room with the requester.
Defaults to false.
+
NB. If you set this to true, and the last time the user_directory search
indexes were (re)built was before Synapse 1.44, you'll have to
rebuild the indexes in order to search through all known users.
+
These indexes are built the first time Synapse starts; admins can
- manually trigger a rebuild via API following the instructions at
- https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run
- Set to true to return search results containing all known users, even if that
+ manually trigger a rebuild via the API following the instructions
+ [for running background updates](../administration/admin_api/background_updates.md#run).
+ Set to true to return search results containing all known users, even if that
user does not share a room with the requester.
* `prefer_local_users`: Defines whether to prefer local users in search query results.
If set to true, local users are more likely to appear above remote users when searching the
@@ -3512,9 +3558,9 @@ Example configuration:
enable_room_list_search: false
```
---
-### `alias_creation`
+### `alias_creation_rules`
-The `alias_creation` option controls who is allowed to create aliases
+The `alias_creation_rules` option controls who is allowed to create aliases
on this server.
The format of this option is a list of rules that contain globs that
@@ -3598,7 +3644,7 @@ default_power_level_content_override:
```
---
-## Tracing ##
+## Tracing
Configuration options related to tracing support.
---
@@ -3644,14 +3690,71 @@ tracing:
#collector_endpoint: "http://localhost:14268/api/traces?format=jaeger.thrift"
```
---
-## Workers ##
-Configuration options related to workers.
+## Coordinating workers
+Configuration options related to workers which belong in the main config file
+(usually called `homeserver.yaml`).
+A Synapse deployment can scale horizontally by running multiple Synapse processes
+called _workers_. Incoming requests are distributed between workers to handle higher
+loads. Some workers are privileged and can accept requests from other workers.
+As a result, the worker configuration is divided into two parts.
+
+1. The first part (in this section of the manual) defines which shardable tasks
+ are delegated to privileged workers. This allows unprivileged workers to
+ request that a privileged worker act on their behalf.
+1. [The second part](#individual-worker-configuration)
+ controls the behaviour of individual workers in isolation.
+
+For guidance on setting up workers, see the [worker documentation](../../workers.md).
+
+---
+### `worker_replication_secret`
+
+A shared secret used by the replication APIs on the main process to authenticate
+HTTP requests from workers.
+
+By default this value is omitted (equivalently, `null`), which means that
+traffic between the workers and the main process is not authenticated.
+
+Example configuration:
+```yaml
+worker_replication_secret: "secret_secret"
+```
+---
+### `start_pushers`
+
+Controls sending of push notifications on the main process. Set to `false`
+if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.
+
+Example configuration:
+```yaml
+start_pushers: false
+```
+---
+### `pusher_instances`
+
+It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher),
+in which case the work is balanced across them. Use this setting to list the pusher
+workers by [`worker_name`](#worker_name). Ensure the main process and all pusher workers are
+restarted after changing this option.
+
+If no or only one pusher worker is configured, this setting is not necessary.
+The main process will send out push notifications by default if you do not disable
+it by setting [`start_pushers: false`](#start_pushers).
+
+Example configuration:
+```yaml
+start_pushers: false
+pusher_instances:
+ - pusher_worker1
+ - pusher_worker2
+```
---
### `send_federation`
Controls sending of outbound federation transactions on the main process.
-Set to false if using a federation sender worker. Defaults to true.
+Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
+Defaults to `true`.
Example configuration:
```yaml
@@ -3660,8 +3763,9 @@ send_federation: false
---
### `federation_sender_instances`
-It is possible to run multiple federation sender workers, in which case the
-work is balanced across them. Use this setting to list the senders.
+It is possible to run multiple
+[federation sender workers](../../workers.md#synapseappfederation_sender), in which
+case the work is balanced across them. Use this setting to list the senders.
This configuration setting must be shared between all federation sender workers, and if
changed all federation sender workers must be stopped at the same time and then
@@ -3670,14 +3774,19 @@ events may be dropped).
Example configuration:
```yaml
+send_federation: false
federation_sender_instances:
- federation_sender1
```
---
### `instance_map`
-When using workers this should be a map from worker name to the
+When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
+Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
+an HTTP replication listener, and that listener should be included in the `instance_map`.
+(The main process also needs an HTTP replication listener, but it should not be
+listed in the `instance_map`.)
Example configuration:
```yaml
@@ -3690,8 +3799,11 @@ instance_map:
### `stream_writers`
Experimental: When using workers you can define which workers should
-handle event persistence and typing notifications. Any worker
-specified here must also be in the `instance_map`.
+handle writing to streams such as event persistence and typing notifications.
+Any worker specified here must also be in the [`instance_map`](#instance_map).
+
+See the list of available streams in the
+[worker documentation](../../workers.md#stream-writers).
Example configuration:
```yaml
@@ -3702,29 +3814,18 @@ stream_writers:
---
### `run_background_tasks_on`
-The worker that is used to run background tasks (e.g. cleaning up expired
-data). If not provided this defaults to the main process.
+The [worker](../../workers.md#background-tasks) that is used to run
+background tasks (e.g. cleaning up expired data). If not provided this
+defaults to the main process.
Example configuration:
```yaml
run_background_tasks_on: worker1
```
---
-### `worker_replication_secret`
-
-A shared secret used by the replication APIs to authenticate HTTP requests
-from workers.
-
-By default this is unused and traffic is not authenticated.
-
-Example configuration:
-```yaml
-worker_replication_secret: "secret_secret"
-```
### `redis`
-Configuration for Redis when using workers. This *must* be enabled when
-using workers (unless using old style direct TCP configuration).
+Configuration for Redis when using workers. This *must* be enabled when using workers.
This setting has the following sub-options:
* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
@@ -3739,7 +3840,143 @@ redis:
port: 6379
password:
```
-## Background Updates ##
+---
+## Individual worker configuration
+These options configure an individual worker, in its worker configuration file.
+They should not be provided when configuring the main process.
+
+Note also the configuration above for
+[coordinating a cluster of workers](#coordinating-workers).
+
+For guidance on setting up workers, see the [worker documentation](../../workers.md).
+
+---
+### `worker_app`
+
+The type of worker. The currently available worker applications are listed
+in [worker documentation](../../workers.md#available-worker-applications).
+
+The most common worker is the
+[`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker).
+
+Example configuration:
+```yaml
+worker_app: synapse.app.generic_worker
+```
+---
+### `worker_name`
+
+A unique name for the worker. The name is used to address the worker in further
+configuration options and to identify it in log files. We strongly recommend
+giving each worker a unique `worker_name`.
+
+Example configuration:
+```yaml
+worker_name: generic_worker1
+```
+---
+### `worker_replication_host`
+
+The host of the HTTP replication endpoint that the worker should talk to on the
+main Synapse process. The main Synapse process defines this with a `replication`
+resource in its [`listeners` option](#listeners).
+
+Example configuration:
+```yaml
+worker_replication_host: 127.0.0.1
+```
+---
+### `worker_replication_http_port`
+
+The port of the HTTP replication endpoint that the worker should talk to on the
+main Synapse process. The main Synapse process defines this with a `replication`
+resource in its [`listeners` option](#listeners).
+
+Example configuration:
+```yaml
+worker_replication_http_port: 9093
+```
+---
+### `worker_replication_http_tls`
+
+Whether TLS should be used for talking to the HTTP replication port on the main
+Synapse process.
+The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
+has the `replication` resource enabled.
+
+**Please note:** by default, it is not safe to expose replication ports to the
+public Internet, even with TLS enabled.
+See [`worker_replication_secret`](#worker_replication_secret).
+
+Defaults to `false`.
+
+*Added in Synapse 1.72.0.*
+
+Example configuration:
+```yaml
+worker_replication_http_tls: true
+```
+---
+### `worker_listeners`
+
+A worker can handle HTTP requests. To do so, a `worker_listeners` option
+must be declared, in the same way as the [`listeners` option](#listeners)
+in the shared config.
+
+Workers declared in [`stream_writers`](#stream_writers) will need to include a
+`replication` listener here, in order to accept internal HTTP requests from
+other workers (see the second example below).
+
+Example configuration:
+```yaml
+worker_listeners:
+ - type: http
+ port: 8083
+ resources:
+ - names: [client, federation]
+```
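+
+A worker declared under [`stream_writers`](#stream_writers) might additionally
+expose a `replication` resource, for example (the port is an assumption):
+
+```yaml
+worker_listeners:
+ - type: http
+ port: 9092
+ resources:
+ - names: [replication]
+```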
+---
+### `worker_daemonize`
+
+Specifies whether the worker should be started as a daemon process.
+If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
+must be omitted or set to `false`.
+
+Defaults to `false`.
+
+Example configuration:
+```yaml
+worker_daemonize: true
+```
+---
+### `worker_pid_file`
+
+When running a worker as a daemon, we need a place to store the
+[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
+This option defines the location of that "pid file".
+
+This option is required if `worker_daemonize` is `true` and ignored
+otherwise. It has no default.
+
+See also the [`pid_file`](#pid_file) option for the main Synapse process.
+
+Example configuration:
+```yaml
+worker_pid_file: DATADIR/generic_worker1.pid
+```
+---
+### `worker_log_config`
+
+This option specifies a yaml python logging config file as described
+[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
+See also the [`log_config`](#log_config) option for the main Synapse process.
+
+Example configuration:
+```yaml
+worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
+```
+---
+## Background Updates
Configuration settings related to background updates.
---
diff --git a/docs/workers.md b/docs/workers.md
index 40b1852313..27e54c5846 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -88,11 +88,12 @@ shared configuration file.
### Shared configuration
Normally, only a couple of changes are needed to make an existing configuration
-file suitable for use with workers. First, you need to enable an "HTTP replication
-listener" for the main process; and secondly, you need to enable redis-based
-replication. Optionally, a shared secret can be used to authenticate HTTP
-traffic between workers. For example:
-
+file suitable for use with workers. First, you need to enable an
+["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
+for the main process; and secondly, you need to enable
+[redis-based replication](usage/configuration/config_documentation.md#redis).
+Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
+can be used to authenticate HTTP traffic between workers. For example:
```yaml
# extend the existing `listeners` section. This defines the ports that the
@@ -112,26 +113,30 @@ redis:
enabled: true
```
-See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option.
+See the [configuration manual](usage/configuration/config_documentation.md)
+for the full documentation of each option.
Under **no circumstances** should the replication listener be exposed to the
public internet; replication traffic is:
* always unencrypted
-* unauthenticated, unless `worker_replication_secret` is configured
+* unauthenticated, unless [`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret)
+ is configured
### Worker configuration
In the config file for each worker, you must specify:
- * The type of worker (`worker_app`). The currently available worker applications are listed below.
- * A unique name for the worker (`worker_name`).
+ * The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
+ The currently available worker applications are listed [below](#available-worker-applications).
+ * A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
- (`worker_replication_host` and `worker_replication_http_port`)
- * If handling HTTP requests, a `worker_listeners` option with an `http`
- listener, in the same way as the `listeners` option in the shared config.
- * If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
- the main process (`worker_main_http_uri`).
+ ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
+ [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
+ * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
+ with an `http` listener.
+ * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
+ the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
For example:
@@ -146,7 +151,6 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.
Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).
-
### Running Synapse with workers
Finally, you need to start your worker processes. This can be done with either
@@ -203,6 +207,8 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
^/_matrix/client/v1/rooms/.*/hierarchy$
+ ^/_matrix/client/(v1|unstable)/rooms/.*/relations/
+ ^/_matrix/client/v1/rooms/.*/threads$
^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
@@ -215,7 +221,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
# Encryption requests
- # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
^/_matrix/client/(r0|v3|unstable)/keys/query$
^/_matrix/client/(r0|v3|unstable)/keys/changes$
^/_matrix/client/(r0|v3|unstable)/keys/claim$
@@ -285,8 +290,10 @@ For multiple workers not handling the SSO endpoints properly, see
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#9427](https://github.com/matrix-org/synapse/issues/9427).
-Note that a HTTP listener with `client` and `federation` resources must be
-configured in the `worker_listeners` option in the worker config.
+Note that an [HTTP listener](usage/configuration/config_documentation.md#listeners)
+with `client` and `federation` `resources` must be configured in the
+[`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners)
+option in the worker config.
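+
+A minimal sketch of such a listener in the worker config (the port here is
+arbitrary):
+
+```yaml
+worker_listeners:
+  - type: http
+    port: 8083
+    resources:
+      - names: [client, federation]
+```
+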
#### Load balancing
@@ -297,9 +304,11 @@ may wish to run multiple groups of workers handling different endpoints so that
load balancing can be done in different ways.
For `/sync` and `/initialSync` requests it will be more efficient if all
-requests from a particular user are routed to a single instance. Extracting a
-user ID from the access token or `Authorization` header is currently left as an
-exercise for the reader. Admins may additionally wish to separate out `/sync`
+requests from a particular user are routed to a single instance. In nginx,
+for example, this can be done by hashing on the client IP
+(`hash $http_x_forwarded_for;`) or on the `Authorization` header
+(`hash $http_authorization consistent;`), which contains the user's access token.
+
+Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
`/initialSync`): requests without a `since` parameter are known as "initial
syncs", which happen when a user logs in on a new device and can be *very*
resource intensive, so
@@ -326,10 +335,12 @@ effects of bursts of events from that bridge on events sent by normal users.
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
-To enable this, the worker must have a HTTP replication listener configured,
-have a `worker_name` and be listed in the `instance_map` config. The same worker
-can handle multiple streams, but unless otherwise documented, each stream can only
-have a single writer.
+To enable this, the worker must have an
+[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
+have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
+config. The same worker can handle multiple streams, but unless otherwise documented,
+each stream can only have a single writer.
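+
+As a minimal sketch, an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
+entry maps a worker's name to the host and port of its replication listener
+(the name and port here are illustrative):
+
+```yaml
+instance_map:
+  event_persister1:
+    host: localhost
+    port: 8034
+```
+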
For example, to move event persistence off to a dedicated worker, the shared
configuration would include:
@@ -356,9 +367,26 @@ streams and the endpoints associated with them:
##### The `events` stream
-The `events` stream experimentally supports having multiple writers, where work
-is sharded between them by room ID. Note that you *must* restart all worker
-instances when adding or removing event persisters. An example `stream_writers`
+The `events` stream experimentally supports having multiple writer workers, where load
+is sharded between them by room ID. Each writer is called an _event persister_. They are
+responsible for
+- receiving new events,
+- linking them to those already in the room [DAG](development/room-dag-concepts.md),
+- persisting them to the DB, and finally
+- updating the events stream.
+
+Because load is sharded in this way, you *must* restart all worker instances when
+adding or removing event persisters.
+
+An `event_persister` should not be mistaken for an `event_creator`.
+An `event_creator` listens for requests from clients to create new events, and
+creates them. It then passes those events over HTTP replication to any configured
+event persisters (or to the main process if none are configured).
+
+Note that `event_creator`s and `event_persister`s are implemented using the same
+[`synapse.app.generic_worker`](#synapseappgeneric_worker).
+
+An example [`stream_writers`](usage/configuration/config_documentation.md#stream_writers)
configuration with multiple writers:
```yaml
@@ -410,18 +438,20 @@ the stream writer for the `presence` stream:
There is also support for moving background tasks to a separate
worker. Background tasks are run periodically or started via replication. Exactly
which tasks are configured to run depends on your Synapse configuration (e.g. if
-stats is enabled).
+stats is enabled). This worker doesn't handle any REST endpoints itself.
-To enable this, the worker must have a `worker_name` and can be configured to run
-background tasks. For example, to move background tasks to a dedicated worker,
-the shared configuration would include:
+To enable this, the worker must have a unique
+[`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and can be configured to run background tasks. For example, to move background tasks
+to a dedicated worker, the shared configuration would include:
```yaml
run_background_tasks_on: background_worker
```
-You might also wish to investigate the `update_user_directory_from_worker` and
-`media_instance_running_background_jobs` settings.
+You might also wish to investigate the
+[`update_user_directory_from_worker`](#updating-the-user-directory) and
+[`media_instance_running_background_jobs`](#synapseappmedia_repository) settings.
An example for a dedicated background worker instance:
@@ -457,8 +487,8 @@ worker application type.
#### Notifying Application Services
You can designate one generic worker to send output traffic to Application Services.
-
-Specify its name in the shared configuration as follows:
+This worker doesn't handle any REST endpoints itself, but you should specify its
+name in the shared configuration as follows:
```yaml
notify_appservices_from_worker: worker_name
@@ -474,18 +504,28 @@ worker application type.
### `synapse.app.pusher`
Handles sending push notifications to sygnal and email. Doesn't handle any
-REST endpoints itself, but you should set `start_pushers: False` in the
+REST endpoints itself, but you should set
+[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
shared configuration file to stop the main synapse sending push notifications.
-To run multiple instances at once the `pusher_instances` option should list all
-pusher instances by their worker name, e.g.:
+To run multiple instances at once the
+[`pusher_instances`](usage/configuration/config_documentation.md#pusher_instances)
+option should list all pusher instances by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name), e.g.:
```yaml
+start_pushers: false
pusher_instances:
- pusher_worker1
- pusher_worker2
```
+An example for a pusher instance:
+
+```yaml
+{{#include systemd-with-workers/workers/pusher_worker.yaml}}
+```
+
### `synapse.app.appservice`
@@ -502,20 +542,31 @@ Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.federation_sender`
Handles sending federation traffic to other servers. Doesn't handle any
-REST endpoints itself, but you should set `send_federation: False` in the
-shared configuration file to stop the main synapse sending this traffic.
+REST endpoints itself, but you should set
+[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
+in the shared configuration file to stop the main synapse sending this traffic.
If running multiple federation senders then you must list each
-instance in the `federation_sender_instances` option by their `worker_name`.
+instance in the
+[`federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances)
+option by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name).
All instances must be stopped and started when adding or removing instances.
For example:
```yaml
+send_federation: false
federation_sender_instances:
- federation_sender1
- federation_sender2
```
+An example for a federation sender instance:
+
+```yaml
+{{#include systemd-with-workers/workers/federation_sender.yaml}}
+```
+
### `synapse.app.media_repository`
Handles the media repository. It can handle all endpoints starting with:
@@ -531,21 +582,19 @@ Handles the media repository. It can handle all endpoints starting with:
^/_synapse/admin/v1/quarantine_media/.*$
^/_synapse/admin/v1/users/.*/media$
-You should also set `enable_media_repo: False` in the shared configuration
+You should also set
+[`enable_media_repo: False`](usage/configuration/config_documentation.md#enable_media_repo)
+in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository. Note that doing so will prevent the main process from being
able to handle the above endpoints.
-In the `media_repository` worker configuration file, configure the http listener to
+In the `media_repository` worker configuration file, configure the
+[HTTP listener](usage/configuration/config_documentation.md#listeners) to
expose the `media` resource. For example:
```yaml
-worker_listeners:
- - type: http
- port: 8085
- resources:
- - names:
- - media
+{{#include systemd-with-workers/workers/media_worker.yaml}}
```
Note that if running multiple media repositories they must be on the same server
diff --git a/mypy.ini b/mypy.ini
index 533ebd4997..72051fc5a0 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -11,6 +11,7 @@ warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
+strict_equality = True
files =
docker/,
@@ -56,7 +57,6 @@ exclude = (?x)
|tests/rest/media/v1/test_media_storage.py
|tests/server.py
|tests/server_notices/test_resource_limits_server_notices.py
- |tests/test_metrics.py
|tests/test_state.py
|tests/test_terms_auth.py
|tests/util/caches/test_cached_call.py
@@ -106,12 +106,21 @@ disallow_untyped_defs = False
[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True
+[mypy-tests.metrics.test_background_process_metrics]
+disallow_untyped_defs = True
+
+[mypy-tests.push.test_bulk_push_rule_evaluator]
+disallow_untyped_defs = True
+
[mypy-tests.test_server]
disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True
+[mypy-tests.storage.test_id_generators]
+disallow_untyped_defs = True
+
[mypy-tests.storage.test_profile]
disallow_untyped_defs = True
diff --git a/poetry.lock b/poetry.lock
index a106a972f1..5267ba1b36 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,34 +1,31 @@
[[package]]
name = "attrs"
-version = "21.4.0"
+version = "22.1.0"
description = "Classes Without Boilerplate"
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.5"
[package.extras]
-dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "sphinx", "sphinx-notfound-page", "zope.interface"]
+dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"]
docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
-tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "zope.interface"]
-tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six"]
+tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"]
+tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
[[package]]
-name = "authlib"
-version = "0.15.5"
-description = "The ultimate Python library in building OAuth and OpenID Connect servers."
+name = "Authlib"
+version = "1.1.0"
+description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
category = "main"
optional = true
python-versions = "*"
[package.dependencies]
-cryptography = "*"
-
-[package.extras]
-client = ["requests"]
+cryptography = ">=3.2"
[[package]]
name = "automat"
-version = "20.2.0"
+version = "22.10.0"
description = "Self-service finite-state machines for the programmer on the go."
category = "main"
optional = false
@@ -43,34 +40,30 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
[[package]]
name = "bcrypt"
-version = "3.2.0"
+version = "4.0.1"
description = "Modern password hashing for your software and your servers"
category = "main"
optional = false
python-versions = ">=3.6"
-[package.dependencies]
-cffi = ">=1.1"
-six = ">=1.4.1"
-
[package.extras]
tests = ["pytest (>=3.2.1,!=3.3.0)"]
typecheck = ["mypy"]
[[package]]
name = "black"
-version = "22.3.0"
+version = "22.10.0"
description = "The uncompromising code formatter."
category = "dev"
optional = false
-python-versions = ">=3.6.2"
+python-versions = ">=3.7"
[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
pathspec = ">=0.9.0"
platformdirs = ">=2"
-tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""}
typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
@@ -82,20 +75,23 @@ uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "bleach"
-version = "4.1.0"
+version = "5.0.1"
description = "An easy safelist-based HTML-sanitizing tool."
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-packaging = "*"
six = ">=1.9.0"
webencodings = "*"
+[package.extras]
+css = ["tinycss2 (>=1.1.0,<1.2)"]
+dev = ["Sphinx (==4.3.2)", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "hashin (==0.17.0)", "mypy (==0.961)", "pip-tools (==6.6.2)", "pytest (==7.1.2)", "tox (==3.25.0)", "twine (==4.0.1)", "wheel (==0.37.1)"]
+
[[package]]
name = "canonicaljson"
-version = "1.6.3"
+version = "1.6.4"
description = "Canonical JSON"
category = "main"
optional = false
@@ -140,7 +136,7 @@ unicode_backport = ["unicodedata2"]
[[package]]
name = "click"
-version = "8.1.1"
+version = "8.1.3"
description = "Composable command line interface toolkit"
category = "dev"
optional = false
@@ -190,7 +186,7 @@ python-versions = "*"
[[package]]
name = "cryptography"
-version = "36.0.1"
+version = "38.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
category = "main"
optional = false
@@ -200,12 +196,12 @@ python-versions = ">=3.6"
cffi = ">=1.12"
[package.extras]
-docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx_rtd_theme"]
+docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"]
docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"]
-sdist = ["setuptools_rust (>=0.11.4)"]
+sdist = ["setuptools-rust (>=0.11.4)"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
+test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
[[package]]
name = "defusedxml"
@@ -250,36 +246,36 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910
[[package]]
name = "flake8"
-version = "4.0.1"
+version = "5.0.4"
description = "the modular source code checker: pep8 pyflakes and co"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.6.1"
[package.dependencies]
-importlib-metadata = {version = "<4.3", markers = "python_version < \"3.8\""}
-mccabe = ">=0.6.0,<0.7.0"
-pycodestyle = ">=2.8.0,<2.9.0"
-pyflakes = ">=2.4.0,<2.5.0"
+importlib-metadata = {version = ">=1.1.0,<4.3", markers = "python_version < \"3.8\""}
+mccabe = ">=0.7.0,<0.8.0"
+pycodestyle = ">=2.9.0,<2.10.0"
+pyflakes = ">=2.5.0,<2.6.0"
[[package]]
name = "flake8-bugbear"
-version = "21.3.2"
+version = "22.10.27"
description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle."
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
attrs = ">=19.2.0"
flake8 = ">=3.0.0"
[package.extras]
-dev = ["black", "coverage", "hypothesis", "hypothesmith"]
+dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "tox"]
[[package]]
name = "flake8-comprehensions"
-version = "3.8.0"
+version = "3.10.1"
description = "A flake8 plugin to help you write better list/set/dict comprehensions."
category = "dev"
optional = false
@@ -291,7 +287,7 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
[[package]]
name = "frozendict"
-version = "2.3.3"
+version = "2.3.4"
description = "A simple immutable dictionary"
category = "main"
optional = false
@@ -310,7 +306,7 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
-version = "3.1.27"
+version = "3.1.29"
description = "GitPython is a python library used to interact with Git repositories"
category = "dev"
optional = false
@@ -336,17 +332,17 @@ grpc = ["grpcio (>=1.0.0,<2.0.0dev)"]
[[package]]
name = "grpcio"
-version = "1.48.1"
+version = "1.50.0"
description = "HTTP/2-based RPC framework"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
six = ">=1.5.2"
[package.extras]
-protobuf = ["grpcio-tools (>=1.48.1)"]
+protobuf = ["grpcio-tools (>=1.50.0)"]
[[package]]
name = "hiredis"
@@ -369,7 +365,7 @@ idna = ">=2.5"
[[package]]
name = "idna"
-version = "3.3"
+version = "3.4"
description = "Internationalized Domain Names in Applications (IDNA)"
category = "main"
optional = false
@@ -427,15 +423,16 @@ scripts = ["click (>=6.0)", "twisted (>=16.4.0)"]
[[package]]
name = "isort"
-version = "5.7.0"
+version = "5.10.1"
description = "A Python utility / library to sort Python imports."
category = "dev"
optional = false
-python-versions = ">=3.6,<4.0"
+python-versions = ">=3.6.1,<4.0"
[package.extras]
colors = ["colorama (>=0.4.3,<0.5.0)"]
pipfile_deprecated_finder = ["pipreqs", "requirementslib"]
+plugins = ["setuptools"]
requirements_deprecated_finder = ["pip-api", "pipreqs"]
[[package]]
@@ -452,11 +449,11 @@ trio = ["async_generator", "trio"]
[[package]]
name = "jinja2"
-version = "3.0.3"
+version = "3.1.2"
description = "A very fast and expressive template engine."
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
MarkupSafe = ">=2.0"
@@ -466,7 +463,7 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
-version = "4.4.0"
+version = "4.17.0"
description = "An implementation of JSON Schema validation for Python"
category = "main"
optional = false
@@ -476,12 +473,13 @@ python-versions = ">=3.7"
attrs = ">=17.4.0"
importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
-format_nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
[[package]]
name = "keyring"
@@ -568,27 +566,27 @@ dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "ma
[[package]]
name = "mccabe"
-version = "0.6.1"
+version = "0.7.0"
description = "McCabe checker, plugin for flake8"
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
[[package]]
name = "msgpack"
-version = "1.0.3"
-description = "MessagePack (de)serializer."
+version = "1.0.4"
+description = "MessagePack serializer"
category = "main"
optional = false
python-versions = "*"
[[package]]
name = "mypy"
-version = "0.950"
+version = "0.981"
description = "Optional static typing for Python"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
mypy-extensions = ">=0.4.3"
@@ -611,14 +609,14 @@ python-versions = "*"
[[package]]
name = "mypy-zope"
-version = "0.3.7"
+version = "0.3.11"
description = "Plugin for mypy to support zope interfaces"
category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
-mypy = "0.950"
+mypy = "0.981"
"zope.interface" = "*"
"zope.schema" = "*"
@@ -635,35 +633,35 @@ python-versions = "*"
[[package]]
name = "opentelemetry-api"
-version = "1.12.0"
+version = "1.13.0"
description = "OpenTelemetry Python API"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-Deprecated = ">=1.2.6"
+deprecated = ">=1.2.6"
setuptools = ">=16.0"
[[package]]
name = "opentelemetry-exporter-jaeger"
-version = "1.12.0"
+version = "1.13.0"
description = "Jaeger Exporters for OpenTelemetry"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-opentelemetry-exporter-jaeger-proto-grpc = "1.12.0"
-opentelemetry-exporter-jaeger-thrift = "1.12.0"
+opentelemetry-exporter-jaeger-proto-grpc = "1.13.0"
+opentelemetry-exporter-jaeger-thrift = "1.13.0"
[[package]]
name = "opentelemetry-exporter-jaeger-proto-grpc"
-version = "1.12.0"
+version = "1.13.0"
description = "Jaeger Protobuf Exporter for OpenTelemetry"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
googleapis-common-protos = ">=1.52,<1.56.3"
@@ -673,11 +671,11 @@ opentelemetry-sdk = ">=1.11,<2.0"
[[package]]
name = "opentelemetry-exporter-jaeger-thrift"
-version = "1.12.0"
+version = "1.13.0"
description = "Jaeger Thrift Exporter for OpenTelemetry"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
opentelemetry-api = ">=1.3,<2.0"
@@ -686,25 +684,25 @@ thrift = ">=0.10.0"
[[package]]
name = "opentelemetry-sdk"
-version = "1.12.0"
+version = "1.13.0"
description = "OpenTelemetry Python SDK"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-opentelemetry-api = "1.12.0"
-opentelemetry-semantic-conventions = "0.33b0"
+opentelemetry-api = "1.13.0"
+opentelemetry-semantic-conventions = "0.34b0"
setuptools = ">=16.0"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.33b0"
+version = "0.34b0"
description = "OpenTelemetry Semantic Conventions"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[[package]]
name = "packaging"
@@ -738,7 +736,7 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
[[package]]
name = "phonenumbers"
-version = "8.12.44"
+version = "8.12.56"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
@@ -746,12 +744,16 @@ python-versions = "*"
[[package]]
name = "pillow"
-version = "9.0.1"
+version = "9.3.0"
description = "Python Imaging Library (Fork)"
category = "main"
optional = false
python-versions = ">=3.7"
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+
[[package]]
name = "pkginfo"
version = "1.8.2"
@@ -763,6 +765,14 @@ python-versions = "*"
[package.extras]
testing = ["coverage", "nose"]
+[[package]]
+name = "pkgutil_resolve_name"
+version = "1.3.10"
+description = "Resolve a name to an object."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
[[package]]
name = "platformdirs"
version = "2.5.1"
@@ -777,7 +787,7 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock
[[package]]
name = "prometheus-client"
-version = "0.14.0"
+version = "0.15.0"
description = "Python client for the Prometheus monitoring system."
category = "main"
optional = false
@@ -788,7 +798,7 @@ twisted = ["twisted"]
[[package]]
name = "protobuf"
-version = "3.20.1"
+version = "3.20.3"
description = "Protocol Buffers"
category = "main"
optional = true
@@ -796,7 +806,7 @@ python-versions = ">=3.7"
[[package]]
name = "psycopg2"
-version = "2.9.3"
+version = "2.9.5"
description = "psycopg2 - Python-PostgreSQL Database Adapter"
category = "main"
optional = true
@@ -846,11 +856,11 @@ pyasn1 = ">=0.4.6,<0.5.0"
[[package]]
name = "pycodestyle"
-version = "2.8.0"
+version = "2.9.1"
description = "Python style guide checker"
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.6"
[[package]]
name = "pycparser"
@@ -862,14 +872,14 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
[[package]]
name = "pydantic"
-version = "1.9.1"
+version = "1.10.2"
description = "Data validation and settings management using python type hints"
category = "main"
optional = false
-python-versions = ">=3.6.1"
+python-versions = ">=3.7"
[package.dependencies]
-typing-extensions = ">=3.7.4.3"
+typing-extensions = ">=4.1.0"
[package.extras]
dotenv = ["python-dotenv (>=0.10.4)"]
@@ -877,15 +887,15 @@ email = ["email-validator (>=1.0.3)"]
[[package]]
name = "pyflakes"
-version = "2.4.0"
+version = "2.5.0"
description = "passive checker of Python programs"
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+python-versions = ">=3.6"
[[package]]
name = "pygithub"
-version = "1.55"
+version = "1.56"
description = "Use the full Github API v3"
category = "dev"
optional = false
@@ -993,14 +1003,14 @@ python-versions = ">=3.7"
[[package]]
name = "pysaml2"
-version = "7.1.2"
+version = "7.2.1"
description = "Python implementation of SAML Version 2 Standard"
category = "main"
optional = true
python-versions = "<4,>=3.6"
[package.dependencies]
-cryptography = ">=1.4"
+cryptography = ">=3.1"
defusedxml = "*"
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
pyOpenSSL = "*"
@@ -1051,11 +1061,11 @@ python-versions = ">=3.6"
[[package]]
name = "readme-renderer"
-version = "33.0"
+version = "37.2"
description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
bleach = ">=2.1.0"
@@ -1105,6 +1115,22 @@ python-versions = ">=3.7"
[package.extras]
idna2008 = ["idna"]
+[[package]]
+name = "rich"
+version = "12.6.0"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+category = "dev"
+optional = false
+python-versions = ">=3.6.3,<4.0.0"
+
+[package.dependencies]
+commonmark = ">=0.9.0,<0.10.0"
+pygments = ">=2.6.0,<3.0.0"
+typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
+
[[package]]
name = "secretstorage"
version = "3.3.1"
@@ -1131,7 +1157,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "1.5.11"
+version = "1.10.1"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@@ -1139,7 +1165,7 @@ python-versions = "*"
[package.dependencies]
certifi = "*"
-urllib3 = ">=1.10.0"
+urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
[package.extras]
aiohttp = ["aiohttp (>=3.5)"]
@@ -1149,6 +1175,7 @@ celery = ["celery (>=3)"]
chalice = ["chalice (>=1.16.0)"]
django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
+fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)"]
httpx = ["httpx (>=0.16.0)"]
pure_eval = ["asttokens", "executing", "pure-eval"]
@@ -1157,6 +1184,7 @@ quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
rq = ["rq (>=0.6)"]
sanic = ["sanic (>=0.8)"]
sqlalchemy = ["sqlalchemy (>=1.2)"]
+starlette = ["starlette (>=0.19.1)"]
tornado = ["tornado (>=5)"]
[[package]]
@@ -1195,7 +1223,7 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (
[[package]]
name = "setuptools-rust"
-version = "1.5.1"
+version = "1.5.2"
description = "Setuptools Rust extension plugin"
category = "main"
optional = false
@@ -1307,22 +1335,6 @@ tomli = {version = "*", markers = "python_version >= \"3.6\""}
[package.extras]
dev = ["packaging"]
-[[package]]
-name = "tqdm"
-version = "4.63.0"
-description = "Fast, Extensible Progress Meter"
-category = "dev"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-
-[package.extras]
-dev = ["py-make (>=0.1.0)", "twine", "wheel"]
-notebook = ["ipywidgets (>=6)"]
-telegram = ["requests"]
-
[[package]]
name = "treq"
version = "22.2.0"
@@ -1344,27 +1356,26 @@ docs = ["sphinx (>=1.4.8)"]
[[package]]
name = "twine"
-version = "3.8.0"
+version = "4.0.1"
description = "Collection of utilities for publishing packages on PyPI"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-colorama = ">=0.4.3"
importlib-metadata = ">=3.6"
keyring = ">=15.1"
pkginfo = ">=1.8.1"
-readme-renderer = ">=21.0"
+readme-renderer = ">=35.0"
requests = ">=2.20"
requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0"
rfc3986 = ">=1.4.0"
-tqdm = ">=4.14"
+rich = ">=12.0.0"
urllib3 = ">=1.26.0"
[[package]]
-name = "Twisted"
-version = "22.8.0"
+name = "twisted"
+version = "22.10.0"
description = "An asynchronous networking framework written in Python"
category = "main"
optional = false
@@ -1384,21 +1395,21 @@ typing-extensions = ">=3.6.5"
"zope.interface" = ">=4.4.2"
[package.extras]
-all_non_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+all-non-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
conch_nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
contextvars = ["contextvars (>=2.4,<3)"]
-dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.7.0,<22.8.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)"]
-dev_release = ["pydoctor (>=22.7.0,<22.8.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)"]
-gtk_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)"]
+dev-release = ["pydoctor (>=22.9.0,<22.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)"]
+gtk-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
-macos_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
-mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.7.0,<22.8.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
-osx_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+macos-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
+osx-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
-test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)"]
+test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.0,<7.0)"]
tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
-windows_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
[[package]]
name = "twisted-iocpsupport"
@@ -1430,7 +1441,7 @@ python-versions = ">=3.6"
[[package]]
name = "types-bleach"
-version = "4.1.4"
+version = "5.0.3"
description = "Typing stubs for bleach"
category = "dev"
optional = false
@@ -1474,7 +1485,7 @@ python-versions = "*"
[[package]]
name = "types-jsonschema"
-version = "4.4.6"
+version = "4.17.0.1"
description = "Typing stubs for jsonschema"
category = "dev"
optional = false
@@ -1482,7 +1493,7 @@ python-versions = "*"
[[package]]
name = "types-pillow"
-version = "9.0.15"
+version = "9.2.2.1"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
@@ -1490,7 +1501,7 @@ python-versions = "*"
[[package]]
name = "types-psycopg2"
-version = "2.9.9"
+version = "2.9.21.1"
description = "Typing stubs for psycopg2"
category = "dev"
optional = false
@@ -1498,7 +1509,7 @@ python-versions = "*"
[[package]]
name = "types-pyopenssl"
-version = "22.0.0"
+version = "22.1.0.2"
description = "Typing stubs for pyOpenSSL"
category = "dev"
optional = false
@@ -1509,7 +1520,7 @@ types-cryptography = "*"
[[package]]
name = "types-pyyaml"
-version = "6.0.4"
+version = "6.0.12.2"
description = "Typing stubs for PyYAML"
category = "dev"
optional = false
@@ -1517,7 +1528,7 @@ python-versions = "*"
[[package]]
name = "types-requests"
-version = "2.27.11"
+version = "2.28.11.2"
description = "Typing stubs for requests"
category = "dev"
optional = false
@@ -1528,7 +1539,7 @@ types-urllib3 = "<1.27"
[[package]]
name = "types-setuptools"
-version = "57.4.9"
+version = "65.5.0.3"
description = "Typing stubs for setuptools"
category = "dev"
optional = false
@@ -1544,11 +1555,11 @@ python-versions = "*"
[[package]]
name = "typing-extensions"
-version = "4.1.1"
-description = "Backported and Experimental Type Hints for Python 3.6+"
+version = "4.4.0"
+description = "Backported and Experimental Type Hints for Python 3.7+"
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[[package]]
name = "unpaddedbase64"
@@ -1560,15 +1571,15 @@ python-versions = ">=3.6,<4.0"
[[package]]
name = "urllib3"
-version = "1.26.8"
+version = "1.26.12"
description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
[package.extras]
-brotli = ["brotlipy (>=0.6.0)"]
-secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)"]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
@@ -1581,7 +1592,7 @@ python-versions = "*"
[[package]]
name = "wrapt"
-version = "1.13.3"
+version = "1.14.1"
description = "Module for decorators, wrappers and monkey patching."
category = "main"
optional = false
@@ -1665,7 +1676,7 @@ test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras]
all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "opentelemetry-api", "opentelemetry-sdk", "opentelemetry-exporter-jaeger", "txredisapi", "hiredis", "Pympler"]
-cache_memory = ["Pympler"]
+cache-memory = ["Pympler"]
jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
oidc = ["authlib"]
@@ -1676,70 +1687,79 @@ saml2 = ["pysaml2"]
sentry = ["sentry-sdk"]
systemd = ["systemd-python"]
test = ["parameterized", "idna"]
-url_preview = ["lxml"]
+url-preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
-content-hash = "8697b449d4c7eb1eee9e10d5ff030d2a973576c5a9c8ad64fed9337489d5b37a"
+content-hash = "27811bd21d56ceeb0f68ded5a00375efcd1a004928f0736f5b02927ce8594cb0"
[metadata.files]
attrs = [
- {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
- {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
+ {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"},
+ {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"},
]
-authlib = [
- {file = "Authlib-0.15.5-py2.py3-none-any.whl", hash = "sha256:ecf4a7a9f2508c0bb07e93a752dd3c495cfaffc20e864ef0ffc95e3f40d2abaf"},
- {file = "Authlib-0.15.5.tar.gz", hash = "sha256:b83cf6360c8e92b0e9df0d1f32d675790bcc4e3c03977499b1eed24dcdef4252"},
+Authlib = [
+ {file = "Authlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:be4b6a1dea51122336c210a6945b27a105b9ac572baffd15b07bcff4376c1523"},
+ {file = "Authlib-1.1.0.tar.gz", hash = "sha256:0a270c91409fc2b7b0fbee6996e09f2ee3187358762111a9a4225c874b94e891"},
]
automat = [
- {file = "Automat-20.2.0-py2.py3-none-any.whl", hash = "sha256:b6feb6455337df834f6c9962d6ccf771515b7d939bca142b29c20c2376bc6111"},
- {file = "Automat-20.2.0.tar.gz", hash = "sha256:7979803c74610e11ef0c0d68a2942b152df52da55336e0c9d58daf1831cbdf33"},
+ {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"},
+ {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"},
]
bcrypt = [
- {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b589229207630484aefe5899122fb938a5b017b0f4349f769b8c13e78d99a8fd"},
- {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux1_x86_64.whl", hash = "sha256:63d4e3ff96188e5898779b6057878fecf3f11cfe6ec3b313ea09955d587ec7a7"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux2010_x86_64.whl", hash = "sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux2014_aarch64.whl", hash = "sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a0584a92329210fcd75eb8a3250c5a941633f8bfaf2a18f81009b097732839b7"},
- {file = "bcrypt-3.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:56e5da069a76470679f312a7d3d23deb3ac4519991a0361abc11da837087b61d"},
- {file = "bcrypt-3.2.0-cp36-abi3-win32.whl", hash = "sha256:a67fb841b35c28a59cebed05fbd3e80eea26e6d75851f0574a9273c80f3e9b55"},
- {file = "bcrypt-3.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:81fec756feff5b6818ea7ab031205e1d323d8943d237303baca2c5f9c7846f34"},
- {file = "bcrypt-3.2.0.tar.gz", hash = "sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29"},
+ {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"},
+ {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"},
+ {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"},
+ {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"},
+ {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"},
+ {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"},
+ {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"},
+ {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"},
+ {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"},
+ {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"},
+ {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"},
+ {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"},
+ {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"},
+ {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"},
+ {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"},
]
black = [
- {file = "black-22.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2497f9c2386572e28921fa8bec7be3e51de6801f7459dffd6e62492531c47e09"},
- {file = "black-22.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5795a0375eb87bfe902e80e0c8cfaedf8af4d49694d69161e5bd3206c18618bb"},
- {file = "black-22.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3556168e2e5c49629f7b0f377070240bd5511e45e25a4497bb0073d9dda776a"},
- {file = "black-22.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67c8301ec94e3bcc8906740fe071391bce40a862b7be0b86fb5382beefecd968"},
- {file = "black-22.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:fd57160949179ec517d32ac2ac898b5f20d68ed1a9c977346efbac9c2f1e779d"},
- {file = "black-22.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc1e1de68c8e5444e8f94c3670bb48a2beef0e91dddfd4fcc29595ebd90bb9ce"},
- {file = "black-22.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2fc92002d44746d3e7db7cf9313cf4452f43e9ea77a2c939defce3b10b5c82"},
- {file = "black-22.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:a6342964b43a99dbc72f72812bf88cad8f0217ae9acb47c0d4f141a6416d2d7b"},
- {file = "black-22.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:328efc0cc70ccb23429d6be184a15ce613f676bdfc85e5fe8ea2a9354b4e9015"},
- {file = "black-22.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06f9d8846f2340dfac80ceb20200ea5d1b3f181dd0556b47af4e8e0b24fa0a6b"},
- {file = "black-22.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4efa5fad66b903b4a5f96d91461d90b9507a812b3c5de657d544215bb7877a"},
- {file = "black-22.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8477ec6bbfe0312c128e74644ac8a02ca06bcdb8982d4ee06f209be28cdf163"},
- {file = "black-22.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:637a4014c63fbf42a692d22b55d8ad6968a946b4a6ebc385c5505d9625b6a464"},
- {file = "black-22.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:863714200ada56cbc366dc9ae5291ceb936573155f8bf8e9de92aef51f3ad0f0"},
- {file = "black-22.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10dbe6e6d2988049b4655b2b739f98785a884d4d6b85bc35133a8fb9a2233176"},
- {file = "black-22.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:cee3e11161dde1b2a33a904b850b0899e0424cc331b7295f2a9698e79f9a69a0"},
- {file = "black-22.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5891ef8abc06576985de8fa88e95ab70641de6c1fca97e2a15820a9b69e51b20"},
- {file = "black-22.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:30d78ba6bf080eeaf0b7b875d924b15cd46fec5fd044ddfbad38c8ea9171043a"},
- {file = "black-22.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee8f1f7228cce7dffc2b464f07ce769f478968bfb3dd1254a4c2eeed84928aad"},
- {file = "black-22.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee227b696ca60dd1c507be80a6bc849a5a6ab57ac7352aad1ffec9e8b805f21"},
- {file = "black-22.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9b542ced1ec0ceeff5b37d69838106a6348e60db7b8fdd245294dc1d26136265"},
- {file = "black-22.3.0-py3-none-any.whl", hash = "sha256:bc58025940a896d7e5356952228b68f793cf5fcb342be703c3a2669a1488cb72"},
- {file = "black-22.3.0.tar.gz", hash = "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79"},
+ {file = "black-22.10.0-1fixedarch-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:5cc42ca67989e9c3cf859e84c2bf014f6633db63d1cbdf8fdb666dcd9e77e3fa"},
+ {file = "black-22.10.0-1fixedarch-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:5d8f74030e67087b219b032aa33a919fae8806d49c867846bfacde57f43972ef"},
+ {file = "black-22.10.0-1fixedarch-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:197df8509263b0b8614e1df1756b1dd41be6738eed2ba9e9769f3880c2b9d7b6"},
+ {file = "black-22.10.0-1fixedarch-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:2644b5d63633702bc2c5f3754b1b475378fbbfb481f62319388235d0cd104c2d"},
+ {file = "black-22.10.0-1fixedarch-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:e41a86c6c650bcecc6633ee3180d80a025db041a8e2398dcc059b3afa8382cd4"},
+ {file = "black-22.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2039230db3c6c639bd84efe3292ec7b06e9214a2992cd9beb293d639c6402edb"},
+ {file = "black-22.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ff67aec0a47c424bc99b71005202045dc09270da44a27848d534600ac64fc7"},
+ {file = "black-22.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:819dc789f4498ecc91438a7de64427c73b45035e2e3680c92e18795a839ebb66"},
+ {file = "black-22.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5b9b29da4f564ba8787c119f37d174f2b69cdfdf9015b7d8c5c16121ddc054ae"},
+ {file = "black-22.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b49776299fece66bffaafe357d929ca9451450f5466e997a7285ab0fe28e3b"},
+ {file = "black-22.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:21199526696b8f09c3997e2b4db8d0b108d801a348414264d2eb8eb2532e540d"},
+ {file = "black-22.10.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e464456d24e23d11fced2bc8c47ef66d471f845c7b7a42f3bd77bf3d1789650"},
+ {file = "black-22.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9311e99228ae10023300ecac05be5a296f60d2fd10fff31cf5c1fa4ca4b1988d"},
+ {file = "black-22.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fba8a281e570adafb79f7755ac8721b6cf1bbf691186a287e990c7929c7692ff"},
+ {file = "black-22.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:915ace4ff03fdfff953962fa672d44be269deb2eaf88499a0f8805221bc68c87"},
+ {file = "black-22.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:444ebfb4e441254e87bad00c661fe32df9969b2bf224373a448d8aca2132b395"},
+ {file = "black-22.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:974308c58d057a651d182208a484ce80a26dac0caef2895836a92dd6ebd725e0"},
+ {file = "black-22.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ef3925f30e12a184889aac03d77d031056860ccae8a1e519f6cbb742736383"},
+ {file = "black-22.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:432247333090c8c5366e69627ccb363bc58514ae3e63f7fc75c54b1ea80fa7de"},
+ {file = "black-22.10.0-py3-none-any.whl", hash = "sha256:c957b2b4ea88587b46cf49d1dc17681c1e672864fd7af32fc1e9664d572b3458"},
+ {file = "black-22.10.0.tar.gz", hash = "sha256:f513588da599943e0cde4e32cc9879e825d58720d6557062d1098c5ad80080e1"},
]
bleach = [
- {file = "bleach-4.1.0-py2.py3-none-any.whl", hash = "sha256:4d2651ab93271d1129ac9cbc679f524565cc8a1b791909c4a51eac4446a15994"},
- {file = "bleach-4.1.0.tar.gz", hash = "sha256:0900d8b37eba61a802ee40ac0061f8c2b5dee29c1927dd1d233e075ebf5a71da"},
+ {file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"},
+ {file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"},
]
canonicaljson = [
- {file = "canonicaljson-1.6.3-py3-none-any.whl", hash = "sha256:6ba3cf1702fa3d209b3e915a4e9a3e4ef194f1e8fca189c1f0b7a2a7686a27e6"},
- {file = "canonicaljson-1.6.3.tar.gz", hash = "sha256:ca59760bc274a899a0da75809d6909ae43e5123381fd6ef040a44d1952c0b448"},
+ {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"},
+ {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"},
]
certifi = [
{file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
@@ -1802,8 +1822,8 @@ charset-normalizer = [
{file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"},
]
click = [
- {file = "click-8.1.1-py3-none-any.whl", hash = "sha256:5e0d195c2067da3136efb897449ec1e9e6c98282fbf30d7f9e164af9be901a6b"},
- {file = "click-8.1.1.tar.gz", hash = "sha256:7ab900e38149c9872376e8f9b5986ddcaf68c0f413cf73678a0bca5547e6f976"},
+ {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
+ {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
]
click-default-group = [
{file = "click-default-group-1.2.2.tar.gz", hash = "sha256:d9560e8e8dfa44b3562fbc9425042a0fd6d21956fcc2db0077f63f34253ab904"},
@@ -1821,26 +1841,32 @@ constantly = [
{file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"},
]
cryptography = [
- {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:73bc2d3f2444bcfeac67dd130ff2ea598ea5f20b40e36d19821b4df8c9c5037b"},
- {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:2d87cdcb378d3cfed944dac30596da1968f88fb96d7fc34fdae30a99054b2e31"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74d6c7e80609c0f4c2434b97b80c7f8fdfaa072ca4baab7e239a15d6d70ed73a"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:6c0c021f35b421ebf5976abf2daacc47e235f8b6082d3396a2fe3ccd537ab173"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59a9d55027a8b88fd9fd2826c4392bd487d74bf628bb9d39beecc62a644c12"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a817b961b46894c5ca8a66b599c745b9a3d9f822725221f0e0fe49dc043a3a3"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:94ae132f0e40fe48f310bba63f477f14a43116f05ddb69d6fa31e93f05848ae2"},
- {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7be0eec337359c155df191d6ae00a5e8bbb63933883f4f5dffc439dac5348c3f"},
- {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e0344c14c9cb89e76eb6a060e67980c9e35b3f36691e15e1b7a9e58a0a6c6dc3"},
- {file = "cryptography-36.0.1-cp36-abi3-win32.whl", hash = "sha256:4caa4b893d8fad33cf1964d3e51842cd78ba87401ab1d2e44556826df849a8ca"},
- {file = "cryptography-36.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:391432971a66cfaf94b21c24ab465a4cc3e8bf4a939c1ca5c3e3a6e0abebdbcf"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb5829d027ff82aa872d76158919045a7c1e91fbf241aec32cb07956e9ebd3c9"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc15b1c22e55c4d5566e3ca4db8689470a0ca2babef8e3a9ee057a8b82ce4b1"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:596f3cd67e1b950bc372c33f1a28a0692080625592ea6392987dba7f09f17a94"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:30ee1eb3ebe1644d1c3f183d115a8c04e4e603ed6ce8e394ed39eea4a98469ac"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec63da4e7e4a5f924b90af42eddf20b698a70e58d86a72d943857c4c6045b3ee"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca238ceb7ba0bdf6ce88c1b74a87bffcee5afbfa1e41e173b1ceb095b39add46"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:ca28641954f767f9822c24e927ad894d45d5a1e501767599647259cbf030b903"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:39bdf8e70eee6b1c7b289ec6e5d84d49a6bfa11f8b8646b5b3dfe41219153316"},
- {file = "cryptography-36.0.1.tar.gz", hash = "sha256:53e5c1dc3d7a953de055d77bef2ff607ceef7a2aac0353b5d630ab67f7423638"},
+ {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:984fe150f350a3c91e84de405fe49e688aa6092b3525f407a18b9646f6612320"},
+ {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:ed7b00096790213e09eb11c97cc6e2b757f15f3d2f85833cd2d3ec3fe37c1722"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bbf203f1a814007ce24bd4d51362991d5cb90ba0c177a9c08825f2cc304d871f"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554bec92ee7d1e9d10ded2f7e92a5d70c1f74ba9524947c0ba0c850c7b011828"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b52c9e5f8aa2b802d48bd693190341fae201ea51c7a167d69fc48b60e8a959"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:728f2694fa743a996d7784a6194da430f197d5c58e2f4e278612b359f455e4a2"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dfb4f4dd568de1b6af9f4cda334adf7d72cf5bc052516e1b2608b683375dd95c"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5419a127426084933076132d317911e3c6eb77568a1ce23c3ac1e12d111e61e0"},
+ {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9b24bcff7853ed18a63cfb0c2b008936a9554af24af2fb146e16d8e1aed75748"},
+ {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:25c1d1f19729fb09d42e06b4bf9895212292cb27bb50229f5aa64d039ab29146"},
+ {file = "cryptography-38.0.3-cp36-abi3-win32.whl", hash = "sha256:7f836217000342d448e1c9a342e9163149e45d5b5eca76a30e84503a5a96cab0"},
+ {file = "cryptography-38.0.3-cp36-abi3-win_amd64.whl", hash = "sha256:c46837ea467ed1efea562bbeb543994c2d1f6e800785bd5a2c98bc096f5cb220"},
+ {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06fc3cc7b6f6cca87bd56ec80a580c88f1da5306f505876a71c8cfa7050257dd"},
+ {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:65535bc550b70bd6271984d9863a37741352b4aad6fb1b3344a54e6950249b55"},
+ {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5e89468fbd2fcd733b5899333bc54d0d06c80e04cd23d8c6f3e0542358c6060b"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6ab9516b85bebe7aa83f309bacc5f44a61eeb90d0b4ec125d2d003ce41932d36"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:068147f32fa662c81aebab95c74679b401b12b57494872886eb5c1139250ec5d"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:402852a0aea73833d982cabb6d0c3bb582c15483d29fb7085ef2c42bfa7e38d7"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b1b35d9d3a65542ed2e9d90115dfd16bbc027b3f07ee3304fc83580f26e43249"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6addc3b6d593cd980989261dc1cce38263c76954d758c3c94de51f1e010c9a50"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:be243c7e2bfcf6cc4cb350c0d5cdf15ca6383bbcb2a8ef51d3c9411a9d4386f0"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cf5eefac2b52c10398a42765bfa981ce2372cbc0457e6bf9658f41ec3c41d8"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4e269dcd9b102c5a3d72be3c45d8ce20377b8076a43cbed6f660a1afe365e436"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8d41a46251bf0634e21fac50ffd643216ccecfaf3701a063257fe0b2be1b6548"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:785e4056b5a8b28f05a533fab69febf5004458e20dad7e2e13a3120d8ecec75a"},
+ {file = "cryptography-38.0.3.tar.gz", hash = "sha256:bfbe6ee19615b07a98b1d2287d6a6073f734735b49ee45b11324d85efc4d5cbd"},
]
defusedxml = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
@@ -1859,95 +1885,94 @@ elementpath = [
{file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"},
]
flake8 = [
- {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"},
- {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"},
+ {file = "flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"},
+ {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"},
]
flake8-bugbear = [
- {file = "flake8-bugbear-21.3.2.tar.gz", hash = "sha256:cadce434ceef96463b45a7c3000f23527c04ea4b531d16c7ac8886051f516ca0"},
- {file = "flake8_bugbear-21.3.2-py36.py37.py38-none-any.whl", hash = "sha256:5d6ccb0c0676c738a6e066b4d50589c408dcc1c5bf1d73b464b18b73cd6c05c2"},
+ {file = "flake8-bugbear-22.10.27.tar.gz", hash = "sha256:a6708608965c9e0de5fff13904fed82e0ba21ac929fe4896459226a797e11cd5"},
+ {file = "flake8_bugbear-22.10.27-py3-none-any.whl", hash = "sha256:6ad0ab754507319060695e2f2be80e6d8977cfcea082293089a9226276bd825d"},
]
flake8-comprehensions = [
- {file = "flake8-comprehensions-3.8.0.tar.gz", hash = "sha256:8e108707637b1d13734f38e03435984f6b7854fa6b5a4e34f93e69534be8e521"},
- {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"},
+ {file = "flake8-comprehensions-3.10.1.tar.gz", hash = "sha256:412052ac4a947f36b891143430fef4859705af11b2572fbb689f90d372cf26ab"},
+ {file = "flake8_comprehensions-3.10.1-py3-none-any.whl", hash = "sha256:d763de3c74bc18a79c039a7ec732e0a1985b0c79309ceb51e56401ad0a2cd44e"},
]
frozendict = [
- {file = "frozendict-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39942914c1217a5a49c7551495a103b3dbd216e19413687e003b859c6b0ebc12"},
- {file = "frozendict-2.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5589256058b31f2b91419fa30b8dc62dbdefe7710e688a3fd5b43849161eecc9"},
- {file = "frozendict-2.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:35eb7e59e287c41f4f712d4d3d2333354175b155d217b97c99c201d2d8920790"},
- {file = "frozendict-2.3.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:310aaf81793abf4f471895e6fe65e0e74a28a2aaf7b25c2ba6ccd4e35af06842"},
- {file = "frozendict-2.3.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c353c11010a986566a0cb37f9a783c560ffff7d67d5e7fd52221fb03757cdc43"},
- {file = "frozendict-2.3.3-cp36-cp36m-win_amd64.whl", hash = "sha256:15b5f82aad108125336593cec1b6420c638bf45f449c57e50949fc7654ea5a41"},
- {file = "frozendict-2.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a4737e5257756bd6b877504ff50185b705db577b5330d53040a6cf6417bb3cdb"},
- {file = "frozendict-2.3.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a14c11e33e8b0bc09e07bba3732c77a502c39edb8c3959fd9a0e490e031158"},
- {file = "frozendict-2.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:027952d1698ac9c766ef43711226b178cdd49d2acbdff396936639ad1d2a5615"},
- {file = "frozendict-2.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ef818d66c85098a37cf42509545a4ba7dd0c4c679d6262123a8dc14cc474bab7"},
- {file = "frozendict-2.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:812279f2b270c980112dc4e367b168054f937108f8044eced4199e0ab2945a37"},
- {file = "frozendict-2.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:c1fb7efbfebc2075f781be3d9774e4ba6ce4fc399148b02097f68d4b3c4bc00a"},
- {file = "frozendict-2.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0b46d4bf95bce843c0151959d54c3e5b8d0ce29cb44794e820b3ec980d63eee"},
- {file = "frozendict-2.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38c4660f37fcc70a32ff997fe58e40b3fcc60b2017b286e33828efaa16b01308"},
- {file = "frozendict-2.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:919e3609844fece11ab18bcbf28a3ed20f8108ad4149d7927d413687f281c6c9"},
- {file = "frozendict-2.3.3-py3-none-any.whl", hash = "sha256:f988b482d08972a196664718167a993a61c9e9f6fe7b0ca2443570b5f20ca44a"},
- {file = "frozendict-2.3.3.tar.gz", hash = "sha256:398539c52af3c647d103185bbaa1291679f0507ad035fe3bab2a8b0366d52cf1"},
+ {file = "frozendict-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a3b32d47282ae0098b9239a6d53ec539da720258bd762d62191b46f2f87c5fc"},
+ {file = "frozendict-2.3.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c9887179a245a66a50f52afa08d4d92ae0f269839fab82285c70a0fa0dd782"},
+ {file = "frozendict-2.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:b98a0d65a59af6da03f794f90b0c3085a7ee14e7bf8f0ef36b079ee8aa992439"},
+ {file = "frozendict-2.3.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:3d8042b7dab5e992e30889c9b71b781d5feef19b372d47d735e4d7d45846fd4a"},
+ {file = "frozendict-2.3.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a6d2e8b7cf6b6e5677a1a4b53b4073e5d9ec640d1db30dc679627668d25e90"},
+ {file = "frozendict-2.3.4-cp36-cp36m-win_amd64.whl", hash = "sha256:dbbe1339ac2646523e0bb00d1896085d1f70de23780e4927ca82b36ab8a044d3"},
+ {file = "frozendict-2.3.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95bac22f7f09d81f378f2b3f672b7a50a974ca180feae1507f5e21bc147e8bc8"},
+ {file = "frozendict-2.3.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae686722c144b333c4dbdc16323a5de11406d26b76d2be1cc175f90afacb5ba"},
+ {file = "frozendict-2.3.4-cp37-cp37m-win_amd64.whl", hash = "sha256:389f395a74eb16992217ac1521e689c1dea2d70113bcb18714669ace1ed623b9"},
+ {file = "frozendict-2.3.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ccb6450a416c9cc9acef7683e637e28356e3ceeabf83521f74cc2718883076b7"},
+ {file = "frozendict-2.3.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca59108b77cadc13ba7dfea7e8f50811208c7652a13dc6c7f92d7782a24d299"},
+ {file = "frozendict-2.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:3ec86ebf143dd685184215c27ec416c36e0ba1b80d81b1b9482f7d380c049b4e"},
+ {file = "frozendict-2.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5809e6ff6b7257043a486f7a3b73a7da71cf69a38980b4171e4741291d0d9eb3"},
+ {file = "frozendict-2.3.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c550ed7fdf1962984bec21630c584d722b3ee5d5f57a0ae2527a0121dc0414a"},
+ {file = "frozendict-2.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:3e93aebc6e69a8ef329bbe9afb8342bd33c7b5c7a0c480cb9f7e60b0cbe48072"},
+ {file = "frozendict-2.3.4-py3-none-any.whl", hash = "sha256:d722f3d89db6ae35ef35ecc243c40c800eb344848c83dba4798353312cd37b15"},
+ {file = "frozendict-2.3.4.tar.gz", hash = "sha256:15b4b18346259392b0d27598f240e9390fafbff882137a9c48a1e0104fb17f78"},
]
gitdb = [
{file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"},
{file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"},
]
gitpython = [
- {file = "GitPython-3.1.27-py3-none-any.whl", hash = "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"},
- {file = "GitPython-3.1.27.tar.gz", hash = "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"},
+ {file = "GitPython-3.1.29-py3-none-any.whl", hash = "sha256:41eea0deec2deea139b459ac03656f0dd28fc4a3387240ec1d3c259a2c47850f"},
+ {file = "GitPython-3.1.29.tar.gz", hash = "sha256:cc36bfc4a3f913e66805a28e84703e419d9c264c1077e537b54f0e1af85dbefd"},
]
googleapis-common-protos = [
{file = "googleapis-common-protos-1.56.2.tar.gz", hash = "sha256:b09b56f5463070c2153753ef123f07d2e49235e89148e9b2459ec8ed2f68d7d3"},
{file = "googleapis_common_protos-1.56.2-py2.py3-none-any.whl", hash = "sha256:023eaea9d8c1cceccd9587c6af6c20f33eeeb05d4148670f2b0322dc1511700c"},
]
grpcio = [
- {file = "grpcio-1.48.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:19f9c021ae858d3ef6d5ec4c0acf3f0b0a61e599e5aa36c36943c209520a0e66"},
- {file = "grpcio-1.48.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:b0fa666fecdb1b118d37823937e9237afa17fe734fc4dbe6dd642e1e4cca0246"},
- {file = "grpcio-1.48.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a661d4b9b314327dec1e92ed57e591e8e5eb055700e0ba9e9687f734d922dcb6"},
- {file = "grpcio-1.48.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:598c8c42420443c55431eba1821c7a2f72707f1ff674a4de9e0bb03282923cfb"},
- {file = "grpcio-1.48.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c924d4e0493fd536ba3b82584b370e8b3c809ef341f9f828cff2dc3c761b3ab"},
- {file = "grpcio-1.48.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a5edbcb8289681fcb5ded7542f2b7dd456489e83007a95e32fcaf55e9f18603e"},
- {file = "grpcio-1.48.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9d116106cf220c79e91595523c893f1cf09ec0c2ea49de4fb82152528b7e6833"},
- {file = "grpcio-1.48.1-cp310-cp310-win32.whl", hash = "sha256:5d81cd3c161291339ed3b469250c2f5013c3083dea7796e93aedff8f05fdcec1"},
- {file = "grpcio-1.48.1-cp310-cp310-win_amd64.whl", hash = "sha256:d751f8beb383c4a5a95625d7ccc1ab183b98b02c6a88924814ea7fbff530872d"},
- {file = "grpcio-1.48.1-cp36-cp36m-linux_armv7l.whl", hash = "sha256:1471e6f25a8e47d9f88499f48c565fc5b2876e8ee91bfb0ff33eaadd188b7ea6"},
- {file = "grpcio-1.48.1-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:9fba1d0ba7cf56811728f1951c800a9aca6677e86433c5e353f2cc2c4039fda6"},
- {file = "grpcio-1.48.1-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:f3a99ed422c38bd1bc893cb2cb2cea6d64173ec30927f699e95f5f58bdf625cf"},
- {file = "grpcio-1.48.1-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b005502c59835f9ba3c3f8742f64c19eeb3db41eae1a89b035a559b39b421803"},
- {file = "grpcio-1.48.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ef1dafb4eadeaca58aec8c721a5a73d551064b0c63d57fa003e233277c642e"},
- {file = "grpcio-1.48.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:9477967e605ba08715dcc769b5ee0f0d8b22bda40ef25a0df5a8759e5a4d21a5"},
- {file = "grpcio-1.48.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:dbba883c2b6d63949bc98ab1950bc22cf7c8d4e8cb68de6edde49d3cccd8fd26"},
- {file = "grpcio-1.48.1-cp36-cp36m-win32.whl", hash = "sha256:8bbaa6647986b874891bc682a1093df54cbdb073b5d4b844a2b480c47c7ffafd"},
- {file = "grpcio-1.48.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e02f6ba10a3d4e289fa7ae91b301783a750d118b60f17924ca05e506c7d29bc8"},
- {file = "grpcio-1.48.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:97dc35a99c61d5f35ec6457d3df0a4695ba9bb04a35686e1c254462b15c53f98"},
- {file = "grpcio-1.48.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:ca382028cdfd2d79b7704b2acb8ae1fb54e9e1a03a6765e1895ba89a6fcfaba1"},
- {file = "grpcio-1.48.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:3d319a0c89ffac9b8dfc75bfe727a4c835d18bbccc14203b20eb5949c6c7d87d"},
- {file = "grpcio-1.48.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1b81849061c67c2ffaa6ed27aa3d9b0762e71e68e784e24b0330b7b1c67470a"},
- {file = "grpcio-1.48.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff1be0474846ed15682843b187e6062f845ddfeaceb2b28972073f474f7b735"},
- {file = "grpcio-1.48.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:53b6306f9473020bc47ddf64ca704356466e63d5f88f5c2a7bf0a4692e7f03c4"},
- {file = "grpcio-1.48.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dad2501603f954f222a6e555413c454a5f8d763ab910fbab3855bcdfef6b3148"},
- {file = "grpcio-1.48.1-cp37-cp37m-win32.whl", hash = "sha256:4786323555a9f2c6380cd9a9922bcfd42165a51d68d242eebfcdfdc667651c96"},
- {file = "grpcio-1.48.1-cp37-cp37m-win_amd64.whl", hash = "sha256:934aad7350d9577f4275e787f3d91d3c8ff4efffa8d6b807d343d3c891ff53eb"},
- {file = "grpcio-1.48.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:2563357697f5f2d7fd80c1b07a57ef4736551327ad84de604e7b9f6c1b6b4e20"},
- {file = "grpcio-1.48.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:1d065f40fe74b52b88a6c42d4373a0983f1b0090f952a0747f34f2c11d6cbc64"},
- {file = "grpcio-1.48.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:3340cb2224cc397954def015729391d85fb31135b5a7efca363e73e6f1b0e908"},
- {file = "grpcio-1.48.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d03009a26f7edca9f0a581aa5d3153242b815b858cb4790e34a955afb303c6ba"},
- {file = "grpcio-1.48.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53fa2fc1a1713195fa7acf7443a6f59b6ac7837607690f813c66cc18a9cb8135"},
- {file = "grpcio-1.48.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a6a750c8324f3974e95265d3f9a0541573c537af1f67b3f6f46bf9c0b2e1b36"},
- {file = "grpcio-1.48.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:626822d799d8fab08f07c8d95ef5c36213d24143f7cad3f548e97413db9f4110"},
- {file = "grpcio-1.48.1-cp38-cp38-win32.whl", hash = "sha256:ca5209ef89f7607be47a308fa92308cf079805ed556ecda672f00039a26e366f"},
- {file = "grpcio-1.48.1-cp38-cp38-win_amd64.whl", hash = "sha256:7cee20a4f873d61274d70c28ff63d19677d9eeea869c6a9cbaf3a00712336b6c"},
- {file = "grpcio-1.48.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:460f5bec23fffa3c041aeba1f93a0f06b7a29e6a4da3658a52e1a866494920ab"},
- {file = "grpcio-1.48.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:c54734a6eb3be544d332e65c846236d02e5fc71325e8c53af91e83a46b87b506"},
- {file = "grpcio-1.48.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c6b6969c529521c86884a13745a4b68930db1ef2e051735c0f479d0a7adb25b6"},
- {file = "grpcio-1.48.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bef672a1536d59437210f16af35389d715d2b321bfe4899b3d6476a196706"},
- {file = "grpcio-1.48.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f29627d66ae816837fd32c9450dc9c54780962cd74d034513ed829ba3ab46652"},
- {file = "grpcio-1.48.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b01faf7934c606d5050cf055c1d03943180f23d995d68d04cf50c80d1ef2c65a"},
- {file = "grpcio-1.48.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:741eeff39a26d26da2b6d74ff0559f882ee95ee4e3b20c0b4b829021cb917f96"},
- {file = "grpcio-1.48.1-cp39-cp39-win32.whl", hash = "sha256:a15409bc1d05c52ecb00f5e42ab8ff280e7149f2eb854728f628fb2a0a161a5b"},
- {file = "grpcio-1.48.1-cp39-cp39-win_amd64.whl", hash = "sha256:2b6c336409937fd1cd2bf78eb72651f44d292d88da5e63059a4e8bd01b9d7411"},
- {file = "grpcio-1.48.1.tar.gz", hash = "sha256:660217eccd2943bf23ea9a36e2a292024305aec04bf747fbcff1f5032b83610e"},
+ {file = "grpcio-1.50.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:906f4d1beb83b3496be91684c47a5d870ee628715227d5d7c54b04a8de802974"},
+ {file = "grpcio-1.50.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:2d9fd6e38b16c4d286a01e1776fdf6c7a4123d99ae8d6b3f0b4a03a34bf6ce45"},
+ {file = "grpcio-1.50.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:4b123fbb7a777a2fedec684ca0b723d85e1d2379b6032a9a9b7851829ed3ca9a"},
+ {file = "grpcio-1.50.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2f77a90ba7b85bfb31329f8eab9d9540da2cf8a302128fb1241d7ea239a5469"},
+ {file = "grpcio-1.50.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eea18a878cffc804506d39c6682d71f6b42ec1c151d21865a95fae743fda500"},
+ {file = "grpcio-1.50.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b71916fa8f9eb2abd93151fafe12e18cebb302686b924bd4ec39266211da525"},
+ {file = "grpcio-1.50.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:95ce51f7a09491fb3da8cf3935005bff19983b77c4e9437ef77235d787b06842"},
+ {file = "grpcio-1.50.0-cp310-cp310-win32.whl", hash = "sha256:f7025930039a011ed7d7e7ef95a1cb5f516e23c5a6ecc7947259b67bea8e06ca"},
+ {file = "grpcio-1.50.0-cp310-cp310-win_amd64.whl", hash = "sha256:05f7c248e440f538aaad13eee78ef35f0541e73498dd6f832fe284542ac4b298"},
+ {file = "grpcio-1.50.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:ca8a2254ab88482936ce941485c1c20cdeaef0efa71a61dbad171ab6758ec998"},
+ {file = "grpcio-1.50.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3b611b3de3dfd2c47549ca01abfa9bbb95937eb0ea546ea1d762a335739887be"},
+ {file = "grpcio-1.50.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a4cd8cb09d1bc70b3ea37802be484c5ae5a576108bad14728f2516279165dd7"},
+ {file = "grpcio-1.50.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:156f8009e36780fab48c979c5605eda646065d4695deea4cfcbcfdd06627ddb6"},
+ {file = "grpcio-1.50.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de411d2b030134b642c092e986d21aefb9d26a28bf5a18c47dd08ded411a3bc5"},
+ {file = "grpcio-1.50.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d144ad10eeca4c1d1ce930faa105899f86f5d99cecfe0d7224f3c4c76265c15e"},
+ {file = "grpcio-1.50.0-cp311-cp311-win32.whl", hash = "sha256:92d7635d1059d40d2ec29c8bf5ec58900120b3ce5150ef7414119430a4b2dd5c"},
+ {file = "grpcio-1.50.0-cp311-cp311-win_amd64.whl", hash = "sha256:ce8513aee0af9c159319692bfbf488b718d1793d764798c3d5cff827a09e25ef"},
+ {file = "grpcio-1.50.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:8e8999a097ad89b30d584c034929f7c0be280cd7851ac23e9067111167dcbf55"},
+ {file = "grpcio-1.50.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:a50a1be449b9e238b9bd43d3857d40edf65df9416dea988929891d92a9f8a778"},
+ {file = "grpcio-1.50.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:cf151f97f5f381163912e8952eb5b3afe89dec9ed723d1561d59cabf1e219a35"},
+ {file = "grpcio-1.50.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a23d47f2fc7111869f0ff547f771733661ff2818562b04b9ed674fa208e261f4"},
+ {file = "grpcio-1.50.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84d04dec64cc4ed726d07c5d17b73c343c8ddcd6b59c7199c801d6bbb9d9ed1"},
+ {file = "grpcio-1.50.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:67dd41a31f6fc5c7db097a5c14a3fa588af54736ffc174af4411d34c4f306f68"},
+ {file = "grpcio-1.50.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d4c8e73bf20fb53fe5a7318e768b9734cf122fe671fcce75654b98ba12dfb75"},
+ {file = "grpcio-1.50.0-cp37-cp37m-win32.whl", hash = "sha256:7489dbb901f4fdf7aec8d3753eadd40839c9085967737606d2c35b43074eea24"},
+ {file = "grpcio-1.50.0-cp37-cp37m-win_amd64.whl", hash = "sha256:531f8b46f3d3db91d9ef285191825d108090856b3bc86a75b7c3930f16ce432f"},
+ {file = "grpcio-1.50.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:d534d169673dd5e6e12fb57cc67664c2641361e1a0885545495e65a7b761b0f4"},
+ {file = "grpcio-1.50.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:1d8d02dbb616c0a9260ce587eb751c9c7dc689bc39efa6a88cc4fa3e9c138a7b"},
+ {file = "grpcio-1.50.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:baab51dcc4f2aecabf4ed1e2f57bceab240987c8b03533f1cef90890e6502067"},
+ {file = "grpcio-1.50.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40838061e24f960b853d7bce85086c8e1b81c6342b1f4c47ff0edd44bbae2722"},
+ {file = "grpcio-1.50.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:931e746d0f75b2a5cff0a1197d21827a3a2f400c06bace036762110f19d3d507"},
+ {file = "grpcio-1.50.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:15f9e6d7f564e8f0776770e6ef32dac172c6f9960c478616c366862933fa08b4"},
+ {file = "grpcio-1.50.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a4c23e54f58e016761b576976da6a34d876420b993f45f66a2bfb00363ecc1f9"},
+ {file = "grpcio-1.50.0-cp38-cp38-win32.whl", hash = "sha256:3e4244c09cc1b65c286d709658c061f12c61c814be0b7030a2d9966ff02611e0"},
+ {file = "grpcio-1.50.0-cp38-cp38-win_amd64.whl", hash = "sha256:8e69aa4e9b7f065f01d3fdcecbe0397895a772d99954bb82eefbb1682d274518"},
+ {file = "grpcio-1.50.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:af98d49e56605a2912cf330b4627e5286243242706c3a9fa0bcec6e6f68646fc"},
+ {file = "grpcio-1.50.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:080b66253f29e1646ac53ef288c12944b131a2829488ac3bac8f52abb4413c0d"},
+ {file = "grpcio-1.50.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:ab5d0e3590f0a16cb88de4a3fa78d10eb66a84ca80901eb2c17c1d2c308c230f"},
+ {file = "grpcio-1.50.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb11464f480e6103c59d558a3875bd84eed6723f0921290325ebe97262ae1347"},
+ {file = "grpcio-1.50.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e07fe0d7ae395897981d16be61f0db9791f482f03fee7d1851fe20ddb4f69c03"},
+ {file = "grpcio-1.50.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d75061367a69808ab2e84c960e9dce54749bcc1e44ad3f85deee3a6c75b4ede9"},
+ {file = "grpcio-1.50.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ae23daa7eda93c1c49a9ecc316e027ceb99adbad750fbd3a56fa9e4a2ffd5ae0"},
+ {file = "grpcio-1.50.0-cp39-cp39-win32.whl", hash = "sha256:177afaa7dba3ab5bfc211a71b90da1b887d441df33732e94e26860b3321434d9"},
+ {file = "grpcio-1.50.0-cp39-cp39-win_amd64.whl", hash = "sha256:ea8ccf95e4c7e20419b7827aa5b6da6f02720270686ac63bd3493a651830235c"},
+ {file = "grpcio-1.50.0.tar.gz", hash = "sha256:12b479839a5e753580b5e6053571de14006157f2ef9b71f38c56dc9b23b95ad6"},
]
hiredis = [
{file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"},
@@ -1997,8 +2022,8 @@ hyperlink = [
{file = "hyperlink-21.0.0.tar.gz", hash = "sha256:427af957daa58bc909471c6c40f74c5450fa123dd093fc53efd2e91d2705a56b"},
]
idna = [
- {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"},
- {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"},
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
]
ijson = [
{file = "ijson-3.1.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:6c1a777096be5f75ffebb335c6d2ebc0e489b231496b7f2ca903aa061fe7d381"},
@@ -2077,20 +2102,20 @@ incremental = [
{file = "incremental-21.3.0.tar.gz", hash = "sha256:02f5de5aff48f6b9f665d99d48bfc7ec03b6e3943210de7cfc88856d755d6f57"},
]
isort = [
- {file = "isort-5.7.0-py3-none-any.whl", hash = "sha256:fff4f0c04e1825522ce6949973e83110a6e907750cd92d128b0d14aaaadbffdc"},
- {file = "isort-5.7.0.tar.gz", hash = "sha256:c729845434366216d320e936b8ad6f9d681aab72dc7cbc2d51bedc3582f3ad1e"},
+ {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"},
+ {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"},
]
jeepney = [
{file = "jeepney-0.7.1-py3-none-any.whl", hash = "sha256:1b5a0ea5c0e7b166b2f5895b91a08c14de8915afda4407fb5022a195224958ac"},
{file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"},
]
jinja2 = [
- {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"},
- {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"},
+ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
+ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
]
jsonschema = [
- {file = "jsonschema-4.4.0-py3-none-any.whl", hash = "sha256:77281a1f71684953ee8b3d488371b162419767973789272434bbc3f29d9c8823"},
- {file = "jsonschema-4.4.0.tar.gz", hash = "sha256:636694eb41b3535ed608fe04129f26542b59ed99808b4f688aa32dcf55317a83"},
+ {file = "jsonschema-4.17.0-py3-none-any.whl", hash = "sha256:f660066c3966db7d6daeaea8a75e0b68237a48e51cf49882087757bb59916248"},
+ {file = "jsonschema-4.17.0.tar.gz", hash = "sha256:5bfcf2bca16a087ade17e02b282d34af7ccd749ef76241e7f9bd7c0cb8a9424d"},
]
keyring = [
{file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"},
@@ -2226,105 +2251,124 @@ matrix-synapse-ldap3 = [
{file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"},
]
mccabe = [
- {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
- {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
]
msgpack = [
- {file = "msgpack-1.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:96acc674bb9c9be63fa8b6dabc3248fdc575c4adc005c440ad02f87ca7edd079"},
- {file = "msgpack-1.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2c3ca57c96c8e69c1a0d2926a6acf2d9a522b41dc4253a8945c4c6cd4981a4e3"},
- {file = "msgpack-1.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0a792c091bac433dfe0a70ac17fc2087d4595ab835b47b89defc8bbabcf5c73"},
- {file = "msgpack-1.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c58cdec1cb5fcea8c2f1771d7b5fec79307d056874f746690bd2bdd609ab147"},
- {file = "msgpack-1.0.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f97c0f35b3b096a330bb4a1a9247d0bd7e1f3a2eba7ab69795501504b1c2c39"},
- {file = "msgpack-1.0.3-cp310-cp310-win32.whl", hash = "sha256:36a64a10b16c2ab31dcd5f32d9787ed41fe68ab23dd66957ca2826c7f10d0b85"},
- {file = "msgpack-1.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c1ba333b4024c17c7591f0f372e2daa3c31db495a9b2af3cf664aef3c14354f7"},
- {file = "msgpack-1.0.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c2140cf7a3ec475ef0938edb6eb363fa704159e0bf71dde15d953bacc1cf9d7d"},
- {file = "msgpack-1.0.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f4c22717c74d44bcd7af353024ce71c6b55346dad5e2cc1ddc17ce8c4507c6b"},
- {file = "msgpack-1.0.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d733a15ade190540c703de209ffbc42a3367600421b62ac0c09fde594da6ec"},
- {file = "msgpack-1.0.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7e03b06f2982aa98d4ddd082a210c3db200471da523f9ac197f2828e80e7770"},
- {file = "msgpack-1.0.3-cp36-cp36m-win32.whl", hash = "sha256:3d875631ecab42f65f9dce6f55ce6d736696ced240f2634633188de2f5f21af9"},
- {file = "msgpack-1.0.3-cp36-cp36m-win_amd64.whl", hash = "sha256:40fb89b4625d12d6027a19f4df18a4de5c64f6f3314325049f219683e07e678a"},
- {file = "msgpack-1.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6eef0cf8db3857b2b556213d97dd82de76e28a6524853a9beb3264983391dc1a"},
- {file = "msgpack-1.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d8c332f53ffff01953ad25131272506500b14750c1d0ce8614b17d098252fbc"},
- {file = "msgpack-1.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c0903bd93cbd34653dd63bbfcb99d7539c372795201f39d16fdfde4418de43a"},
- {file = "msgpack-1.0.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf1e6bfed4860d72106f4e0a1ab519546982b45689937b40257cfd820650b920"},
- {file = "msgpack-1.0.3-cp37-cp37m-win32.whl", hash = "sha256:d02cea2252abc3756b2ac31f781f7a98e89ff9759b2e7450a1c7a0d13302ff50"},
- {file = "msgpack-1.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2f30dd0dc4dfe6231ad253b6f9f7128ac3202ae49edd3f10d311adc358772dba"},
- {file = "msgpack-1.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f201d34dc89342fabb2a10ed7c9a9aaaed9b7af0f16a5923f1ae562b31258dea"},
- {file = "msgpack-1.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bb87f23ae7d14b7b3c21009c4b1705ec107cb21ee71975992f6aca571fb4a42a"},
- {file = "msgpack-1.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a3a5c4b16e9d0edb823fe54b59b5660cc8d4782d7bf2c214cb4b91a1940a8ef"},
- {file = "msgpack-1.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f74da1e5fcf20ade12c6bf1baa17a2dc3604958922de8dc83cbe3eff22e8b611"},
- {file = "msgpack-1.0.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73a80bd6eb6bcb338c1ec0da273f87420829c266379c8c82fa14c23fb586cfa1"},
- {file = "msgpack-1.0.3-cp38-cp38-win32.whl", hash = "sha256:9fce00156e79af37bb6db4e7587b30d11e7ac6a02cb5bac387f023808cd7d7f4"},
- {file = "msgpack-1.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:9b6f2d714c506e79cbead331de9aae6837c8dd36190d02da74cb409b36162e8a"},
- {file = "msgpack-1.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:89908aea5f46ee1474cc37fbc146677f8529ac99201bc2faf4ef8edc023c2bf3"},
- {file = "msgpack-1.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:973ad69fd7e31159eae8f580f3f707b718b61141838321c6fa4d891c4a2cca52"},
- {file = "msgpack-1.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da24375ab4c50e5b7486c115a3198d207954fe10aaa5708f7b65105df09109b2"},
- {file = "msgpack-1.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a598d0685e4ae07a0672b59792d2cc767d09d7a7f39fd9bd37ff84e060b1a996"},
- {file = "msgpack-1.0.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4c309a68cb5d6bbd0c50d5c71a25ae81f268c2dc675c6f4ea8ab2feec2ac4e2"},
- {file = "msgpack-1.0.3-cp39-cp39-win32.whl", hash = "sha256:494471d65b25a8751d19c83f1a482fd411d7ca7a3b9e17d25980a74075ba0e88"},
- {file = "msgpack-1.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:f01b26c2290cbd74316990ba84a14ac3d599af9cebefc543d241a66e785cf17d"},
- {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"},
+ {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"},
+ {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"},
+ {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"},
+ {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"},
+ {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"},
+ {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"},
+ {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"},
+ {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"},
+ {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"},
+ {file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"},
+ {file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"},
+ {file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"},
+ {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"},
+ {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"},
+ {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"},
+ {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"},
+ {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"},
+ {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"},
+ {file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"},
+ {file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"},
+ {file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"},
+ {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"},
+ {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"},
+ {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"},
+ {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"},
+ {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"},
+ {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"},
+ {file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"},
+ {file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"},
+ {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"},
+ {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"},
+ {file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"},
+ {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"},
+ {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"},
+ {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"},
+ {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"},
+ {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"},
+ {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"},
+ {file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = "sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"},
+ {file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"},
+ {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"},
+ {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"},
+ {file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"},
+ {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"},
+ {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"},
+ {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"},
+ {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"},
+ {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"},
+ {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"},
+ {file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"},
+ {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"},
+ {file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"},
]
mypy = [
- {file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"},
- {file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"},
- {file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"},
- {file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"},
- {file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"},
- {file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"},
- {file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"},
- {file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"},
- {file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"},
- {file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"},
- {file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"},
- {file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"},
- {file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"},
- {file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"},
- {file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"},
- {file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"},
- {file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"},
- {file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"},
- {file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"},
- {file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"},
- {file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"},
- {file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"},
- {file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"},
+ {file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"},
+ {file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"},
+ {file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"},
+ {file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"},
+ {file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"},
+ {file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"},
+ {file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"},
+ {file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"},
+ {file = "mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"},
+ {file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"},
+ {file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"},
+ {file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"},
+ {file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"},
+ {file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"},
+ {file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"},
+ {file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"},
+ {file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"},
+ {file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"},
+ {file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"},
+ {file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"},
+ {file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"},
+ {file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"},
+ {file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"},
+ {file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"},
]
mypy-extensions = [
{file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
{file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
]
mypy-zope = [
- {file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"},
- {file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"},
+ {file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"},
+ {file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"},
]
netaddr = [
{file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"},
{file = "netaddr-0.8.0.tar.gz", hash = "sha256:d6cc57c7a07b1d9d2e917aa8b36ae8ce61c35ba3fcd1b83ca31c5a0ee2b5a243"},
]
opentelemetry-api = [
- {file = "opentelemetry-api-1.12.0.tar.gz", hash = "sha256:740c2cf9aa75e76c208b3ee04b3b3b3721f58bbac8e97019174f07ec12cde7af"},
- {file = "opentelemetry_api-1.12.0-py3-none-any.whl", hash = "sha256:2e1cef8ce175be6464f240422babfe1dfb581daec96f0daad5d0d0e951b38f7b"},
+ {file = "opentelemetry_api-1.13.0-py3-none-any.whl", hash = "sha256:2db1e8713f48a119bae457cd22304a7919d5e57190a380485c442c4f731a46dd"},
+ {file = "opentelemetry_api-1.13.0.tar.gz", hash = "sha256:e683e869471b99e77238c8739d6ee2f368803329f3b808dfa86a02d0b519c682"},
]
opentelemetry-exporter-jaeger = [
- {file = "opentelemetry-exporter-jaeger-1.12.0.tar.gz", hash = "sha256:e0e346466f2fee7d26e62e6fbad47bf5effb27ab17106e15a427cabfaa7e5879"},
- {file = "opentelemetry_exporter_jaeger-1.12.0-py3-none-any.whl", hash = "sha256:e42f5d2d2d5fb4e6a8f14424bb8ee515cba2da38f97ecd4498cba4da315d09dc"},
+ {file = "opentelemetry_exporter_jaeger-1.13.0-py3-none-any.whl", hash = "sha256:ee421cea47a6de3cafb2d5180181aede45fac2b316a31d4e51ac7c24362c7222"},
+ {file = "opentelemetry_exporter_jaeger-1.13.0.tar.gz", hash = "sha256:6c80a6eb66697f9a4cbaddfc7194759b0daf3b3d8708a1e6741e8cfa728ced2b"},
]
opentelemetry-exporter-jaeger-proto-grpc = [
- {file = "opentelemetry-exporter-jaeger-proto-grpc-1.12.0.tar.gz", hash = "sha256:dba063bbfc7c9927d05dd1806e0534c3d34a6aaac7548d5c1f66c210019f79a5"},
- {file = "opentelemetry_exporter_jaeger_proto_grpc-1.12.0-py3-none-any.whl", hash = "sha256:f029e5cddb42745e6fe73412b46151e066a30e4689ccad837913c683402d3aad"},
+ {file = "opentelemetry_exporter_jaeger_proto_grpc-1.13.0-py3-none-any.whl", hash = "sha256:a3c4738424b91b3430f9e35b1ea08c8907964a00025a53bc52199b151393ee54"},
+ {file = "opentelemetry_exporter_jaeger_proto_grpc-1.13.0.tar.gz", hash = "sha256:d3bd172971fb16d2ae7337e645a3ffa5c1a8b3b476fa8609a03f26c772796403"},
]
opentelemetry-exporter-jaeger-thrift = [
- {file = "opentelemetry-exporter-jaeger-thrift-1.12.0.tar.gz", hash = "sha256:8a87b0e63c62dee13ef9fa9a28ad1ca612e06f29e3fa9266f40d2f4969be3af3"},
- {file = "opentelemetry_exporter_jaeger_thrift-1.12.0-py3-none-any.whl", hash = "sha256:c60cac61637fef57bda4917432493c80f4168654067be24e2a3eb9065d76963e"},
+ {file = "opentelemetry_exporter_jaeger_thrift-1.13.0-py3-none-any.whl", hash = "sha256:795f396eaea008359ff195153745947816b3a278b5b8958eb36fe40dc455d920"},
+ {file = "opentelemetry_exporter_jaeger_thrift-1.13.0.tar.gz", hash = "sha256:f10865c5efcdf1b53906604c56797f92261f8825fd8a9ddc913167ff053e99cb"},
]
opentelemetry-sdk = [
- {file = "opentelemetry-sdk-1.12.0.tar.gz", hash = "sha256:bf37830ca4f93d0910cf109749237c5cb4465e31a54dfad8400011e9822a2a14"},
- {file = "opentelemetry_sdk-1.12.0-py3-none-any.whl", hash = "sha256:d13be09765441c0513a3de01b7a2f56a7da36d902f60bff7c97f338903a57c34"},
+ {file = "opentelemetry_sdk-1.13.0-py3-none-any.whl", hash = "sha256:c7b88e06ebedd22c226b374c207792d30b3f34074a6b8ad8c6dad04a8d16326b"},
+ {file = "opentelemetry_sdk-1.13.0.tar.gz", hash = "sha256:0eddcacd5a484fe2918116b9a4e31867e3d10322ff8392b1c7b0dae1ac724d48"},
]
opentelemetry-semantic-conventions = [
- {file = "opentelemetry-semantic-conventions-0.33b0.tar.gz", hash = "sha256:67d62461c87b683b958428ced79162ec4d567dabf30b050f270bbd01eff89ced"},
- {file = "opentelemetry_semantic_conventions-0.33b0-py3-none-any.whl", hash = "sha256:56b67b3f8f49413cbfbbeb32e9cf7b4c7dfb27a83064d959733766376ba11bc7"},
+ {file = "opentelemetry_semantic_conventions-0.34b0-py3-none-any.whl", hash = "sha256:b236bd027d2d470c5f7f7a466676182c7e02f486db8296caca25fae0649c3fa3"},
+ {file = "opentelemetry_semantic_conventions-0.34b0.tar.gz", hash = "sha256:0c88a5d1f45b820272e0c421fd52ff2188b74582b1bab7ba0f57891dc2f31edf"},
]
packaging = [
{file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
@@ -2339,96 +2383,126 @@ pathspec = [
{file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
]
phonenumbers = [
- {file = "phonenumbers-8.12.44-py2.py3-none-any.whl", hash = "sha256:cc1299cf37b309ecab6214297663ab86cb3d64ae37fd5b88e904fe7983a874a6"},
- {file = "phonenumbers-8.12.44.tar.gz", hash = "sha256:26cfd0257d1704fe2f88caff2caabb70d16a877b1e65b6aae51f9fbbe10aa8ce"},
+ {file = "phonenumbers-8.12.56-py2.py3-none-any.whl", hash = "sha256:80a7422cf0999a6f9b7a2e6cfbdbbfcc56ab5b75414dc3b805bbec91276b64a3"},
+ {file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"},
]
pillow = [
- {file = "Pillow-9.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4"},
- {file = "Pillow-9.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976"},
- {file = "Pillow-9.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc"},
- {file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"},
- {file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"},
- {file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"},
- {file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"},
- {file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"},
- {file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"},
- {file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"},
- {file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"},
- {file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"},
- {file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"},
- {file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"},
- {file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"},
- {file = "Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"},
- {file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"},
- {file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"},
- {file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"},
- {file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"},
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win_amd64.whl", hash = "sha256:32a44128c4bdca7f31de5be641187367fe2a450ad83b833ef78910397db491aa"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:0b7257127d646ff8676ec8a15520013a698d1fdc48bc2a79ba4e53df792526f2"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b90f7616ea170e92820775ed47e136208e04c967271c9ef615b6fbd08d9af0e3"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68943d632f1f9e3dce98908e873b3a090f6cba1cbb1b892a9e8d97c938871fbe"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be55f8457cd1eac957af0c3f5ece7bc3f033f89b114ef30f710882717670b2a8"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d77adcd56a42d00cc1be30843d3426aa4e660cab4a61021dc84467123f7a00c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:829f97c8e258593b9daa80638aee3789b7df9da5cf1336035016d76f03b8860c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:801ec82e4188e935c7f5e22e006d01611d6b41661bba9fe45b60e7ac1a8f84de"},
+ {file = "Pillow-9.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:871b72c3643e516db4ecf20efe735deb27fe30ca17800e661d769faab45a18d7"},
+ {file = "Pillow-9.3.0-cp310-cp310-win32.whl", hash = "sha256:655a83b0058ba47c7c52e4e2df5ecf484c1b0b0349805896dd350cbc416bdd91"},
+ {file = "Pillow-9.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:9f47eabcd2ded7698106b05c2c338672d16a6f2a485e74481f524e2a23c2794b"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:57751894f6618fd4308ed8e0c36c333e2f5469744c34729a27532b3db106ee20"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7db8b751ad307d7cf238f02101e8e36a128a6cb199326e867d1398067381bff4"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3033fbe1feb1b59394615a1cafaee85e49d01b51d54de0cbf6aa8e64182518a1"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b012ea2d065fd163ca096f4e37e47cd8b59cf4b0fd47bfca6abb93df70b34c"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a65733d103311331875c1dca05cb4606997fd33d6acfed695b1232ba1df193"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:502526a2cbfa431d9fc2a079bdd9061a2397b842bb6bc4239bb176da00993812"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90fb88843d3902fe7c9586d439d1e8c05258f41da473952aa8b328d8b907498c"},
+ {file = "Pillow-9.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:89dca0ce00a2b49024df6325925555d406b14aa3efc2f752dbb5940c52c56b11"},
+ {file = "Pillow-9.3.0-cp311-cp311-win32.whl", hash = "sha256:3168434d303babf495d4ba58fc22d6604f6e2afb97adc6a423e917dab828939c"},
+ {file = "Pillow-9.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:18498994b29e1cf86d505edcb7edbe814d133d2232d256db8c7a8ceb34d18cef"},
+ {file = "Pillow-9.3.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:772a91fc0e03eaf922c63badeca75e91baa80fe2f5f87bdaed4280662aad25c9"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa4107d1b306cdf8953edde0534562607fe8811b6c4d9a486298ad31de733b2"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4012d06c846dc2b80651b120e2cdd787b013deb39c09f407727ba90015c684f"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77ec3e7be99629898c9a6d24a09de089fa5356ee408cdffffe62d67bb75fdd72"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:6c738585d7a9961d8c2821a1eb3dcb978d14e238be3d70f0a706f7fa9316946b"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:828989c45c245518065a110434246c44a56a8b2b2f6347d1409c787e6e4651ee"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win32.whl", hash = "sha256:82409ffe29d70fd733ff3c1025a602abb3e67405d41b9403b00b01debc4c9a29"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:41e0051336807468be450d52b8edd12ac60bebaa97fe10c8b660f116e50b30e4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b03ae6f1a1878233ac620c98f3459f79fd77c7e3c2b20d460284e1fb370557d4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4390e9ce199fc1951fcfa65795f239a8a4944117b5935a9317fb320e7767b40f"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40e1ce476a7804b0fb74bcfa80b0a2206ea6a882938eaba917f7a0f004b42502"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a06a052c5f37b4ed81c613a455a81f9a3a69429b4fd7bb913c3fa98abefc20"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:15c42fb9dea42465dfd902fb0ecf584b8848ceb28b41ee2b58f866411be33f07"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:51e0e543a33ed92db9f5ef69a0356e0b1a7a6b6a71b80df99f1d181ae5875636"},
+ {file = "Pillow-9.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3dd6caf940756101205dffc5367babf288a30043d35f80936f9bfb37f8355b32"},
+ {file = "Pillow-9.3.0-cp38-cp38-win32.whl", hash = "sha256:f1ff2ee69f10f13a9596480335f406dd1f70c3650349e2be67ca3139280cade0"},
+ {file = "Pillow-9.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:276a5ca930c913f714e372b2591a22c4bd3b81a418c0f6635ba832daec1cbcfc"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:73bd195e43f3fadecfc50c682f5055ec32ee2c933243cafbfdec69ab1aa87cad"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c7c8ae3864846fc95f4611c78129301e203aaa2af813b703c55d10cc1628535"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0918e03aa0c72ea56edbb00d4d664294815aa11291a11504a377ea018330d3"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0915e734b33a474d76c28e07292f196cdf2a590a0d25bcc06e64e545f2d146c"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0372acb5d3598f36ec0914deed2a63f6bcdb7b606da04dc19a88d31bf0c05b"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ad58d27a5b0262c0c19b47d54c5802db9b34d38bbf886665b626aff83c74bacd"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:97aabc5c50312afa5e0a2b07c17d4ac5e865b250986f8afe2b02d772567a380c"},
+ {file = "Pillow-9.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9aaa107275d8527e9d6e7670b64aabaaa36e5b6bd71a1015ddd21da0d4e06448"},
+ {file = "Pillow-9.3.0-cp39-cp39-win32.whl", hash = "sha256:bac18ab8d2d1e6b4ce25e3424f709aceef668347db8637c2296bcf41acb7cf48"},
+ {file = "Pillow-9.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b472b5ea442148d1c3e2209f20f1e0bb0eb556538690fa70b5e1f79fa0ba8dc2"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ab388aaa3f6ce52ac1cb8e122c4bd46657c15905904b3120a6248b5b8b0bc228"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbb8e7f2abee51cef77673be97760abff1674ed32847ce04b4af90f610144c7b"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca31dd6014cb8b0b2db1e46081b0ca7d936f856da3b39744aef499db5d84d02"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c7025dce65566eb6e89f56c9509d4f628fddcedb131d9465cacd3d8bac337e7e"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ebf2029c1f464c59b8bdbe5143c79fa2045a581ac53679733d3a91d400ff9efb"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b59430236b8e58840a0dfb4099a0e8717ffb779c952426a69ae435ca1f57210c"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12ce4932caf2ddf3e41d17fc9c02d67126935a44b86df6a206cf0d7161548627"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae5331c23ce118c53b172fa64a4c037eb83c9165aba3a7ba9ddd3ec9fa64a699"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0b07fffc13f474264c336298d1b4ce01d9c5a011415b79d4ee5527bb69ae6f65"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8"},
+ {file = "Pillow-9.3.0.tar.gz", hash = "sha256:c935a22a557a560108d780f9a0fc426dd7459940dc54faa49d83249c8d3e760f"},
]
pkginfo = [
{file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"},
{file = "pkginfo-1.8.2.tar.gz", hash = "sha256:542e0d0b6750e2e21c20179803e40ab50598d8066d51097a0e382cba9eb02bff"},
]
+pkgutil_resolve_name = [
+ {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
+ {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
+]
platformdirs = [
{file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"},
{file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"},
]
prometheus-client = [
- {file = "prometheus_client-0.14.0-py3-none-any.whl", hash = "sha256:f4aba3fdd1735852049f537c1f0ab177159b7ab76f271ecc4d2f45aa2a1d01f2"},
- {file = "prometheus_client-0.14.0.tar.gz", hash = "sha256:8f7a922dd5455ad524b6ba212ce8eb2b4b05e073f4ec7218287f88b1cac34750"},
+ {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"},
+ {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"},
]
protobuf = [
- {file = "protobuf-3.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3cc797c9d15d7689ed507b165cd05913acb992d78b379f6014e013f9ecb20996"},
- {file = "protobuf-3.20.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:ff8d8fa42675249bb456f5db06c00de6c2f4c27a065955917b28c4f15978b9c3"},
- {file = "protobuf-3.20.1-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cd68be2559e2a3b84f517fb029ee611546f7812b1fdd0aa2ecc9bc6ec0e4fdde"},
- {file = "protobuf-3.20.1-cp310-cp310-win32.whl", hash = "sha256:9016d01c91e8e625141d24ec1b20fed584703e527d28512aa8c8707f105a683c"},
- {file = "protobuf-3.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:32ca378605b41fd180dfe4e14d3226386d8d1b002ab31c969c366549e66a2bb7"},
- {file = "protobuf-3.20.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9be73ad47579abc26c12024239d3540e6b765182a91dbc88e23658ab71767153"},
- {file = "protobuf-3.20.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:097c5d8a9808302fb0da7e20edf0b8d4703274d140fd25c5edabddcde43e081f"},
- {file = "protobuf-3.20.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e250a42f15bf9d5b09fe1b293bdba2801cd520a9f5ea2d7fb7536d4441811d20"},
- {file = "protobuf-3.20.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:cdee09140e1cd184ba9324ec1df410e7147242b94b5f8b0c64fc89e38a8ba531"},
- {file = "protobuf-3.20.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:af0ebadc74e281a517141daad9d0f2c5d93ab78e9d455113719a45a49da9db4e"},
- {file = "protobuf-3.20.1-cp37-cp37m-win32.whl", hash = "sha256:755f3aee41354ae395e104d62119cb223339a8f3276a0cd009ffabfcdd46bb0c"},
- {file = "protobuf-3.20.1-cp37-cp37m-win_amd64.whl", hash = "sha256:62f1b5c4cd6c5402b4e2d63804ba49a327e0c386c99b1675c8a0fefda23b2067"},
- {file = "protobuf-3.20.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:06059eb6953ff01e56a25cd02cca1a9649a75a7e65397b5b9b4e929ed71d10cf"},
- {file = "protobuf-3.20.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:cb29edb9eab15742d791e1025dd7b6a8f6fcb53802ad2f6e3adcb102051063ab"},
- {file = "protobuf-3.20.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:69ccfdf3657ba59569c64295b7d51325f91af586f8d5793b734260dfe2e94e2c"},
- {file = "protobuf-3.20.1-cp38-cp38-win32.whl", hash = "sha256:dd5789b2948ca702c17027c84c2accb552fc30f4622a98ab5c51fcfe8c50d3e7"},
- {file = "protobuf-3.20.1-cp38-cp38-win_amd64.whl", hash = "sha256:77053d28427a29987ca9caf7b72ccafee011257561259faba8dd308fda9a8739"},
- {file = "protobuf-3.20.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6f50601512a3d23625d8a85b1638d914a0970f17920ff39cec63aaef80a93fb7"},
- {file = "protobuf-3.20.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:284f86a6207c897542d7e956eb243a36bb8f9564c1742b253462386e96c6b78f"},
- {file = "protobuf-3.20.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7403941f6d0992d40161aa8bb23e12575637008a5a02283a930addc0508982f9"},
- {file = "protobuf-3.20.1-cp39-cp39-win32.whl", hash = "sha256:db977c4ca738dd9ce508557d4fce0f5aebd105e158c725beec86feb1f6bc20d8"},
- {file = "protobuf-3.20.1-cp39-cp39-win_amd64.whl", hash = "sha256:7e371f10abe57cee5021797126c93479f59fccc9693dafd6bd5633ab67808a91"},
- {file = "protobuf-3.20.1-py2.py3-none-any.whl", hash = "sha256:adfc6cf69c7f8c50fd24c793964eef18f0ac321315439d94945820612849c388"},
- {file = "protobuf-3.20.1.tar.gz", hash = "sha256:adc31566d027f45efe3f44eeb5b1f329da43891634d61c75a5944e9be6dd42c9"},
+ {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"},
+ {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"},
+ {file = "protobuf-3.20.3-cp310-cp310-win32.whl", hash = "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c"},
+ {file = "protobuf-3.20.3-cp310-cp310-win_amd64.whl", hash = "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7"},
+ {file = "protobuf-3.20.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469"},
+ {file = "protobuf-3.20.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4"},
+ {file = "protobuf-3.20.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4"},
+ {file = "protobuf-3.20.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454"},
+ {file = "protobuf-3.20.3-cp37-cp37m-win32.whl", hash = "sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905"},
+ {file = "protobuf-3.20.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c"},
+ {file = "protobuf-3.20.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7"},
+ {file = "protobuf-3.20.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee"},
+ {file = "protobuf-3.20.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050"},
+ {file = "protobuf-3.20.3-cp38-cp38-win32.whl", hash = "sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86"},
+ {file = "protobuf-3.20.3-cp38-cp38-win_amd64.whl", hash = "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9"},
+ {file = "protobuf-3.20.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b"},
+ {file = "protobuf-3.20.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b"},
+ {file = "protobuf-3.20.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402"},
+ {file = "protobuf-3.20.3-cp39-cp39-win32.whl", hash = "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480"},
+ {file = "protobuf-3.20.3-cp39-cp39-win_amd64.whl", hash = "sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7"},
+ {file = "protobuf-3.20.3-py2.py3-none-any.whl", hash = "sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db"},
+ {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"},
]
psycopg2 = [
- {file = "psycopg2-2.9.3-cp310-cp310-win32.whl", hash = "sha256:083707a696e5e1c330af2508d8fab36f9700b26621ccbcb538abe22e15485362"},
- {file = "psycopg2-2.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:d3ca6421b942f60c008f81a3541e8faf6865a28d5a9b48544b0ee4f40cac7fca"},
- {file = "psycopg2-2.9.3-cp36-cp36m-win32.whl", hash = "sha256:9572e08b50aed176ef6d66f15a21d823bb6f6d23152d35e8451d7d2d18fdac56"},
- {file = "psycopg2-2.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:a81e3866f99382dfe8c15a151f1ca5fde5815fde879348fe5a9884a7c092a305"},
- {file = "psycopg2-2.9.3-cp37-cp37m-win32.whl", hash = "sha256:cb10d44e6694d763fa1078a26f7f6137d69f555a78ec85dc2ef716c37447e4b2"},
- {file = "psycopg2-2.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:4295093a6ae3434d33ec6baab4ca5512a5082cc43c0505293087b8a46d108461"},
- {file = "psycopg2-2.9.3-cp38-cp38-win32.whl", hash = "sha256:34b33e0162cfcaad151f249c2649fd1030010c16f4bbc40a604c1cb77173dcf7"},
- {file = "psycopg2-2.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:0762c27d018edbcb2d34d51596e4346c983bd27c330218c56c4dc25ef7e819bf"},
- {file = "psycopg2-2.9.3-cp39-cp39-win32.whl", hash = "sha256:8cf3878353cc04b053822896bc4922b194792df9df2f1ad8da01fb3043602126"},
- {file = "psycopg2-2.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:06f32425949bd5fe8f625c49f17ebb9784e1e4fe928b7cce72edc36fb68e4c0c"},
- {file = "psycopg2-2.9.3.tar.gz", hash = "sha256:8e841d1bf3434da985cc5ef13e6f75c8981ced601fd70cc6bf33351b91562981"},
+ {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"},
+ {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"},
+ {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"},
+ {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"},
+ {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"},
+ {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"},
+ {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"},
+ {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"},
+ {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"},
+ {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"},
+ {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"},
+ {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"},
+ {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"},
]
psycopg2cffi = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
@@ -2467,57 +2541,58 @@ pyasn1-modules = [
{file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"},
]
pycodestyle = [
- {file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"},
- {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"},
+ {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"},
+ {file = "pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"},
]
pycparser = [
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
{file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
]
pydantic = [
- {file = "pydantic-1.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8098a724c2784bf03e8070993f6d46aa2eeca031f8d8a048dff277703e6e193"},
- {file = "pydantic-1.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c320c64dd876e45254bdd350f0179da737463eea41c43bacbee9d8c9d1021f11"},
- {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18f3e912f9ad1bdec27fb06b8198a2ccc32f201e24174cec1b3424dda605a310"},
- {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11951b404e08b01b151222a1cb1a9f0a860a8153ce8334149ab9199cd198131"},
- {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8bc541a405423ce0e51c19f637050acdbdf8feca34150e0d17f675e72d119580"},
- {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e565a785233c2d03724c4dc55464559639b1ba9ecf091288dd47ad9c629433bd"},
- {file = "pydantic-1.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a4a88dcd6ff8fd47c18b3a3709a89adb39a6373f4482e04c1b765045c7e282fd"},
- {file = "pydantic-1.9.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:447d5521575f18e18240906beadc58551e97ec98142266e521c34968c76c8761"},
- {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:985ceb5d0a86fcaa61e45781e567a59baa0da292d5ed2e490d612d0de5796918"},
- {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059b6c1795170809103a1538255883e1983e5b831faea6558ef873d4955b4a74"},
- {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d12f96b5b64bec3f43c8e82b4aab7599d0157f11c798c9f9c528a72b9e0b339a"},
- {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ae72f8098acb368d877b210ebe02ba12585e77bd0db78ac04a1ee9b9f5dd2166"},
- {file = "pydantic-1.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:79b485767c13788ee314669008d01f9ef3bc05db9ea3298f6a50d3ef596a154b"},
- {file = "pydantic-1.9.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:494f7c8537f0c02b740c229af4cb47c0d39840b829ecdcfc93d91dcbb0779892"},
- {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0f047e11febe5c3198ed346b507e1d010330d56ad615a7e0a89fae604065a0e"},
- {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:969dd06110cb780da01336b281f53e2e7eb3a482831df441fb65dd30403f4608"},
- {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:177071dfc0df6248fd22b43036f936cfe2508077a72af0933d0c1fa269b18537"},
- {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9bcf8b6e011be08fb729d110f3e22e654a50f8a826b0575c7196616780683380"},
- {file = "pydantic-1.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a955260d47f03df08acf45689bd163ed9df82c0e0124beb4251b1290fa7ae728"},
- {file = "pydantic-1.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ce157d979f742a915b75f792dbd6aa63b8eccaf46a1005ba03aa8a986bde34a"},
- {file = "pydantic-1.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0bf07cab5b279859c253d26a9194a8906e6f4a210063b84b433cf90a569de0c1"},
- {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d93d4e95eacd313d2c765ebe40d49ca9dd2ed90e5b37d0d421c597af830c195"},
- {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1542636a39c4892c4f4fa6270696902acb186a9aaeac6f6cf92ce6ae2e88564b"},
- {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a9af62e9b5b9bc67b2a195ebc2c2662fdf498a822d62f902bf27cccb52dbbf49"},
- {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fe4670cb32ea98ffbf5a1262f14c3e102cccd92b1869df3bb09538158ba90fe6"},
- {file = "pydantic-1.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:9f659a5ee95c8baa2436d392267988fd0f43eb774e5eb8739252e5a7e9cf07e0"},
- {file = "pydantic-1.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b83ba3825bc91dfa989d4eed76865e71aea3a6ca1388b59fc801ee04c4d8d0d6"},
- {file = "pydantic-1.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1dd8fecbad028cd89d04a46688d2fcc14423e8a196d5b0a5c65105664901f810"},
- {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02eefd7087268b711a3ff4db528e9916ac9aa18616da7bca69c1871d0b7a091f"},
- {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7eb57ba90929bac0b6cc2af2373893d80ac559adda6933e562dcfb375029acee"},
- {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4ce9ae9e91f46c344bec3b03d6ee9612802682c1551aaf627ad24045ce090761"},
- {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:72ccb318bf0c9ab97fc04c10c37683d9eea952ed526707fabf9ac5ae59b701fd"},
- {file = "pydantic-1.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:61b6760b08b7c395975d893e0b814a11cf011ebb24f7d869e7118f5a339a82e1"},
- {file = "pydantic-1.9.1-py3-none-any.whl", hash = "sha256:4988c0f13c42bfa9ddd2fe2f569c9d54646ce84adc5de84228cfe83396f3bd58"},
- {file = "pydantic-1.9.1.tar.gz", hash = "sha256:1ed987c3ff29fff7fd8c3ea3a3ea877ad310aae2ef9889a119e22d3f2db0691a"},
+ {file = "pydantic-1.10.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bb6ad4489af1bac6955d38ebcb95079a836af31e4c4f74aba1ca05bb9f6027bd"},
+ {file = "pydantic-1.10.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a1f5a63a6dfe19d719b1b6e6106561869d2efaca6167f84f5ab9347887d78b98"},
+ {file = "pydantic-1.10.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:352aedb1d71b8b0736c6d56ad2bd34c6982720644b0624462059ab29bd6e5912"},
+ {file = "pydantic-1.10.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19b3b9ccf97af2b7519c42032441a891a5e05c68368f40865a90eb88833c2559"},
+ {file = "pydantic-1.10.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9069e1b01525a96e6ff49e25876d90d5a563bc31c658289a8772ae186552236"},
+ {file = "pydantic-1.10.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:355639d9afc76bcb9b0c3000ddcd08472ae75318a6eb67a15866b87e2efa168c"},
+ {file = "pydantic-1.10.2-cp310-cp310-win_amd64.whl", hash = "sha256:ae544c47bec47a86bc7d350f965d8b15540e27e5aa4f55170ac6a75e5f73b644"},
+ {file = "pydantic-1.10.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a4c805731c33a8db4b6ace45ce440c4ef5336e712508b4d9e1aafa617dc9907f"},
+ {file = "pydantic-1.10.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d49f3db871575e0426b12e2f32fdb25e579dea16486a26e5a0474af87cb1ab0a"},
+ {file = "pydantic-1.10.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37c90345ec7dd2f1bcef82ce49b6235b40f282b94d3eec47e801baf864d15525"},
+ {file = "pydantic-1.10.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b5ba54d026c2bd2cb769d3468885f23f43710f651688e91f5fb1edcf0ee9283"},
+ {file = "pydantic-1.10.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05e00dbebbe810b33c7a7362f231893183bcc4251f3f2ff991c31d5c08240c42"},
+ {file = "pydantic-1.10.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2d0567e60eb01bccda3a4df01df677adf6b437958d35c12a3ac3e0f078b0ee52"},
+ {file = "pydantic-1.10.2-cp311-cp311-win_amd64.whl", hash = "sha256:c6f981882aea41e021f72779ce2a4e87267458cc4d39ea990729e21ef18f0f8c"},
+ {file = "pydantic-1.10.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4aac8e7103bf598373208f6299fa9a5cfd1fc571f2d40bf1dd1955a63d6eeb5"},
+ {file = "pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a7b66c3f499108b448f3f004801fcd7d7165fb4200acb03f1c2402da73ce4c"},
+ {file = "pydantic-1.10.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bedf309630209e78582ffacda64a21f96f3ed2e51fbf3962d4d488e503420254"},
+ {file = "pydantic-1.10.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9300fcbebf85f6339a02c6994b2eb3ff1b9c8c14f502058b5bf349d42447dcf5"},
+ {file = "pydantic-1.10.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:216f3bcbf19c726b1cc22b099dd409aa371f55c08800bcea4c44c8f74b73478d"},
+ {file = "pydantic-1.10.2-cp37-cp37m-win_amd64.whl", hash = "sha256:dd3f9a40c16daf323cf913593083698caee97df2804aa36c4b3175d5ac1b92a2"},
+ {file = "pydantic-1.10.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b97890e56a694486f772d36efd2ba31612739bc6f3caeee50e9e7e3ebd2fdd13"},
+ {file = "pydantic-1.10.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9cabf4a7f05a776e7793e72793cd92cc865ea0e83a819f9ae4ecccb1b8aa6116"},
+ {file = "pydantic-1.10.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06094d18dd5e6f2bbf93efa54991c3240964bb663b87729ac340eb5014310624"},
+ {file = "pydantic-1.10.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc78cc83110d2f275ec1970e7a831f4e371ee92405332ebfe9860a715f8336e1"},
+ {file = "pydantic-1.10.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ee433e274268a4b0c8fde7ad9d58ecba12b069a033ecc4645bb6303c062d2e9"},
+ {file = "pydantic-1.10.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7c2abc4393dea97a4ccbb4ec7d8658d4e22c4765b7b9b9445588f16c71ad9965"},
+ {file = "pydantic-1.10.2-cp38-cp38-win_amd64.whl", hash = "sha256:0b959f4d8211fc964772b595ebb25f7652da3f22322c007b6fed26846a40685e"},
+ {file = "pydantic-1.10.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c33602f93bfb67779f9c507e4d69451664524389546bacfe1bee13cae6dc7488"},
+ {file = "pydantic-1.10.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5760e164b807a48a8f25f8aa1a6d857e6ce62e7ec83ea5d5c5a802eac81bad41"},
+ {file = "pydantic-1.10.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6eb843dcc411b6a2237a694f5e1d649fc66c6064d02b204a7e9d194dff81eb4b"},
+ {file = "pydantic-1.10.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b8795290deaae348c4eba0cebb196e1c6b98bdbe7f50b2d0d9a4a99716342fe"},
+ {file = "pydantic-1.10.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e0bedafe4bc165ad0a56ac0bd7695df25c50f76961da29c050712596cf092d6d"},
+ {file = "pydantic-1.10.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2e05aed07fa02231dbf03d0adb1be1d79cabb09025dd45aa094aa8b4e7b9dcda"},
+ {file = "pydantic-1.10.2-cp39-cp39-win_amd64.whl", hash = "sha256:c1ba1afb396148bbc70e9eaa8c06c1716fdddabaf86e7027c5988bae2a829ab6"},
+ {file = "pydantic-1.10.2-py3-none-any.whl", hash = "sha256:1b6ee725bd6e83ec78b1aa32c5b1fa67a3a65badddde3976bca5fe4568f27709"},
+ {file = "pydantic-1.10.2.tar.gz", hash = "sha256:91b8e218852ef6007c2b98cd861601c6a09f1aa32bbbb74fab5b1c33d4a1e410"},
]
pyflakes = [
- {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"},
- {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"},
+ {file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"},
+ {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"},
]
pygithub = [
- {file = "PyGithub-1.55-py3-none-any.whl", hash = "sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b"},
- {file = "PyGithub-1.55.tar.gz", hash = "sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283"},
+ {file = "PyGithub-1.56-py3-none-any.whl", hash = "sha256:d15f13d82165306da8a68aefc0f848a6f6432d5febbff13b60a94758ce3ef8b5"},
+ {file = "PyGithub-1.56.tar.gz", hash = "sha256:80c6d85cf0f9418ffeb840fd105840af694c4f17e102970badbaf678251f2a01"},
]
pygments = [
{file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
@@ -2579,8 +2654,8 @@ pyrsistent = [
{file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"},
]
pysaml2 = [
- {file = "pysaml2-7.1.2-py2.py3-none-any.whl", hash = "sha256:d915961aaa4d4d97d952b30fe5d18d64cf053465acf3e38d8090b36c5ff08325"},
- {file = "pysaml2-7.1.2.tar.gz", hash = "sha256:1ec94442306511b93fe7a5710f224e05e0aba948682d506614d1e04f3232f827"},
+ {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"},
+ {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"},
]
python-dateutil = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
@@ -2630,8 +2705,8 @@ pyyaml = [
{file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
]
readme-renderer = [
- {file = "readme_renderer-33.0-py3-none-any.whl", hash = "sha256:f02cee0c4de9636b5a62b6be50c9742427ba1b956aad1d938bfb087d0d72ccdf"},
- {file = "readme_renderer-33.0.tar.gz", hash = "sha256:e3b53bc84bd6af054e4cc1fe3567dc1ae19f554134221043a3f8c674e22209db"},
+ {file = "readme_renderer-37.2-py3-none-any.whl", hash = "sha256:d3f06a69e8c40fca9ab3174eca48f96d9771eddb43517b17d96583418427b106"},
+ {file = "readme_renderer-37.2.tar.gz", hash = "sha256:e8ad25293c98f781dbc2c5a36a309929390009f902f99e1798c761aaf04a7923"},
]
requests = [
{file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"},
@@ -2645,6 +2720,10 @@ rfc3986 = [
{file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"},
{file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"},
]
+rich = [
+ {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"},
+ {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"},
+]
secretstorage = [
{file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
{file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
@@ -2654,8 +2733,8 @@ semantic-version = [
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
]
sentry-sdk = [
- {file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
- {file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
+ {file = "sentry-sdk-1.10.1.tar.gz", hash = "sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691"},
+ {file = "sentry_sdk-1.10.1-py2.py3-none-any.whl", hash = "sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad"},
]
service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@@ -2666,8 +2745,8 @@ setuptools = [
{file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"},
]
setuptools-rust = [
- {file = "setuptools-rust-1.5.1.tar.gz", hash = "sha256:0e05e456645d59429cb1021370aede73c0760e9360bbfdaaefb5bced530eb9d7"},
- {file = "setuptools_rust-1.5.1-py3-none-any.whl", hash = "sha256:306b236ff3aa5229180e58292610d0c2c51bb488191122d2fc559ae4caeb7d5e"},
+ {file = "setuptools-rust-1.5.2.tar.gz", hash = "sha256:d8daccb14dc0eae1b6b6eb3ecef79675bd37b4065369f79c35393dd5c55652c7"},
+ {file = "setuptools_rust-1.5.2-py3-none-any.whl", hash = "sha256:8eb45851e34288f2296cd5ab9e924535ac1757318b730a13fe6836867843f206"},
]
signedjson = [
{file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
@@ -2762,21 +2841,17 @@ towncrier = [
{file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"},
{file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"},
]
-tqdm = [
- {file = "tqdm-4.63.0-py2.py3-none-any.whl", hash = "sha256:e643e071046f17139dea55b880dc9b33822ce21613b4a4f5ea57f202833dbc29"},
- {file = "tqdm-4.63.0.tar.gz", hash = "sha256:1d9835ede8e394bb8c9dcbffbca02d717217113adc679236873eeaac5bc0b3cd"},
-]
treq = [
{file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"},
{file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"},
]
twine = [
- {file = "twine-3.8.0-py3-none-any.whl", hash = "sha256:d0550fca9dc19f3d5e8eadfce0c227294df0a2a951251a4385797c8a6198b7c8"},
- {file = "twine-3.8.0.tar.gz", hash = "sha256:8efa52658e0ae770686a13b675569328f1fba9837e5de1867bfe5f46a9aefe19"},
+ {file = "twine-4.0.1-py3-none-any.whl", hash = "sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e"},
+ {file = "twine-4.0.1.tar.gz", hash = "sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0"},
]
-Twisted = [
- {file = "Twisted-22.8.0-py3-none-any.whl", hash = "sha256:8d4718d1e48dcc28933f8beb48dc71cfe77a125e37ad1eb7a3d0acc49baf6c99"},
- {file = "Twisted-22.8.0.tar.gz", hash = "sha256:e5b60de39f2d1da153fbe1874d885fe3fcbdb21fcc446fa759a53e8fc3513bed"},
+twisted = [
+ {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"},
+ {file = "Twisted-22.10.0.tar.gz", hash = "sha256:32acbd40a94f5f46e7b42c109bfae2b302250945561783a8b7a059048f2d4d31"},
]
twisted-iocpsupport = [
{file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},
@@ -2823,8 +2898,8 @@ typed-ast = [
{file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"},
]
types-bleach = [
- {file = "types-bleach-4.1.4.tar.gz", hash = "sha256:2d30c2c4fb6854088ac636471352c9a51bf6c089289800d2a8060820a01cd43a"},
- {file = "types_bleach-4.1.4-py3-none-any.whl", hash = "sha256:edffe173ed6d7b6f3543036a96204a9319c3bf6c3645917b14274e43f000cc9b"},
+ {file = "types-bleach-5.0.3.tar.gz", hash = "sha256:f7b3df8278efe176d9670d0f063a66c866c77577f71f54b9c7a320e31b1a7bbd"},
+ {file = "types_bleach-5.0.3-py3-none-any.whl", hash = "sha256:5931525d03571f36b2bb40210c34b662c4d26c8fd6f2b1e1e83fe4d2d2fd63c7"},
]
types-commonmark = [
{file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"},
@@ -2843,105 +2918,118 @@ types-ipaddress = [
{file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"},
]
types-jsonschema = [
- {file = "types-jsonschema-4.4.6.tar.gz", hash = "sha256:7f2a804618756768c7c0616f8c794b61fcfe3077c7ee1ad47dcf01c5e5f692bb"},
- {file = "types_jsonschema-4.4.6-py3-none-any.whl", hash = "sha256:1db9031ca49a8444d01bd2ce8cf2f89318382b04610953b108321e6f8fb03390"},
+ {file = "types-jsonschema-4.17.0.1.tar.gz", hash = "sha256:62625d492e4930411a431909ac32301aeab6180500e70ee222f81d43204cfb3c"},
+ {file = "types_jsonschema-4.17.0.1-py3-none-any.whl", hash = "sha256:77badbe3881cbf79ac9561be2be2b1f37ab104b13afd2231840e6dd6e94e63c2"},
]
types-pillow = [
- {file = "types-Pillow-9.0.15.tar.gz", hash = "sha256:d2e385fe5c192e75970f18accce69f5c2a9f186f3feb578a9b91cd6fdf64211d"},
- {file = "types_Pillow-9.0.15-py3-none-any.whl", hash = "sha256:c9646595dfafdf8b63d4b1443292ead17ee0fc7b18a143e497b68e0ea2dc1eb6"},
+ {file = "types-Pillow-9.2.2.1.tar.gz", hash = "sha256:85c139e06e1c46ec5f9c634d5c54a156b0958d5d0e8be024ed353db0c804b426"},
+ {file = "types_Pillow-9.2.2.1-py3-none-any.whl", hash = "sha256:3a6a871cade8428433a21ef459bb0a65532b87d05f9e836a0664431ce445bdcf"},
]
types-psycopg2 = [
- {file = "types-psycopg2-2.9.9.tar.gz", hash = "sha256:4f9d4d52eeb343dc00fd5ed4f1513a8a5c18efba0a072eb82706d15cf4f20a2e"},
- {file = "types_psycopg2-2.9.9-py3-none-any.whl", hash = "sha256:cec9291d4318ad70b407310f8304b3d40f6d0358f09870448f7a65e3027c80af"},
+ {file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"},
+ {file = "types_psycopg2-2.9.21.1-py3-none-any.whl", hash = "sha256:858838f1972f39da2a6e28274201fed8619a40a235dd86e7f66f4548ec474395"},
]
types-pyopenssl = [
- {file = "types-pyOpenSSL-22.0.0.tar.gz", hash = "sha256:d86dde7f6fe2f1ac9fe0b6282e489f649f480364bdaa9d6a4696d52505f4477e"},
- {file = "types_pyOpenSSL-22.0.0-py3-none-any.whl", hash = "sha256:da685f57b864979f36df0157895139c8244ad4aad19b551f1678206fbad0108a"},
+ {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},
+ {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"},
]
types-pyyaml = [
- {file = "types-PyYAML-6.0.4.tar.gz", hash = "sha256:6252f62d785e730e454dfa0c9f0fb99d8dae254c5c3c686903cf878ea27c04b7"},
- {file = "types_PyYAML-6.0.4-py3-none-any.whl", hash = "sha256:693b01c713464a6851f36ff41077f8adbc6e355eda929addfb4a97208aea9b4b"},
+ {file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"},
+ {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"},
]
types-requests = [
- {file = "types-requests-2.27.11.tar.gz", hash = "sha256:6a7ed24b21780af4a5b5e24c310b2cd885fb612df5fd95584d03d87e5f2a195a"},
- {file = "types_requests-2.27.11-py3-none-any.whl", hash = "sha256:506279bad570c7b4b19ac1f22e50146538befbe0c133b2cea66a9b04a533a859"},
+ {file = "types-requests-2.28.11.2.tar.gz", hash = "sha256:fdcd7bd148139fb8eef72cf4a41ac7273872cad9e6ada14b11ff5dfdeee60ed3"},
+ {file = "types_requests-2.28.11.2-py3-none-any.whl", hash = "sha256:14941f8023a80b16441b3b46caffcbfce5265fd14555844d6029697824b5a2ef"},
]
types-setuptools = [
- {file = "types-setuptools-57.4.9.tar.gz", hash = "sha256:536ef74744f8e1e4be4fc719887f886e74e4cf3c792b4a06984320be4df450b5"},
- {file = "types_setuptools-57.4.9-py3-none-any.whl", hash = "sha256:948dc6863373750e2cd0b223a84f1fb608414cde5e55cf38ea657b93aeb411d2"},
+ {file = "types-setuptools-65.5.0.3.tar.gz", hash = "sha256:17769171f5f2a2dc69b25c0d3106552a5cda767bbf6b36cb6212b26dae5aa9fc"},
+ {file = "types_setuptools-65.5.0.3-py3-none-any.whl", hash = "sha256:9254c32b0cc91c486548e7d7561243b5bd185402a383e93c6691e1b9bc8d86e2"},
]
types-urllib3 = [
{file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"},
{file = "types_urllib3-1.26.10-py3-none-any.whl", hash = "sha256:d755278d5ecd7a7a6479a190e54230f241f1a99c19b81518b756b19dc69e518c"},
]
typing-extensions = [
- {file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"},
- {file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"},
+ {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"},
+ {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"},
]
unpaddedbase64 = [
{file = "unpaddedbase64-2.1.0-py3-none-any.whl", hash = "sha256:485eff129c30175d2cd6f0cd8d2310dff51e666f7f36175f738d75dfdbd0b1c6"},
{file = "unpaddedbase64-2.1.0.tar.gz", hash = "sha256:7273c60c089de39d90f5d6d4a7883a79e319dc9d9b1c8924a7fab96178a5f005"},
]
urllib3 = [
- {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"},
- {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"},
+ {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"},
+ {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"},
]
webencodings = [
{file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"},
{file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
]
wrapt = [
- {file = "wrapt-1.13.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca"},
- {file = "wrapt-1.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44"},
- {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056"},
- {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785"},
- {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096"},
- {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33"},
- {file = "wrapt-1.13.3-cp310-cp310-win32.whl", hash = "sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f"},
- {file = "wrapt-1.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755"},
- {file = "wrapt-1.13.3-cp35-cp35m-win32.whl", hash = "sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851"},
- {file = "wrapt-1.13.3-cp35-cp35m-win_amd64.whl", hash = "sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13"},
- {file = "wrapt-1.13.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918"},
- {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade"},
- {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc"},
- {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf"},
- {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125"},
- {file = "wrapt-1.13.3-cp36-cp36m-win32.whl", hash = "sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36"},
- {file = "wrapt-1.13.3-cp36-cp36m-win_amd64.whl", hash = "sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10"},
- {file = "wrapt-1.13.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068"},
- {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709"},
- {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df"},
- {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2"},
- {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b"},
- {file = "wrapt-1.13.3-cp37-cp37m-win32.whl", hash = "sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829"},
- {file = "wrapt-1.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea"},
- {file = "wrapt-1.13.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9"},
- {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554"},
- {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c"},
- {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b"},
- {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce"},
- {file = "wrapt-1.13.3-cp38-cp38-win32.whl", hash = "sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79"},
- {file = "wrapt-1.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb"},
- {file = "wrapt-1.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb"},
- {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32"},
- {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7"},
- {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e"},
- {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640"},
- {file = "wrapt-1.13.3-cp39-cp39-win32.whl", hash = "sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374"},
- {file = "wrapt-1.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb"},
- {file = "wrapt-1.13.3.tar.gz", hash = "sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185"},
+ {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"},
+ {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"},
+ {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"},
+ {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"},
+ {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"},
+ {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"},
+ {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"},
+ {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"},
+ {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"},
+ {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"},
]
xmlschema = [
{file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"},
diff --git a/pyproject.toml b/pyproject.toml
index e669867aa0..690e3c07de 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
-version = "1.68.0rc2"
+version = "1.72.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "Apache-2.0"
@@ -192,7 +192,7 @@ psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'Py
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
-authlib = { version = ">=0.14.0", optional = true }
+authlib = { version = ">=0.15.1", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
@@ -220,7 +220,7 @@ oidc = ["authlib"]
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
-url_preview = ["lxml"]
+url-preview = ["lxml"]
sentry = ["sentry-sdk"]
opentelemetry = ["opentelemetry-api", "opentelemetry-sdk", "opentelemetry-exporter-jaeger"]
jwt = ["authlib"]
@@ -228,7 +228,7 @@ jwt = ["authlib"]
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
-cache_memory = ["pympler"]
+cache-memory = ["pympler"]
test = ["parameterized", "idna"]
# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
@@ -251,7 +251,7 @@ all = [
"pysaml2",
# oidc and jwt
"authlib",
- # url_preview
+ # url-preview
"lxml",
# sentry
"sentry-sdk",
@@ -259,7 +259,7 @@ all = [
"opentelemetry-api", "opentelemetry-sdk", "opentelemetry-exporter-jaeger",
# redis
"txredisapi", "hiredis",
- # cache_memory
+ # cache-memory
"pympler",
# omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job
@@ -268,10 +268,10 @@ all = [
[tool.poetry.dev-dependencies]
## We pin black so that our tests don't start failing on new releases.
-isort = "==5.7.0"
-black = "==22.3.0"
+isort = ">=5.10.1"
+black = ">=22.3.0"
flake8-comprehensions = "*"
-flake8-bugbear = "==21.3.2"
+flake8-bugbear = ">=21.3.2"
flake8 = "*"
# Typechecking
@@ -296,27 +296,32 @@ parameterized = ">=0.7.4"
idna = ">=2.5"
# The following are used by the release script
-click = "==8.1.1"
+click = ">=8.1.3"
# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
GitPython = ">=3.1.20"
-commonmark = "==0.9.1"
-pygithub = "==1.55"
+commonmark = ">=0.9.1"
+pygithub = ">=1.55"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from #3425. Rationale unclear.
towncrier = ">=18.6.0rc1"
[build-system]
-requires = ["poetry-core>=1.0.0", "setuptools_rust>=1.3"]
+# The upper bounds here are defensive, intended to prevent situations like
+# #13849 and #14079 where we see buildtime or runtime errors caused by build
+# system changes.
+# We are happy to raise these upper bounds upon request,
+# provided we check that it's safe to do so (i.e. that CI passes).
+requires = ["poetry-core>=1.0.0,<=1.3.2", "setuptools_rust>=1.3,<=1.5.2"]
build-backend = "poetry.core.masonry.api"
[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
-skip = "cp36* *-musllinux_i686"
+skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler
-before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y"
+before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }
# For some reason if we don't manually clean the build directory we
@@ -325,3 +330,12 @@ environment= { PATH = "$PATH:$HOME/.cargo/bin" }
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
+
+
+[tool.cibuildwheel.linux]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"
+
+[tool.cibuildwheel.macos]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index 44263bf77e..cffaa5b51b 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -11,7 +11,9 @@ rust-version = "1.58.1"
[lib]
name = "synapse"
-crate-type = ["cdylib"]
+# We generate a `cdylib` for Python and a standard `lib` for running
+# tests/benchmarks.
+crate-type = ["lib", "cdylib"]
[package.metadata.maturin]
# This is where we tell maturin where to place the built library.
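For context, the extra `lib` crate type is what lets the new benchmark targets below link against the crate as an ordinary Rust library; a `cdylib` alone cannot be imported by test or bench code. A minimal sketch, assuming the `synapse` crate name configured above and the `glob_to_regex` helper added later in this diff:

```rust
// Illustrative only: this only compiles as a test/bench target once the
// crate also produces an rlib via `crate-type = ["lib", "cdylib"]`.
use synapse::push::utils::{glob_to_regex, GlobMatchType};

fn main() {
    // Globs compile case-insensitively, anchored to the whole input.
    let regex = glob_to_regex("exam*", GlobMatchType::Whole).unwrap();
    assert!(regex.is_match("example"));
}
```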
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs
new file mode 100644
index 0000000000..ed411461d1
--- /dev/null
+++ b/rust/benches/evaluator.rs
@@ -0,0 +1,149 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![feature(test)]
+use synapse::push::{
+ evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules,
+};
+use test::Bencher;
+
+extern crate test;
+
+#[bench]
+fn bench_match_exact(b: &mut Bencher) {
+ let flattened_keys = [
+ ("type".to_string(), "m.text".to_string()),
+ ("room_id".to_string(), "!room:server".to_string()),
+ ("content.body".to_string(), "test message".to_string()),
+ ]
+ .into_iter()
+ .collect();
+
+ let eval = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ Some(0),
+ Default::default(),
+ Default::default(),
+ true,
+ )
+ .unwrap();
+
+ let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: "room_id".into(),
+ pattern: Some("!room:server".into()),
+ pattern_type: None,
+ },
+ ));
+
+ let matched = eval.match_condition(&condition, None, None).unwrap();
+ assert!(matched, "Didn't match");
+
+ b.iter(|| eval.match_condition(&condition, None, None).unwrap());
+}
+
+#[bench]
+fn bench_match_word(b: &mut Bencher) {
+ let flattened_keys = [
+ ("type".to_string(), "m.text".to_string()),
+ ("room_id".to_string(), "!room:server".to_string()),
+ ("content.body".to_string(), "test message".to_string()),
+ ]
+ .into_iter()
+ .collect();
+
+ let eval = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ Some(0),
+ Default::default(),
+ Default::default(),
+ true,
+ )
+ .unwrap();
+
+ let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: "content.body".into(),
+ pattern: Some("test".into()),
+ pattern_type: None,
+ },
+ ));
+
+ let matched = eval.match_condition(&condition, None, None).unwrap();
+ assert!(matched, "Didn't match");
+
+ b.iter(|| eval.match_condition(&condition, None, None).unwrap());
+}
+
+#[bench]
+fn bench_match_word_miss(b: &mut Bencher) {
+ let flattened_keys = [
+ ("type".to_string(), "m.text".to_string()),
+ ("room_id".to_string(), "!room:server".to_string()),
+ ("content.body".to_string(), "test message".to_string()),
+ ]
+ .into_iter()
+ .collect();
+
+ let eval = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ Some(0),
+ Default::default(),
+ Default::default(),
+ true,
+ )
+ .unwrap();
+
+ let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: "content.body".into(),
+ pattern: Some("foobar".into()),
+ pattern_type: None,
+ },
+ ));
+
+ let matched = eval.match_condition(&condition, None, None).unwrap();
+ assert!(!matched, "Unexpectedly matched");
+
+ b.iter(|| eval.match_condition(&condition, None, None).unwrap());
+}
+
+#[bench]
+fn bench_eval_message(b: &mut Bencher) {
+ let flattened_keys = [
+ ("type".to_string(), "m.text".to_string()),
+ ("room_id".to_string(), "!room:server".to_string()),
+ ("content.body".to_string(), "test message".to_string()),
+ ]
+ .into_iter()
+ .collect();
+
+ let eval = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ Some(0),
+ Default::default(),
+ Default::default(),
+ true,
+ )
+ .unwrap();
+
+ let rules =
+ FilteredPushRules::py_new(PushRules::new(Vec::new()), Default::default(), false);
+
+ b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
+}
diff --git a/rust/benches/glob.rs b/rust/benches/glob.rs
new file mode 100644
index 0000000000..b6697d9285
--- /dev/null
+++ b/rust/benches/glob.rs
@@ -0,0 +1,40 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![feature(test)]
+
+use synapse::push::utils::{glob_to_regex, GlobMatchType};
+use test::Bencher;
+
+extern crate test;
+
+#[bench]
+fn bench_whole(b: &mut Bencher) {
+ b.iter(|| glob_to_regex("test", GlobMatchType::Whole));
+}
+
+#[bench]
+fn bench_word(b: &mut Bencher) {
+ b.iter(|| glob_to_regex("test", GlobMatchType::Word));
+}
+
+#[bench]
+fn bench_whole_wildcard_run(b: &mut Bencher) {
+ b.iter(|| glob_to_regex("test***??*?*?foo", GlobMatchType::Whole));
+}
+
+#[bench]
+fn bench_word_wildcard_run(b: &mut Bencher) {
+ b.iter(|| glob_to_regex("test***??*?*?foo", GlobMatchType::Word));
+}
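The two wildcard-run benches above exercise the run-simplification logic implemented in `rust/src/push/utils.rs` later in this diff. As a hedged sketch of what that compilation step produces for the benchmarked pattern:

```rust
// Assumes glob_to_regex from rust/src/push/utils.rs below.
use synapse::push::utils::{glob_to_regex, GlobMatchType};

fn main() {
    // The middle run of "test***??*?*?foo" contains four `?`s and at least
    // one `*`, so it collapses to the single bounded quantifier `.{4,}`.
    let regex = glob_to_regex("test***??*?*?foo", GlobMatchType::Whole).unwrap();
    assert_eq!(regex.as_str(), r"\Atest.{4,}foo\z");
}
```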
diff --git a/rust/build.rs b/rust/build.rs
index 2117975e56..ef370e6b41 100644
--- a/rust/build.rs
+++ b/rust/build.rs
@@ -22,7 +22,7 @@ fn main() -> Result<(), std::io::Error> {
for entry in entries {
if entry.is_dir() {
- dirs.push(entry)
+ dirs.push(entry);
} else {
paths.push(entry.to_str().expect("valid rust paths").to_string());
}
diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs
index 7c62bc4849..49802fa4eb 100644
--- a/rust/src/push/base_rules.rs
+++ b/rust/src/push/base_rules.rs
@@ -25,6 +25,7 @@ use crate::push::Action;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
+use crate::push::RelatedEventMatchCondition;
use crate::push::SetTweak;
use crate::push::TweakValue;
@@ -114,6 +115,22 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch(
+ RelatedEventMatchCondition {
+ key: Some(Cow::Borrowed("sender")),
+ pattern: None,
+ pattern_type: Some(Cow::Borrowed("user_id")),
+ rel_type: Cow::Borrowed("m.in_reply_to"),
+ include_fallbacks: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
priority_class: 5,
@@ -173,7 +190,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default_enabled: true,
},
PushRule {
- rule_id: Cow::Borrowed("global/override/.org.matrix.msc3786.rule.room.server_acl"),
+ rule_id: Cow::Borrowed("global/override/.m.rule.room.server_acl"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
@@ -257,18 +274,6 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
- PushRule {
- rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3772.thread_reply"),
- priority_class: 1,
- conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelationMatch {
- rel_type: Cow::Borrowed("m.thread"),
- sender: None,
- sender_type: Some(Cow::Borrowed("user_id")),
- })]),
- actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
- default: true,
- default_enabled: true,
- },
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
priority_class: 1,
diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs
new file mode 100644
index 0000000000..cedd42c54d
--- /dev/null
+++ b/rust/src/push/evaluator.rs
@@ -0,0 +1,370 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::BTreeMap;
+
+use anyhow::{Context, Error};
+use lazy_static::lazy_static;
+use log::warn;
+use pyo3::prelude::*;
+use regex::Regex;
+
+use super::{
+ utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
+ Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition,
+ RelatedEventMatchCondition,
+};
+
+lazy_static! {
+ /// Used to parse the `is` clause in the room member count condition.
+ static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
+}
+
+/// Allows running a set of push rules against a particular event.
+#[pyclass]
+pub struct PushRuleEvaluator {
+ /// A mapping of "flattened" keys to string values in the event, e.g.
+ /// includes things like "type" and "content.msgtype".
+ flattened_keys: BTreeMap<String, String>,
+
+ /// The "content.body", if any.
+ body: String,
+
+ /// The number of users in the room.
+ room_member_count: u64,
+
+ /// The `notifications` section of the current power levels in the room.
+ notification_power_levels: BTreeMap<String, i64>,
+
+ /// The power level of the sender of the event, or None if the event is an
+ /// outlier.
+ sender_power_level: Option<i64>,
+
+ /// The related events, indexed by relation type. Flattened in the same manner as
+ /// `flattened_keys`.
+ related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
+
+ /// Whether MSC3664 (push rules for related events) is enabled.
+ related_event_match_enabled: bool,
+}
+
+#[pymethods]
+impl PushRuleEvaluator {
+ /// Create a new `PushRuleEvaluator`. See struct docstring for details.
+ #[new]
+ pub fn py_new(
+ flattened_keys: BTreeMap<String, String>,
+ room_member_count: u64,
+ sender_power_level: Option<i64>,
+ notification_power_levels: BTreeMap<String, i64>,
+ related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
+ related_event_match_enabled: bool,
+ ) -> Result<Self, Error> {
+ let body = flattened_keys
+ .get("content.body")
+ .cloned()
+ .unwrap_or_default();
+
+ Ok(PushRuleEvaluator {
+ flattened_keys,
+ body,
+ room_member_count,
+ notification_power_levels,
+ sender_power_level,
+ related_events_flattened,
+ related_event_match_enabled,
+ })
+ }
+
+ /// Run the evaluator with the given push rules, for the given user ID and
+ /// display name of the user.
+ ///
+ /// Passing in None will skip evaluating rules matching user ID and display
+ /// name.
+ ///
+ /// Returns the set of actions, if any, that match (filtering out any
+ /// `dont_notify` actions).
+ pub fn run(
+ &self,
+ push_rules: &FilteredPushRules,
+ user_id: Option<&str>,
+ display_name: Option<&str>,
+ ) -> Vec<Action> {
+ 'outer: for (push_rule, enabled) in push_rules.iter() {
+ if !enabled {
+ continue;
+ }
+
+ for condition in push_rule.conditions.iter() {
+ match self.match_condition(condition, user_id, display_name) {
+ Ok(true) => {}
+ Ok(false) => continue 'outer,
+ Err(err) => {
+ warn!("Condition match failed {err}");
+ continue 'outer;
+ }
+ }
+ }
+
+ let actions = push_rule
+ .actions
+ .iter()
+ // Filter out "dont_notify" actions, as we don't store them.
+ .filter(|a| **a != Action::DontNotify)
+ .cloned()
+ .collect();
+
+ return actions;
+ }
+
+ Vec::new()
+ }
+
+ /// Check if the given condition matches.
+ fn matches(
+ &self,
+ condition: Condition,
+ user_id: Option<&str>,
+ display_name: Option<&str>,
+ ) -> bool {
+ match self.match_condition(&condition, user_id, display_name) {
+ Ok(true) => true,
+ Ok(false) => false,
+ Err(err) => {
+ warn!("Condition match failed {err}");
+ false
+ }
+ }
+ }
+}
+
+impl PushRuleEvaluator {
+ /// Match a given `Condition` for a push rule.
+ pub fn match_condition(
+ &self,
+ condition: &Condition,
+ user_id: Option<&str>,
+ display_name: Option<&str>,
+ ) -> Result<bool, Error> {
+ let known_condition = match condition {
+ Condition::Known(known) => known,
+ Condition::Unknown(_) => {
+ return Ok(false);
+ }
+ };
+
+ let result = match known_condition {
+ KnownCondition::EventMatch(event_match) => {
+ self.match_event_match(event_match, user_id)?
+ }
+ KnownCondition::RelatedEventMatch(event_match) => {
+ self.match_related_event_match(event_match, user_id)?
+ }
+ KnownCondition::ContainsDisplayName => {
+ if let Some(dn) = display_name {
+ if !dn.is_empty() {
+ get_glob_matcher(dn, GlobMatchType::Word)?.is_match(&self.body)?
+ } else {
+ // We specifically ignore empty display names, as otherwise
+ // they would always match.
+ false
+ }
+ } else {
+ false
+ }
+ }
+ KnownCondition::RoomMemberCount { is } => {
+ if let Some(is) = is {
+ self.match_member_count(is)?
+ } else {
+ false
+ }
+ }
+ KnownCondition::SenderNotificationPermission { key } => {
+ if let Some(sender_power_level) = &self.sender_power_level {
+ let required_level = self
+ .notification_power_levels
+ .get(key.as_ref())
+ .copied()
+ .unwrap_or(50);
+
+ *sender_power_level >= required_level
+ } else {
+ false
+ }
+ }
+ };
+
+ Ok(result)
+ }
+
+ /// Evaluates a `event_match` condition.
+ fn match_event_match(
+ &self,
+ event_match: &EventMatchCondition,
+ user_id: Option<&str>,
+ ) -> Result<bool, Error> {
+ let pattern = if let Some(pattern) = &event_match.pattern {
+ pattern
+ } else if let Some(pattern_type) = &event_match.pattern_type {
+ // The `pattern_type` can either be "user_id" or "user_localpart",
+ // either way if we don't have a `user_id` then the condition can't
+ // match.
+ let user_id = if let Some(user_id) = user_id {
+ user_id
+ } else {
+ return Ok(false);
+ };
+
+ match &**pattern_type {
+ "user_id" => user_id,
+ "user_localpart" => get_localpart_from_id(user_id)?,
+ _ => return Ok(false),
+ }
+ } else {
+ return Ok(false);
+ };
+
+ let haystack = if let Some(haystack) = self.flattened_keys.get(&*event_match.key) {
+ haystack
+ } else {
+ return Ok(false);
+ };
+
+ // For the content.body we match against "words", but for everything
+ // else we match against the entire value.
+ let match_type = if event_match.key == "content.body" {
+ GlobMatchType::Word
+ } else {
+ GlobMatchType::Whole
+ };
+
+ let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
+ compiled_pattern.is_match(haystack)
+ }
+
+ /// Evaluates a `related_event_match` condition. (MSC3664)
+ fn match_related_event_match(
+ &self,
+ event_match: &RelatedEventMatchCondition,
+ user_id: Option<&str>,
+ ) -> Result<bool, Error> {
+ // First check if related event matching is enabled...
+ if !self.related_event_match_enabled {
+ return Ok(false);
+ }
+
+ // Get the related event; fail if there is none.
+ let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) {
+ event
+ } else {
+ return Ok(false);
+ };
+
+ // If we are not matching fallbacks, don't match if the special key indicating that
+ // this is a fallback relation is present.
+ if !event_match.include_fallbacks.unwrap_or(false)
+ && event.contains_key("im.vector.is_falling_back")
+ {
+ return Ok(false);
+ }
+
+ // If there is no key, the condition is only checking that a related event of the
+ // given rel_type exists, so accept it as matching.
+ let key = if let Some(key) = &event_match.key {
+ key
+ } else {
+ return Ok(true);
+ };
+
+ let pattern = if let Some(pattern) = &event_match.pattern {
+ pattern
+ } else if let Some(pattern_type) = &event_match.pattern_type {
+ // The `pattern_type` can either be "user_id" or "user_localpart",
+ // either way if we don't have a `user_id` then the condition can't
+ // match.
+ let user_id = if let Some(user_id) = user_id {
+ user_id
+ } else {
+ return Ok(false);
+ };
+
+ match &**pattern_type {
+ "user_id" => user_id,
+ "user_localpart" => get_localpart_from_id(user_id)?,
+ _ => return Ok(false),
+ }
+ } else {
+ return Ok(false);
+ };
+
+ let haystack = if let Some(haystack) = event.get(&**key) {
+ haystack
+ } else {
+ return Ok(false);
+ };
+
+ // For the content.body we match against "words", but for everything
+ // else we match against the entire value.
+ let match_type = if key == "content.body" {
+ GlobMatchType::Word
+ } else {
+ GlobMatchType::Whole
+ };
+
+ let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
+ compiled_pattern.is_match(haystack)
+ }
+
+ /// Match the member count against an 'is' condition.
+ /// The `is` condition can be things like '>2', '==3' or even just '4'.
+ fn match_member_count(&self, is: &str) -> Result<bool, Error> {
+ let captures = INEQUALITY_EXPR.captures(is).context("bad 'is' clause")?;
+ let ineq = captures.get(1).map_or("==", |m| m.as_str());
+ let rhs: u64 = captures
+ .get(2)
+ .context("missing number")?
+ .as_str()
+ .parse()?;
+
+ let matches = match ineq {
+ "" | "==" => self.room_member_count == rhs,
+ "<" => self.room_member_count < rhs,
+ ">" => self.room_member_count > rhs,
+ ">=" => self.room_member_count >= rhs,
+ "<=" => self.room_member_count <= rhs,
+ _ => false,
+ };
+
+ Ok(matches)
+ }
+}
+
+#[test]
+fn push_rule_evaluator() {
+ let mut flattened_keys = BTreeMap::new();
+ flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
+ let evaluator = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ Some(0),
+ BTreeMap::new(),
+ BTreeMap::new(),
+ true,
+ )
+ .unwrap();
+
+ let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
+ assert_eq!(result.len(), 3);
+}
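The `is` clause accepted by `match_member_count` above is compact enough to demonstrate standalone. A minimal sketch mirroring `INEQUALITY_EXPR` and the comparison logic, using only the `regex` crate (the free function name is illustrative, not part of this diff):

```rust
use regex::Regex;

// Parses clauses like ">2", "==3" or "4" and compares them against a count,
// mirroring PushRuleEvaluator::match_member_count above.
fn member_count_matches(is: &str, count: u64) -> Option<bool> {
    let re = Regex::new(r"^([=<>]*)([0-9]+)$").unwrap();
    let caps = re.captures(is)?;
    // A bare number like "4" leaves group 1 empty, which means equality.
    let ineq = caps.get(1).map_or("==", |m| m.as_str());
    let rhs: u64 = caps.get(2)?.as_str().parse().ok()?;
    Some(match ineq {
        "" | "==" => count == rhs,
        "<" => count < rhs,
        ">" => count > rhs,
        ">=" => count >= rhs,
        "<=" => count <= rhs,
        _ => false,
    })
}

fn main() {
    assert_eq!(member_count_matches(">2", 10), Some(true));
    assert_eq!(member_count_matches("==3", 10), Some(false));
    assert_eq!(member_count_matches("4", 4), Some(true));
}
```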
diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs
index de6764e7c5..d57800aa4a 100644
--- a/rust/src/push/mod.rs
+++ b/rust/src/push/mod.rs
@@ -42,7 +42,6 @@
//!
//! The set of "base rules" are the list of rules that every user has by default. A
//! user can modify their copy of the push rules in one of three ways:
-//!
//! 1. Adding a new push rule of a certain kind
//! 2. Changing the actions of a base rule
//! 3. Enabling/disabling a base rule.
@@ -58,12 +57,16 @@ use std::collections::{BTreeMap, HashMap, HashSet};
use anyhow::{Context, Error};
use log::warn;
use pyo3::prelude::*;
-use pythonize::pythonize;
+use pythonize::{depythonize, pythonize};
use serde::de::Error as _;
use serde::{Deserialize, Serialize};
use serde_json::Value;
+use self::evaluator::PushRuleEvaluator;
+
mod base_rules;
+pub mod evaluator;
+pub mod utils;
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
@@ -71,6 +74,7 @@ pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
child_module.add_class::<PushRule>()?;
child_module.add_class::<PushRules>()?;
child_module.add_class::<FilteredPushRules>()?;
+ child_module.add_class::<PushRuleEvaluator>()?;
child_module.add_function(wrap_pyfunction!(get_base_rule_ids, m)?)?;
m.add_submodule(child_module)?;
@@ -263,6 +267,8 @@ pub enum Condition {
#[serde(tag = "kind")]
pub enum KnownCondition {
EventMatch(EventMatchCondition),
+ #[serde(rename = "im.nheko.msc3664.related_event_match")]
+ RelatedEventMatch(RelatedEventMatchCondition),
ContainsDisplayName,
RoomMemberCount {
#[serde(skip_serializing_if = "Option::is_none")]
@@ -271,14 +277,6 @@ pub enum KnownCondition {
SenderNotificationPermission {
key: Cow<'static, str>,
},
- #[serde(rename = "org.matrix.msc3772.relation_match")]
- RelationMatch {
- rel_type: Cow<'static, str>,
- #[serde(skip_serializing_if = "Option::is_none")]
- sender: Option<Cow<'static, str>>,
- #[serde(skip_serializing_if = "Option::is_none")]
- sender_type: Option<Cow<'static, str>>,
- },
}
impl IntoPy<PyObject> for Condition {
@@ -287,20 +285,40 @@ impl IntoPy for Condition {
}
}
+impl<'source> FromPyObject<'source> for Condition {
+ fn extract(ob: &'source PyAny) -> PyResult<Self> {
+ Ok(depythonize(ob)?)
+ }
+}
+
/// The body of a [`Condition::EventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EventMatchCondition {
- key: Cow<'static, str>,
+ pub key: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
- pattern: Option<Cow<'static, str>>,
+ pub pattern: Option<Cow<'static, str>>,
#[serde(skip_serializing_if = "Option::is_none")]
- pattern_type: Option<Cow<'static, str>>,
+ pub pattern_type: Option<Cow<'static, str>>,
+}
+
+/// The body of a [`Condition::RelatedEventMatch`]
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct RelatedEventMatchCondition {
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub key: Option<Cow<'static, str>>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern: Option<Cow<'static, str>>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern_type: Option<Cow<'static, str>>,
+ pub rel_type: Cow<'static, str>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub include_fallbacks: Option<bool>,
}
/// The collection of push rules for a user.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
-struct PushRules {
+pub struct PushRules {
/// Custom push rules that override a base rule.
overridden_base_rules: HashMap<Cow<'static, str>, PushRule>,
@@ -319,7 +337,7 @@ struct PushRules {
#[pymethods]
impl PushRules {
#[new]
- fn new(rules: Vec<PushRule>) -> PushRules {
+ pub fn new(rules: Vec<PushRule>) -> PushRules {
let mut push_rules: PushRules = Default::default();
for rule in rules {
@@ -389,24 +407,21 @@ impl PushRules {
pub struct FilteredPushRules {
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
- msc3786_enabled: bool,
- msc3772_enabled: bool,
+ msc3664_enabled: bool,
}
#[pymethods]
impl FilteredPushRules {
#[new]
- fn py_new(
+ pub fn py_new(
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
- msc3786_enabled: bool,
- msc3772_enabled: bool,
+ msc3664_enabled: bool,
) -> Self {
Self {
push_rules,
enabled_map,
- msc3786_enabled,
- msc3772_enabled,
+ msc3664_enabled,
}
}
@@ -425,14 +440,8 @@ impl FilteredPushRules {
.iter()
.filter(|rule| {
// Ignore disabled experimental push rules
- if !self.msc3786_enabled
- && rule.rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl"
- {
- return false;
- }
-
- if !self.msc3772_enabled
- && rule.rule_id == "global/underride/.org.matrix.msc3772.thread_reply"
+ if !self.msc3664_enabled
+ && rule.rule_id == "global/override/.im.nheko.msc3664.reply"
{
return false;
}
@@ -471,6 +480,17 @@ fn test_deserialize_condition() {
let _: Condition = serde_json::from_str(json).unwrap();
}
+#[test]
+fn test_deserialize_unstable_msc3664_condition() {
+ let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern":"coffee","rel_type":"m.in_reply_to"}"#;
+
+ let condition: Condition = serde_json::from_str(json).unwrap();
+ assert!(matches!(
+ condition,
+ Condition::Known(KnownCondition::RelatedEventMatch(_))
+ ));
+}
+
#[test]
fn test_deserialize_custom_condition() {
let json = r#"{"kind":"custom_tag"}"#;
diff --git a/rust/src/push/utils.rs b/rust/src/push/utils.rs
new file mode 100644
index 0000000000..8759340473
--- /dev/null
+++ b/rust/src/push/utils.rs
@@ -0,0 +1,215 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use anyhow::bail;
+use anyhow::Context;
+use anyhow::Error;
+use lazy_static::lazy_static;
+use regex;
+use regex::Regex;
+use regex::RegexBuilder;
+
+lazy_static! {
+ /// Matches runs of non-wildcard characters followed by wildcard characters.
+ static ref WILDCARD_RUN: Regex = Regex::new(r"([^\?\*]*)([\?\*]*)").expect("valid regex");
+}
+
+/// Extract the localpart from a Matrix style ID
+pub(crate) fn get_localpart_from_id(id: &str) -> Result<&str, Error> {
+ let (localpart, _) = id
+ .split_once(':')
+ .with_context(|| format!("ID does not contain colon: {id}"))?;
+
+ // We need to strip off the first character, which is the ID type.
+ if localpart.is_empty() {
+ bail!("Invalid ID {id}");
+ }
+
+ Ok(&localpart[1..])
+}
+
+/// Used by `glob_to_regex` to specify what to match the regex against.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum GlobMatchType {
+ /// The generated regex will match against the entire input.
+ Whole,
+ /// The generated regex will match against words.
+ Word,
+}
+
+/// Convert a "glob" style expression to a regex, anchoring either to the entire
+/// input or to individual words.
+pub fn glob_to_regex(glob: &str, match_type: GlobMatchType) -> Result<Regex, Error> {
+ let mut chunks = Vec::new();
+
+ // Patterns with wildcards must be simplified to avoid performance cliffs
+ // - The glob `?**?**?` is equivalent to the glob `???*`
+ // - The glob `???*` is equivalent to the regex `.{3,}`
+ for captures in WILDCARD_RUN.captures_iter(glob) {
+ if let Some(chunk) = captures.get(1) {
+ chunks.push(regex::escape(chunk.as_str()));
+ }
+
+ if let Some(wildcards) = captures.get(2) {
+ if wildcards.as_str() == "" {
+ continue;
+ }
+
+ let question_marks = wildcards.as_str().chars().filter(|c| *c == '?').count();
+
+ if wildcards.as_str().contains('*') {
+ chunks.push(format!(".{{{question_marks},}}"));
+ } else {
+ chunks.push(format!(".{{{question_marks}}}"));
+ }
+ }
+ }
+
+ let joined = chunks.join("");
+
+ let regex_str = match match_type {
+ GlobMatchType::Whole => format!(r"\A{joined}\z"),
+
+ // `^|\W` and `\W|$` handle the case where `pattern` starts or ends with a non-word
+ // character.
+ GlobMatchType::Word => format!(r"(?:^|\b|\W){joined}(?:\b|\W|$)"),
+ };
+
+ Ok(RegexBuilder::new(&regex_str)
+ .case_insensitive(true)
+ .build()?)
+}
+
+/// Compiles the glob into a `Matcher`.
+pub fn get_glob_matcher(glob: &str, match_type: GlobMatchType) -> Result<Matcher, Error> {
+ // There are a number of shortcuts we can make if the glob doesn't contain a
+ // wild card.
+ let matcher = if glob.contains(['*', '?']) {
+ let regex = glob_to_regex(glob, match_type)?;
+ Matcher::Regex(regex)
+ } else if match_type == GlobMatchType::Whole {
+ // If there aren't any wildcards and we're matching the whole thing,
+ // then we can simply do a case-insensitive string match.
+ Matcher::Whole(glob.to_lowercase())
+ } else {
+ // Otherwise, if we're matching against words, we can first check
+ // whether the haystack contains the glob at all.
+ Matcher::Word {
+ word: glob.to_lowercase(),
+ regex: None,
+ }
+ };
+
+ Ok(matcher)
+}
+
+/// Matches against a glob
+pub enum Matcher {
+ /// Plain regex matching.
+ Regex(Regex),
+
+ /// Case-insensitive equality.
+ Whole(String),
+
+ /// Word matching. `regex` caches the result of calling [`glob_to_regex`] on `word`.
+ Word { word: String, regex: Option<Regex> },
+}
+
+impl Matcher {
+ /// Checks if the glob matches the given haystack.
+ pub fn is_match(&mut self, haystack: &str) -> Result<bool, Error> {
+ // We want to do case-insensitive matching, so we convert to
+ // lowercase first.
+ let haystack = haystack.to_lowercase();
+
+ match self {
+ Matcher::Regex(regex) => Ok(regex.is_match(&haystack)),
+ Matcher::Whole(whole) => Ok(whole == &haystack),
+ Matcher::Word { word, regex } => {
+ // If we're looking for a literal word, then we first check if
+ // the haystack contains the word as a substring.
+ if !haystack.contains(&*word) {
+ return Ok(false);
+ }
+
+ // If it does contain the word as a substring, then we need to
+ // check if it is an actual word by testing it against the regex.
+ let regex = if let Some(regex) = regex {
+ regex
+ } else {
+ let compiled_regex = glob_to_regex(word, GlobMatchType::Word)?;
+ regex.insert(compiled_regex)
+ };
+
+ Ok(regex.is_match(&haystack))
+ }
+ }
+ }
+}
+
+#[test]
+fn test_get_localpart_from_id() {
+ get_localpart_from_id("").unwrap_err();
+ get_localpart_from_id(":").unwrap_err();
+ get_localpart_from_id(":asd").unwrap_err();
+ get_localpart_from_id("::as::asad").unwrap_err();
+
+ assert_eq!(get_localpart_from_id("@test:foo").unwrap(), "test");
+ assert_eq!(get_localpart_from_id("@:").unwrap(), "");
+ assert_eq!(get_localpart_from_id("@test:foo:907").unwrap(), "test");
+}
+
+#[test]
+fn test_glob() -> Result<(), Error> {
+ assert_eq!(
+ glob_to_regex("simple", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple\z"
+ );
+ assert_eq!(
+ glob_to_regex("simple*", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple.{0,}\z"
+ );
+ assert_eq!(
+ glob_to_regex("simple?", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple.{1}\z"
+ );
+ assert_eq!(
+ glob_to_regex("simple?*?*", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple.{2,}\z"
+ );
+ assert_eq!(
+ glob_to_regex("simple???", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple.{3}\z"
+ );
+
+ assert_eq!(
+ glob_to_regex("escape.", GlobMatchType::Whole)?.as_str(),
+ r"\Aescape\.\z"
+ );
+
+ assert!(glob_to_regex("simple", GlobMatchType::Whole)?.is_match("simple"));
+ assert!(!glob_to_regex("simple", GlobMatchType::Whole)?.is_match("simples"));
+ assert!(glob_to_regex("simple*", GlobMatchType::Whole)?.is_match("simples"));
+ assert!(glob_to_regex("simple?", GlobMatchType::Whole)?.is_match("simples"));
+ assert!(glob_to_regex("simple*", GlobMatchType::Whole)?.is_match("simple"));
+
+ assert!(glob_to_regex("simple", GlobMatchType::Word)?.is_match("some simple."));
+ assert!(glob_to_regex("simple", GlobMatchType::Word)?.is_match("simple"));
+ assert!(!glob_to_regex("simple", GlobMatchType::Word)?.is_match("simples"));
+
+ assert!(glob_to_regex("@user:foo", GlobMatchType::Word)?.is_match("Some @user:foo test"));
+ assert!(glob_to_regex("@user:foo", GlobMatchType::Word)?.is_match("@user:foo"));
+
+ Ok(())
+}
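To make the three `Matcher` fast paths above concrete, a brief usage sketch (the example strings are illustrative; everything else is the API defined in this file, with `anyhow` as the error type it already uses):

```rust
use synapse::push::utils::{get_glob_matcher, GlobMatchType};

fn main() -> Result<(), anyhow::Error> {
    // No wildcards + Whole: plain case-insensitive string equality.
    let mut whole = get_glob_matcher("Hello", GlobMatchType::Whole)?;
    assert!(whole.is_match("hello")?);

    // No wildcards + Word: cheap substring check first; the word regex is
    // only compiled (and then cached) when the substring is present.
    let mut word = get_glob_matcher("coffee", GlobMatchType::Word)?;
    assert!(word.is_match("Anyone for coffee?")?);
    assert!(!word.is_match("decoffeeinated")?); // substring, but not a word

    // Wildcards present: full glob-to-regex compilation up front.
    let mut glob = get_glob_matcher("@user*:example.com", GlobMatchType::Whole)?;
    assert!(glob.is_match("@user42:example.com")?);
    Ok(())
}
```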
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index cd2e64b75f..7442300196 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -27,6 +27,7 @@ DISTS = (
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
+ "ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
)
DESC = """\
diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py
index d0fb811bdb..9f2b7ded5b 100755
--- a/scripts-dev/check_pydantic_models.py
+++ b/scripts-dev/check_pydantic_models.py
@@ -88,10 +88,9 @@ def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]:
@functools.wraps(factory)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
- # type-ignore: should be redundant once we can use https://github.com/python/mypy/pull/12668
- if "strict" not in kwargs: # type: ignore[attr-defined]
+ if "strict" not in kwargs:
raise MissingStrictInConstrainedTypeException(factory.__name__)
- if not kwargs["strict"]: # type: ignore[index]
+ if not kwargs["strict"]:
raise MissingStrictInConstrainedTypeException(factory.__name__)
return factory(*args, **kwargs)
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index eab23f18f1..803c6ce92d 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -126,7 +126,7 @@ export COMPLEMENT_BASE_IMAGE=complement-synapse
extra_test_args=()
-test_tags="synapse_blacklist,msc2716,msc3030,msc3787"
+test_tags="synapse_blacklist,msc3787,msc3874"
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -139,6 +139,9 @@ if [[ -n "$WORKERS" ]]; then
# Use workers.
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
+ # Pass through the workers defined. If none, it will be an empty string
+ export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"
+
# Workers can only use Postgres as a database.
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
@@ -158,7 +161,10 @@ else
# We only test faster room joins on monoliths, because they are purposefully
# being developed without worker support to start with.
- test_tags="$test_tags,faster_joins"
+ #
+ # The tests for importing historical messages (MSC2716) and jump to date (MSC3030)
+ # also only pass with monoliths, currently.
+ test_tags="$test_tags,faster_joins,msc2716,msc3030"
fi
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index c82c58c54b..bf47b6c713 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -219,9 +219,7 @@ def _prepare() -> None:
update_branch(repo)
# Create the new release branch
- # Type ignore will no longer be needed after GitPython 3.1.28.
- # See https://github.com/gitpython-developers/GitPython/pull/1419
- repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type]
+ repo.create_head(release_branch_name, commit=base_branch)
# Special-case SyTest: we don't actually prepare any files so we may
# as well push it now (and only when we create a release branch;
diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi
index 93c4e69d42..ceade65ef9 100644
--- a/stubs/synapse/synapse_rust/push.pyi
+++ b/stubs/synapse/synapse_rust/push.pyi
@@ -1,4 +1,4 @@
-from typing import Any, Collection, Dict, Mapping, Sequence, Tuple, Union
+from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union
from synapse.types import JsonDict
@@ -26,12 +26,25 @@ class PushRules:
class FilteredPushRules:
def __init__(
- self,
- push_rules: PushRules,
- enabled_map: Dict[str, bool],
- msc3786_enabled: bool,
- msc3772_enabled: bool,
+ self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
def get_base_rule_ids() -> Collection[str]: ...
+
+class PushRuleEvaluator:
+ def __init__(
+ self,
+ flattened_keys: Mapping[str, str],
+ room_member_count: int,
+ sender_power_level: Optional[int],
+ notification_power_levels: Mapping[str, int],
+ related_events_flattened: Mapping[str, Mapping[str, str]],
+ related_event_match_enabled: bool,
+ ): ...
+ def run(
+ self,
+ push_rules: FilteredPushRules,
+ user_id: Optional[str],
+ display_name: Optional[str],
+ ) -> Collection[Union[Mapping, str]]: ...
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 1bed6393bd..fbfd506a43 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -21,6 +21,7 @@ import os
import sys
from synapse.util.rust import check_rust_lib_up_to_date
+from synapse.util.stringutils import strtobool
# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
@@ -28,25 +29,22 @@ if sys.version_info < (3, 7):
sys.exit(1)
# Allow using the asyncio reactor via env var.
-if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)):
- try:
- from incremental import Version
+if strtobool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", "0")):
+ from incremental import Version
- import twisted
+ import twisted
- # We need a bugfix that is included in Twisted 21.2.0:
- # https://twistedmatrix.com/trac/ticket/9787
- if twisted.version < Version("Twisted", 21, 2, 0):
- print("Using asyncio reactor requires Twisted>=21.2.0")
- sys.exit(1)
+ # We need a bugfix that is included in Twisted 21.2.0:
+ # https://twistedmatrix.com/trac/ticket/9787
+ if twisted.version < Version("Twisted", 21, 2, 0):
+ print("Using asyncio reactor requires Twisted>=21.2.0")
+ sys.exit(1)
- import asyncio
+ import asyncio
- from twisted.internet import asyncioreactor
+ from twisted.internet import asyncioreactor
- asyncioreactor.install(asyncio.get_event_loop())
- except ImportError:
- pass
+ asyncioreactor.install(asyncio.get_event_loop())
# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 450ba462ba..d850e54e17 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -72,6 +72,7 @@ from synapse.storage.databases.main.registration import (
RegistrationBackgroundUpdateStore,
find_max_generated_user_id_localpart,
)
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
@@ -107,7 +108,7 @@ BOOLEAN_COLUMNS = {
"redactions": ["have_censored"],
"room_stats_state": ["is_federatable"],
"local_media_repository": ["safe_from_quarantine"],
- "users": ["shadow_banned"],
+ "users": ["shadow_banned", "approved"],
"e2e_fallback_keys_json": ["used"],
"access_tokens": ["used"],
"device_lists_changes_in_room": ["converted_to_destinations"],
@@ -206,6 +207,7 @@ class Store(
PusherWorkerStore,
PresenceBackgroundUpdateStore,
ReceiptsBackgroundUpdateStore,
+ RelationsWorkerStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py
old mode 100755
new mode 100644
index fb1fb83f50..0adf94bba6
--- a/synapse/_scripts/update_synapse_database.py
+++ b/synapse/_scripts/update_synapse_database.py
@@ -15,7 +15,6 @@
import argparse
import logging
-import sys
from typing import cast
import yaml
@@ -100,13 +99,6 @@ def main() -> None:
# Load, process and sanity-check the config.
hs_config = yaml.safe_load(args.database_config)
- if "database" not in hs_config and "databases" not in hs_config:
- sys.stderr.write(
- "The configuration file must have a 'database' or 'databases' section. "
- "See https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database"
- )
- sys.exit(4)
-
config = HomeServerConfig()
config.parse_config_dict(hs_config, "", "")
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 5747d36802..8aa646890d 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -31,6 +31,9 @@ MAX_ALIAS_LENGTH = 255
# the maximum length for a user id is 255 characters
MAX_USERID_LENGTH = 255
+# Constant value used for the pseudo-thread which is the main timeline.
+MAIN_TIMELINE: Final = "main"
+
class Membership:
@@ -122,6 +125,8 @@ class EventTypes:
MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
MSC2716_MARKER: Final = "org.matrix.msc2716.marker"
+ Reaction: Final = "m.reaction"
+
class ToDeviceEventTypes:
RoomKeyRequest: Final = "m.room_key_request"
@@ -272,3 +277,14 @@ class PublicRoomsFilterFields:
GENERIC_SEARCH_TERM: Final = "generic_search_term"
ROOM_TYPES: Final = "room_types"
+
+
+class ApprovalNoticeMedium:
+ """Identifier for the medium this server will use to serve notice of approval for a
+ specific user's registration.
+
+ As defined in https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/m_not_approved/proposals/3866-user-not-approved-error.md
+ """
+
+ NONE = "org.matrix.msc3866.none"
+ EMAIL = "org.matrix.msc3866.email"
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 1c6b53aa24..e2cfcea0f2 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -106,6 +106,8 @@ class Codes(str, Enum):
# Part of MSC3895.
UNABLE_DUE_TO_PARTIAL_STATE = "ORG.MATRIX.MSC3895_UNABLE_DUE_TO_PARTIAL_STATE"
+ USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"
+
class CodeMessageException(RuntimeError):
"""An exception with integer code and message string attributes.
@@ -153,7 +155,13 @@ class RedirectException(CodeMessageException):
class SynapseError(CodeMessageException):
"""A base exception type for matrix errors which have an errcode and error
- message (as well as an HTTP status code).
+ message (as well as an HTTP status code). These often bubble all the way up to the
+ client API response, so the error code and status defined here frequently reach
+ the client directly. If the error doesn't make sense to present to a client, then
+ it probably shouldn't be a `SynapseError`. For example, if we contact another
+ homeserver over federation, we shouldn't automatically ferry response errors back
+ to the client on our end (a 500 from a remote server does not make sense to a
+ client when our server did not experience a 500).
Attributes:
errcode: Matrix error code e.g 'M_FORBIDDEN'
@@ -566,6 +574,20 @@ class UnredactedContentDeletedError(SynapseError):
return cs_error(self.msg, self.errcode, **extra)
+class NotApprovedError(SynapseError):
+ def __init__(
+ self,
+ msg: str,
+ approval_notice_medium: str,
+ ):
+ super().__init__(
+ code=403,
+ msg=msg,
+ errcode=Codes.USER_AWAITING_APPROVAL,
+ additional_fields={"approval_notice_medium": approval_notice_medium},
+ )
+
+
def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
"""Utility method for constructing an error response for client-server
interactions.
@@ -584,8 +606,20 @@ def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
class FederationError(RuntimeError):
- """This class is used to inform remote homeservers about erroneous
- PDUs they sent us.
+ """
+ Raised when we process an erroneous PDU.
+
+ There are two kinds of scenarios where this exception can be raised:
+
+ 1. We may pull an invalid PDU from a remote homeserver (e.g. during backfill). We
+ raise this exception to signal an error to the rest of the application.
+ 2. We may be pushed an invalid PDU as part of a `/send` transaction from a remote
+ homeserver. We raise so that we can respond to the transaction and include the
+ error string in the "PDU Processing Result". The message will likely be
+ ignored by the remote homeserver and is not machine-parseable since it's
+ just a string.
+
+ TODO: In the future, we should split these usage scenarios into their own error types.
FATAL: The remote server could not interpret the source event.
(e.g., it was missing a required field)
@@ -624,6 +658,27 @@ class FederationError(RuntimeError):
}
+class FederationPullAttemptBackoffError(RuntimeError):
+ """
+ Raised to indicate that we are deliberately not attempting to pull the given
+ events over federation because we've already done so recently and are backing off.
+
+ Attributes:
+ event_ids: The event IDs which we are refusing to pull
+ message: A custom error message that gives more context
+ """
+
+ def __init__(self, event_ids: List[str], message: Optional[str]):
+ self.event_ids = event_ids
+
+ if message:
+ error_message = message
+ else:
+ error_message = f"Not attempting to pull event_ids={self.event_ids} because we already tried to pull them recently (backing off)."
+
+ super().__init__(error_message)
+
+
class HttpResponseException(CodeMessageException):
"""
Represents an HTTP-level failure of an outbound request
@@ -658,7 +713,7 @@ class HttpResponseException(CodeMessageException):
set to the reason code from the HTTP response.
Returns:
- SynapseError:
+ The error converted to a SynapseError.
"""
# try to parse the body as json, to get better errcode/msg, but
# default to M_UNKNOWN with the HTTP status as the error text
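
A hedged sketch of how the new `NotApprovedError` might be raised while handling a login or registration request when MSC3866 approval is pending. The helper function, its `approved` flag, and the message string are hypothetical; `NotApprovedError` and `ApprovalNoticeMedium` are the classes added in this patch:

from synapse.api.constants import ApprovalNoticeMedium
from synapse.api.errors import NotApprovedError

def check_user_approved(approved: bool) -> None:
    # Produces a 403 with errcode ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL
    # and an "approval_notice_medium" field in the response body.
    if not approved:
        raise NotApprovedError(
            msg="This account is pending approval by a server administrator.",
            approval_notice_medium=ApprovalNoticeMedium.NONE,
        )
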
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index f7f46f8d80..a9888381b4 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -36,14 +36,14 @@ from jsonschema import FormatChecker
from synapse.api.constants import EduTypes, EventContentFields
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
-from synapse.events import EventBase
+from synapse.events import EventBase, relation_from_event
from synapse.types import JsonDict, RoomID, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
@@ -53,11 +53,17 @@ FILTER_SCHEMA = {
# check types are valid event types
"types": {"type": "array", "items": {"type": "string"}},
"not_types": {"type": "array", "items": {"type": "string"}},
+ # MSC3874, filtering /messages.
+ "org.matrix.msc3874.rel_types": {"type": "array", "items": {"type": "string"}},
+ "org.matrix.msc3874.not_rel_types": {
+ "type": "array",
+ "items": {"type": "string"},
+ },
},
}
ROOM_FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"not_rooms": {"$ref": "#/definitions/room_id_array"},
@@ -71,7 +77,7 @@ ROOM_FILTER_SCHEMA = {
}
ROOM_EVENT_FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
@@ -84,6 +90,8 @@ ROOM_EVENT_FILTER_SCHEMA = {
"contains_url": {"type": "boolean"},
"lazy_load_members": {"type": "boolean"},
"include_redundant_members": {"type": "boolean"},
+ "unread_thread_notifications": {"type": "boolean"},
+ "org.matrix.msc3773.unread_thread_notifications": {"type": "boolean"},
# Include or exclude events with the provided labels.
# cf https://github.com/matrix-org/matrix-doc/pull/2326
"org.matrix.labels": {"type": "array", "items": {"type": "string"}},
@@ -135,7 +143,7 @@ USER_FILTER_SCHEMA = {
},
},
},
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
}
@@ -240,6 +248,9 @@ class FilterCollection:
def include_redundant_members(self) -> bool:
return self._room_state_filter.include_redundant_members
+ def unread_thread_notifications(self) -> bool:
+ return self._room_timeline_filter.unread_thread_notifications
+
async def filter_presence(
self, events: Iterable[UserPresenceState]
) -> List[UserPresenceState]:
@@ -304,6 +315,16 @@ class Filter:
self.include_redundant_members = filter_json.get(
"include_redundant_members", False
)
+ self.unread_thread_notifications: bool = filter_json.get(
+ "unread_thread_notifications", False
+ )
+ if (
+ not self.unread_thread_notifications
+ and hs.config.experimental.msc3773_enabled
+ ):
+ self.unread_thread_notifications = filter_json.get(
+ "org.matrix.msc3773.unread_thread_notifications", False
+ )
self.types = filter_json.get("types", None)
self.not_types = filter_json.get("not_types", [])
@@ -319,8 +340,15 @@ class Filter:
self.labels = filter_json.get("org.matrix.labels", None)
self.not_labels = filter_json.get("org.matrix.not_labels", [])
- self.related_by_senders = self.filter_json.get("related_by_senders", None)
- self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)
+ self.related_by_senders = filter_json.get("related_by_senders", None)
+ self.related_by_rel_types = filter_json.get("related_by_rel_types", None)
+
+ # For compatibility with _check_fields.
+ self.rel_types = None
+ self.not_rel_types = []
+ if hs.config.experimental.msc3874_enabled:
+ self.rel_types = filter_json.get("org.matrix.msc3874.rel_types", None)
+ self.not_rel_types = filter_json.get("org.matrix.msc3874.not_rel_types", [])
def filters_all_types(self) -> bool:
return "*" in self.not_types
@@ -371,11 +399,19 @@ class Filter:
# check if there is a string url field in the content for filtering purposes
labels = content.get(EventContentFields.LABELS, [])
+ # Check if the event has a relation.
+ rel_type = None
+ if isinstance(event, EventBase):
+ relation = relation_from_event(event)
+ if relation:
+ rel_type = relation.rel_type
+
field_matchers = {
"rooms": lambda v: room_id == v,
"senders": lambda v: sender == v,
"types": lambda v: _matches_wildcard(ev_type, v),
"labels": lambda v: v in labels,
+ "rel_types": lambda v: rel_type == v,
}
result = self._check_fields(field_matchers)
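
As an illustration of the MSC3874 fields above, a client could (assuming `msc3874_enabled` is set) pass a room event filter like the following to /messages to hide threaded replies from the main timeline. The filter contents are invented for illustration:

import json

room_event_filter = {
    "limit": 20,
    # Exclude events whose m.relates_to rel_type is m.thread.
    "org.matrix.msc3874.not_rel_types": ["m.thread"],
}
print(json.dumps(room_event_filter))
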
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 044c7d4926..511790c7c5 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -343,6 +343,7 @@ class RequestRatelimiter:
requester: Requester,
update: bool = True,
is_admin_redaction: bool = False,
+ n_actions: int = 1,
) -> None:
"""Ratelimits requests.
@@ -355,6 +356,8 @@ class RequestRatelimiter:
is_admin_redaction: Whether this is a room admin/moderator
redacting an event. If so then we may apply different
ratelimits depending on config.
+ n_actions: The number of actions to apply to the rate limiter at once.
Raises:
LimitExceededError if the request should be ratelimited
@@ -383,7 +386,9 @@ class RequestRatelimiter:
if is_admin_redaction and self.admin_redaction_ratelimiter:
# If we have separate config for admin redactions, use a separate
# ratelimiter as to not have user_ids clash
- await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
+ await self.admin_redaction_ratelimiter.ratelimit(
+ requester, update=update, n_actions=n_actions
+ )
else:
# Override rate and burst count per-user
await self.request_ratelimiter.ratelimit(
@@ -391,4 +396,5 @@ class RequestRatelimiter:
rate_hz=messages_per_second,
burst_count=burst_count,
update=update,
+ n_actions=n_actions,
)
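
A sketch of the intended use of the new `n_actions` parameter: a caller sending a batch of events can charge the whole batch against the ratelimiter in one call instead of looping. The function and its arguments are hypothetical stand-ins:

from typing import Any, List

async def ratelimit_event_batch(
    request_ratelimiter: Any, requester: Any, events: List[str]
) -> None:
    # Raises LimitExceededError if the batch as a whole should be ratelimited.
    await request_ratelimiter.ratelimit(
        requester, update=True, n_actions=len(events)
    )
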
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index bd49fa6a5f..a918579f50 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -28,7 +28,7 @@ FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
STATIC_PREFIX = "/_matrix/static"
-SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
+SERVER_KEY_PREFIX = "/_matrix/key"
MEDIA_R0_PREFIX = "/_matrix/media/r0"
MEDIA_V3_PREFIX = "/_matrix/media/v3"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 65f7dc81c4..ebac950bf2 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -47,6 +47,7 @@ from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool
+from twisted.web.resource import Resource
import synapse.util.caches
from synapse.api.constants import MAX_PDU_SIZE
@@ -55,12 +56,13 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
-from synapse.config.server import ManholeConfig
+from synapse.config.server import ListenerConfig, ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
+from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.tracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
@@ -98,9 +100,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
func: Function to be called when sent a SIGHUP signal.
*args, **kwargs: args and kwargs to be passed to the target function.
"""
- # This type-ignore should be redundant once we use a mypy release with
- # https://github.com/python/mypy/pull/12668.
- _sighup_callbacks.append((func, args, kwargs)) # type: ignore[arg-type]
+ _sighup_callbacks.append((func, args, kwargs))
def start_worker_reactor(
@@ -359,6 +359,55 @@ def listen_tcp(
return r # type: ignore[return-value]
+def listen_http(
+ listener_config: ListenerConfig,
+ root_resource: Resource,
+ version_string: str,
+ max_request_body_size: int,
+ context_factory: Optional[IOpenSSLContextFactory],
+ reactor: ISynapseReactor = reactor,
+) -> List[Port]:
+ port = listener_config.port
+ bind_addresses = listener_config.bind_addresses
+ tls = listener_config.tls
+
+ assert listener_config.http_options is not None
+
+ site_tag = listener_config.http_options.tag
+ if site_tag is None:
+ site_tag = str(port)
+
+ site = SynapseSite(
+ "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
+ site_tag,
+ listener_config,
+ root_resource,
+ version_string,
+ max_request_body_size=max_request_body_size,
+ reactor=reactor,
+ )
+ if tls:
+ # refresh_certificate should have been called before this.
+ assert context_factory is not None
+ ports = listen_ssl(
+ bind_addresses,
+ port,
+ site,
+ context_factory,
+ reactor=reactor,
+ )
+ logger.info("Synapse now listening on TCP port %d (TLS)", port)
+ else:
+ ports = listen_tcp(
+ bind_addresses,
+ port,
+ site,
+ reactor=reactor,
+ )
+ logger.info("Synapse now listening on TCP port %d", port)
+ return ports
+
+
def listen_ssl(
bind_addresses: Collection[str],
port: int,
@@ -560,7 +609,7 @@ def reload_cache_config(config: HomeServerConfig) -> None:
logger.warning(f)
else:
logger.debug(
- "New cache config. Was:\n %s\nNow:\n",
+ "New cache config. Was:\n %s\nNow:\n %s",
previous_cache_config.__dict__,
config.caches.__dict__,
)
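
A minimal sketch of what call sites gain from the new `listen_http` helper: instead of constructing a SynapseSite and choosing between `listen_ssl`/`listen_tcp` themselves, they hand the listener config straight over (mirroring the real call sites in synapse/app/homeserver.py and synapse/app/generic_worker.py further down). The `_listen` wrapper is hypothetical and `self` stands in for a HomeServer subclass:

from synapse.app._base import listen_http, max_request_body_size

def _listen(self, listener_config, root_resource):
    return listen_http(
        listener_config,
        root_resource,
        self.version_string,
        max_request_body_size(self.config),
        self.tls_server_context_factory,  # may be None for plain HTTP
        reactor=self.get_reactor(),
    )
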
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 8a583d3ec6..165d1c5db0 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -28,10 +28,6 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.events import EventBase
from synapse.handlers.admin import ExfiltrationWriter
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
@@ -40,10 +36,24 @@ from synapse.storage.databases.main.appservice import (
ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
+from synapse.storage.databases.main.devices import DeviceWorkerStore
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
+from synapse.storage.databases.main.event_push_actions import (
+ EventPushActionsWorkerStore,
+)
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.filtering import FilteringWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
+from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext
@@ -52,17 +62,25 @@ logger = logging.getLogger("synapse.app.admin_cmd")
class AdminCmdSlavedStore(
- SlavedFilteringStore,
- SlavedDeviceStore,
- SlavedPushRuleStore,
- SlavedEventStore,
+ FilteringWorkerStore,
+ DeviceWorkerStore,
TagsWorkerStore,
DeviceInboxWorkerStore,
AccountDataWorkerStore,
+ PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
- RegistrationWorkerStore,
+ RoomMemberWorkerStore,
+ RelationsWorkerStore,
+ EventFederationWorkerStore,
+ EventPushActionsWorkerStore,
+ StateGroupWorkerStore,
+ SignatureWorkerStore,
+ UserErasureWorkerStore,
ReceiptsWorkerStore,
+ StreamWorkerStore,
+ EventsWorkerStore,
+ RegistrationWorkerStore,
RoomWorkerStore,
):
def __init__(
diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py
index 89eb07df27..8c0f4a57e7 100644
--- a/synapse/app/complement_fork_starter.py
+++ b/synapse/app/complement_fork_starter.py
@@ -51,11 +51,18 @@ import argparse
import importlib
import itertools
import multiprocessing
+import os
+import signal
import sys
-from typing import Any, Callable, List
+from types import FrameType
+from typing import Any, Callable, Dict, List, Optional
from twisted.internet.main import installReactor
+# a map of the original signal handlers, before we installed our custom ones.
+# We restore these in our child processes.
+_original_signal_handlers: Dict[int, Any] = {}
+
class ProxiedReactor:
"""
@@ -105,6 +112,11 @@ def _worker_entrypoint(
sys.argv = args
+ # reset the custom signal handlers that we installed, so that the children start
+ # from a clean slate.
+ for sig, handler in _original_signal_handlers.items():
+ signal.signal(sig, handler)
+
from twisted.internet.epollreactor import EPollReactor
proxy_reactor._install_real_reactor(EPollReactor())
@@ -167,13 +179,29 @@ def main() -> None:
update_proc.join()
print("===== PREPARED DATABASE =====", file=sys.stderr)
+ processes: List[multiprocessing.Process] = []
+
+ # Install signal handlers to propagate signals to all our children, so that they
+ # shut down cleanly. This also inhibits our own exit, but that's good: we want to
+ # wait until the children have exited.
+ def handle_signal(signum: int, frame: Optional[FrameType]) -> None:
+ print(
+ f"complement_fork_starter: Caught signal {signum}. Stopping children.",
+ file=sys.stderr,
+ )
+ for p in processes:
+ if p.pid:
+ os.kill(p.pid, signum)
+
+ for sig in (signal.SIGINT, signal.SIGTERM):
+ _original_signal_handlers[sig] = signal.signal(sig, handle_signal)
+
# At this point, we've imported all the main entrypoints for all the workers.
# Now we basically just fork() out to create the workers we need.
# Because we're using fork(), all the workers get a clone of this launcher's
# memory space and don't need to repeat the work of loading the code!
# Instead of using fork() directly, we use the multiprocessing library,
# which uses fork() on Unix platforms.
- processes = []
for (func, worker_args) in zip(worker_functions, args_by_worker):
process = multiprocessing.Process(
target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 5e3825fca6..74909b7d4a 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -14,21 +14,19 @@
# limitations under the License.
import logging
import sys
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List
-from twisted.internet import address
from twisted.web.resource import Resource
import synapse
import synapse.events
-from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import (
CLIENT_API_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_R0_PREFIX,
MEDIA_V3_PREFIX,
- SERVER_KEY_V2_PREFIX,
+ SERVER_KEY_PREFIX,
)
from synapse.app import _base
from synapse.app._base import (
@@ -43,17 +41,9 @@ from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.http.site import SynapseRequest, SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.keys import SlavedKeyStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client import (
account_data,
@@ -65,6 +55,7 @@ from synapse.rest.client import (
push_rule,
read_marker,
receipts,
+ relations,
room,
room_batch,
room_keys,
@@ -75,12 +66,12 @@ from synapse.rest.client import (
versions,
voip,
)
-from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import (
KeyChangesServlet,
KeyQueryServlet,
+ KeyUploadServlet,
OneTimeKeyServlet,
)
from synapse.rest.client.register import (
@@ -88,7 +79,7 @@ from synapse.rest.client.register import (
RegistrationTokenValidityRestServlet,
)
from synapse.rest.health import HealthResource
-from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.rest.well_known import well_known_resource
from synapse.server import HomeServer
@@ -100,8 +91,16 @@ from synapse.storage.databases.main.appservice import (
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
+from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.directory import DirectoryWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
+from synapse.storage.databases.main.event_push_actions import (
+ EventPushActionsWorkerStore,
+)
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.filtering import FilteringWorkerStore
+from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.metrics import ServerMetricsStore
@@ -110,118 +109,31 @@ from synapse.storage.databases.main.monthly_active_users import (
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
+from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.room_batch import RoomBatchStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stats import StatsStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
-from synapse.types import JsonDict
+from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.util import SYNAPSE_VERSION
from synapse.util.httpresourcetree import create_resource_tree
logger = logging.getLogger("synapse.app.generic_worker")
-class KeyUploadServlet(RestServlet):
- """An implementation of the `KeyUploadServlet` that responds to read only
- requests, but otherwise proxies through to the master instance.
- """
-
- PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$")
-
- def __init__(self, hs: HomeServer):
- """
- Args:
- hs: server
- """
- super().__init__()
- self.auth = hs.get_auth()
- self.store = hs.get_datastores().main
- self.http_client = hs.get_simple_http_client()
- self.main_uri = hs.config.worker.worker_main_http_uri
-
- async def on_POST(
- self, request: SynapseRequest, device_id: Optional[str]
- ) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=True)
- user_id = requester.user.to_string()
- body = parse_json_object_from_request(request)
-
- if device_id is not None:
- # passing the device_id here is deprecated; however, we allow it
- # for now for compatibility with older clients.
- if requester.device_id is not None and device_id != requester.device_id:
- logger.warning(
- "Client uploading keys for a different device "
- "(logged in as %s, uploading for %s)",
- requester.device_id,
- device_id,
- )
- else:
- device_id = requester.device_id
-
- if device_id is None:
- raise SynapseError(
- 400, "To upload keys, you must pass device_id when authenticating"
- )
-
- if body:
- # They're actually trying to upload something, proxy to main synapse.
-
- # Proxy headers from the original request, such as the auth headers
- # (in case the access token is there) and the original IP /
- # User-Agent of the request.
- headers = {
- header: request.requestHeaders.getRawHeaders(header, [])
- for header in (b"Authorization", b"User-Agent")
- }
- # Add the previous hop to the X-Forwarded-For header.
- x_forwarded_for = request.requestHeaders.getRawHeaders(
- b"X-Forwarded-For", []
- )
- # we use request.client here, since we want the previous hop, not the
- # original client (as returned by request.getClientAddress()).
- if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
- previous_host = request.client.host.encode("ascii")
- # If the header exists, add to the comma-separated list of the first
- # instance of the header. Otherwise, generate a new header.
- if x_forwarded_for:
- x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host]
- x_forwarded_for.extend(x_forwarded_for[1:])
- else:
- x_forwarded_for = [previous_host]
- headers[b"X-Forwarded-For"] = x_forwarded_for
-
- # Replicate the original X-Forwarded-Proto header. Note that
- # XForwardedForRequest overrides isSecure() to give us the original protocol
- # used by the client, as opposed to the protocol used by our upstream proxy
- # - which is what we want here.
- headers[b"X-Forwarded-Proto"] = [
- b"https" if request.isSecure() else b"http"
- ]
-
- try:
- result = await self.http_client.post_json_get_json(
- self.main_uri + request.uri.decode("ascii"), body, headers=headers
- )
- except HttpResponseException as e:
- raise e.to_synapse_error() from e
- except RequestSendFailed as e:
- raise SynapseError(502, "Failed to talk to master") from e
-
- return 200, result
- else:
- # Just interested in counts.
- result = await self.store.count_e2e_one_time_keys(user_id, device_id)
- return 200, {"one_time_key_counts": result}
-
-
class GenericWorkerSlavedStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker.
@@ -231,26 +143,36 @@ class GenericWorkerSlavedStore(
EndToEndRoomKeyStore,
PresenceStore,
DeviceInboxWorkerStore,
- SlavedDeviceStore,
- SlavedPushRuleStore,
+ DeviceWorkerStore,
TagsWorkerStore,
AccountDataWorkerStore,
- SlavedPusherStore,
CensorEventsStore,
ClientIpWorkerStore,
- SlavedEventStore,
- SlavedKeyStore,
+ # KeyStore isn't really safe to use from a worker, but for now we do so and hope that
+ # the races it creates aren't too bad.
+ KeyStore,
RoomWorkerStore,
RoomBatchStore,
DirectoryWorkerStore,
+ PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
ProfileWorkerStore,
- SlavedFilteringStore,
+ FilteringWorkerStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
ServerMetricsStore,
+ PusherWorkerStore,
+ RoomMemberWorkerStore,
+ RelationsWorkerStore,
+ EventFederationWorkerStore,
+ EventPushActionsWorkerStore,
+ StateGroupWorkerStore,
+ SignatureWorkerStore,
+ UserErasureWorkerStore,
ReceiptsWorkerStore,
+ StreamWorkerStore,
+ EventsWorkerStore,
RegistrationWorkerStore,
SearchStore,
TransactionWorkerStore,
@@ -267,15 +189,9 @@ class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore
def _listen_http(self, listener_config: ListenerConfig) -> None:
- port = listener_config.port
- bind_addresses = listener_config.bind_addresses
assert listener_config.http_options is not None
- site_tag = listener_config.http_options.tag
- if site_tag is None:
- site_tag = str(port)
-
# We always include a health resource.
resources: Dict[str, Resource] = {"/health": HealthResource()}
@@ -308,6 +224,7 @@ class GenericWorkerServer(HomeServer):
sync.register_servlets(self, resource)
events.register_servlets(self, resource)
room.register_servlets(self, resource, is_worker=True)
+ relations.register_servlets(self, resource)
room.register_deprecated_servlets(self, resource)
initial_sync.register_servlets(self, resource)
room_batch.register_servlets(self, resource)
@@ -323,13 +240,13 @@ class GenericWorkerServer(HomeServer):
presence.register_servlets(self, resource)
- resources.update({CLIENT_API_PREFIX: resource})
+ resources[CLIENT_API_PREFIX] = resource
resources.update(build_synapse_client_resource_tree(self))
- resources.update({"/.well-known": well_known_resource(self)})
+ resources["/.well-known"] = well_known_resource(self)
elif name == "federation":
- resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
+ resources[FEDERATION_PREFIX] = TransportLayerServer(self)
elif name == "media":
if self.config.media.can_load_media_repo:
media_repo = self.get_media_repository_resource()
@@ -357,16 +274,12 @@ class GenericWorkerServer(HomeServer):
# Only load the openid resource separately if federation resource
# is not specified since federation resource includes openid
# resource.
- resources.update(
- {
- FEDERATION_PREFIX: TransportLayerServer(
- self, servlet_groups=["openid"]
- )
- }
+ resources[FEDERATION_PREFIX] = TransportLayerServer(
+ self, servlet_groups=["openid"]
)
if name in ["keys", "federation"]:
- resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+ resources[SERVER_KEY_PREFIX] = KeyResource(self)
if name == "replication":
resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
@@ -377,23 +290,15 @@ class GenericWorkerServer(HomeServer):
root_resource = create_resource_tree(resources, OptionsResource())
- _base.listen_tcp(
- bind_addresses,
- port,
- SynapseSite(
- "synapse.access.http.%s" % (site_tag,),
- site_tag,
- listener_config,
- root_resource,
- self.version_string,
- max_request_body_size=max_request_body_size(self.config),
- reactor=self.get_reactor(),
- ),
+ _base.listen_http(
+ listener_config,
+ root_resource,
+ self.version_string,
+ max_request_body_size(self.config),
+ self.tls_server_context_factory,
reactor=self.get_reactor(),
)
- logger.info("Synapse worker now listening on port %d", port)
-
def start_listening(self) -> None:
for listener in self.config.worker.worker_listeners:
if listener.type == "http":
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 883f2fd2ec..4f4fee4782 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -31,14 +31,13 @@ from synapse.api.urls import (
LEGACY_MEDIA_PREFIX,
MEDIA_R0_PREFIX,
MEDIA_V3_PREFIX,
- SERVER_KEY_V2_PREFIX,
+ SERVER_KEY_PREFIX,
STATIC_PREFIX,
)
from synapse.app import _base
from synapse.app._base import (
handle_startup_exception,
- listen_ssl,
- listen_tcp,
+ listen_http,
max_request_body_size,
redirect_stdio_to_logs,
register_start,
@@ -53,14 +52,13 @@ from synapse.http.server import (
RootOptionsRedirectResource,
StaticResource,
)
-from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.rest import ClientRestResource
from synapse.rest.admin import AdminRestResource
from synapse.rest.health import HealthResource
-from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.rest.well_known import well_known_resource
from synapse.server import HomeServer
@@ -83,8 +81,6 @@ class SynapseHomeServer(HomeServer):
self, config: HomeServerConfig, listener_config: ListenerConfig
) -> Iterable[Port]:
port = listener_config.port
- bind_addresses = listener_config.bind_addresses
- tls = listener_config.tls
# Must exist since this is an HTTP listener.
assert listener_config.http_options is not None
site_tag = listener_config.http_options.tag
@@ -140,37 +136,15 @@ class SynapseHomeServer(HomeServer):
else:
root_resource = OptionsResource()
- site = SynapseSite(
- "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
- site_tag,
+ ports = listen_http(
listener_config,
create_resource_tree(resources, root_resource),
self.version_string,
- max_request_body_size=max_request_body_size(self.config),
+ max_request_body_size(self.config),
+ self.tls_server_context_factory,
reactor=self.get_reactor(),
)
- if tls:
- # refresh_certificate should have been called before this.
- assert self.tls_server_context_factory is not None
- ports = listen_ssl(
- bind_addresses,
- port,
- site,
- self.tls_server_context_factory,
- reactor=self.get_reactor(),
- )
- logger.info("Synapse now listening on TCP port %d (TLS)", port)
-
- else:
- ports = listen_tcp(
- bind_addresses,
- port,
- site,
- reactor=self.get_reactor(),
- )
- logger.info("Synapse now listening on TCP port %d", port)
-
return ports
def _configure_named_resource(
@@ -215,30 +189,22 @@ class SynapseHomeServer(HomeServer):
consent_resource: Resource = ConsentResource(self)
if compress:
consent_resource = gz_wrap(consent_resource)
- resources.update({"/_matrix/consent": consent_resource})
+ resources["/_matrix/consent"] = consent_resource
if name == "federation":
federation_resource: Resource = TransportLayerServer(self)
if compress:
federation_resource = gz_wrap(federation_resource)
- resources.update({FEDERATION_PREFIX: federation_resource})
+ resources[FEDERATION_PREFIX] = federation_resource
if name == "openid":
- resources.update(
- {
- FEDERATION_PREFIX: TransportLayerServer(
- self, servlet_groups=["openid"]
- )
- }
+ resources[FEDERATION_PREFIX] = TransportLayerServer(
+ self, servlet_groups=["openid"]
)
if name in ["static", "client"]:
- resources.update(
- {
- STATIC_PREFIX: StaticResource(
- os.path.join(os.path.dirname(synapse.__file__), "static")
- )
- }
+ resources[STATIC_PREFIX] = StaticResource(
+ os.path.join(os.path.dirname(synapse.__file__), "static")
)
if name in ["media", "federation", "client"]:
@@ -257,7 +223,7 @@ class SynapseHomeServer(HomeServer):
)
if name in ["keys", "federation"]:
- resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+ resources[SERVER_KEY_PREFIX] = KeyResource(self)
if name == "metrics" and self.config.metrics.enable_metrics:
metrics_resource: Resource = MetricsResource(RegistryProxy)
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 0dfa00df44..500bdde3a9 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -172,12 +172,24 @@ class ApplicationService:
Returns:
True if this service would like to know about this room.
"""
- member_list = await store.get_users_in_room(
+ # We can use `get_local_users_in_room(...)` here because an application service
+ # can only be interested in local users of the server it's on (ignore any remote
+ # users that might match the user namespace regex).
+ #
+ # In the future, we can consider re-using
+ # `store.get_app_service_users_in_room`, which is very similar to this
+ # function but performs slightly worse because it lacks this code's
+ # early escape-hatch of returning as soon as we find a single user that
+ # the appservice is interested in. The juice would be worth the squeeze
+ # if `store.get_app_service_users_in_room` were used in more places
+ # besides an experimental MSC, but for now we can avoid the extra work.
+ local_user_ids = await store.get_local_users_in_room(
room_id, on_invalidate=cache_context.invalidate
)
# check joined member events
- for user_id in member_list:
+ for user_id in local_user_ids:
if self.is_interested_in_user(user_id):
return True
return False
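
A toy model of the "early escape-hatch" the comment above describes: scanning stops at the first local user who matches the appservice's user namespace regex. The regex and member list are invented for illustration:

import re

user_namespace = re.compile(r"@irc_.*:example\.com")
local_user_ids = ["@alice:example.com", "@irc_bob:example.com"]

# any() short-circuits, so we stop as soon as one interesting user is found.
interested = any(user_namespace.match(u) for u in local_user_ids)
print(interested)  # True
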
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 0963fb3bb4..60774b240d 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -120,7 +120,11 @@ class ApplicationServiceApi(SimpleHttpClient):
uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
try:
- response = await self.get_json(uri, {"access_token": service.hs_token})
+ response = await self.get_json(
+ uri,
+ {"access_token": service.hs_token},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
@@ -140,7 +144,11 @@ class ApplicationServiceApi(SimpleHttpClient):
uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
try:
- response = await self.get_json(uri, {"access_token": service.hs_token})
+ response = await self.get_json(
+ uri,
+ {"access_token": service.hs_token},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
@@ -181,7 +189,11 @@ class ApplicationServiceApi(SimpleHttpClient):
**fields,
b"access_token": service.hs_token,
}
- response = await self.get_json(uri, args=args)
+ response = await self.get_json(
+ uri,
+ args=args,
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
if not isinstance(response, list):
logger.warning(
"query_3pe to %s returned an invalid response %r", uri, response
@@ -217,7 +229,11 @@ class ApplicationServiceApi(SimpleHttpClient):
urllib.parse.quote(protocol),
)
try:
- info = await self.get_json(uri, {"access_token": service.hs_token})
+ info = await self.get_json(
+ uri,
+ {"access_token": service.hs_token},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
if not _is_valid_3pe_metadata(info):
logger.warning(
@@ -313,6 +329,7 @@ class ApplicationServiceApi(SimpleHttpClient):
uri=uri,
json_body=body,
args={"access_token": service.hs_token},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
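
These changes send the `hs_token` both ways during the transition: as the legacy `access_token` query parameter and as an `Authorization: Bearer` header, which later spec versions expect. A stdlib sketch of the resulting request shape, with a made-up URL and token:

from urllib.parse import urlencode
from urllib.request import Request

hs_token = "hs_token_placeholder"
url = "https://appservice.example.com/_matrix/app/v1/users/@u:example.com"

req = Request(
    url + "?" + urlencode({"access_token": hs_token}),
    headers={"Authorization": f"Bearer {hs_token}"},
)
print(req.full_url)
print(req.headers)
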
diff --git a/synapse/config/cache.py b/synapse/config/cache.py
index 2db8cfb005..eb4194a5a9 100644
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -159,7 +159,7 @@ class CacheConfig(Config):
self.track_memory_usage = cache_config.get("track_memory_usage", False)
if self.track_memory_usage:
- check_requirements("cache_memory")
+ check_requirements("cache-memory")
expire_caches = cache_config.get("expire_caches", True)
cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 933779c23a..d4b71d1673 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -12,12 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any
+from typing import Any, Optional
+
+import attr
from synapse.config._base import Config
from synapse.types import JsonDict
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class MSC3866Config:
+ """Configuration for MSC3866 (mandating approval for new users)"""
+
+ # Whether the base support for the approval process is enabled. This includes the
+ # ability for administrators to check and update the approval of users, even if no
+ # approval is currently required.
+ enabled: bool = False
+ # Whether to require that new users are approved by an admin before their account
+ # can be used. Note that this setting is ignored if 'enabled' is false.
+ require_approval_for_new_accounts: bool = False
+
+
class ExperimentalConfig(Config):
"""Config section for enabling experimental features"""
@@ -80,16 +95,11 @@ class ExperimentalConfig(Config):
# MSC2815 (allow room moderators to view redacted event content)
self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)
- # MSC3786 (Add a default push rule to ignore m.room.server_acl events)
- self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)
+ # MSC3773: Thread notifications
+ self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
- # MSC3771: Thread read receipts
- self.msc3771_enabled: bool = experimental.get("msc3771_enabled", False)
- # MSC3772: A push rule for mutual relations.
- self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)
-
- # MSC3715: dir param on /relations.
- self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
+ # MSC3664: Pushrules to match on related events
+ self.msc3664_enabled: bool = experimental.get("msc3664_enabled", False)
# MSC3848: Introduce errcodes for specific event sending failures
self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)
@@ -97,6 +107,10 @@ class ExperimentalConfig(Config):
# MSC3852: Expose last seen user agent field on /_matrix/client/v3/devices.
self.msc3852_enabled: bool = experimental.get("msc3852_enabled", False)
+ # MSC3866: M_USER_AWAITING_APPROVAL error code
+ raw_msc3866_config = experimental.get("msc3866", {})
+ self.msc3866 = MSC3866Config(**raw_msc3866_config)
+
# MSC3881: Remotely toggle push notifications for another client
self.msc3881_enabled: bool = experimental.get("msc3881_enabled", False)
@@ -106,3 +120,14 @@ class ExperimentalConfig(Config):
self.msc3882_token_timeout = self.parse_duration(
experimental.get("msc3882_token_timeout", "5m")
)
+
+ # MSC3874: Filtering /messages with rel_types / not_rel_types.
+ self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)
+
+ # MSC3886: Simple client rendezvous capability
+ self.msc3886_endpoint: Optional[str] = experimental.get(
+ "msc3886_endpoint", None
+ )
+
+ # MSC3912: Relation-based redactions.
+ self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
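
A self-contained sketch of how the new `msc3866` section is parsed: the raw dict read from the experimental config is splatted into the frozen attrs class, so omitted keys fall back to False and unknown keys raise a TypeError. The class is redefined locally here so the snippet runs without Synapse installed:

import attr

@attr.s(auto_attribs=True, frozen=True, slots=True)
class MSC3866Config:
    enabled: bool = False
    require_approval_for_new_accounts: bool = False

raw_msc3866_config = {"enabled": True, "require_approval_for_new_accounts": True}
print(MSC3866Config(**raw_msc3866_config))
# -> MSC3866Config(enabled=True, require_approval_for_new_accounts=True)
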
diff --git a/synapse/config/groups.py b/synapse/config/groups.py
deleted file mode 100644
index baa051fdd4..0000000000
--- a/synapse/config/groups.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Any
-
-from synapse.types import JsonDict
-
-from ._base import Config
-
-
-class GroupsConfig(Config):
- section = "groups"
-
- def read_config(self, config: JsonDict, **kwargs: Any) -> None:
- self.enable_group_creation = config.get("enable_group_creation", False)
- self.group_creation_prefix = config.get("group_creation_prefix", "")
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 6c1f78f8df..5468b963a2 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -53,7 +53,7 @@ DEFAULT_LOG_CONFIG = Template(
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
@@ -317,15 +317,16 @@ def setup_logging(
Set up the logging subsystem.
Args:
- config (LoggingConfig | synapse.config.worker.WorkerConfig):
- configuration data
+ config: configuration data
- use_worker_options (bool): True to use the 'worker_log_config' option
+ use_worker_options: True to use the 'worker_log_config' option
instead of 'log_config'.
logBeginner: The Twisted logBeginner to use.
"""
+ from twisted.internet import reactor
+
log_config_path = (
config.worker.worker_log_config
if use_worker_options
@@ -348,3 +349,4 @@ def setup_logging(
)
logging.info("Server hostname: %s", config.server.server_name)
logging.info("Instance name: %s", hs.get_instance_name())
+ logging.info("Twisted reactor: %s", type(reactor).__name__)
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index f3134834e5..6034a0346e 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -43,33 +43,7 @@ class MetricsConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False)
- """
- ### `enable_legacy_metrics` (experimental)
-
- **Experimental: this option may be removed or have its behaviour
- changed at any time, with no notice.**
-
- Set to `true` to publish both legacy and non-legacy Prometheus metric names,
- or to `false` to only publish non-legacy Prometheus metric names.
- Defaults to `true`. Has no effect if `enable_metrics` is `false`.
-
- Legacy metric names include:
- - metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
- - counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
-
- These legacy metric names are unconventional and not compliant with OpenMetrics standards.
- They are included for backwards compatibility.
-
- Example configuration:
- ```yaml
- enable_legacy_metrics: false
- ```
-
- See https://github.com/matrix-org/synapse/issues/11106 for context.
-
- *Since v1.67.0.*
- """
- self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)
+ self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)
self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get(
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index 5418a332da..0bd83f4010 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -123,6 +123,8 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
"userinfo_endpoint": {"type": "string"},
"jwks_uri": {"type": "string"},
"skip_verification": {"type": "boolean"},
+ "backchannel_logout_enabled": {"type": "boolean"},
+ "backchannel_logout_ignore_sub": {"type": "boolean"},
"user_profile_method": {
"type": "string",
"enum": ["auto", "userinfo_endpoint"],
@@ -292,6 +294,10 @@ def _parse_oidc_config_dict(
token_endpoint=oidc_config.get("token_endpoint"),
userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
jwks_uri=oidc_config.get("jwks_uri"),
+ backchannel_logout_enabled=oidc_config.get("backchannel_logout_enabled", False),
+ backchannel_logout_ignore_sub=oidc_config.get(
+ "backchannel_logout_ignore_sub", False
+ ),
skip_verification=oidc_config.get("skip_verification", False),
user_profile_method=oidc_config.get("user_profile_method", "auto"),
allow_existing_users=oidc_config.get("allow_existing_users", False),
@@ -368,6 +374,12 @@ class OidcProviderConfig:
# "openid" scope is used.
jwks_uri: Optional[str]
+ # Whether Synapse should react to backchannel logouts
+ backchannel_logout_enabled: bool
+
+ # Whether Synapse should ignore the `sub` claim in backchannel logouts or not.
+ backchannel_logout_ignore_sub: bool
+
# Whether to skip metadata verification
skip_verification: bool
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 1ed001e105..5c13fe428a 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -150,8 +150,5 @@ class RatelimitConfig(Config):
self.rc_third_party_invite = RatelimitSettings(
config.get("rc_third_party_invite", {}),
- defaults={
- "per_second": self.rc_message.per_second,
- "burst_count": self.rc_message.burst_count,
- },
+ defaults={"per_second": 0.0025, "burst_count": 5},
)
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 1033496bb4..e4759711ed 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -205,7 +205,7 @@ class ContentRepositoryConfig(Config):
)
self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled:
- check_requirements("url_preview")
+ check_requirements("url-preview")
proxy_env = getproxies_environment()
if "url_preview_ip_range_blacklist" not in config:
diff --git a/synapse/config/server.py b/synapse/config/server.py
index f2353ce5fb..ec46ca63ad 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -207,6 +207,9 @@ class HttpListenerConfig:
additional_resources: Dict[str, dict] = attr.Factory(dict)
tag: Optional[str] = None
request_id_header: Optional[str] = None
+ # If true, the listener will return CORS response headers compatible with MSC3886:
+ # https://github.com/matrix-org/matrix-spec-proposals/pull/3886
+ experimental_cors_msc3886: bool = False
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -935,6 +938,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
request_id_header=listener.get("request_id_header"),
+ experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
)
return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 0fb725dd8f..913b83e174 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -67,6 +67,7 @@ class InstanceLocationConfig:
host: str
port: int
+ tls: bool = False
@attr.s
@@ -149,13 +150,25 @@ class WorkerConfig(Config):
# The port on the main synapse for HTTP replication endpoint
self.worker_replication_http_port = config.get("worker_replication_http_port")
+ # Whether TLS is used when connecting to the HTTP replication endpoint on
+ # the main synapse. For backward compatibility this defaults to False.
+ self.worker_replication_http_tls = config.get(
+ "worker_replication_http_tls", False
+ )
+
# The shared secret used for authentication when connecting to the main synapse.
self.worker_replication_secret = config.get("worker_replication_secret", None)
self.worker_name = config.get("worker_name", self.worker_app)
self.instance_name = self.worker_name or "master"
+ # FIXME: Remove this check after a suitable amount of time.
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
+ if self.worker_main_http_uri is not None:
+ logger.warning(
+ "The config option worker_main_http_uri is unused since Synapse 1.73. "
+ "It can be safely removed from your configuration."
+ )
# This option is really only here to support `--manhole` command line
# argument.
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index c88afb2986..ed15f88350 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -213,7 +213,7 @@ class Keyring:
def verify_json_objects_for_server(
self, server_and_json: Iterable[Tuple[str, dict, int]]
- ) -> List[defer.Deferred]:
+ ) -> List["defer.Deferred[None]"]:
"""Bulk verifies signatures of json objects, bulk fetching keys as
necessary.
@@ -226,10 +226,9 @@ class Keyring:
valid.
Returns:
- List: for each input triplet, a deferred indicating success
- or failure to verify each json object's signature for the given
- server_name. The deferreds run their callbacks in the sentinel
- logcontext.
+ For each input triplet, a deferred indicating success or failure to
+ verify each json object's signature for the given server_name. The
+ deferreds run their callbacks in the sentinel logcontext.
"""
return [
run_in_background(
@@ -858,7 +857,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
response = await self.client.get_json(
destination=server_name,
path="/_matrix/key/v2/server/"
- + urllib.parse.quote(requested_key_id),
+ + urllib.parse.quote(requested_key_id, safe=""),
ignore_backoff=True,
# we only give the remote server 10s to respond. It should be an
# easy request to handle, so if it doesn't reply within 10s, it's
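
Why `safe=""` matters in the key-fetch path above: by default `urllib.parse.quote` leaves "/" unescaped, so a key ID containing a slash would change the shape of the request path. The key ID below is made up:

from urllib.parse import quote

key_id = "ed25519:a/b"
print(quote(key_id))           # ed25519%3Aa/b   (slash survives, mangling the path)
print(quote(key_id, safe=""))  # ed25519%3Aa%2Fb (fully escaped)
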
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index c7d5ef92fc..bab31e33c5 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -15,7 +15,18 @@
import logging
import typing
-from typing import Any, Collection, Dict, Iterable, List, Optional, Set, Tuple, Union
+from typing import (
+ Any,
+ Collection,
+ Dict,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+)
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@@ -134,6 +145,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
async def check_state_independent_auth_rules(
store: _EventSourceStore,
event: "EventBase",
+ batched_auth_events: Optional[Mapping[str, "EventBase"]] = None,
) -> None:
"""Check that an event complies with auth rules that are independent of room state
@@ -143,6 +155,8 @@ async def check_state_independent_auth_rules(
Args:
store: the datastore; used to fetch the auth events for validation
event: the event being checked.
+ batched_auth_events: if the event being authed is part of a batch, any events
+ from the same batch that may be necessary to auth the current event
Raises:
AuthError if the checks fail
@@ -162,6 +176,9 @@ async def check_state_independent_auth_rules(
redact_behaviour=EventRedactBehaviour.as_is,
allow_rejected=True,
)
+ if batched_auth_events:
+ auth_events.update(batched_auth_events)
+
room_id = event.room_id
auth_dict: MutableStateMap[str] = {}
expected_auth_types = auth_types_for_event(event.room_version, event)
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index b2c9119fd0..8aca9a3ab9 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -289,6 +289,10 @@ class _EventInternalMetadata:
"""
return self._dict.get("historical", False)
+ def is_notifiable(self) -> bool:
+ """Whether this event can trigger a push notification"""
+ return not self.is_outlier() or self.is_out_of_band_membership()
+
class EventBase(metaclass=abc.ABCMeta):
@property
@@ -593,8 +597,7 @@ def _event_type_from_format_version(
format_version: The event format version
Returns:
- type: A type that can be initialized as per the initializer of
- `FrozenEvent`
+ A type that can be initialized as per the initializer of `FrozenEvent`
"""
if format_version == EventFormatVersions.ROOM_V1_V2:
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index e2ee10dd3d..d62906043f 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -128,6 +128,7 @@ class EventBuilder:
state_filter=StateFilter.from_types(
auth_types_for_event(self.room_version, self)
),
+ await_full_state=False,
)
auth_event_ids = self._event_auth_handler.compute_auth_events(
self, state_ids
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index d3c8083e4a..1c0e96bec7 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -65,7 +65,8 @@ class EventContext:
None does not necessarily mean that ``state_group`` does not have
a prev_group!
- If the event is a state event, this is normally the same as ``prev_group``.
+ If the event is a state event, this is normally the same as
+ ``state_group_before_event``.
If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
will always also be ``None``.
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 881d3d8e18..d5d1b06fa4 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Awaitable, Callable, Optional
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -58,7 +58,12 @@ class FederationBase:
@trace
async def _check_sigs_and_hash(
- self, room_version: RoomVersion, pdu: EventBase
+ self,
+ room_version: RoomVersion,
+ pdu: EventBase,
+ record_failure_callback: Optional[
+ Callable[[EventBase, str], Awaitable[None]]
+ ] = None,
) -> EventBase:
"""Checks that event is correctly signed by the sending server.
@@ -70,6 +75,11 @@ class FederationBase:
Args:
room_version: The room version of the PDU
pdu: the event to be checked
+ record_failure_callback: A callback to run whenever the given event
+ fails signature or hash checks. This includes exceptions
+ that would normally be thrown/raised but also things like
+ checking for event tampering where we just return the redacted
+ event.
Returns:
* the original event if the checks pass
@@ -80,7 +90,12 @@ class FederationBase:
InvalidEventSignatureError if the signature check failed. Nothing
will be logged in this case.
"""
- await _check_sigs_on_pdu(self.keyring, room_version, pdu)
+ try:
+ await _check_sigs_on_pdu(self.keyring, room_version, pdu)
+ except InvalidEventSignatureError as exc:
+ if record_failure_callback:
+ await record_failure_callback(pdu, str(exc))
+ raise exc
if not check_event_content_hash(pdu):
# let's try to distinguish between failures because the event was
@@ -116,6 +131,10 @@ class FederationBase:
"event_id": pdu.event_id,
}
)
+ if record_failure_callback:
+ await record_failure_callback(
+ pdu, "Event content has been tampered with"
+ )
return redacted_event
spam_check = await self.spam_checker.check_event_for_spam(pdu)
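The contract of the new `record_failure_callback` hook, reduced to a toy sketch (all names here are stand-ins, not Synapse APIs): the callback fires both when a failure raises and when tampering is handled by falling back to a redacted copy.

    import asyncio
    from typing import Awaitable, Callable, Optional

    class InvalidEventSignatureError(Exception):
        pass

    async def check_sigs_and_hash(
        pdu: dict,
        record_failure: Optional[Callable[[dict, str], Awaitable[None]]] = None,
    ) -> dict:
        if not pdu.get("sig_ok"):
            if record_failure:
                await record_failure(pdu, "Invalid signature")
            raise InvalidEventSignatureError("Invalid signature")
        if not pdu.get("hash_ok"):
            # tampering is not fatal: we fall back to the redacted copy, but
            # the failure still gets recorded
            if record_failure:
                await record_failure(pdu, "Event content has been tampered with")
            return {**pdu, "redacted": True}
        return pdu

    async def demo() -> None:
        failures = []

        async def record(pdu: dict, cause: str) -> None:
            failures.append((pdu["event_id"], cause))

        await check_sigs_and_hash({"event_id": "$e", "sig_ok": True}, record)
        assert failures == [("$e", "Event content has been tampered with")]

    asyncio.run(demo())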
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 9e7527291b..0615ce0870 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -80,6 +80,18 @@ PDU_RETRY_TIME_MS = 1 * 60 * 1000
T = TypeVar("T")
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class PulledPduInfo:
+ """
+ A result object that stores the PDU and info about it like which homeserver we
+ pulled it from (`pull_origin`)
+ """
+
+ pdu: EventBase
+ # Which homeserver we pulled the PDU from
+ pull_origin: str
+
+
class InvalidResponseError(RuntimeError):
"""Helper for _try_destination_list: indicates that the server returned a response
we couldn't parse
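A tiny illustration of the new result object (with a stand-in event type): it simply pairs the event with its provenance so callers can attribute it to the right server.

    import attr

    @attr.s(frozen=True, slots=True, auto_attribs=True)
    class FakeEvent:  # stand-in for EventBase
        event_id: str

    @attr.s(frozen=True, slots=True, auto_attribs=True)
    class PulledPduInfo:
        pdu: FakeEvent
        pull_origin: str  # which homeserver we pulled the PDU from

    info = PulledPduInfo(FakeEvent("$abc"), "matrix.org")
    assert info.pull_origin == "matrix.org"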
@@ -114,7 +126,9 @@ class FederationClient(FederationBase):
self.hostname = hs.hostname
self.signing_key = hs.signing_key
- self._get_pdu_cache: ExpiringCache[str, EventBase] = ExpiringCache(
+ # Cache mapping `event_id` to a tuple of the event itself and the `pull_origin`
+ # (which server we pulled the event from)
+ self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache(
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
@@ -278,7 +292,7 @@ class FederationClient(FederationBase):
pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus]
# Check signatures and hash of pdus, removing any from the list that fail checks
- pdus[:] = await self._check_sigs_and_hash_and_fetch(
+ pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
dest, pdus, room_version=room_version
)
@@ -328,7 +342,17 @@ class FederationClient(FederationBase):
# Check signatures are correct.
try:
- signed_pdu = await self._check_sigs_and_hash(room_version, pdu)
+
+ async def _record_failure_callback(
+ event: EventBase, cause: str
+ ) -> None:
+ await self.store.record_event_failed_pull_attempt(
+ event.room_id, event.event_id, cause
+ )
+
+ signed_pdu = await self._check_sigs_and_hash(
+ room_version, pdu, _record_failure_callback
+ )
except InvalidEventSignatureError as e:
errmsg = f"event id {pdu.event_id}: {e}"
logger.warning("%s", errmsg)
@@ -342,11 +366,11 @@ class FederationClient(FederationBase):
@tag_args
async def get_pdu(
self,
- destinations: Iterable[str],
+ destinations: Collection[str],
event_id: str,
room_version: RoomVersion,
timeout: Optional[int] = None,
- ) -> Optional[EventBase]:
+ ) -> Optional[PulledPduInfo]:
"""Requests the PDU with given origin and ID from the remote home
servers.
@@ -361,11 +385,11 @@ class FederationClient(FederationBase):
moving to the next destination. None indicates no timeout.
Returns:
- The requested PDU, or None if we were unable to find it.
+ The requested PDU wrapped in `PulledPduInfo`, or None if we were unable to find it.
"""
logger.debug(
- "get_pdu: event_id=%s from destinations=%s", event_id, destinations
+ "get_pdu(event_id=%s): from destinations=%s", event_id, destinations
)
# TODO: Rate limit the number of times we try and get the same event.
@@ -374,19 +398,25 @@ class FederationClient(FederationBase):
# it gets persisted to the database), so we cache the results of the lookup.
# Note that this is separate to the regular get_event cache which caches
# events once they have been persisted.
- event = self._get_pdu_cache.get(event_id)
+ get_pdu_cache_entry = self._get_pdu_cache.get(event_id)
+ event = None
+ pull_origin = None
+ if get_pdu_cache_entry:
+ event, pull_origin = get_pdu_cache_entry
# If we don't see the event in the cache, go try to fetch it from the
# provided remote federated destinations
- if not event:
+ else:
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
+ # TODO: We can probably refactor this to use `_try_destination_list`
for destination in destinations:
now = self._clock.time_msec()
last_attempt = pdu_attempts.get(destination, 0)
if last_attempt + PDU_RETRY_TIME_MS > now:
logger.debug(
- "get_pdu: skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+ "get_pdu(event_id=%s): skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+ event_id,
destination,
last_attempt,
PDU_RETRY_TIME_MS,
@@ -401,43 +431,48 @@ class FederationClient(FederationBase):
room_version=room_version,
timeout=timeout,
)
+ pull_origin = destination
pdu_attempts[destination] = now
if event:
# Prime the cache
- self._get_pdu_cache[event.event_id] = event
+ self._get_pdu_cache[event.event_id] = (event, pull_origin)
# Now that we have an event, we can break out of this
# loop and stop asking other destinations.
break
+ except NotRetryingDestination as e:
+ logger.info("get_pdu(event_id=%s): %s", event_id, e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "get_pdu(event_id=%s): Not attempting to fetch PDU from %s because the homeserver is not on our federation whitelist",
+ event_id,
+ destination,
+ )
+ continue
except SynapseError as e:
logger.info(
- "Failed to get PDU %s from %s because %s",
+ "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
event_id,
destination,
e,
)
continue
- except NotRetryingDestination as e:
- logger.info(str(e))
- continue
- except FederationDeniedError as e:
- logger.info(str(e))
- continue
except Exception as e:
pdu_attempts[destination] = now
logger.info(
- "Failed to get PDU %s from %s because %s",
+ "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
event_id,
destination,
e,
)
continue
- if not event:
+ if not event or not pull_origin:
return None
# `event` now refers to an object stored in `get_pdu_cache`. Our
@@ -449,7 +484,7 @@ class FederationClient(FederationBase):
event.room_version,
)
- return event_copy
+ return PulledPduInfo(event_copy, pull_origin)
@trace
@tag_args
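The control flow of `get_pdu`, reduced to a self-contained sketch (the fetch helper is hypothetical): consult the cache first, skip recently-tried destinations, and remember which server ultimately supplied the event.

    import time
    from typing import Dict, Iterable, Optional, Tuple

    PDU_RETRY_TIME_MS = 60_000

    _cache: Dict[str, Tuple[dict, str]] = {}    # event_id -> (event, pull_origin)
    _attempts: Dict[str, Dict[str, int]] = {}   # event_id -> destination -> last try (ms)

    def fetch_from(destination: str, event_id: str) -> Optional[dict]:
        """Hypothetical network call; returns the event dict or None."""
        return None

    def get_pdu(destinations: Iterable[str], event_id: str) -> Optional[Tuple[dict, str]]:
        cached = _cache.get(event_id)
        if cached:
            return cached
        tried = _attempts.setdefault(event_id, {})
        for destination in destinations:
            now = int(time.time() * 1000)
            if tried.get(destination, 0) + PDU_RETRY_TIME_MS > now:
                continue  # asked this server too recently; move on
            tried[destination] = now
            try:
                event = fetch_from(destination, event_id)
            except Exception:
                continue  # any failure: fall through to the next destination
            if event:
                # prime the cache with the event *and* where it came from
                _cache[event_id] = (event, destination)
                return event, destination
        return None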
@@ -547,24 +582,28 @@ class FederationClient(FederationBase):
len(auth_event_map),
)
- valid_auth_events = await self._check_sigs_and_hash_and_fetch(
+ valid_auth_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, auth_event_map.values(), room_version
)
- valid_state_events = await self._check_sigs_and_hash_and_fetch(
- destination, state_event_map.values(), room_version
+ valid_state_events = (
+ await self._check_sigs_and_hash_for_pulled_events_and_fetch(
+ destination, state_event_map.values(), room_version
+ )
)
return valid_state_events, valid_auth_events
@trace
- async def _check_sigs_and_hash_and_fetch(
+ async def _check_sigs_and_hash_for_pulled_events_and_fetch(
self,
origin: str,
pdus: Collection[EventBase],
room_version: RoomVersion,
) -> List[EventBase]:
- """Checks the signatures and hashes of a list of events.
+ """
+ Checks the signatures and hashes of a list of pulled events we got from
+ federation and records any signature failures as failed pull attempts.
If a PDU fails its signature check then we check if we have it in
the database, and if not then request it from the sender's server (if that
@@ -597,11 +636,17 @@ class FederationClient(FederationBase):
valid_pdus: List[EventBase] = []
+ async def _record_failure_callback(event: EventBase, cause: str) -> None:
+ await self.store.record_event_failed_pull_attempt(
+ event.room_id, event.event_id, cause
+ )
+
async def _execute(pdu: EventBase) -> None:
valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
pdu=pdu,
origin=origin,
room_version=room_version,
+ record_failure_callback=_record_failure_callback,
)
if valid_pdu:
@@ -618,6 +663,9 @@ class FederationClient(FederationBase):
pdu: EventBase,
origin: str,
room_version: RoomVersion,
+ record_failure_callback: Optional[
+ Callable[[EventBase, str], Awaitable[None]]
+ ] = None,
) -> Optional[EventBase]:
"""Takes a PDU and checks its signatures and hashes.
@@ -634,6 +682,11 @@ class FederationClient(FederationBase):
origin
pdu
room_version
+ record_failure_callback: A callback to run whenever the given event
+ fails signature or hash checks. This includes exceptions
+ that would normally be thrown/raised but also things like
+ checking for event tampering where we just return the redacted
+ event.
Returns:
The PDU (possibly redacted) if it has valid signatures and hashes.
@@ -641,7 +694,9 @@ class FederationClient(FederationBase):
"""
try:
- return await self._check_sigs_and_hash(room_version, pdu)
+ return await self._check_sigs_and_hash(
+ room_version, pdu, record_failure_callback
+ )
except InvalidEventSignatureError as e:
logger.warning(
"Signature on retrieved event %s was invalid (%s). "
@@ -669,12 +724,14 @@ class FederationClient(FederationBase):
pdu_origin = get_domain_from_id(pdu.sender)
if not res and pdu_origin != origin:
try:
- res = await self.get_pdu(
+ pulled_pdu_info = await self.get_pdu(
destinations=[pdu_origin],
event_id=pdu.event_id,
room_version=room_version,
timeout=10000,
)
+ if pulled_pdu_info is not None:
+ res = pulled_pdu_info.pdu
except SynapseError:
pass
@@ -694,7 +751,7 @@ class FederationClient(FederationBase):
auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]]
- signed_auth = await self._check_sigs_and_hash_and_fetch(
+ signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, auth_chain, room_version=room_version
)
@@ -776,6 +833,7 @@ class FederationClient(FederationBase):
)
for destination in destinations:
+ # We don't want to ask our own server for information we don't have
if destination == self.server_name:
continue
@@ -784,9 +842,21 @@ class FederationClient(FederationBase):
except (
RequestSendFailed,
InvalidResponseError,
- NotRetryingDestination,
) as e:
logger.warning("Failed to %s via %s: %s", description, destination, e)
+ # Skip to the next homeserver in the list to try.
+ continue
+ except NotRetryingDestination as e:
+ logger.info("%s: %s", description, e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "%s: Not attempting to %s from %s because the homeserver is not on our federation whitelist",
+ description,
+ description,
+ destination,
+ )
+ continue
except UnsupportedRoomVersionError:
raise
except HttpResponseException as e:
@@ -1264,7 +1334,7 @@ class FederationClient(FederationBase):
return resp[1]
async def send_knock(self, destinations: List[str], pdu: EventBase) -> JsonDict:
- """Attempts to send a knock event to given a list of servers. Iterates
+ """Attempts to send a knock event to a given list of servers. Iterates
through the list until one attempt succeeds.
Doing so will cause the remote server to add the event to the graph,
@@ -1401,7 +1471,7 @@ class FederationClient(FederationBase):
event_from_pdu_json(e, room_version) for e in content.get("events", [])
]
- signed_events = await self._check_sigs_and_hash_and_fetch(
+ signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, events, room_version=room_version
)
except HttpResponseException as e:
@@ -1579,6 +1649,54 @@ class FederationClient(FederationBase):
return result
async def timestamp_to_event(
+ self, *, destinations: List[str], room_id: str, timestamp: int, direction: str
+ ) -> Optional["TimestampToEventResponse"]:
+ """
+ Calls each remote federating server in `destinations`, asking for its closest
+ event to the given timestamp in the given direction, until we get a response.
+ Also validates the response to always return the expected keys or raises an
+ error.
+
+ Args:
+ destinations: The domains of homeservers to try fetching from
+ room_id: Room to fetch the event from
+ timestamp: The point in time (inclusive) we should navigate from in
+ the given direction to find the closest event.
+ direction: ["f"|"b"] to indicate whether we should navigate forward
+ or backward from the given timestamp to find the closest event.
+
+ Returns:
+ A parsed TimestampToEventResponse including the closest event_id
+ and origin_server_ts or None if no destination has a response.
+ """
+
+ async def _timestamp_to_event_from_destination(
+ destination: str,
+ ) -> TimestampToEventResponse:
+ return await self._timestamp_to_event_from_destination(
+ destination, room_id, timestamp, direction
+ )
+
+ try:
+ # Loop through each homeserver candidate until we get a successful response
+ timestamp_to_event_response = await self._try_destination_list(
+ "timestamp_to_event",
+ destinations,
+ # TODO: The requested timestamp may lie in a part of the
+ # event graph that the remote server *also* didn't have,
+ # in which case they will have returned another event
+ # which may be nowhere near the requested timestamp. In
+ # the future, we may need to reconcile that gap and ask
+ # other homeservers, and/or extend `/timestamp_to_event`
+ # to return events on *both* sides of the timestamp to
+ # help reconcile the gap faster.
+ _timestamp_to_event_from_destination,
+ )
+ return timestamp_to_event_response
+ except SynapseError:
+ return None
+
+ async def _timestamp_to_event_from_destination(
self, destination: str, room_id: str, timestamp: int, direction: str
) -> "TimestampToEventResponse":
"""
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 641922a1d2..4f4a88b953 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -69,6 +69,8 @@ from synapse.replication.http.federation import (
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.lock import Lock
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
+from synapse.storage.roommember import MemberSummary
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
@@ -476,6 +478,14 @@ class FederationServer(FederationBase):
pdu_results[pdu.event_id] = await process_pdu(pdu)
async def process_pdu(pdu: EventBase) -> JsonDict:
+ """
+ Processes a pushed PDU sent to us via a `/send` transaction
+
+ Returns:
+ JsonDict representing a "PDU Processing Result" that will be bundled up
+ with the other processed PDUs in the `/send` transaction and sent back
+ to the remote homeserver.
+ """
event_id = pdu.event_id
with nested_logging_context(event_id):
try:
@@ -678,8 +688,9 @@ class FederationServer(FederationBase):
state_event_ids: Collection[str]
servers_in_room: Optional[Collection[str]]
if caller_supports_partial_state:
+ summary = await self.store.get_room_summary(room_id)
state_event_ids = _get_event_ids_for_partial_state_join(
- event, prev_state_ids
+ event, prev_state_ids, summary
)
servers_in_room = await self.state.get_hosts_in_room_at_events(
room_id, event_ids=event.prev_event_ids()
@@ -819,7 +830,14 @@ class FederationServer(FederationBase):
context, self._room_prejoin_state_types
)
)
- return {"knock_state_events": stripped_room_state}
+ return {
+ "knock_room_state": stripped_room_state,
+ # Since v1.37, Synapse incorrectly used "knock_state_events" for this field.
+ # Thus, we also populate 'knock_state_events' with the same content to
+ # support older instances.
+ # See https://github.com/matrix-org/synapse/issues/14088.
+ "knock_state_events": stripped_room_state,
+ }
async def _on_send_membership_event(
self, origin: str, content: JsonDict, membership_type: str, room_id: str
@@ -1475,6 +1493,7 @@ class FederationHandlerRegistry:
def _get_event_ids_for_partial_state_join(
join_event: EventBase,
prev_state_ids: StateMap[str],
+ summary: Dict[str, MemberSummary],
) -> Collection[str]:
"""Calculate state to be retuned in a partial_state send_join
@@ -1501,8 +1520,19 @@ def _get_event_ids_for_partial_state_join(
if current_membership_event_id is not None:
state_event_ids.add(current_membership_event_id)
- # TODO: return a few more members:
- # - those with invites
- # - those that are kicked? / banned
+ name_id = prev_state_ids.get((EventTypes.Name, ""))
+ canonical_alias_id = prev_state_ids.get((EventTypes.CanonicalAlias, ""))
+ if not name_id and not canonical_alias_id:
+ # Also include the hero members of the room (for DM rooms without a title).
+ # To do this properly, we should select the correct subset of membership events
+ # from `prev_state_ids`. Instead, we are lazier and use the (cached)
+ # `get_room_summary` function, which is based on the current state of the room.
+ # This introduces races; we choose to ignore them because a) they should be rare
+ # and b) even if it's wrong, joining servers will get the full state eventually.
+ heroes = extract_heroes_from_room_summary(summary, join_event.state_key)
+ for hero in heroes:
+ membership_event_id = prev_state_ids.get((EventTypes.Member, hero))
+ if membership_event_id:
+ state_event_ids.add(membership_event_id)
return state_event_ids
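A simplified sketch of the hero step above (the hero picker here is a toy stand-in; the real `extract_heroes_from_room_summary` applies its own selection rules): memberships of the heroes are only added when the room has neither a name nor a canonical alias.

    from typing import Dict, List, Set, Tuple

    def pick_heroes(joined_members: List[str], joining_user: str, limit: int = 5) -> List[str]:
        """Toy stand-in for extract_heroes_from_room_summary."""
        return [m for m in joined_members if m != joining_user][:limit]

    def add_hero_memberships(
        state_event_ids: Set[str],
        prev_state_ids: Dict[Tuple[str, str], str],
        joined_members: List[str],
        joining_user: str,
    ) -> None:
        name_id = prev_state_ids.get(("m.room.name", ""))
        canonical_alias_id = prev_state_ids.get(("m.room.canonical_alias", ""))
        if name_id or canonical_alias_id:
            return  # the room has a title of its own; heroes are not needed
        for hero in pick_heroes(joined_members, joining_user):
            membership_event_id = prev_state_ids.get(("m.room.member", hero))
            if membership_event_id:
                state_event_ids.add(membership_event_id)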
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index a6cb3ba58f..3ad483efe0 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -353,21 +353,25 @@ class FederationSender(AbstractFederationSender):
last_token = await self.store.get_federation_out_pos("events")
(
next_token,
- events,
event_to_received_ts,
- ) = await self.store.get_all_new_events_stream(
+ ) = await self.store.get_all_new_event_ids_stream(
last_token, self._last_poked_id, limit=100
)
+ event_ids = event_to_received_ts.keys()
+ event_entries = await self.store.get_unredacted_events_from_cache_or_db(
+ event_ids
+ )
+
logger.debug(
"Handling %i -> %i: %i events to send (current id %i)",
last_token,
next_token,
- len(events),
+ len(event_entries),
self._last_poked_id,
)
- if not events and next_token >= self._last_poked_id:
+ if not event_entries and next_token >= self._last_poked_id:
logger.debug("All events processed")
break
@@ -508,8 +512,14 @@ class FederationSender(AbstractFederationSender):
await handle_event(event)
events_by_room: Dict[str, List[EventBase]] = {}
- for event in events:
- events_by_room.setdefault(event.room_id, []).append(event)
+
+ for event_id in event_ids:
+ # `event_entries` is unsorted, so we have to iterate over `event_ids`
+ # to ensure the events are in the right order
+ event_cache = event_entries.get(event_id)
+ if event_cache:
+ event = event_cache.event
+ events_by_room.setdefault(event.room_id, []).append(event)
await make_deferred_yieldable(
defer.gatherResults(
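The ordering detail in the sender loop above, as a sketch: the cache lookup returns an unordered (and possibly incomplete) map, so grouping walks the stream-ordered ID list instead.

    from typing import Dict, List

    def group_by_room(
        event_ids: List[str], event_entries: Dict[str, dict]
    ) -> Dict[str, List[dict]]:
        events_by_room: Dict[str, List[dict]] = {}
        # `event_entries` is unordered and may have gaps, so we re-walk the
        # stream-ordered `event_ids` list to keep events in the right order
        for event_id in event_ids:
            event = event_entries.get(event_id)
            if event:
                events_by_room.setdefault(event["room_id"], []).append(event)
        return events_by_room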
@@ -524,9 +534,9 @@ class FederationSender(AbstractFederationSender):
logger.debug("Successfully handled up to %i", next_token)
await self.store.update_federation_out_pos("events", next_token)
- if events:
+ if event_entries:
now = self.clock.time_msec()
- ts = event_to_received_ts[events[-1].event_id]
+ ts = max(t for t in event_to_received_ts.values() if t)
assert ts is not None
synapse.metrics.event_processing_lag.labels(
@@ -536,7 +546,7 @@ class FederationSender(AbstractFederationSender):
"federation_sender"
).set(ts)
- events_processed_counter.inc(len(events))
+ events_processed_counter.inc(len(event_entries))
event_processing_loop_room_count.labels("federation_sender").inc(
len(events_by_room)
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 72bc935452..baeea3f1cd 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -646,10 +646,25 @@ class _TransactionQueueManager:
# We start by fetching device related EDUs, i.e device updates and to
# device messages. We have to keep 2 free slots for presence and rr_edus.
- limit = MAX_EDUS_PER_TRANSACTION - 2
+ device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2
+
+ # We prioritize to-device messages so that existing encryption channels
+ # work. We also keep a few slots spare (by reducing the limit) so that
+ # we can still trickle out some device list updates.
+ (
+ to_device_edus,
+ device_stream_id,
+ ) = await self.queue._get_to_device_message_edus(device_edu_limit - 10)
+
+ if to_device_edus:
+ self._device_stream_id = device_stream_id
+ else:
+ self.queue._last_device_stream_id = device_stream_id
+
+ device_edu_limit -= len(to_device_edus)
device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
- limit
+ device_edu_limit
)
if device_update_edus:
@@ -657,18 +672,6 @@ class _TransactionQueueManager:
else:
self.queue._last_device_list_stream_id = dev_list_id
- limit -= len(device_update_edus)
-
- (
- to_device_edus,
- device_stream_id,
- ) = await self.queue._get_to_device_message_edus(limit)
-
- if to_device_edus:
- self._device_stream_id = device_stream_id
- else:
- self.queue._last_device_stream_id = device_stream_id
-
pending_edus = device_update_edus + to_device_edus
# Now add the read receipt EDU.
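To make the budgeting arithmetic concrete, a worked sketch assuming Synapse's `MAX_EDUS_PER_TRANSACTION` of 100: to-device messages are filled first but always leave at least ten slots for device list updates.

    MAX_EDUS_PER_TRANSACTION = 100

    def budget(num_to_device_queued: int, num_device_updates_queued: int):
        device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2  # 98: reserve presence + RRs
        # to-device messages go first, but keep at least 10 slots free so
        # device list updates can still trickle out
        to_device = min(num_to_device_queued, device_edu_limit - 10)
        device_edu_limit -= to_device
        device_updates = min(num_device_updates_queued, device_edu_limit)
        return to_device, device_updates

    assert budget(200, 200) == (88, 10)  # both queues full: 88 to-device, 10 updates
    assert budget(5, 200) == (5, 93)     # small to-device queue: updates take the rest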
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 32074b8ca6..a3cfc701cd 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -45,6 +45,7 @@ from synapse.federation.units import Transaction
from synapse.http.matrixfederationclient import ByteParser
from synapse.http.types import QueryParams
from synapse.types import JsonDict
+from synapse.util import ExceptionBundle
logger = logging.getLogger(__name__)
@@ -279,12 +280,11 @@ class TransportLayerClient:
Note that this does not append any events to any graphs.
Args:
- destination (str): address of remote homeserver
- room_id (str): room to join/leave
- user_id (str): user to be joined/left
- membership (str): one of join/leave
- params (dict[str, str|Iterable[str]]): Query parameters to include in the
- request.
+ destination: address of remote homeserver
+ room_id: room to join/leave
+ user_id: user to be joined/left
+ membership: one of join/leave
+ params: Query parameters to include in the request.
Returns:
Succeeds when we get a 2xx HTTP response. The result
@@ -926,8 +926,7 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
return len(data)
def finish(self) -> SendJoinResponse:
- for c in self._coros:
- c.close()
+ _close_coros(self._coros)
if self._response.event_dict:
self._response.event = make_event_from_dict(
@@ -970,6 +969,27 @@ class _StateParser(ByteParser[StateRequestResponse]):
return len(data)
def finish(self) -> StateRequestResponse:
- for c in self._coros:
- c.close()
+ _close_coros(self._coros)
return self._response
+
+
+def _close_coros(coros: Iterable[Generator[None, bytes, None]]) -> None:
+ """Close each of the given coroutines.
+
+ Always calls .close() on each coroutine, even if doing so raises an exception.
+ Any exceptions raised are aggregated into an ExceptionBundle.
+
+ Raises:
+ ExceptionBundle: if at least one coroutine fails to close.
+ """
+ exceptions = []
+ for c in coros:
+ try:
+ c.close()
+ except Exception as e:
+ exceptions.append(e)
+
+ if exceptions:
+ # raise from the first exception so that the traceback has slightly more context
+ raise ExceptionBundle(
+ f"There were {len(exceptions)} errors closing coroutines", exceptions
+ ) from exceptions[0]
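The same close-everything-then-aggregate pattern as a standalone sketch (with a local stand-in for `synapse.util.ExceptionBundle`):

    from typing import Iterable, List

    class ExceptionBundle(Exception):
        def __init__(self, message: str, exceptions: List[Exception]):
            super().__init__(message)
            self.exceptions = exceptions

    def close_all(closeables: Iterable) -> None:
        exceptions: List[Exception] = []
        for c in closeables:
            try:
                c.close()  # always attempt every close, even after a failure
            except Exception as e:
                exceptions.append(e)
        if exceptions:
            # chain from the first failure so the traceback keeps some context
            raise ExceptionBundle(
                f"There were {len(exceptions)} errors closing coroutines", exceptions
            ) from exceptions[0]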
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index cba690e795..fd927e0af7 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -226,10 +226,10 @@ class BaseFederationServlet:
With arguments:
- origin (unicode|None): The authenticated server_name of the calling server,
+ origin (str|None): The authenticated server_name of the calling server,
unless REQUIRE_AUTH is set to False and authentication failed.
- content (unicode|None): decoded json body of the request. None if the
+ content (str|None): decoded json body of the request. None if the
request was a GET.
query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index 6bb4659c4c..205fd16daa 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -489,7 +489,7 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
room_version = content["room_version"]
event = content["event"]
- invite_room_state = content["invite_room_state"]
+ invite_room_state = content.get("invite_room_state", [])
# Synapse expects invite_room_state to be in unsigned, as it is in v1
# API
@@ -499,6 +499,11 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
result = await self.handler.on_invite_request(
origin, event, room_version_id=room_version
)
+
+ # We only store invite_room_state for internal use, so remove it before
+ # returning the event to the remote homeserver.
+ result["event"].get("unsigned", {}).pop("invite_room_state", None)
+
return 200, result
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 0478448b47..fc21d58001 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -225,7 +225,7 @@ class AccountDataEventSource(EventSource[int, JsonDict]):
self,
user: UserID,
from_key: int,
- limit: Optional[int],
+ limit: int,
room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index cf9f19608a..5bf8e86387 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -32,6 +32,7 @@ class AdminHandler:
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
+ self._msc3866_enabled = hs.config.experimental.msc3866.enabled
async def get_whois(self, user: UserID) -> JsonDict:
connections = []
@@ -75,6 +76,10 @@ class AdminHandler:
"is_guest",
}
+ if self._msc3866_enabled:
+ # Only include the approved flag if support for MSC3866 is enabled.
+ user_info_to_return.add("approved")
+
# Restrict returned keys to a known set.
user_info_dict = {
key: value
@@ -95,6 +100,7 @@ class AdminHandler:
user_info_dict["avatar_url"] = profile.avatar_url
user_info_dict["threepids"] = threepids
user_info_dict["external_ids"] = external_ids
+ user_info_dict["erased"] = await self.store.is_user_erased(user.to_string())
return user_info_dict
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 203b62e015..66f5b8d108 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -109,10 +109,13 @@ class ApplicationServicesHandler:
last_token = await self.store.get_appservice_last_pos()
(
upper_bound,
- events,
event_to_received_ts,
- ) = await self.store.get_all_new_events_stream(
- last_token, self.current_max, limit=100, get_prev_content=True
+ ) = await self.store.get_all_new_event_ids_stream(
+ last_token, self.current_max, limit=100
+ )
+
+ events = await self.store.get_events_as_list(
+ event_to_received_ts.keys(), get_prev_content=True
)
events_by_room: Dict[str, List[EventBase]] = {}
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index eacd631ee0..8b9ef25d29 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -38,6 +38,7 @@ from typing import (
import attr
import bcrypt
import unpaddedbase64
+from prometheus_client import Counter
from twisted.internet.defer import CancelledError
from twisted.web.server import Request
@@ -48,6 +49,7 @@ from synapse.api.errors import (
Codes,
InteractiveAuthIncompleteError,
LoginError,
+ NotFoundError,
StoreError,
SynapseError,
UserDeactivatedError,
@@ -63,10 +65,14 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.databases.main.registration import (
+ LoginTokenExpired,
+ LoginTokenLookupResult,
+ LoginTokenReused,
+)
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
-from synapse.util.macaroons import LoginTokenAttributes
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
@@ -80,6 +86,12 @@ logger = logging.getLogger(__name__)
INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"
+invalid_login_token_counter = Counter(
+ "synapse_user_login_invalid_login_tokens",
+ "Counts the number of rejected m.login.token on /login",
+ ["reason"],
+)
+
def convert_client_dict_legacy_fields_to_identifier(
submission: JsonDict,
@@ -883,6 +895,25 @@ class AuthHandler:
return True
+ async def create_login_token_for_user_id(
+ self,
+ user_id: str,
+ duration_ms: int = (2 * 60 * 1000),
+ auth_provider_id: Optional[str] = None,
+ auth_provider_session_id: Optional[str] = None,
+ ) -> str:
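+ """Create a short-term login token for the given user and store it.
+
+ Args:
+ user_id: The user to create the token for.
+ duration_ms: How long the token is valid for, in milliseconds.
+ auth_provider_id: The SSO identity provider the user authenticated with, if any.
+ auth_provider_session_id: The session ID obtained from the SSO identity provider.
+
+ Returns:
+ The login token.
+ """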
+ login_token = self.generate_login_token()
+ now = self._clock.time_msec()
+ expiry_ts = now + duration_ms
+ await self.store.add_login_token_to_user(
+ user_id=user_id,
+ token=login_token,
+ expiry_ts=expiry_ts,
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+ return login_token
+
async def create_refresh_token_for_user_id(
self,
user_id: str,
@@ -1009,6 +1040,17 @@ class AuthHandler:
return res[0]
return None
+ async def is_user_approved(self, user_id: str) -> bool:
+ """Checks if a user is approved and therefore can be allowed to log in.
+
+ Args:
+ user_id: the user to check the approval status of.
+
+ Returns:
+ A boolean that is True if the user is approved, False otherwise.
+ """
+ return await self.store.is_user_approved(user_id)
+
async def _find_user_id_and_pwd_hash(
self, user_id: str
) -> Optional[Tuple[str, str]]:
@@ -1390,6 +1432,18 @@ class AuthHandler:
return None
return user_id
+ def generate_login_token(self) -> str:
+ """Generates an opaque string, for use as an short-term login token"""
+
+ # we use the following format for login tokens:
+ # syl_<random string>_<base62 crc check>
+
+ random_string = stringutils.random_string(20)
+ base = f"syl_{random_string}"
+
+ crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
+ return f"{base}_{crc}"
+
def generate_access_token(self, for_user: UserID) -> str:
"""Generates an opaque string, for use as an access token"""
@@ -1416,16 +1470,17 @@ class AuthHandler:
crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
return f"{base}_{crc}"
- async def validate_short_term_login_token(
- self, login_token: str
- ) -> LoginTokenAttributes:
+ async def consume_login_token(self, login_token: str) -> LoginTokenLookupResult:
try:
- res = self.macaroon_gen.verify_short_term_login_token(login_token)
- except Exception:
- raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
+ return await self.store.consume_login_token(login_token)
+ except LoginTokenExpired:
+ invalid_login_token_counter.labels("expired").inc()
+ except LoginTokenReused:
+ invalid_login_token_counter.labels("reused").inc()
+ except NotFoundError:
+ invalid_login_token_counter.labels("not found").inc()
- await self.auth_blocking.check_auth_blocking(res.user_id)
- return res
+ raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
async def delete_access_token(self, access_token: str) -> None:
"""Invalidate a single access token
@@ -1700,7 +1755,7 @@ class AuthHandler:
)
# Create a login token
- login_token = self.macaroon_gen.generate_short_term_login_token(
+ login_token = await self.create_login_token_for_user_id(
registered_user_id,
auth_provider_id=auth_provider_id,
auth_provider_session_id=auth_provider_session_id,
diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py
index 7163af8004..fc467bc7c1 100644
--- a/synapse/handlers/cas.py
+++ b/synapse/handlers/cas.py
@@ -130,6 +130,9 @@ class CasHandler:
except PartialDownloadError as pde:
# Twisted raises this error if the connection is closed,
# even if that's being used old-http style to signal end-of-data
+ # Assertion is for mypy's benefit. Error.response is Optional[bytes],
+ # but a PartialDownloadError should always have a non-None response.
+ assert pde.response is not None
body = pde.response
except HttpResponseException as e:
description = (
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 961f8eb186..2567954679 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -273,11 +273,9 @@ class DeviceWorkerHandler:
possibly_left = possibly_changed | possibly_left
# Double check if we still share rooms with the given user.
- users_rooms = await self.store.get_rooms_for_users_with_stream_ordering(
- possibly_left
- )
+ users_rooms = await self.store.get_rooms_for_users(possibly_left)
for changed_user_id, entries in users_rooms.items():
- if any(e.room_id in room_ids for e in entries):
+ if any(rid in room_ids for rid in entries):
possibly_left.discard(changed_user_id)
else:
possibly_joined.discard(changed_user_id)
@@ -309,6 +307,17 @@ class DeviceWorkerHandler:
"self_signing_key": self_signing_key,
}
+ async def handle_room_un_partial_stated(self, room_id: str) -> None:
+ """Handles sending appropriate device list updates in a room that has
+ gone from partial to full state.
+ """
+
+ # TODO(faster_joins): worker mode support
+ # https://github.com/matrix-org/synapse/issues/12994
+ logger.error(
+ "Trying handling device list state for partial join: not supported on workers."
+ )
+
class DeviceHandler(DeviceWorkerHandler):
def __init__(self, hs: "HomeServer"):
@@ -746,6 +755,95 @@ class DeviceHandler(DeviceWorkerHandler):
finally:
self._handle_new_device_update_is_processing = False
+ async def handle_room_un_partial_stated(self, room_id: str) -> None:
+ """Handles sending appropriate device list updates in a room that has
+ gone from partial to full state.
+ """
+
+ # We defer to the device list updater to handle pending remote device
+ # list updates.
+ await self.device_list_updater.handle_room_un_partial_stated(room_id)
+
+ # Replay local updates.
+ (
+ join_event_id,
+ device_lists_stream_id,
+ ) = await self.store.get_join_event_id_and_device_lists_stream_id_for_partial_state(
+ room_id
+ )
+
+ # Get the local device list changes that have happened in the room since
+ # we started joining. If there are no updates there's nothing left to do.
+ changes = await self.store.get_device_list_changes_in_room(
+ room_id, device_lists_stream_id
+ )
+ local_changes = {(u, d) for u, d in changes if self.hs.is_mine_id(u)}
+ if not local_changes:
+ return
+
+ # Note: We have persisted the full state at this point, we just haven't
+ # cleared the `partial_room` flag.
+ join_state_ids = await self._state_storage.get_state_ids_for_event(
+ join_event_id, await_full_state=False
+ )
+ current_state_ids = await self.store.get_partial_current_state_ids(room_id)
+
+ # Now we need to work out all servers that might have been in the room
+ # at any point during our join.
+
+ # First we look for any membership states that have changed between the
+ # initial join and now...
+ all_keys = set(join_state_ids)
+ all_keys.update(current_state_ids)
+
+ potentially_changed_hosts = set()
+ for etype, state_key in all_keys:
+ if etype != EventTypes.Member:
+ continue
+
+ prev = join_state_ids.get((etype, state_key))
+ current = current_state_ids.get((etype, state_key))
+
+ if prev != current:
+ potentially_changed_hosts.add(get_domain_from_id(state_key))
+
+ # ... then we add all the hosts that are currently joined to the room...
+ current_hosts_in_room = await self.store.get_current_hosts_in_room(room_id)
+ potentially_changed_hosts.update(current_hosts_in_room)
+
+ # ... and finally we remove any hosts that we were told about, as we
+ # will have sent device list updates to those hosts when they happened.
+ known_hosts_at_join = await self.store.get_partial_state_servers_at_join(
+ room_id
+ )
+ potentially_changed_hosts.difference_update(known_hosts_at_join)
+
+ potentially_changed_hosts.discard(self.server_name)
+
+ if not potentially_changed_hosts:
+ # Nothing to do.
+ return
+
+ logger.info(
+ "Found %d changed hosts to send device list updates to",
+ len(potentially_changed_hosts),
+ )
+
+ for user_id, device_id in local_changes:
+ await self.store.add_device_list_outbound_pokes(
+ user_id=user_id,
+ device_id=device_id,
+ room_id=room_id,
+ stream_id=None,
+ hosts=potentially_changed_hosts,
+ context=None,
+ )
+
+ # Notify things that device lists need to be sent out.
+ self.notifier.notify_replication()
+ for host in potentially_changed_hosts:
+ self.federation_sender.send_device_messages(host, immediate=False)
+
def _update_device_from_client_ips(
device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]
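The host-selection arithmetic in `handle_room_un_partial_stated`, as a set-algebra sketch (names illustrative): hosts whose membership changed since the join, plus everyone currently in the room, minus the hosts we already knew about at join time (those were kept up to date as changes happened) and ourselves.

    from typing import Set

    def hosts_to_poke(
        changed_membership_hosts: Set[str],
        current_hosts: Set[str],
        known_hosts_at_join: Set[str],
        our_name: str,
    ) -> Set[str]:
        hosts = set(changed_membership_hosts)         # memberships changed mid-join
        hosts.update(current_hosts)                   # plus everyone currently joined
        hosts.difference_update(known_hosts_at_join)  # already kept up to date
        hosts.discard(our_name)                       # never poke ourselves
        return hosts

    assert hosts_to_poke({"a.org"}, {"a.org", "b.org", "us"}, {"b.org"}, "us") == {"a.org"}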
@@ -836,6 +934,19 @@ class DeviceListUpdater:
)
return
+ # Check if we are partially joining any rooms. If so we need to store
+ # all device list updates so that we can handle them correctly once we
+ # know who is in the room.
+ # TODO(faster joins): this fetches and processes a bunch of data that we don't
+ # use. Could be replaced by a tighter query e.g.
+ # SELECT EXISTS(SELECT 1 FROM partial_state_rooms)
+ partial_rooms = await self.store.get_partial_state_room_resync_info()
+ if partial_rooms:
+ await self.store.add_remote_device_list_to_pending(
+ user_id,
+ device_id,
+ )
+
room_ids = await self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
@@ -1175,3 +1286,35 @@ class DeviceListUpdater:
device_ids.append(verify_key.version)
return device_ids
+
+ async def handle_room_un_partial_stated(self, room_id: str) -> None:
+ """Handles sending appropriate device list updates in a room that has
+ gone from partial to full state.
+ """
+
+ pending_updates = (
+ await self.store.get_pending_remote_device_list_updates_for_room(room_id)
+ )
+
+ for user_id, device_id in pending_updates:
+ logger.info(
+ "Got pending device list update in room %s: %s / %s",
+ room_id,
+ user_id,
+ device_id,
+ )
+ position = await self.store.add_device_change_to_streams(
+ user_id,
+ [device_id],
+ room_ids=[room_id],
+ )
+
+ if not position:
+ # This should only happen if there are no updates, which
+ # shouldn't happen when we've passed in a non-empty set of
+ # device IDs.
+ continue
+
+ self.device_handler.notifier.on_new_event(
+ StreamKeyType.DEVICE_LIST, position, rooms=[room_id]
+ )
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 7127d5aefc..2ea52257cb 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -16,6 +16,8 @@ import logging
import string
from typing import TYPE_CHECKING, Iterable, List, Optional
+from typing_extensions import Literal
+
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
from synapse.api.errors import (
AuthError,
@@ -83,7 +85,7 @@ class DirectoryHandler:
# TODO(erikj): Add transactions.
# TODO(erikj): Check if there is a current association.
if not servers:
- servers = await self._storage_controllers.state.get_current_hosts_in_room(
+ servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
@@ -288,7 +290,7 @@ class DirectoryHandler:
Codes.NOT_FOUND,
)
- extra_servers = await self._storage_controllers.state.get_current_hosts_in_room(
+ extra_servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
servers_set = set(extra_servers) | set(servers)
@@ -429,7 +431,10 @@ class DirectoryHandler:
return await self.auth.check_can_change_room_list(room_id, requester)
async def edit_published_room_list(
- self, requester: Requester, room_id: str, visibility: str
+ self,
+ requester: Requester,
+ room_id: str,
+ visibility: Literal["public", "private"],
) -> None:
"""Edit the entry of the room in the published room list.
@@ -451,9 +456,6 @@ class DirectoryHandler:
if requester.is_guest:
raise AuthError(403, "Guests cannot edit the published room list")
- if visibility not in ["public", "private"]:
- raise SynapseError(400, "Invalid visibility setting")
-
if visibility == "public" and not self.enable_room_list_search:
# The room list has been disabled.
raise AuthError(
@@ -505,7 +507,11 @@ class DirectoryHandler:
await self.store.set_room_is_public(room_id, making_public)
async def edit_published_appservice_room_list(
- self, appservice_id: str, network_id: str, room_id: str, visibility: str
+ self,
+ appservice_id: str,
+ network_id: str,
+ room_id: str,
+ visibility: Literal["public", "private"],
) -> None:
"""Add or remove a room from the appservice/network specific public
room list.
@@ -516,9 +522,6 @@ class DirectoryHandler:
room_id
visibility: either "public" or "private"
"""
- if visibility not in ["public", "private"]:
- raise SynapseError(400, "Invalid visibility setting")
-
await self.store.set_room_is_public_appservice(
room_id, appservice_id, network_id, visibility == "public"
)
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index cf788a4a86..5f84f1769b 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -49,6 +49,7 @@ logger = logging.getLogger(__name__)
class E2eKeysHandler:
def __init__(self, hs: "HomeServer"):
+ self.config = hs.config
self.store = hs.get_datastores().main
self.federation = hs.get_federation_client()
self.device_handler = hs.get_device_handler()
@@ -431,13 +432,17 @@ class E2eKeysHandler:
@trace
@cancellable
async def query_local_devices(
- self, query: Mapping[str, Optional[List[str]]]
+ self,
+ query: Mapping[str, Optional[List[str]]],
+ include_displaynames: bool = True,
) -> Dict[str, Dict[str, dict]]:
"""Get E2E device keys for local users
Args:
query: map from user_id to a list
of devices to query (None for all devices)
+ include_displaynames: Whether to include device displaynames in the returned
+ device details.
Returns:
A map from user_id -> device_id -> device details
@@ -469,7 +474,9 @@ class E2eKeysHandler:
# make sure that each queried user appears in the result dict
result_dict[user_id] = {}
- results = await self.store.get_e2e_device_keys_for_cs_api(local_query)
+ results = await self.store.get_e2e_device_keys_for_cs_api(
+ local_query, include_displaynames
+ )
# Build the result structure
for user_id, device_keys in results.items():
@@ -482,11 +489,33 @@ class E2eKeysHandler:
async def on_federation_query_client_keys(
self, query_body: Dict[str, Dict[str, Optional[List[str]]]]
) -> JsonDict:
- """Handle a device key query from a federated server"""
+ """Handle a device key query from a federated server:
+
+ Handles the path: GET /_matrix/federation/v1/users/keys/query
+
+ Args:
+ query_body: The body of the query request. Should contain a key
+ "device_keys" that map to a dictionary of user ID's -> list of
+ device IDs. If the list of device IDs is empty, all devices of
+ that user will be queried.
+
+ Returns:
+ A json dictionary containing the following:
+ - device_keys: A dictionary containing the requested device information.
+ - master_keys: An optional dictionary of user ID -> master cross-signing
+ key info.
+ - self_signing_keys: An optional dictionary of user ID -> self-signing
+ key info.
+ """
device_keys_query: Dict[str, Optional[List[str]]] = query_body.get(
"device_keys", {}
)
- res = await self.query_local_devices(device_keys_query)
+ res = await self.query_local_devices(
+ device_keys_query,
+ include_displaynames=(
+ self.config.federation.allow_device_name_lookup_over_federation
+ ),
+ )
ret = {"device_keys": res}
# add in the cross-signing keys
@@ -841,7 +870,7 @@ class E2eKeysHandler:
- signatures of the user's master key by the user's devices.
Args:
- user_id (string): the user uploading the keys
+ user_id: the user uploading the keys
signatures (dict[string, dict]): map of devices to signed keys
Returns:
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 8786534e54..098288f058 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -377,8 +377,9 @@ class E2eRoomKeysHandler:
"""Deletes a given version of the user's e2e_room_keys backup
Args:
- user_id(str): the user whose current backup version we're deleting
- version(str): the version id of the backup being deleted
+ user_id: the user whose current backup version we're deleting
+ version: Optional. The version ID of the backup version we're deleting.
+ If missing, we delete the current backup version info.
Raises:
NotFoundError: if this backup version doesn't exist
"""
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index 8249ca1ed2..3bbad0271b 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Collection, List, Optional, Union
+from typing import TYPE_CHECKING, Collection, List, Mapping, Optional, Union
from synapse import event_auth
from synapse.api.constants import (
@@ -29,7 +29,6 @@ from synapse.event_auth import (
)
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
-from synapse.events.snapshot import EventContext
from synapse.types import StateMap, get_domain_from_id
if TYPE_CHECKING:
@@ -51,12 +50,21 @@ class EventAuthHandler:
async def check_auth_rules_from_context(
self,
event: EventBase,
- context: EventContext,
+ batched_auth_events: Optional[Mapping[str, EventBase]] = None,
) -> None:
- """Check an event passes the auth rules at its own auth events"""
- await check_state_independent_auth_rules(self._store, event)
+ """Check an event passes the auth rules at its own auth events
+ Args:
+ event: event to be authed
+ batched_auth_events: if the event being authed is part of a batch, any events
+ from the same batch that may be necessary to auth the current event
+ """
+ await check_state_independent_auth_rules(
+ self._store, event, batched_auth_events
+ )
auth_event_ids = event.auth_event_ids()
auth_events_by_id = await self._store.get_events(auth_event_ids)
+ if batched_auth_events:
+ auth_events_by_id.update(batched_auth_events)
check_state_dependent_auth_rules(event, auth_events_by_id.values())
def compute_auth_events(
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 73471fe041..79e792395f 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -38,13 +38,14 @@ from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
from synapse import event_auth
-from synapse.api.constants import EventContentFields, EventTypes, Membership
+from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
FederationDeniedError,
FederationError,
+ FederationPullAttemptBackoffError,
HttpResponseException,
LimitExceededError,
NotFoundError,
@@ -155,6 +156,8 @@ class FederationHandler:
self.http_client = hs.get_proxied_blacklisted_http_client()
self._replication = hs.get_replication_data_handler()
self._federation_event_handler = hs.get_federation_event_handler()
+ self._device_handler = hs.get_device_handler()
+ self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
hs
@@ -215,7 +218,7 @@ class FederationHandler:
current_depth: int,
limit: int,
*,
- processing_start_time: int,
+ processing_start_time: Optional[int],
) -> bool:
"""
Checks whether the `current_depth` is at or approaching any backfill
@@ -227,12 +230,23 @@ class FederationHandler:
room_id: The room to backfill in.
current_depth: The depth to check at for any upcoming backfill points.
limit: The max number of events to request from the remote federated server.
- processing_start_time: The time when `maybe_backfill` started
- processing. Only used for timing.
+ processing_start_time: The time when `maybe_backfill` started processing.
+ Only used for timing. If `None`, no timing observation will be made.
"""
backwards_extremities = [
_BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY)
- for event_id, depth in await self.store.get_backfill_points_in_room(room_id)
+ for event_id, depth in await self.store.get_backfill_points_in_room(
+ room_id=room_id,
+ current_depth=current_depth,
+ # We only need to end up with 5 extremities combined with the
+ # insertion event extremities to make the `/backfill` request
+ # but fetch an order of magnitude more to make sure there is
+ # enough even after we filter them by whether visible in the
+ # history. This isn't fool-proof as all backfill points within
+ # our limit could be filtered out but seems like a good amount
+ # to try with at least.
+ limit=50,
+ )
]
insertion_events_to_be_backfilled: List[_BackfillPoint] = []
@@ -240,7 +254,12 @@ class FederationHandler:
insertion_events_to_be_backfilled = [
_BackfillPoint(event_id, depth, _BackfillPointType.INSERTION_PONT)
for event_id, depth in await self.store.get_insertion_event_backward_extremities_in_room(
- room_id
+ room_id=room_id,
+ current_depth=current_depth,
+ # We only need to end up with 5 extremities combined with
+ # the backfill points to make the `/backfill` request ...
+ # (see the other comment above for more context).
+ limit=50,
)
]
logger.debug(
@@ -249,10 +268,6 @@ class FederationHandler:
insertion_events_to_be_backfilled,
)
- if not backwards_extremities and not insertion_events_to_be_backfilled:
- logger.debug("Not backfilling as no extremeties found.")
- return False
-
# we now have a list of potential places to backpaginate from. We prefer to
# start with the most recent (ie, max depth), so let's sort the list.
sorted_backfill_points: List[_BackfillPoint] = sorted(
@@ -273,6 +288,33 @@ class FederationHandler:
sorted_backfill_points,
)
+ # If we have no backfill points lower than the `current_depth`, then we
+ # can either a) bail or b) still attempt to backfill. We opt to try
+ # backfilling anyway just in case we do get relevant events.
+ if not sorted_backfill_points and current_depth != MAX_DEPTH:
+ logger.debug(
+ "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points."
+ )
+ return await self._maybe_backfill_inner(
+ room_id=room_id,
+ # We use `MAX_DEPTH` so that we find all backfill points next
+ # time (all events are below the `MAX_DEPTH`)
+ current_depth=MAX_DEPTH,
+ limit=limit,
+ # We don't want to start another timing observation from this
+ # nested recursive call. The top-most call can record the time
+ # overall otherwise the smaller one will throw off the results.
+ processing_start_time=None,
+ )
+
+ # Even after recursing with `MAX_DEPTH`, we didn't find any
+ # backward extremities to backfill from.
+ if not sorted_backfill_points:
+ logger.debug(
+ "_maybe_backfill_inner: Not backfilling as no backward extremeties found."
+ )
+ return False
+
# If we're approaching an extremity we trigger a backfill, otherwise we
# no-op.
#
@@ -282,47 +324,16 @@ class FederationHandler:
# chose more than one times the limit in case of failure, but choosing a
# much larger factor will result in triggering a backfill request much
# earlier than necessary.
- #
- # XXX: shouldn't we do this *after* the filter by depth below? Again, we don't
- # care about events that have happened after our current position.
- #
- max_depth = sorted_backfill_points[0].depth
- if current_depth - 2 * limit > max_depth:
+ max_depth_of_backfill_points = sorted_backfill_points[0].depth
+ if current_depth - 2 * limit > max_depth_of_backfill_points:
logger.debug(
"Not backfilling as we don't need to. %d < %d - 2 * %d",
- max_depth,
+ max_depth_of_backfill_points,
current_depth,
limit,
)
return False
- # We ignore extremities that have a greater depth than our current depth
- # as:
- # 1. we don't really care about getting events that have happened
- # after our current position; and
- # 2. we have likely previously tried and failed to backfill from that
- # extremity, so to avoid getting "stuck" requesting the same
- # backfill repeatedly we drop those extremities.
- #
- # However, we need to check that the filtered extremities are non-empty.
- # If they are empty then either we can a) bail or b) still attempt to
- # backfill. We opt to try backfilling anyway just in case we do get
- # relevant events.
- #
- filtered_sorted_backfill_points = [
- t for t in sorted_backfill_points if t.depth <= current_depth
- ]
- if filtered_sorted_backfill_points:
- logger.debug(
- "_maybe_backfill_inner: backfill points before current depth: %s",
- filtered_sorted_backfill_points,
- )
- sorted_backfill_points = filtered_sorted_backfill_points
- else:
- logger.debug(
- "_maybe_backfill_inner: all backfill points are *after* current depth. Backfilling anyway."
- )
-
# For performance's sake, we only want to paginate from a particular extremity
# if we can actually see the events we'll get. Otherwise, we'd just spend a lot
# of resources to get redacted events. We check each extremity in turn and
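For intuition, the extremity-proximity check as a worked example (numbers hypothetical): with a limit of 100, backfill triggers only once the current depth comes within `2 * limit = 200` of the deepest backfill point.

    def should_backfill(current_depth: int, max_depth_of_backfill_points: int, limit: int) -> bool:
        # backfill once we are within 2 * limit of the deepest backfill point
        return not (current_depth - 2 * limit > max_depth_of_backfill_points)

    assert should_backfill(current_depth=500, max_depth_of_backfill_points=350, limit=100)
    assert not should_backfill(current_depth=500, max_depth_of_backfill_points=250, limit=100)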
@@ -413,11 +424,22 @@ class FederationHandler:
# First we try hosts that are already in the room.
# TODO: HEURISTIC ALERT.
likely_domains = (
- await self._storage_controllers.state.get_current_hosts_in_room(room_id)
+ await self._storage_controllers.state.get_current_hosts_in_room_ordered(
+ room_id
+ )
)
async def try_backfill(domains: Collection[str]) -> bool:
# TODO: Should we try multiple of these at a time?
+
+ # Number of contacted remote homeservers that have denied our backfill
+ # request with a 4xx code.
+ denied_count = 0
+
+ # Maximum number of contacted remote homeservers that can deny our
+ # backfill request with 4xx codes before we give up.
+ max_denied_count = 5
+
for dom in domains:
# We don't want to ask our own server for information we don't have
if dom == self.server_name:
@@ -431,40 +453,68 @@ class FederationHandler:
# appropriate stuff.
# TODO: We can probably do something more intelligent here.
return True
+ except NotRetryingDestination as e:
+ logger.info("_maybe_backfill_inner: %s", e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "_maybe_backfill_inner: Not attempting to backfill from %s because the homeserver is not on our federation whitelist",
+ dom,
+ )
+ continue
except (SynapseError, InvalidResponseError) as e:
logger.info("Failed to backfill from %s because %s", dom, e)
continue
except HttpResponseException as e:
if 400 <= e.code < 500:
- raise e.to_synapse_error()
+ logger.warning(
+ "Backfill denied from %s because %s [%d/%d]",
+ dom,
+ e,
+ denied_count,
+ max_denied_count,
+ )
+ denied_count += 1
+ if denied_count >= max_denied_count:
+ return False
+ continue
logger.info("Failed to backfill from %s because %s", dom, e)
continue
except CodeMessageException as e:
if 400 <= e.code < 500:
- raise
+ logger.warning(
+ "Backfill denied from %s because %s [%d/%d]",
+ dom,
+ e,
+ denied_count,
+ max_denied_count,
+ )
+ denied_count += 1
+ if denied_count >= max_denied_count:
+ return False
+ continue
logger.info("Failed to backfill from %s because %s", dom, e)
continue
- except NotRetryingDestination as e:
- logger.info(str(e))
- continue
except RequestSendFailed as e:
logger.info("Failed to get backfill from %s because %s", dom, e)
continue
- except FederationDeniedError as e:
- logger.info(e)
- continue
except Exception as e:
logger.exception("Failed to backfill from %s because %s", dom, e)
continue
return False
- processing_end_time = self.clock.time_msec()
- backfill_processing_before_timer.observe(
- (processing_end_time - processing_start_time) / 1000
- )
+ # If we have the `processing_start_time`, then we can make an
+ # observation. We wouldn't have the `processing_start_time` in the case
+ # where `_maybe_backfill_inner` is recursively called to find any
+ # backfill points regardless of `current_depth`.
+ if processing_start_time is not None:
+ processing_end_time = self.clock.time_msec()
+ backfill_processing_before_timer.observe(
+ (processing_end_time - processing_start_time) / 1000
+ )
success = await try_backfill(likely_domains)
if success:
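The denial counting introduced in `try_backfill` caps how many 4xx responses we tolerate before abandoning the attempt entirely, instead of surfacing the first denial as an error. The loop shape, as a standalone sketch (the `fetch` callable and `HttpErrorSketch` type are stand-ins):

from typing import Awaitable, Callable, Iterable, Optional

class HttpErrorSketch(Exception):
    """Stand-in for HttpResponseException."""

    def __init__(self, code: int) -> None:
        self.code = code

async def first_successful(
    servers: Iterable[str],
    fetch: Callable[[str], Awaitable[dict]],
    max_denied_count: int = 5,
) -> Optional[dict]:
    denied_count = 0
    for server in servers:
        try:
            return await fetch(server)
        except HttpErrorSketch as e:
            if 400 <= e.code < 500:
                denied_count += 1
                if denied_count >= max_denied_count:
                    # Enough servers have deliberately denied us; give up.
                    return None
            continue  # other failures: just move on to the next server
    return None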
@@ -592,7 +642,12 @@ class FederationHandler:
# Mark the room as having partial state.
# The background process is responsible for unmarking this flag,
# even if the join fails.
- await self.store.store_partial_state_room(room_id, ret.servers_in_room)
+ await self.store.store_partial_state_room(
+ room_id=room_id,
+ servers=ret.servers_in_room,
+ device_lists_stream_id=self.store.get_device_stream_token(),
+ joined_via=origin,
+ )
try:
max_stream_id = (
@@ -617,6 +672,14 @@ class FederationHandler:
room_id,
)
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
+ else:
+ # Record the join event id for future use (when we finish the full
+ # join). We have to do this after persisting the event to keep foreign
+ # key constraints intact.
+ if ret.partial_state:
+ await self.store.write_partial_state_rooms_join_event_id(
+ room_id, event.event_id
+ )
finally:
# Always kick off the background process that asynchronously fetches
# state for the room.
@@ -734,15 +797,27 @@ class FederationHandler:
# Send the signed event back to the room, and potentially receive some
# further information about the room in the form of partial state events
- stripped_room_state = await self.federation_client.send_knock(
- target_hosts, event
- )
+ knock_response = await self.federation_client.send_knock(target_hosts, event)
# Store any stripped room state events in the "unsigned" key of the event.
# This is a bit of a hack and is cribbing off of invites. Basically we
# store the room state here and retrieve it again when this event appears
# in the invitee's sync stream. It is stripped out for all other local users.
- event.unsigned["knock_room_state"] = stripped_room_state["knock_state_events"]
+ stripped_room_state = (
+ knock_response.get("knock_room_state")
+ # Since v1.37, Synapse incorrectly used "knock_state_events" for this field.
+ # Thus, we also check for 'knock_state_events' to support old instances.
+ # See https://github.com/matrix-org/synapse/issues/14088.
+ or knock_response.get("knock_state_events")
+ )
+
+ if stripped_room_state is None:
+ raise KeyError(
+ "Missing 'knock_room_state' (or legacy 'knock_state_events') field in "
+ "send_knock response"
+ )
+
+ event.unsigned["knock_room_state"] = stripped_room_state
context = EventContext.for_outlier(self._storage_controllers)
stream_id = await self._federation_event_handler.persist_events_and_notify(
@@ -881,7 +956,7 @@ class FederationHandler:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
return event
async def on_invite_request(
@@ -955,9 +1030,17 @@ class FederationHandler:
)
context = EventContext.for_outlier(self._storage_controllers)
- await self._federation_event_handler.persist_events_and_notify(
- event.room_id, [(event, context)]
+
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
)
+ try:
+ await self._federation_event_handler.persist_events_and_notify(
+ event.room_id, [(event, context)]
+ )
+ except Exception:
+ await self.store.remove_push_actions_from_staging(event.event_id)
+ raise
return event
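The try/except above is a stage-then-clean pattern: push actions are computed into a staging area before the event is persisted, so a failed persist must remove them or they would leak. Sketched generically (the `evaluator`, `persist` and `store` arguments are stand-ins):

async def persist_with_push_actions(event, context, evaluator, persist, store):
    # Stage push actions first, so they are ready the moment the event lands.
    await evaluator.action_for_events_by_user([(event, context)])
    try:
        await persist(event.room_id, [(event, context)])
    except Exception:
        # Persisting failed: unstage the actions so they don't accumulate.
        await store.remove_push_actions_from_staging(event.event_id)
        raise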
@@ -1056,7 +1139,7 @@ class FederationHandler:
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Failed to create new leave %r because %s", event, e)
raise e
@@ -1115,7 +1198,7 @@ class FederationHandler:
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_knock_request`
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Failed to create new knock %r because %s", event, e)
raise e
@@ -1281,9 +1364,7 @@ class FederationHandler:
try:
validate_event_for_room_version(event)
- await self._event_auth_handler.check_auth_rules_from_context(
- event, context
- )
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Denying new third party invite %r because %s", event, e)
raise e
@@ -1333,7 +1414,7 @@ class FederationHandler:
try:
validate_event_for_room_version(event)
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Denying third party invite %r because %s", event, e)
raise e
@@ -1526,8 +1607,8 @@ class FederationHandler:
Fetch the complexity of a remote room over federation.
Args:
- remote_room_hosts (list[str]): The remote servers to ask.
- room_id (str): The room ID to ask about.
+ remote_room_hosts: The remote servers to ask.
+ room_id: The room ID to ask about.
Returns:
Dict contains the complexity
@@ -1549,13 +1630,13 @@ class FederationHandler:
"""Resumes resyncing of all partial-state rooms after a restart."""
assert not self.config.worker.worker_app
- partial_state_rooms = await self.store.get_partial_state_rooms_and_servers()
- for room_id, servers_in_room in partial_state_rooms.items():
+ partial_state_rooms = await self.store.get_partial_state_room_resync_info()
+ for room_id, resync_info in partial_state_rooms.items():
run_as_background_process(
desc="sync_partial_state_room",
func=self._sync_partial_state_room,
- initial_destination=None,
- other_destinations=servers_in_room,
+ initial_destination=resync_info.joined_via,
+ other_destinations=resync_info.servers_in_room,
room_id=room_id,
)
@@ -1584,28 +1665,12 @@ class FederationHandler:
# really leave, that might mean we have difficulty getting the room state over
# federation.
# https://github.com/matrix-org/synapse/issues/12802
- #
- # TODO(faster_joins): we need some way of prioritising which homeservers in
- # `other_destinations` to try first, otherwise we'll spend ages trying dead
- # homeservers for large rooms.
- # https://github.com/matrix-org/synapse/issues/12999
-
- if initial_destination is None and len(other_destinations) == 0:
- raise ValueError(
- f"Cannot resync state of {room_id}: no destinations provided"
- )
# Make an infinite iterator of destinations to try. Once we find a working
# destination, we'll stick with it until it flakes.
- destinations: Collection[str]
- if initial_destination is not None:
- # Move `initial_destination` to the front of the list.
- destinations = list(other_destinations)
- if initial_destination in destinations:
- destinations.remove(initial_destination)
- destinations = [initial_destination] + destinations
- else:
- destinations = other_destinations
+ destinations = _prioritise_destinations_for_partial_state_resync(
+ initial_destination, other_destinations, room_id
+ )
destination_iter = itertools.cycle(destinations)
# `destination` is the current remote homeserver we're pulling from.
@@ -1623,6 +1688,9 @@ class FederationHandler:
# https://github.com/matrix-org/synapse/issues/12994
await self.state_handler.update_current_state(room_id)
+ logger.info("Handling any pending device list updates")
+ await self._device_handler.handle_room_un_partial_stated(room_id)
+
logger.info("Clearing partial-state flag for %s", room_id)
success = await self.store.clear_partial_state_room(room_id)
if success:
@@ -1652,7 +1720,22 @@ class FederationHandler:
destination, event
)
break
+ except FederationPullAttemptBackoffError as exc:
+ # Log a warning about why we failed to process the event (the error message
+ # for `FederationPullAttemptBackoffError` is pretty good)
+ logger.warning("_sync_partial_state_room: %s", exc)
+ # We do not record a failed pull attempt when we backoff fetching a missing
+ # `prev_event` because not being able to fetch the `prev_events` just means
+ # we won't be able to de-outlier the pulled event. But we can still use an
+ # `outlier` in the state/auth chain for another event. So we shouldn't stop
+ # a downstream event from trying to pull it.
+ #
+ # This avoids a cascade of backoff for all events in the DAG downstream from
+ # one backed-off event upstream.
except FederationError as e:
+ # TODO: We should `record_event_failed_pull_attempt` here,
+ # see https://github.com/matrix-org/synapse/issues/13700
+
if attempt == len(destinations) - 1:
# We have tried every remote server for this event. Give up.
# TODO(faster_joins) giving up isn't the right thing to do
@@ -1685,3 +1768,29 @@ class FederationHandler:
room_id,
destination,
)
+
+
+def _prioritise_destinations_for_partial_state_resync(
+ initial_destination: Optional[str],
+ other_destinations: Collection[str],
+ room_id: str,
+) -> Collection[str]:
+ """Work out the order in which we should ask servers to resync events.
+
+ If an `initial_destination` is given, it takes top priority. Otherwise
+ all servers are treated equally.
+
+ Raises:
+ ValueError: if no destination is provided at all.
+ """
+ if initial_destination is None and len(other_destinations) == 0:
+ raise ValueError(f"Cannot resync state of {room_id}: no destinations provided")
+
+ if initial_destination is None:
+ return other_destinations
+
+ # Move `initial_destination` to the front of the list.
+ destinations = list(other_destinations)
+ if initial_destination in destinations:
+ destinations.remove(initial_destination)
+ destinations = [initial_destination] + destinations
+ return destinations
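For illustration, the helper's behaviour on concrete inputs, as implied by the implementation above:

# _prioritise_destinations_for_partial_state_resync(
#     initial_destination="matrix.org",
#     other_destinations=["a.example", "matrix.org", "b.example"],
#     room_id="!room:example",
# )
# -> ["matrix.org", "a.example", "b.example"]
#
# _prioritise_destinations_for_partial_state_resync(None, [], "!room:example")
# -> raises ValueError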
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 00a8860ff3..378b863c5f 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -44,6 +44,7 @@ from synapse.api.errors import (
AuthError,
Codes,
FederationError,
+ FederationPullAttemptBackoffError,
HttpResponseException,
RequestSendFailed,
SynapseError,
@@ -57,7 +58,7 @@ from synapse.event_auth import (
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
-from synapse.federation.federation_client import InvalidResponseError
+from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
from synapse.logging.context import nested_logging_context
from synapse.logging.tracing import (
SynapseTags,
@@ -414,7 +415,9 @@ class FederationEventHandler:
# First, precalculate the joined hosts so that the federation sender doesn't
# need to.
- await self._event_creation_handler.cache_joined_hosts_for_event(event, context)
+ await self._event_creation_handler.cache_joined_hosts_for_events(
+ [(event, context)]
+ )
await self._check_for_soft_fail(event, context=context, origin=origin)
await self._run_push_actions_and_persist_event(event, context)
@@ -565,6 +568,9 @@ class FederationEventHandler:
event: partial-state event to be de-partial-stated
Raises:
+ FederationPullAttemptBackoffError if we are deliberately not attempting
+ to pull the given event over federation because we've already done so
+ recently and are backing off.
FederationError if we fail to request state from the remote server.
"""
logger.info("Updating state for %s", event.event_id)
@@ -792,9 +798,42 @@ class FederationEventHandler:
],
)
+ # Check if we already have any of these events.
+ # Note: we currently make a lookup in the database directly here rather than
+ # checking the event cache, due to:
+ # https://github.com/matrix-org/synapse/issues/13476
+ existing_events_map = await self._store._get_events_from_db(
+ [event.event_id for event in events]
+ )
+
+ new_events = []
+ for event in events:
+ event_id = event.event_id
+
+ # If we've already seen this event ID...
+ if event_id in existing_events_map:
+ existing_event = existing_events_map[event_id]
+
+ # ...and the event itself was not previously stored as an outlier...
+ if not existing_event.event.internal_metadata.is_outlier():
+ # ...then there's no need to persist it. We have it already.
+ logger.info(
+ "_process_pulled_event: Ignoring received event %s which we "
+ "have already seen",
+ event.event_id,
+ )
+ continue
+
+ # While we have seen this event before, it was stored as an outlier.
+ # We'll now persist it as a non-outlier.
+ logger.info("De-outliering event %s", event_id)
+
+ # Continue on with the events that are new to us.
+ new_events.append(event)
+
# We want to sort these by depth so we process them and
# tell clients about them in order.
- sorted_events = sorted(events, key=lambda x: x.depth)
+ sorted_events = sorted(new_events, key=lambda x: x.depth)
for ev in sorted_events:
with nested_logging_context(ev.event_id):
await self._process_pulled_event(origin, ev, backfilled=backfilled)
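The filter above keeps a pulled event only if it is new to us or previously stored as an outlier; anything already persisted in full can be skipped. As a compact sketch (the `existing` mapping is a stand-in for the DB lookup):

from typing import Dict, List

def events_needing_persist(
    pulled_ids: List[str], existing: Dict[str, bool]
) -> List[str]:
    """`existing` maps known event IDs to whether they are stored as outliers."""
    keep = []
    for event_id in pulled_ids:
        if event_id in existing and not existing[event_id]:
            continue  # already persisted as a full event: nothing to do
        # Unknown to us, or known only as an outlier we can now de-outlier.
        keep.append(event_id)
    return keep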
@@ -846,18 +885,6 @@ class FederationEventHandler:
event_id = event.event_id
- existing = await self._store.get_event(
- event_id, allow_none=True, allow_rejected=True
- )
- if existing:
- if not existing.internal_metadata.is_outlier():
- logger.info(
- "_process_pulled_event: Ignoring received event %s which we have already seen",
- event_id,
- )
- return
- logger.info("De-outliering event %s", event_id)
-
try:
self._sanity_check_event(event)
except SynapseError as err:
@@ -866,11 +893,6 @@ class FederationEventHandler:
event.room_id, event_id, str(err)
)
return
- except Exception as exc:
- await self._store.record_event_failed_pull_attempt(
- event.room_id, event_id, str(exc)
- )
- raise exc
try:
try:
@@ -904,6 +926,18 @@ class FederationEventHandler:
context,
backfilled=backfilled,
)
+ except FederationPullAttemptBackoffError as exc:
+ # Log a warning about why we failed to process the event (the error message
+ # for `FederationPullAttemptBackoffError` is pretty good)
+ logger.warning("_process_pulled_event: %s", exc)
+ # We do not record a failed pull attempt when we backoff fetching a missing
+ # `prev_event` because not being able to fetch the `prev_events` just means
+ # we won't be able to de-outlier the pulled event. But we can still use an
+ # `outlier` in the state/auth chain for another event. So we shouldn't stop
+ # a downstream event from trying to pull it.
+ #
+ # This avoids a cascade of backoff for all events in the DAG downstream from
+ # one backed-off event upstream.
except FederationError as e:
await self._store.record_event_failed_pull_attempt(
event.room_id, event_id, str(e)
@@ -913,11 +947,6 @@ class FederationEventHandler:
logger.warning("Pulled event %s failed history check.", event_id)
else:
raise
- except Exception as exc:
- await self._store.record_event_failed_pull_attempt(
- event.room_id, event_id, str(exc)
- )
- raise exc
@trace
async def _compute_event_context_with_maybe_missing_prevs(
@@ -955,6 +984,9 @@ class FederationEventHandler:
The event context.
Raises:
+ FederationPullAttemptBackoffError if we are deliberately not attempting
+ to pull the given event over federation because we've already done so
+ recently and are backing off.
FederationError if we fail to get the state from the remote server after any
missing `prev_event`s.
"""
@@ -965,6 +997,18 @@ class FederationEventHandler:
seen = await self._store.have_events_in_timeline(prevs)
missing_prevs = prevs - seen
+ # If we've already recently attempted to pull this missing event, don't
+ # try it again so soon. Since we have to fetch all of the prev_events, we can
+ # bail early here if we find any to ignore.
+ prevs_to_ignore = await self._store.get_event_ids_to_not_pull_from_backoff(
+ room_id, missing_prevs
+ )
+ if len(prevs_to_ignore) > 0:
+ raise FederationPullAttemptBackoffError(
+ event_ids=prevs_to_ignore,
+ message=f"While computing context for event={event_id}, not attempting to pull missing prev_event={prevs_to_ignore[0]} because we already tried to pull recently (backing off).",
+ )
+
if not missing_prevs:
return await self._state_handler.compute_event_context(event)
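The early bail above is plain set arithmetic: the prev_events we lack are `prevs - seen`, and if any of them are under pull backoff we refuse to proceed, since computing state requires every prev_event. A sketch (the RuntimeError stands in for FederationPullAttemptBackoffError):

async def check_prevs_pullable(store, room_id: str, prevs: set, seen: set) -> set:
    missing_prevs = prevs - seen
    to_ignore = await store.get_event_ids_to_not_pull_from_backoff(
        room_id, missing_prevs
    )
    if len(to_ignore) > 0:
        # We need *all* prev_events, so one backed-off ID sinks the attempt.
        raise RuntimeError(f"backing off pulling {to_ignore[0]}")
    return missing_prevs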
@@ -1021,10 +1065,9 @@ class FederationEventHandler:
state_res_store=StateResolutionStore(self._store),
)
- except Exception:
+ except Exception as e:
logger.warning(
- "Error attempting to resolve state at missing prev_events",
- exc_info=True,
+ "Error attempting to resolve state at missing prev_events: %s", e
)
raise FederationError(
"ERROR",
@@ -1478,8 +1521,8 @@ class FederationEventHandler:
)
async def backfill_event_id(
- self, destination: str, room_id: str, event_id: str
- ) -> EventBase:
+ self, destinations: List[str], room_id: str, event_id: str
+ ) -> PulledPduInfo:
"""Backfill a single event and persist it as a non-outlier which means
we also pull in all of the state and auth events necessary for it.
@@ -1491,24 +1534,21 @@ class FederationEventHandler:
Raises:
FederationError if we are unable to find the event from the destination
"""
- logger.info(
- "backfill_event_id: event_id=%s from destination=%s", event_id, destination
- )
+ logger.info("backfill_event_id: event_id=%s", event_id)
room_version = await self._store.get_room_version(room_id)
- event_from_response = await self._federation_client.get_pdu(
- [destination],
+ pulled_pdu_info = await self._federation_client.get_pdu(
+ destinations,
event_id,
room_version,
)
- if not event_from_response:
+ if not pulled_pdu_info:
raise FederationError(
"ERROR",
404,
- "Unable to find event_id=%s from destination=%s to backfill."
- % (event_id, destination),
+ f"Unable to find event_id={event_id} from remote servers to backfill.",
affected=event_id,
)
@@ -1516,13 +1556,13 @@ class FederationEventHandler:
# and auth events to de-outlier it. This also sets up the necessary
# `state_groups` for the event.
await self._process_pulled_events(
- destination,
- [event_from_response],
+ pulled_pdu_info.pull_origin,
+ [pulled_pdu_info.pdu],
# Prevent notifications going to clients
backfilled=True,
)
- return event_from_response
+ return pulled_pdu_info
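`get_pdu` now returns a richer object than a bare event, so callers such as `backfill_event_id` can report which of the candidate servers actually supplied the PDU. Judging only by the attribute accesses in this diff, its shape is presumably something like:

from dataclasses import dataclass

@dataclass(frozen=True)
class PulledPduInfoSketch:
    """Illustrative stand-in for PulledPduInfo, inferred from its usage here."""

    pdu: object  # the pulled event (an EventBase in Synapse)
    pull_origin: str  # which server actually supplied it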
@trace
@tag_args
@@ -1545,19 +1585,19 @@ class FederationEventHandler:
async def get_event(event_id: str) -> None:
with nested_logging_context(event_id):
try:
- event = await self._federation_client.get_pdu(
+ pulled_pdu_info = await self._federation_client.get_pdu(
[destination],
event_id,
room_version,
)
- if event is None:
+ if pulled_pdu_info is None:
logger.warning(
"Server %s didn't return event %s",
destination,
event_id,
)
return
- events.append(event)
+ events.append(pulled_pdu_info.pdu)
except Exception as e:
logger.warning(
@@ -2132,8 +2172,8 @@ class FederationEventHandler:
min_depth,
)
else:
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
)
try:
@@ -2175,6 +2215,7 @@ class FederationEventHandler:
if instance != self._instance_name:
# Limit the number of events sent over replication. We choose 200
# here as that is what we default to in `max_request_body_size(..)`
+ result = {}  # ensure "result" is defined even if there are no batches
try:
for batch in batch_iter(event_and_contexts, 200):
result = await self._send_events(
@@ -2254,8 +2295,8 @@ class FederationEventHandler:
event_pos = PersistedEventPosition(
self._instance_name, event.internal_metadata.stream_ordering
)
- await self._notifier.on_new_room_event(
- event, event_pos, max_stream_token, extra_users=extra_users
+ await self._notifier.on_new_room_events(
+ [(event, event_pos)], max_stream_token, extra_users=extra_users
)
if event.type == EventTypes.Member and event.membership == Membership.JOIN:
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 93d09e9939..848e46eb9b 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -711,7 +711,7 @@ class IdentityHandler:
inviter_display_name: The current display name of the
inviter.
inviter_avatar_url: The URL of the inviter's avatar.
- id_access_token (str): The access token to authenticate to the identity
+ id_access_token: The access token to authenticate to the identity
server with
Returns:
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 860c82c110..9c335e6863 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -57,13 +57,7 @@ class InitialSyncHandler:
self.validator = EventValidator()
self.snapshot_cache: ResponseCache[
Tuple[
- str,
- Optional[StreamToken],
- Optional[StreamToken],
- str,
- Optional[int],
- bool,
- bool,
+ str, Optional[StreamToken], Optional[StreamToken], str, int, bool, bool
]
] = ResponseCache(hs.get_clock(), "initial_sync_cache")
self._event_serializer = hs.get_event_client_serializer()
@@ -154,11 +148,6 @@ class InitialSyncHandler:
public_room_ids = await self.store.get_public_room_ids()
- if pagin_config.limit is not None:
- limit = pagin_config.limit
- else:
- limit = 10
-
serializer_options = SerializeEventConfig(as_client_event=as_client_event)
async def handle_room(event: RoomsForUser) -> None:
@@ -210,7 +199,7 @@ class InitialSyncHandler:
run_in_background(
self.store.get_recent_events_for_room,
event.room_id,
- limit=limit,
+ limit=pagin_config.limit,
end_token=room_end_token,
),
deferred_room_state,
@@ -360,15 +349,11 @@ class InitialSyncHandler:
member_event_id
)
- limit = pagin_config.limit if pagin_config else None
- if limit is None:
- limit = 10
-
leave_position = await self.store.get_position_for_event(member_event_id)
stream_token = leave_position.to_room_stream_token()
messages, token = await self.store.get_recent_events_for_room(
- room_id, limit=limit, end_token=stream_token
+ room_id, limit=pagin_config.limit, end_token=stream_token
)
messages = await filter_events_for_client(
@@ -420,10 +405,6 @@ class InitialSyncHandler:
now_token = self.hs.get_event_sources().get_current_token()
- limit = pagin_config.limit if pagin_config else None
- if limit is None:
- limit = 10
-
room_members = [
m
for m in current_state.values()
@@ -467,7 +448,7 @@ class InitialSyncHandler:
run_in_background(
self.store.get_recent_events_for_room,
room_id,
- limit=limit,
+ limit=pagin_config.limit,
end_token=now_token.room_key,
),
),
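All three call sites above drop their local `limit = 10` fallback in favour of `pagin_config.limit`, and the response-cache key changes from `Optional[int]` to `int`, which suggests the limit is now guaranteed to be set further upstream. The centralised-default pattern, sketched with a hypothetical parser:

from dataclasses import dataclass

@dataclass
class PaginationConfigSketch:
    """Illustrative only: the default is applied once at parse time."""

    limit: int

def parse_pagination(args: dict, default_limit: int = 10) -> PaginationConfigSketch:
    raw = args.get("limit")
    return PaginationConfigSketch(
        limit=int(raw) if raw is not None else default_limit
    )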
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 10b5dad030..f2a0101733 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -56,13 +56,16 @@ from synapse.logging import tracing
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
+from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import (
MutableStateMap,
+ PersistedEventPosition,
Requester,
RoomAlias,
+ StateMap,
StreamToken,
UserID,
create_requester,
@@ -492,6 +495,7 @@ class EventCreationHandler:
self.membership_types_to_include_profile_data_in.add(Membership.INVITE)
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
+ self.send_events = ReplicationSendEventsRestServlet.make_client(hs)
self.request_ratelimiter = hs.get_request_ratelimiter()
@@ -567,9 +571,17 @@ class EventCreationHandler:
outlier: bool = False,
historical: bool = False,
depth: Optional[int] = None,
+ state_map: Optional[StateMap[str]] = None,
+ for_batch: bool = False,
+ current_state_group: Optional[int] = None,
) -> Tuple[EventBase, EventContext]:
"""
- Given a dict from a client, create a new event.
+ Given a dict from a client, create a new event. If for_batch is True, the
+ event will be created using the given prev_event_ids, and its event context
+ will be built from the state_map and current_state_group parameters, so both
+ of those must be provided in that case. The resulting event and context are
+ suitable for being batched up and bulk persisted to the database with other
+ similarly created events.
Creates an FrozenEvent object, filling out auth_events, prev_events,
etc.
@@ -612,16 +624,27 @@ class EventCreationHandler:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
+
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
+
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
+ state_map: A state map of previously created events, used only when creating events
+ for batch persisting
+
+ for_batch: whether the event is being created for batch persisting to the db
+
+ current_state_group: the current state group, used only for creating events for
+ batch persisting
+
Raises:
ResourceLimitError if server is blocked to some resource being
exceeded
+
Returns:
Tuple of created event, Context
"""
@@ -693,6 +716,9 @@ class EventCreationHandler:
auth_event_ids=auth_event_ids,
state_event_ids=state_event_ids,
depth=depth,
+ state_map=state_map,
+ for_batch=for_batch,
+ current_state_group=current_state_group,
)
# In an ideal world we wouldn't need the second part of this condition. However,
@@ -707,10 +733,14 @@ class EventCreationHandler:
# federation as well as those created locally. As of room v3, aliases events
# can be created by users that are not in the room, therefore we have to
# tolerate them in event_auth.check().
- prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types([(EventTypes.Member, None)])
- )
- prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
+ if for_batch:
+ assert state_map is not None
+ prev_event_id = state_map.get((EventTypes.Member, event.sender))
+ else:
+ prev_state_ids = await context.get_prev_state_ids(
+ StateFilter.from_types([(EventTypes.Member, None)])
+ )
+ prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
prev_event = (
await self.store.get_event(prev_event_id, allow_none=True)
if prev_event_id
@@ -847,6 +877,36 @@ class EventCreationHandler:
return prev_event
return None
+ async def get_event_from_transaction(
+ self,
+ requester: Requester,
+ txn_id: str,
+ room_id: str,
+ ) -> Optional[EventBase]:
+ """For the given transaction ID and room ID, check if there is a matching event.
+ If so, fetch it and return it.
+
+ Args:
+ requester: The requester making the request in the context of which we want
+ to fetch the event.
+ txn_id: The transaction ID.
+ room_id: The room ID.
+
+ Returns:
+ An event if one could be found, None otherwise.
+ """
+ if requester.access_token_id:
+ existing_event_id = await self.store.get_event_id_from_transaction_id(
+ room_id,
+ requester.user.to_string(),
+ requester.access_token_id,
+ txn_id,
+ )
+ if existing_event_id:
+ return await self.store.get_event(existing_event_id)
+
+ return None
+
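Callers are expected to use the helper as the call site below does: check the transaction ID first and only create a new event when nothing matches. Roughly (every name here is assumed to be in scope):

async def send_idempotently(handler, requester, txn_id, room_id, create_and_send):
    # A retransmitted transaction ID must yield the originally persisted
    # event rather than creating a duplicate.
    if txn_id:
        existing = await handler.get_event_from_transaction(
            requester, txn_id, room_id
        )
        if existing is not None:
            return existing
    return await create_and_send()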
async def create_and_send_nonmember_event(
self,
requester: Requester,
@@ -926,18 +986,17 @@ class EventCreationHandler:
# extremities to pile up, which in turn leads to state resolution
# taking longer.
async with self.limiter.queue(event_dict["room_id"]):
- if txn_id and requester.access_token_id:
- existing_event_id = await self.store.get_event_id_from_transaction_id(
- event_dict["room_id"],
- requester.user.to_string(),
- requester.access_token_id,
- txn_id,
+ if txn_id:
+ event = await self.get_event_from_transaction(
+ requester, txn_id, event_dict["room_id"]
)
- if existing_event_id:
- event = await self.store.get_event(existing_event_id)
+ if event:
# we know it was persisted, so must have a stream ordering
assert event.internal_metadata.stream_ordering
- return event, event.internal_metadata.stream_ordering
+ return (
+ event,
+ event.internal_metadata.stream_ordering,
+ )
event, context = await self.create_event(
requester,
@@ -989,8 +1048,7 @@ class EventCreationHandler:
ev = await self.handle_new_client_event(
requester=requester,
- event=event,
- context=context,
+ events_and_context=[(event, context)],
ratelimit=ratelimit,
ignore_shadow_ban=ignore_shadow_ban,
)
@@ -1009,8 +1067,16 @@ class EventCreationHandler:
auth_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
+ state_map: Optional[StateMap[str]] = None,
+ for_batch: bool = False,
+ current_state_group: Optional[int] = None,
) -> Tuple[EventBase, EventContext]:
- """Create a new event for a local client
+ """Create a new event for a local client. If bool for_batch is true, will
+ create an event using the prev_event_ids, and will create an event context for
+ the event using the parameters state_map and current_state_group, thus these parameters
+ must be provided in this case if for_batch is True. The subsequently created event
+ and context are suitable for being batched up and bulk persisted to the database
+ with other similarly created events.
Args:
builder:
@@ -1043,6 +1109,14 @@ class EventCreationHandler:
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
+ state_map: A state map of previously created events, used only when creating events
+ for batch persisting
+
+ for_batch: whether the event is being created for batch persisting to the db
+
+ current_state_group: the current state group, used only for creating events for
+ batch persisting
+
Returns:
Tuple of created event, context
"""
@@ -1095,64 +1169,76 @@ class EventCreationHandler:
builder.type == EventTypes.Create or prev_event_ids
), "Attempting to create a non-m.room.create event with no prev_events"
- event = await builder.build(
- prev_event_ids=prev_event_ids,
- auth_event_ids=auth_event_ids,
- depth=depth,
- )
-
- # Pass on the outlier property from the builder to the event
- # after it is created
- if builder.internal_metadata.outlier:
- event.internal_metadata.outlier = True
- context = EventContext.for_outlier(self._storage_controllers)
- elif (
- event.type == EventTypes.MSC2716_INSERTION
- and state_event_ids
- and builder.internal_metadata.is_historical()
- ):
- # Add explicit state to the insertion event so it has state to derive
- # from even though it's floating with no `prev_events`. The rest of
- # the batch can derive from this state and state_group.
- #
- # TODO(faster_joins): figure out how this works, and make sure that the
- # old state is complete.
- # https://github.com/matrix-org/synapse/issues/13003
- metadata = await self.store.get_metadata_for_events(state_event_ids)
-
- state_map_for_event: MutableStateMap[str] = {}
- for state_id in state_event_ids:
- data = metadata.get(state_id)
- if data is None:
- # We're trying to persist a new historical batch of events
- # with the given state, e.g. via
- # `RoomBatchSendEventRestServlet`. The state can be inferred
- # by Synapse or set directly by the client.
- #
- # Either way, we should have persisted all the state before
- # getting here.
- raise Exception(
- f"State event {state_id} not found in DB,"
- " Synapse should have persisted it before using it."
- )
-
- if data.state_key is None:
- raise Exception(
- f"Trying to set non-state event {state_id} as state"
- )
-
- state_map_for_event[(data.event_type, data.state_key)] = state_id
-
- context = await self.state.compute_event_context(
- event,
- state_ids_before_event=state_map_for_event,
- # TODO(faster_joins): check how MSC2716 works and whether we can have
- # partial state here
- # https://github.com/matrix-org/synapse/issues/13003
- partial_state=False,
+ if for_batch:
+ assert prev_event_ids is not None
+ assert state_map is not None
+ assert current_state_group is not None
+ auth_ids = self._event_auth_handler.compute_auth_events(builder, state_map)
+ event = await builder.build(
+ prev_event_ids=prev_event_ids, auth_event_ids=auth_ids, depth=depth
+ )
+ context = await self.state.compute_event_context_for_batched(
+ event, state_map, current_state_group
)
else:
- context = await self.state.compute_event_context(event)
+ event = await builder.build(
+ prev_event_ids=prev_event_ids,
+ auth_event_ids=auth_event_ids,
+ depth=depth,
+ )
+
+ # Pass on the outlier property from the builder to the event
+ # after it is created
+ if builder.internal_metadata.outlier:
+ event.internal_metadata.outlier = True
+ context = EventContext.for_outlier(self._storage_controllers)
+ elif (
+ event.type == EventTypes.MSC2716_INSERTION
+ and state_event_ids
+ and builder.internal_metadata.is_historical()
+ ):
+ # Add explicit state to the insertion event so it has state to derive
+ # from even though it's floating with no `prev_events`. The rest of
+ # the batch can derive from this state and state_group.
+ #
+ # TODO(faster_joins): figure out how this works, and make sure that the
+ # old state is complete.
+ # https://github.com/matrix-org/synapse/issues/13003
+ metadata = await self.store.get_metadata_for_events(state_event_ids)
+
+ state_map_for_event: MutableStateMap[str] = {}
+ for state_id in state_event_ids:
+ data = metadata.get(state_id)
+ if data is None:
+ # We're trying to persist a new historical batch of events
+ # with the given state, e.g. via
+ # `RoomBatchSendEventRestServlet`. The state can be inferred
+ # by Synapse or set directly by the client.
+ #
+ # Either way, we should have persisted all the state before
+ # getting here.
+ raise Exception(
+ f"State event {state_id} not found in DB,"
+ " Synapse should have persisted it before using it."
+ )
+
+ if data.state_key is None:
+ raise Exception(
+ f"Trying to set non-state event {state_id} as state"
+ )
+
+ state_map_for_event[(data.event_type, data.state_key)] = state_id
+
+ context = await self.state.compute_event_context(
+ event,
+ state_ids_before_event=state_map_for_event,
+ # TODO(faster_joins): check how MSC2716 works and whether we can have
+ # partial state here
+ # https://github.com/matrix-org/synapse/issues/13003
+ partial_state=False,
+ )
+ else:
+ context = await self.state.compute_event_context(event)
if requester:
context.app_service = requester.app_service
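In miniature, the branch above picks between two context-computation paths: batched events cannot consult the DB for auth or state, because their predecessors may sit unpersisted in the same batch, so everything comes from the caller-supplied state_map and current_state_group. A sketch, where compute_auth_events_sketch stands in for the real auth computation:

def compute_auth_events_sketch(builder, state_map):
    """Stand-in: pick the auth event IDs for `builder` out of `state_map`."""
    return list(state_map.values())

async def build_event_sketch(
    builder, state, *, for_batch, prev_event_ids=None, auth_event_ids=None,
    depth=None, state_map=None, current_state_group=None
):
    if for_batch:
        # Batched path: both auth events and the context derive from the
        # caller's state_map, since earlier batch members are not in the DB.
        assert prev_event_ids is not None
        assert state_map is not None
        assert current_state_group is not None
        auth_ids = compute_auth_events_sketch(builder, state_map)
        event = await builder.build(
            prev_event_ids=prev_event_ids, auth_event_ids=auth_ids, depth=depth
        )
        context = await state.compute_event_context_for_batched(
            event, state_map, current_state_group
        )
    else:
        event = await builder.build(
            prev_event_ids=prev_event_ids,
            auth_event_ids=auth_event_ids,
            depth=depth,
        )
        context = await state.compute_event_context(event)
    return event, context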
@@ -1238,13 +1324,13 @@ class EventCreationHandler:
async def handle_new_client_event(
self,
requester: Requester,
- event: EventBase,
- context: EventContext,
+ events_and_context: List[Tuple[EventBase, EventContext]],
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
ignore_shadow_ban: bool = False,
) -> EventBase:
- """Processes a new event.
+ """Processes new events. Please note that if batch persisting events, an error in
+ handling any one of these events will result in all of the events being dropped.
This includes deduplicating, checking auth, persisting,
notifying users, sending to remote servers, etc.
@@ -1254,8 +1340,7 @@ class EventCreationHandler:
Args:
requester
- event
- context
+ events_and_context: A list of one or more tuples of event, context to be persisted
ratelimit
extra_users: Any extra users to notify about event
@@ -1273,67 +1358,76 @@ class EventCreationHandler:
"""
extra_users = extra_users or []
- # we don't apply shadow-banning to membership events here. Invites are blocked
- # higher up the stack, and we allow shadow-banned users to send join and leave
- # events as normal.
- if (
- event.type != EventTypes.Member
- and not ignore_shadow_ban
- and requester.shadow_banned
- ):
- # We randomly sleep a bit just to annoy the requester.
- await self.clock.sleep(random.randint(1, 10))
- raise ShadowBanError()
+ for event, context in events_and_context:
+ # we don't apply shadow-banning to membership events here. Invites are blocked
+ # higher up the stack, and we allow shadow-banned users to send join and leave
+ # events as normal.
+ if (
+ event.type != EventTypes.Member
+ and not ignore_shadow_ban
+ and requester.shadow_banned
+ ):
+ # We randomly sleep a bit just to annoy the requester.
+ await self.clock.sleep(random.randint(1, 10))
+ raise ShadowBanError()
- if event.is_state():
- prev_event = await self.deduplicate_state_event(event, context)
- if prev_event is not None:
- logger.info(
- "Not bothering to persist state event %s duplicated by %s",
- event.event_id,
- prev_event.event_id,
- )
- return prev_event
+ if event.is_state():
+ prev_event = await self.deduplicate_state_event(event, context)
+ if prev_event is not None:
+ logger.info(
+ "Not bothering to persist state event %s duplicated by %s",
+ event.event_id,
+ prev_event.event_id,
+ )
+ return prev_event
- if event.internal_metadata.is_out_of_band_membership():
- # the only sort of out-of-band-membership events we expect to see here are
- # invite rejections and rescinded knocks that we have generated ourselves.
- assert event.type == EventTypes.Member
- assert event.content["membership"] == Membership.LEAVE
- else:
+ if event.internal_metadata.is_out_of_band_membership():
+ # the only sort of out-of-band-membership events we expect to see here are
+ # invite rejections and rescinded knocks that we have generated ourselves.
+ assert event.type == EventTypes.Member
+ assert event.content["membership"] == Membership.LEAVE
+ else:
+ try:
+ validate_event_for_room_version(event)
+ # If we are persisting a batch of events, the event(s) needed to auth the
+ # current event may be part of the batch and will not be in the DB yet
+ event_id_to_event = {e.event_id: e for e, _ in events_and_context}
+ batched_auth_events = {}
+ for event_id in event.auth_event_ids():
+ auth_event = event_id_to_event.get(event_id)
+ if auth_event:
+ batched_auth_events[event_id] = auth_event
+ await self._event_auth_handler.check_auth_rules_from_context(
+ event, batched_auth_events
+ )
+ except AuthError as err:
+ logger.warning("Denying new event %r because %s", event, err)
+ raise err
+
+ # Ensure that we can round trip before trying to persist in db
try:
- validate_event_for_room_version(event)
- await self._event_auth_handler.check_auth_rules_from_context(
- event, context
- )
- except AuthError as err:
- logger.warning("Denying new event %r because %s", event, err)
- raise err
-
- # Ensure that we can round trip before trying to persist in db
- try:
- dump = json_encoder.encode(event.content)
- json_decoder.decode(dump)
- except Exception:
- logger.exception("Failed to encode content: %r", event.content)
- raise
+ dump = json_encoder.encode(event.content)
+ json_decoder.decode(dump)
+ except Exception:
+ logger.exception("Failed to encode content: %r", event.content)
+ raise
# We now persist the event (and update the cache in parallel, since we
# don't want to block on it).
+ event, context = events_and_context[0]
try:
result, _ = await make_deferred_yieldable(
gather_results(
(
run_in_background(
- self._persist_event,
+ self._persist_events,
requester=requester,
- event=event,
- context=context,
+ events_and_context=events_and_context,
ratelimit=ratelimit,
extra_users=extra_users,
),
run_in_background(
- self.cache_joined_hosts_for_event, event, context
+ self.cache_joined_hosts_for_events, events_and_context
).addErrback(
log_failure, "cache_joined_hosts_for_event failed"
),
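The in-batch auth lookup above exists because, when persisting a batch, an event's auth events may be earlier members of the same batch rather than rows already in the DB. In isolation:

from typing import Dict, List, Tuple

def batch_auth_events(
    event, events_and_context: List[Tuple]
) -> Dict[str, object]:
    """Resolve auth events from the (not yet persisted) batch itself."""
    event_id_to_event = {e.event_id: e for e, _ in events_and_context}
    return {
        event_id: event_id_to_event[event_id]
        for event_id in event.auth_event_ids()
        if event_id in event_id_to_event
    }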
@@ -1352,45 +1446,40 @@ class EventCreationHandler:
return result
- async def _persist_event(
+ async def _persist_events(
self,
requester: Requester,
- event: EventBase,
- context: EventContext,
+ events_and_context: List[Tuple[EventBase, EventContext]],
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
) -> EventBase:
- """Actually persists the event. Should only be called by
+ """Actually persists new events. Should only be called by
`handle_new_client_event`, and see its docstring for documentation of
- the arguments.
+ the arguments. Note that when batch persisting events, an error in handling
+ any one of them will cause the entire batch to be dropped.
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
"""
- # Skip push notification actions for historical messages
- # because we don't want to notify people about old history back in time.
- # The historical messages also do not have the proper `context.current_state_ids`
- # and `state_groups` because they have `prev_events` that aren't persisted yet
- # (historical messages persisted in reverse-chronological order).
- if not event.internal_metadata.is_historical():
- with tracing.start_active_span("calculate_push_actions"):
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
- )
+ with tracing.start_active_span("calculate_push_actions"):
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ events_and_context
+ )
try:
# If we're a worker we need to hit out to the master.
- writer_instance = self._events_shard_config.get_instance(event.room_id)
+ first_event, _ = events_and_context[0]
+ writer_instance = self._events_shard_config.get_instance(
+ first_event.room_id
+ )
if writer_instance != self._instance_name:
try:
- result = await self.send_event(
+ result = await self.send_events(
instance_name=writer_instance,
- event_id=event.event_id,
+ events_and_context=events_and_context,
store=self.store,
requester=requester,
- event=event,
- context=context,
ratelimit=ratelimit,
extra_users=extra_users,
)
@@ -1400,6 +1489,11 @@ class EventCreationHandler:
raise
stream_id = result["stream_id"]
event_id = result["event_id"]
+
+ # If we batch-persisted events, we return the last persisted event;
+ # otherwise we return the single event that was persisted.
+ event, _ = events_and_context[-1]
+
if event_id != event.event_id:
# If we get a different event back then it means that its
# been de-duplicated, so we replace the given event with the
@@ -1412,73 +1506,80 @@ class EventCreationHandler:
event.internal_metadata.stream_ordering = stream_id
return event
- event = await self.persist_and_notify_client_event(
- requester, event, context, ratelimit=ratelimit, extra_users=extra_users
+ event = await self.persist_and_notify_client_events(
+ requester,
+ events_and_context,
+ ratelimit=ratelimit,
+ extra_users=extra_users,
)
return event
except Exception:
- # Ensure that we actually remove the entries in the push actions
- # staging area, if we calculated them.
- await self.store.remove_push_actions_from_staging(event.event_id)
+ for event, _ in events_and_context:
+ # Ensure that we actually remove the entries in the push actions
+ # staging area, if we calculated them.
+ await self.store.remove_push_actions_from_staging(event.event_id)
raise
- async def cache_joined_hosts_for_event(
- self, event: EventBase, context: EventContext
+ async def cache_joined_hosts_for_events(
+ self, events_and_context: List[Tuple[EventBase, EventContext]]
) -> None:
- """Precalculate the joined hosts at the event, when using Redis, so that
+ """Precalculate the joined hosts at each of the given events, when using Redis, so that
external federation senders don't have to recalculate it themselves.
"""
- if not self._external_cache.is_enabled():
- return
-
- # If external cache is enabled we should always have this.
- assert self._external_cache_joined_hosts_updates is not None
-
- # We actually store two mappings, event ID -> prev state group,
- # state group -> joined hosts, which is much more space efficient
- # than event ID -> joined hosts.
- #
- # Note: We have to cache event ID -> prev state group, as we don't
- # store that in the DB.
- #
- # Note: We set the state group -> joined hosts cache if it hasn't been
- # set for a while, so that the expiry time is reset.
-
- state_entry = await self.state.resolve_state_groups_for_events(
- event.room_id, event_ids=event.prev_event_ids()
- )
-
- if state_entry.state_group:
- await self._external_cache.set(
- "event_to_prev_state_group",
- event.event_id,
- state_entry.state_group,
- expiry_ms=60 * 60 * 1000,
- )
-
- if state_entry.state_group in self._external_cache_joined_hosts_updates:
+ for event, _ in events_and_context:
+ if not self._external_cache.is_enabled():
return
- state = await state_entry.get_state(
- self._storage_controllers.state, StateFilter.all()
+ # If external cache is enabled we should always have this.
+ assert self._external_cache_joined_hosts_updates is not None
+
+ # We actually store two mappings, event ID -> prev state group,
+ # state group -> joined hosts, which is much more space efficient
+ # than event ID -> joined hosts.
+ #
+ # Note: We have to cache event ID -> prev state group, as we don't
+ # store that in the DB.
+ #
+ # Note: We set the state group -> joined hosts cache if it hasn't been
+ # set for a while, so that the expiry time is reset.
+
+ state_entry = await self.state.resolve_state_groups_for_events(
+ event.room_id, event_ids=event.prev_event_ids()
)
- with tracing.start_active_span("get_joined_hosts"):
- joined_hosts = await self.store.get_joined_hosts(
- event.room_id, state, state_entry
+
+ if state_entry.state_group:
+ await self._external_cache.set(
+ "event_to_prev_state_group",
+ event.event_id,
+ state_entry.state_group,
+ expiry_ms=60 * 60 * 1000,
)
- # Note that the expiry times must be larger than the expiry time in
- # _external_cache_joined_hosts_updates.
- await self._external_cache.set(
- "get_joined_hosts",
- str(state_entry.state_group),
- list(joined_hosts),
- expiry_ms=60 * 60 * 1000,
- )
+ if state_entry.state_group in self._external_cache_joined_hosts_updates:
+ return
- self._external_cache_joined_hosts_updates[state_entry.state_group] = None
+ state = await state_entry.get_state(
+ self._storage_controllers.state, StateFilter.all()
+ )
+ with tracing.start_active_span("get_joined_hosts"):
+ joined_hosts = await self.store.get_joined_hosts(
+ event.room_id, state, state_entry
+ )
+
+ # Note that the expiry times must be larger than the expiry time in
+ # _external_cache_joined_hosts_updates.
+ await self._external_cache.set(
+ "get_joined_hosts",
+ str(state_entry.state_group),
+ list(joined_hosts),
+ expiry_ms=60 * 60 * 1000,
+ )
+
+ self._external_cache_joined_hosts_updates[
+ state_entry.state_group
+ ] = None
async def _validate_canonical_alias(
self,
@@ -1514,23 +1615,26 @@ class EventCreationHandler:
Codes.BAD_ALIAS,
)
- async def persist_and_notify_client_event(
+ async def persist_and_notify_client_events(
self,
requester: Requester,
- event: EventBase,
- context: EventContext,
+ events_and_context: List[Tuple[EventBase, EventContext]],
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
) -> EventBase:
- """Called when we have fully built the event, have already
- calculated the push actions for the event, and checked auth.
+ """Called when we have fully built the events, have already
+ calculated the push actions for the events, and checked auth.
This should only be run on the instance in charge of persisting events.
+ Note that when batch persisting events, an error in handling any one of
+ them will cause the entire batch to be dropped.
+
Returns:
- The persisted event. This may be different than the given event if
- it was de-duplicated (e.g. because we had already persisted an
- event with the same transaction ID.)
+ The persisted event, if one event is passed in, or the last event in the
+ list in the case of batch persisting. If only one event was persisted, the
+ returned event may be different from the given event if it was de-duplicated
+ (e.g. because we had already persisted an event with the same transaction ID).
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
@@ -1538,17 +1642,132 @@ class EventCreationHandler:
"""
extra_users = extra_users or []
- assert self._storage_controllers.persistence is not None
- assert self._events_shard_config.should_handle(
- self._instance_name, event.room_id
- )
+ for event, context in events_and_context:
+ assert self._events_shard_config.should_handle(
+ self._instance_name, event.room_id
+ )
+
+ if ratelimit:
+ # We check if this is a room admin redacting an event so that we
+ # can apply different ratelimiting. We do this by simply checking
+ # it's not a self-redaction (to avoid having to look up whether the
+ # user is actually admin or not).
+ is_admin_redaction = False
+ if event.type == EventTypes.Redaction:
+ assert event.redacts is not None
+
+ original_event = await self.store.get_event(
+ event.redacts,
+ redact_behaviour=EventRedactBehaviour.as_is,
+ get_prev_content=False,
+ allow_rejected=False,
+ allow_none=True,
+ )
+
+ is_admin_redaction = bool(
+ original_event and event.sender != original_event.sender
+ )
+
+ await self.request_ratelimiter.ratelimit(
+ requester, is_admin_redaction=is_admin_redaction
+ )
+
+ # run checks/actions on event based on type
+ if event.type == EventTypes.Member and event.membership == Membership.JOIN:
+ (
+ current_membership,
+ _,
+ ) = await self.store.get_local_current_membership_for_user_in_room(
+ event.state_key, event.room_id
+ )
+ if current_membership != Membership.JOIN:
+ self._notifier.notify_user_joined_room(
+ event.event_id, event.room_id
+ )
+
+ await self._maybe_kick_guest_users(event, context)
+
+ if event.type == EventTypes.CanonicalAlias:
+ # Validate a newly added alias or newly added alt_aliases.
+
+ original_alias = None
+ original_alt_aliases: object = []
+
+ original_event_id = event.unsigned.get("replaces_state")
+ if original_event_id:
+ original_alias_event = await self.store.get_event(original_event_id)
+
+ if original_alias_event:
+ original_alias = original_alias_event.content.get("alias", None)
+ original_alt_aliases = original_alias_event.content.get(
+ "alt_aliases", []
+ )
+
+ # Check the alias is currently valid (if it has changed).
+ room_alias_str = event.content.get("alias", None)
+ directory_handler = self.hs.get_directory_handler()
+ if room_alias_str and room_alias_str != original_alias:
+ await self._validate_canonical_alias(
+ directory_handler, room_alias_str, event.room_id
+ )
+
+ # Check that alt_aliases is the proper form.
+ alt_aliases = event.content.get("alt_aliases", [])
+ if not isinstance(alt_aliases, (list, tuple)):
+ raise SynapseError(
+ 400,
+ "The alt_aliases property must be a list.",
+ Codes.INVALID_PARAM,
+ )
+
+ # If the old version of alt_aliases is of an unknown form,
+ # completely replace it.
+ if not isinstance(original_alt_aliases, (list, tuple)):
+ # TODO: check that the original_alt_aliases' entries are all strings
+ original_alt_aliases = []
+
+ # Check that each alias is currently valid.
+ new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
+ if new_alt_aliases:
+ for alias_str in new_alt_aliases:
+ await self._validate_canonical_alias(
+ directory_handler, alias_str, event.room_id
+ )
+
+ federation_handler = self.hs.get_federation_handler()
+
+ if event.type == EventTypes.Member:
+ if event.content["membership"] == Membership.INVITE:
+ event.unsigned[
+ "invite_room_state"
+ ] = await self.store.get_stripped_room_state_from_event_context(
+ context,
+ self.room_prejoin_state_types,
+ membership_user_id=event.sender,
+ )
+
+ invitee = UserID.from_string(event.state_key)
+ if not self.hs.is_mine(invitee):
+ # TODO: Can we add signature from remote server in a nicer
+ # way? If we have been invited by a remote server, we need
+ # to get them to sign the event.
+
+ returned_invite = await federation_handler.send_invite(
+ invitee.domain, event
+ )
+ event.unsigned.pop("room_state", None)
+
+ # TODO: Make sure the signatures actually are correct.
+ event.signatures.update(returned_invite.signatures)
+
+ if event.content["membership"] == Membership.KNOCK:
+ event.unsigned[
+ "knock_room_state"
+ ] = await self.store.get_stripped_room_state_from_event_context(
+ context,
+ self.room_prejoin_state_types,
+ )
- if ratelimit:
- # We check if this is a room admin redacting an event so that we
- # can apply different ratelimiting. We do this by simply checking
- # it's not a self-redaction (to avoid having to look up whether the
- # user is actually admin or not).
- is_admin_redaction = False
if event.type == EventTypes.Redaction:
assert event.redacts is not None
@@ -1560,255 +1779,159 @@ class EventCreationHandler:
allow_none=True,
)
- is_admin_redaction = bool(
- original_event and event.sender != original_event.sender
- )
-
- await self.request_ratelimiter.ratelimit(
- requester, is_admin_redaction=is_admin_redaction
- )
-
- if event.type == EventTypes.Member and event.membership == Membership.JOIN:
- (
- current_membership,
- _,
- ) = await self.store.get_local_current_membership_for_user_in_room(
- event.state_key, event.room_id
- )
- if current_membership != Membership.JOIN:
- self._notifier.notify_user_joined_room(event.event_id, event.room_id)
-
- await self._maybe_kick_guest_users(event, context)
-
- if event.type == EventTypes.CanonicalAlias:
- # Validate a newly added alias or newly added alt_aliases.
-
- original_alias = None
- original_alt_aliases: object = []
-
- original_event_id = event.unsigned.get("replaces_state")
- if original_event_id:
- original_event = await self.store.get_event(original_event_id)
+ room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+ # we can make some additional checks now if we have the original event.
if original_event:
- original_alias = original_event.content.get("alias", None)
- original_alt_aliases = original_event.content.get("alt_aliases", [])
+ if original_event.type == EventTypes.Create:
+ raise AuthError(403, "Redacting create events is not permitted")
- # Check the alias is currently valid (if it has changed).
- room_alias_str = event.content.get("alias", None)
- directory_handler = self.hs.get_directory_handler()
- if room_alias_str and room_alias_str != original_alias:
- await self._validate_canonical_alias(
- directory_handler, room_alias_str, event.room_id
- )
-
- # Check that alt_aliases is the proper form.
- alt_aliases = event.content.get("alt_aliases", [])
- if not isinstance(alt_aliases, (list, tuple)):
- raise SynapseError(
- 400, "The alt_aliases property must be a list.", Codes.INVALID_PARAM
- )
-
- # If the old version of alt_aliases is of an unknown form,
- # completely replace it.
- if not isinstance(original_alt_aliases, (list, tuple)):
- # TODO: check that the original_alt_aliases' entries are all strings
- original_alt_aliases = []
-
- # Check that each alias is currently valid.
- new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
- if new_alt_aliases:
- for alias_str in new_alt_aliases:
- await self._validate_canonical_alias(
- directory_handler, alias_str, event.room_id
- )
-
- federation_handler = self.hs.get_federation_handler()
-
- if event.type == EventTypes.Member:
- if event.content["membership"] == Membership.INVITE:
- event.unsigned[
- "invite_room_state"
- ] = await self.store.get_stripped_room_state_from_event_context(
- context,
- self.room_prejoin_state_types,
- membership_user_id=event.sender,
- )
-
- invitee = UserID.from_string(event.state_key)
- if not self.hs.is_mine(invitee):
- # TODO: Can we add signature from remote server in a nicer
- # way? If we have been invited by a remote server, we need
- # to get them to sign the event.
-
- returned_invite = await federation_handler.send_invite(
- invitee.domain, event
- )
- event.unsigned.pop("room_state", None)
-
- # TODO: Make sure the signatures actually are correct.
- event.signatures.update(returned_invite.signatures)
-
- if event.content["membership"] == Membership.KNOCK:
- event.unsigned[
- "knock_room_state"
- ] = await self.store.get_stripped_room_state_from_event_context(
- context,
- self.room_prejoin_state_types,
- )
-
- if event.type == EventTypes.Redaction:
- assert event.redacts is not None
-
- original_event = await self.store.get_event(
- event.redacts,
- redact_behaviour=EventRedactBehaviour.as_is,
- get_prev_content=False,
- allow_rejected=False,
- allow_none=True,
- )
-
- room_version = await self.store.get_room_version_id(event.room_id)
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-
- # we can make some additional checks now if we have the original event.
- if original_event:
- if original_event.type == EventTypes.Create:
- raise AuthError(403, "Redacting create events is not permitted")
-
- if original_event.room_id != event.room_id:
- raise SynapseError(400, "Cannot redact event from a different room")
-
- if original_event.type == EventTypes.ServerACL:
- raise AuthError(403, "Redacting server ACL events is not permitted")
-
- # Add a little safety stop-gap to prevent people from trying to
- # redact MSC2716 related events when they're in a room version
- # which does not support it yet. We allow people to use MSC2716
- # events in existing room versions but only from the room
- # creator since it does not require any changes to the auth
- # rules and in effect, the redaction algorithm . In the
- # supported room version, we add the `historical` power level to
- # auth the MSC2716 related events and adjust the redaction
- # algorthim to keep the `historical` field around (redacting an
- # event should only strip fields which don't affect the
- # structural protocol level).
- is_msc2716_event = (
- original_event.type == EventTypes.MSC2716_INSERTION
- or original_event.type == EventTypes.MSC2716_BATCH
- or original_event.type == EventTypes.MSC2716_MARKER
- )
- if not room_version_obj.msc2716_historical and is_msc2716_event:
- raise AuthError(
- 403,
- "Redacting MSC2716 events is not supported in this room version",
- )
-
- event_types = event_auth.auth_types_for_event(event.room_version, event)
- prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types(event_types)
- )
-
- auth_events_ids = self._event_auth_handler.compute_auth_events(
- event, prev_state_ids, for_verification=True
- )
- auth_events_map = await self.store.get_events(auth_events_ids)
- auth_events = {(e.type, e.state_key): e for e in auth_events_map.values()}
-
- if event_auth.check_redaction(
- room_version_obj, event, auth_events=auth_events
- ):
- # this user doesn't have 'redact' rights, so we need to do some more
- # checks on the original event. Let's start by checking the original
- # event exists.
- if not original_event:
- raise NotFoundError("Could not find event %s" % (event.redacts,))
-
- if event.user_id != original_event.user_id:
- raise AuthError(403, "You don't have permission to redact events")
-
- # all the checks are done.
- event.internal_metadata.recheck_redaction = False
-
- if event.type == EventTypes.Create:
- prev_state_ids = await context.get_prev_state_ids()
- if prev_state_ids:
- raise AuthError(403, "Changing the room create event is forbidden")
-
- if event.type == EventTypes.MSC2716_INSERTION:
- room_version = await self.store.get_room_version_id(event.room_id)
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-
- create_event = await self.store.get_create_event_for_room(event.room_id)
- room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
-
- # Only check an insertion event if the room version
- # supports it or the event is from the room creator.
- if room_version_obj.msc2716_historical or (
- self.config.experimental.msc2716_enabled
- and event.sender == room_creator
- ):
- next_batch_id = event.content.get(
- EventContentFields.MSC2716_NEXT_BATCH_ID
- )
- conflicting_insertion_event_id = None
- if next_batch_id:
- conflicting_insertion_event_id = (
- await self.store.get_insertion_event_id_by_batch_id(
- event.room_id, next_batch_id
+ if original_event.room_id != event.room_id:
+ raise SynapseError(
+ 400, "Cannot redact event from a different room"
)
- )
- if conflicting_insertion_event_id is not None:
- # The current insertion event that we're processing is invalid
- # because an insertion event already exists in the room with the
- # same next_batch_id. We can't allow multiple because the batch
- # pointing will get weird, e.g. we can't determine which insertion
- # event the batch event is pointing to.
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Another insertion event already exists with the same next_batch_id",
- errcode=Codes.INVALID_PARAM,
- )
- # Mark any `m.historical` messages as backfilled so they don't appear
- # in `/sync` and have the proper decrementing `stream_ordering` as we import
- backfilled = False
- if event.internal_metadata.is_historical():
- backfilled = True
+ if original_event.type == EventTypes.ServerACL:
+ raise AuthError(
+ 403, "Redacting server ACL events is not permitted"
+ )
- # Note that this returns the event that was persisted, which may not be
- # the same as we passed in if it was deduplicated due transaction IDs.
+ # Add a little safety stop-gap to prevent people from trying to
+ # redact MSC2716 related events when they're in a room version
+ # which does not support it yet. We allow people to use MSC2716
+ # events in existing room versions, but only from the room
+ # creator, since that does not require any changes to the auth
+ # rules or, in effect, the redaction algorithm. In the
+ # supported room version, we add the `historical` power level to
+ # auth the MSC2716 related events and adjust the redaction
+ # algorithm to keep the `historical` field around (redacting an
+ # event should only strip fields which don't affect the
+ # structural protocol level).
+ is_msc2716_event = (
+ original_event.type == EventTypes.MSC2716_INSERTION
+ or original_event.type == EventTypes.MSC2716_BATCH
+ or original_event.type == EventTypes.MSC2716_MARKER
+ )
+ if not room_version_obj.msc2716_historical and is_msc2716_event:
+ raise AuthError(
+ 403,
+ "Redacting MSC2716 events is not supported in this room version",
+ )
+
+ event_types = event_auth.auth_types_for_event(event.room_version, event)
+ prev_state_ids = await context.get_prev_state_ids(
+ StateFilter.from_types(event_types)
+ )
+
+ auth_events_ids = self._event_auth_handler.compute_auth_events(
+ event, prev_state_ids, for_verification=True
+ )
+ auth_events_map = await self.store.get_events(auth_events_ids)
+ auth_events = {
+ (e.type, e.state_key): e for e in auth_events_map.values()
+ }
+
+ if event_auth.check_redaction(
+ room_version_obj, event, auth_events=auth_events
+ ):
+ # this user doesn't have 'redact' rights, so we need to do some more
+ # checks on the original event. Let's start by checking the original
+ # event exists.
+ if not original_event:
+ raise NotFoundError(
+ "Could not find event %s" % (event.redacts,)
+ )
+
+ if event.user_id != original_event.user_id:
+ raise AuthError(
+ 403, "You don't have permission to redact events"
+ )
+
+ # all the checks are done.
+ event.internal_metadata.recheck_redaction = False
+
+ if event.type == EventTypes.Create:
+ prev_state_ids = await context.get_prev_state_ids()
+ if prev_state_ids:
+ raise AuthError(403, "Changing the room create event is forbidden")
+
+ if event.type == EventTypes.MSC2716_INSERTION:
+ room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
+ create_event = await self.store.get_create_event_for_room(event.room_id)
+ room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+
+ # Only check an insertion event if the room version
+ # supports it or the event is from the room creator.
+ if room_version_obj.msc2716_historical or (
+ self.config.experimental.msc2716_enabled
+ and event.sender == room_creator
+ ):
+ next_batch_id = event.content.get(
+ EventContentFields.MSC2716_NEXT_BATCH_ID
+ )
+ conflicting_insertion_event_id = None
+ if next_batch_id:
+ conflicting_insertion_event_id = (
+ await self.store.get_insertion_event_id_by_batch_id(
+ event.room_id, next_batch_id
+ )
+ )
+ if conflicting_insertion_event_id is not None:
+ # The current insertion event that we're processing is invalid
+ # because an insertion event already exists in the room with the
+ # same next_batch_id. We can't allow multiple because the batch
+ # pointing will get weird, e.g. we can't determine which insertion
+ # event the batch event is pointing to.
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Another insertion event already exists with the same next_batch_id",
+ errcode=Codes.INVALID_PARAM,
+ )
+
+ # Mark any `m.historical` messages as backfilled so they don't appear
+ # in `/sync` and get the proper decrementing `stream_ordering` as we import them
+ backfilled = False
+ if event.internal_metadata.is_historical():
+ backfilled = True
+
+ assert self._storage_controllers.persistence is not None
(
- event,
- event_pos,
+ persisted_events,
max_stream_token,
- ) = await self._storage_controllers.persistence.persist_event(
- event, context=context, backfilled=backfilled
+ ) = await self._storage_controllers.persistence.persist_events(
+ events_and_context, backfilled=backfilled
)
- if self._ephemeral_events_enabled:
- # If there's an expiry timestamp on the event, schedule its expiry.
- self._message_handler.maybe_schedule_expiry(event)
+ events_and_pos = []
+ for event in persisted_events:
+ if self._ephemeral_events_enabled:
+ # If there's an expiry timestamp on the event, schedule its expiry.
+ self._message_handler.maybe_schedule_expiry(event)
+
+ stream_ordering = event.internal_metadata.stream_ordering
+ assert stream_ordering is not None
+ pos = PersistedEventPosition(self._instance_name, stream_ordering)
+ events_and_pos.append((event, pos))
+
+ if event.type == EventTypes.Message:
+ # We don't want to block sending messages on any presence code. This
+ # matters as sometimes presence code can take a while.
+ run_in_background(self._bump_active_time, requester.user)
async def _notify() -> None:
try:
- await self.notifier.on_new_room_event(
- event, event_pos, max_stream_token, extra_users=extra_users
+ await self.notifier.on_new_room_events(
+ events_and_pos, max_stream_token, extra_users=extra_users
)
except Exception:
- logger.exception(
- "Error notifying about new room event %s",
- event.event_id,
- )
+ logger.exception("Error notifying about new room events")
run_in_background(_notify)
- if event.type == EventTypes.Message:
- # We don't want to block sending messages on any presence code. This
- # matters as sometimes presence code can take a while.
- run_in_background(self._bump_active_time, requester.user)
-
- return event
+ return persisted_events[-1]
async def _maybe_kick_guest_users(
self, event: EventBase, context: EventContext
@@ -1897,8 +2020,7 @@ class EventCreationHandler:
# shadow-banned user.
await self.handle_new_client_event(
requester,
- event,
- context,
+ events_and_context=[(event, context)],
ratelimit=False,
ignore_shadow_ban=True,
)
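
Reviewer note: a minimal sketch of the API change threaded through this file; single-event callers now wrap their pair in a one-element `events_and_context` list (names here are illustrative):

    # Before: await handler.handle_new_client_event(requester, event, context)
    # After: one or many (event, context) pairs, persisted as a single batch;
    # the last persisted event is returned.
    persisted = await handler.handle_new_client_event(
        requester,
        events_and_context=[(event, context)],
    )
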
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index d7a8226900..41c675f408 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -12,14 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import binascii
import inspect
+import json
import logging
-from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Generic,
+ List,
+ Optional,
+ Type,
+ TypeVar,
+ Union,
+)
from urllib.parse import urlencode, urlparse
import attr
+import unpaddedbase64
from authlib.common.security import generate_token
-from authlib.jose import JsonWebToken, jwt
+from authlib.jose import JsonWebToken, JWTClaims
+from authlib.jose.errors import InvalidClaimError, JoseError, MissingClaimError
from authlib.oauth2.auth import ClientAuth
from authlib.oauth2.rfc6749.parameters import prepare_grant_uri
from authlib.oidc.core import CodeIDToken, UserInfo
@@ -35,9 +49,12 @@ from typing_extensions import TypedDict
from twisted.web.client import readBody
from twisted.web.http_headers import Headers
+from synapse.api.errors import SynapseError
from synapse.config import ConfigError
from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig
from synapse.handlers.sso import MappingException, UserAttributes
+from synapse.http.server import finish_request
+from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
@@ -88,6 +105,8 @@ class Token(TypedDict):
#: there is no real point of doing this in our case.
JWK = Dict[str, str]
+C = TypeVar("C")
+
#: A JWK Set, as per RFC7517 sec 5.
class JWKS(TypedDict):
@@ -247,6 +266,80 @@ class OidcHandler:
await oidc_provider.handle_oidc_callback(request, session_data, code)
+ async def handle_backchannel_logout(self, request: SynapseRequest) -> None:
+ """Handle an incoming request to /_synapse/client/oidc/backchannel_logout
+
+ This extracts the logout_token from the request and tries to figure out
+ which OpenID Provider it is coming from. This works by matching the iss claim
+ with the issuer and the aud claim with the client_id.
+
+ Since at this point we don't know who signed the JWT, we can't just
+ decode it using authlib, as that always verifies the signature. We
+ have to decode it manually without validating the signature. The actual JWT
+ verification is done in the `OidcProvider.handle_backchannel_logout` method,
+ once we have figured out which provider sent the request.
+
+ Args:
+ request: the incoming request from the browser.
+ """
+ logout_token = parse_string(request, "logout_token")
+ if logout_token is None:
+ raise SynapseError(400, "Missing logout_token in request")
+
+ # A JWT looks like this:
+ # header.payload.signature
+ # where all parts are encoded with urlsafe base64.
+ # The aud and iss claims we care about are in the payload part, which
+ # is a JSON object.
+ try:
+ # By destructuring the list after splitting, we ensure that we have
+ # exactly 3 segments
+ _, payload, _ = logout_token.split(".")
+ except ValueError:
+ raise SynapseError(400, "Invalid logout_token in request")
+
+ try:
+ payload_bytes = unpaddedbase64.decode_base64(payload)
+ claims = json_decoder.decode(payload_bytes.decode("utf-8"))
+ except (json.JSONDecodeError, binascii.Error, UnicodeError):
+ raise SynapseError(400, "Invalid logout_token payload in request")
+
+ try:
+ # Let's extract the iss and aud claims
+ iss = claims["iss"]
+ aud = claims["aud"]
+ # The aud claim can be either a string or a list of strings. Here we
+ # normalize it to a list of strings.
+ if isinstance(aud, str):
+ aud = [aud]
+
+ # Check that we have the right types for the aud and the iss claims
+ if not isinstance(iss, str) or not isinstance(aud, list):
+ raise TypeError()
+ for a in aud:
+ if not isinstance(a, str):
+ raise TypeError()
+
+ # At this point we properly checked both claims types
+ issuer: str = iss
+ audience: List[str] = aud
+ except (TypeError, KeyError):
+ raise SynapseError(400, "Invalid issuer/audience in logout_token")
+
+ # Now that we know the audience and the issuer, we can figure out from
+ # what provider it is coming from
+ oidc_provider: Optional[OidcProvider] = None
+ for provider in self._providers.values():
+ if provider.issuer == issuer and provider.client_id in audience:
+ oidc_provider = provider
+ break
+
+ if oidc_provider is None:
+ raise SynapseError(400, "Could not find the OP that issued this event")
+
+ # Ask the provider to handle the logout request.
+ await oidc_provider.handle_backchannel_logout(request, logout_token)
+
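Reviewer note: a runnable sketch of the unverified payload extraction above, using a made-up logout token (no real signature, which is exactly the point at this stage):

    import json
    import unpaddedbase64

    payload = {"iss": "https://issuer.example.com", "aud": "synapse-client-id"}
    encoded = unpaddedbase64.encode_base64(json.dumps(payload).encode("utf-8"))
    logout_token = f"fakeheader.{encoded}.fakesignature"

    # Mirrors the handler: split on ".", decode the middle segment only.
    _, payload_b64, _ = logout_token.split(".")
    claims = json.loads(unpaddedbase64.decode_base64(payload_b64).decode("utf-8"))
    assert claims["iss"] == "https://issuer.example.com"
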
class OidcError(Exception):
"""Used to catch errors when calling the token_endpoint"""
@@ -275,6 +368,7 @@ class OidcProvider:
provider: OidcProviderConfig,
):
self._store = hs.get_datastores().main
+ self._clock = hs.get_clock()
self._macaroon_generaton = macaroon_generator
@@ -341,6 +435,7 @@ class OidcProvider:
self.idp_brand = provider.idp_brand
self._sso_handler = hs.get_sso_handler()
+ self._device_handler = hs.get_device_handler()
self._sso_handler.register_identity_provider(self)
@@ -399,6 +494,41 @@ class OidcProvider:
# If we're not using userinfo, we need a valid jwks to validate the ID token
m.validate_jwks_uri()
+ if self._config.backchannel_logout_enabled:
+ if not m.get("backchannel_logout_supported", False):
+ logger.warning(
+ "OIDC Back-Channel Logout is enabled for issuer %r"
+ "but it does not advertise support for it",
+ self.issuer,
+ )
+
+ elif not m.get("backchannel_logout_session_supported", False):
+ logger.warning(
+ "OIDC Back-Channel Logout is enabled and supported "
+ "by issuer %r but it might not send a session ID with "
+ "logout tokens, which is required for the logouts to work",
+ self.issuer,
+ )
+
+ if not self._config.backchannel_logout_ignore_sub:
+ # If OIDC backchannel logouts are enabled, the user mapping provider
+ # should use the `sub` claim. We verify that by mapping a dummy user
+ # and checking that we get the sub claim back as the subject.
+ user = UserInfo({"sub": "thisisasubject"})
+ try:
+ subject = self._user_mapping_provider.get_remote_user_id(user)
+ if subject != user["sub"]:
+ raise ValueError("Unexpected subject")
+ except Exception:
+ logger.warning(
+ f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} "
+ "but it looks like the configured `user_mapping_provider` "
+ "does not use the `sub` claim as subject. If it is the case, "
+ "and you want Synapse to ignore the `sub` claim in OIDC "
+ "Back-Channel Logouts, set `backchannel_logout_ignore_sub` "
+ "to `true` in the issuer config."
+ )
+
@property
def _uses_userinfo(self) -> bool:
"""Returns True if the ``userinfo_endpoint`` should be used.
@@ -414,6 +544,16 @@ class OidcProvider:
or self._user_profile_method == "userinfo_endpoint"
)
+ @property
+ def issuer(self) -> str:
+ """The issuer identifying this provider."""
+ return self._config.issuer
+
+ @property
+ def client_id(self) -> str:
+ """The client_id used when interacting with this provider."""
+ return self._config.client_id
+
async def load_metadata(self, force: bool = False) -> OpenIDProviderMetadata:
"""Return the provider metadata.
@@ -647,7 +787,7 @@ class OidcProvider:
Must include an ``access_token`` field.
Returns:
- UserInfo: an object representing the user.
+ an object representing the user.
"""
logger.debug("Using the OAuth2 access_token to request userinfo")
metadata = await self.load_metadata()
@@ -661,6 +801,59 @@ class OidcProvider:
return UserInfo(resp)
+ async def _verify_jwt(
+ self,
+ alg_values: List[str],
+ token: str,
+ claims_cls: Type[C],
+ claims_options: Optional[dict] = None,
+ claims_params: Optional[dict] = None,
+ ) -> C:
+ """Decode and validate a JWT, re-fetching the JWKS as needed.
+
+ Args:
+ alg_values: list of `alg` values allowed when verifying the JWT.
+ token: the JWT.
+ claims_cls: the JWTClaims class to use to validate the claims.
+ claims_options: dict of options passed to the `claims_cls` constructor.
+ claims_params: dict of params passed to the `claims_cls` constructor.
+
+ Returns:
+ The decoded claims in the JWT.
+ """
+ jwt = JsonWebToken(alg_values)
+
+ logger.debug("Attempting to decode JWT (%s) %r", claims_cls.__name__, token)
+
+ # Try to decode with the keys in the cache first, then retry after
+ # forcing the keys to be reloaded
+ jwk_set = await self.load_jwks()
+ try:
+ claims = jwt.decode(
+ token,
+ key=jwk_set,
+ claims_cls=claims_cls,
+ claims_options=claims_options,
+ claims_params=claims_params,
+ )
+ except ValueError:
+ logger.info("Reloading JWKS after decode error")
+ jwk_set = await self.load_jwks(force=True) # try reloading the jwks
+ claims = jwt.decode(
+ token,
+ key=jwk_set,
+ claims_cls=claims_cls,
+ claims_options=claims_options,
+ claims_params=claims_params,
+ )
+
+ logger.debug("Decoded JWT (%s) %r; validating", claims_cls.__name__, claims)
+
+ claims.validate(
+ now=self._clock.time(), leeway=120
+ ) # allows 2 min of clock skew
+ return claims
+
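Reviewer note: both `_parse_id_token` (below) and `handle_backchannel_logout` now funnel through this helper; a sketch of a call, with illustrative claims options and a hypothetical `provider` instance:

    claims = await provider._verify_jwt(
        alg_values=["RS256"],
        token=id_token,  # a JWT string obtained from the token endpoint
        claims_cls=CodeIDToken,
        claims_options={"iss": {"values": ["https://issuer.example.com"]}},
        claims_params={"nonce": "a-nonce", "client_id": "synapse-client-id"},
    )
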
async def _parse_id_token(self, token: Token, nonce: str) -> CodeIDToken:
"""Return an instance of UserInfo from token's ``id_token``.
@@ -673,7 +866,14 @@ class OidcProvider:
Returns:
The decoded claims in the ID token.
"""
+ id_token = token.get("id_token")
+
+ # This has theoretically already been checked by the caller. Since
+ # assertions are not enabled in production, this is mainly here to appease mypy.
+ assert id_token is not None
+
metadata = await self.load_metadata()
+
claims_params = {
"nonce": nonce,
"client_id": self._client_auth.client_id,
@@ -683,39 +883,17 @@ class OidcProvider:
# in the `id_token` that we can check against.
claims_params["access_token"] = token["access_token"]
+ claims_options = {"iss": {"values": [metadata["issuer"]]}}
+
alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
- jwt = JsonWebToken(alg_values)
- claim_options = {"iss": {"values": [metadata["issuer"]]}}
-
- id_token = token["id_token"]
- logger.debug("Attempting to decode JWT id_token %r", id_token)
-
- # Try to decode the keys in cache first, then retry by forcing the keys
- # to be reloaded
- jwk_set = await self.load_jwks()
- try:
- claims = jwt.decode(
- id_token,
- key=jwk_set,
- claims_cls=CodeIDToken,
- claims_options=claim_options,
- claims_params=claims_params,
- )
- except ValueError:
- logger.info("Reloading JWKS after decode error")
- jwk_set = await self.load_jwks(force=True) # try reloading the jwks
- claims = jwt.decode(
- id_token,
- key=jwk_set,
- claims_cls=CodeIDToken,
- claims_options=claim_options,
- claims_params=claims_params,
- )
-
- logger.debug("Decoded id_token JWT %r; validating", claims)
-
- claims.validate(leeway=120) # allows 2 min of clock skew
+ claims = await self._verify_jwt(
+ alg_values=alg_values,
+ token=id_token,
+ claims_cls=CodeIDToken,
+ claims_options=claims_options,
+ claims_params=claims_params,
+ )
return claims
@@ -1036,6 +1214,146 @@ class OidcProvider:
# to be strings.
return str(remote_user_id)
+ async def handle_backchannel_logout(
+ self, request: SynapseRequest, logout_token: str
+ ) -> None:
+ """Handle an incoming request to /_synapse/client/oidc/backchannel_logout
+
+ The OIDC Provider posts a logout token to this endpoint when a user
+ session ends. That token is a JWT signed with the same keys as
+ ID tokens. The OpenID Connect Back-Channel Logout draft explains how to
+ validate the JWT and figure out what session to end.
+
+ Args:
+ request: The request to respond to
+ logout_token: The logout token (a JWT) extracted from the request body
+ """
+ # Back-Channel Logout can be disabled in the config, hence this check.
+ # This is not that important for now, since Synapse is registered
+ # manually with the OP, so not specifying the backchannel-logout URI is
+ # as effective as disabling it here. It will matter more if we
+ # support dynamic registration in Synapse at some point.
+ if not self._config.backchannel_logout_enabled:
+ logger.warning(
+ f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config"
+ )
+
+ # TODO: this responds with a 400 status code, which is what the OIDC
+ # Back-Channel Logout spec expects, but spec also suggests answering with
+ # a JSON object, with the `error` and `error_description` fields set, which
+ # we are not doing here.
+ # See https://openid.net/specs/openid-connect-backchannel-1_0.html#BCResponse
+ raise SynapseError(
+ 400, "OpenID Connect Back-Channel Logout is disabled for this provider"
+ )
+
+ metadata = await self.load_metadata()
+
+ # As per OIDC Back-Channel Logout 1.0 sec. 2.4:
+ # A Logout Token MUST be signed and MAY also be encrypted. The same
+ # keys are used to sign and encrypt Logout Tokens as are used for ID
+ # Tokens. If the Logout Token is encrypted, it SHOULD replicate the
+ # iss (issuer) claim in the JWT Header Parameters, as specified in
+ # Section 5.3 of [JWT].
+ alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
+
+ # As per sec. 2.6:
+ # 3. Validate the iss, aud, and iat Claims in the same way they are
+ # validated in ID Tokens.
+ # Which means the audience should contain Synapse's client_id and the
+ # issuer should be the IdP issuer
+ claims_options = {
+ "iss": {"values": [metadata["issuer"]]},
+ "aud": {"values": [self.client_id]},
+ }
+
+ try:
+ claims = await self._verify_jwt(
+ alg_values=alg_values,
+ token=logout_token,
+ claims_cls=LogoutToken,
+ claims_options=claims_options,
+ )
+ except JoseError:
+ logger.exception("Invalid logout_token")
+ raise SynapseError(400, "Invalid logout_token")
+
+ # As per sec. 2.6:
+ # 4. Verify that the Logout Token contains a sub Claim, a sid Claim,
+ # or both.
+ # 5. Verify that the Logout Token contains an events Claim whose
+ # value is JSON object containing the member name
+ # http://schemas.openid.net/event/backchannel-logout.
+ # 6. Verify that the Logout Token does not contain a nonce Claim.
+ # This is all verified by the LogoutToken claims class, so at this
+ # point the `sid` claim exists and is a string.
+ sid: str = claims.get("sid")
+
+ # If the `sub` claim was included in the logout token, we check that it
+ # matches the right user. There can be cases where the `sub` claim is not
+ # the ID saved in the database, so we let admins disable this check in config.
+ sub: Optional[str] = claims.get("sub")
+ expected_user_id: Optional[str] = None
+ if sub is not None and not self._config.backchannel_logout_ignore_sub:
+ expected_user_id = await self._store.get_user_by_external_id(
+ self.idp_id, sub
+ )
+
+ # Invalidate any running user-mapping sessions, in-flight login tokens and
+ # active devices
+ await self._sso_handler.revoke_sessions_for_provider_session_id(
+ auth_provider_id=self.idp_id,
+ auth_provider_session_id=sid,
+ expected_user_id=expected_user_id,
+ )
+
+ request.setResponseCode(200)
+ request.setHeader(b"Cache-Control", b"no-cache, no-store")
+ request.setHeader(b"Pragma", b"no-cache")
+ finish_request(request)
+
+
+class LogoutToken(JWTClaims):
+ """
+ Holds and verifies the claims of a logout token, as per
+ https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken
+ """
+
+ REGISTERED_CLAIMS = ["iss", "sub", "aud", "iat", "jti", "events", "sid"]
+
+ def validate(self, now: Optional[int] = None, leeway: int = 0) -> None:
+ """Validate everything in claims payload."""
+ super().validate(now, leeway)
+ self.validate_sid()
+ self.validate_events()
+ self.validate_nonce()
+
+ def validate_sid(self) -> None:
+ """Ensure the sid claim is present"""
+ sid = self.get("sid")
+ if not sid:
+ raise MissingClaimError("sid")
+
+ if not isinstance(sid, str):
+ raise InvalidClaimError("sid")
+
+ def validate_nonce(self) -> None:
+ """Ensure the nonce claim is absent"""
+ if "nonce" in self:
+ raise InvalidClaimError("nonce")
+
+ def validate_events(self) -> None:
+ """Ensure the events claim is present and with the right value"""
+ events = self.get("events")
+ if not events:
+ raise MissingClaimError("events")
+
+ if not isinstance(events, dict):
+ raise InvalidClaimError("events")
+
+ if "http://schemas.openid.net/event/backchannel-logout" not in events:
+ raise InvalidClaimError("events")
+
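Reviewer note: for reference, a logout token payload that passes these checks looks roughly like the following (all values illustrative; authlib's JWTClaims takes the decoded payload and header):

    claims = LogoutToken(
        payload={
            "iss": "https://issuer.example.com",
            "aud": "synapse-client-id",
            "iat": 1667224800,
            "jti": "an-opaque-token-id",
            "sid": "provider-session-id",
            "events": {"http://schemas.openid.net/event/backchannel-logout": {}},
        },
        header={"alg": "RS256"},
    )
    # Passes: sid present and a string, events claim well-formed, no nonce.
    claims.validate(now=1667224860, leeway=120)
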
# number of seconds a newly-generated client secret should be valid for
CLIENT_SECRET_VALIDITY_SECONDS = 3600
@@ -1105,6 +1423,7 @@ class JwtClientSecret:
logger.info(
"Generating new JWT for %s: %s %s", self._oauth_issuer, header, payload
)
+ jwt = JsonWebToken(header["alg"])
self._cached_secret = jwt.encode(header, payload, self._key.key)
self._cached_secret_replacement_time = (
expires_at - CLIENT_SECRET_MIN_VALIDITY_SECONDS
@@ -1119,9 +1438,6 @@ class UserAttributeDict(TypedDict):
emails: List[str]
-C = TypeVar("C")
-
-
class OidcMappingProvider(Generic[C]):
"""A mapping provider maps a UserInfo object to user attributes.
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index d865ee6e73..fcb8572348 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -458,11 +458,6 @@ class PaginationHandler:
# `/messages` should still works with live tokens when manually provided.
assert from_token.room_key.topological is not None
- if pagin_config.limit is None:
- # This shouldn't happen as we've set a default limit before this
- # gets called.
- raise Exception("limit not set")
-
room_token = from_token.room_key
async with self.pagination_lock.read(room_id):
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 4e575ffbaa..cf08737d11 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -201,7 +201,7 @@ class BasePresenceHandler(abc.ABC):
"""Get the current presence state for multiple users.
Returns:
- dict: `user_id` -> `UserPresenceState`
+ A mapping of `user_id` -> `UserPresenceState`
"""
states = {}
missing = []
@@ -256,7 +256,7 @@ class BasePresenceHandler(abc.ABC):
with the app.
"""
- async def update_external_syncs_row(
+ async def update_external_syncs_row( # noqa: B027 (no-op by design)
self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int
) -> None:
"""Update the syncing users for an external process as a delta.
@@ -272,7 +272,9 @@ class BasePresenceHandler(abc.ABC):
sync_time_msec: Time in ms when the user was last syncing
"""
- async def update_external_syncs_clear(self, process_id: str) -> None:
+ async def update_external_syncs_clear( # noqa: B027 (no-op by design)
+ self, process_id: str
+ ) -> None:
"""Marks all users that had been marked as syncing by a given process
as offline.
@@ -476,7 +478,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
return _NullContextManager()
prev_state = await self.current_state_for_user(user_id)
- if prev_state != PresenceState.BUSY:
+ if prev_state.state != PresenceState.BUSY:
# We set state here but pass ignore_status_msg = True as we don't want to
# cause the status message to be cleared.
# Note that this causes last_active_ts to be incremented which is not
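Reviewer note: the one-line fix above matters because `prev_state` is a `UserPresenceState` (an attrs class), not a string, so the old comparison was always True. A runnable sketch of the distinction:

    BUSY = "busy"  # hypothetical stand-in for the PresenceState.BUSY constant

    class FakeUserPresenceState:  # stand-in for the attrs class
        state = BUSY

    prev_state = FakeUserPresenceState()
    assert prev_state != BUSY          # object vs str: always unequal
    assert prev_state.state == BUSY    # the intended comparison
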
@@ -1596,7 +1598,9 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
self,
user: UserID,
from_key: Optional[int],
- limit: Optional[int] = None,
+ # Having a default limit doesn't match the EventSource API, but some
+ # callers do not provide it. It is unused in this class.
+ limit: int = 0,
room_ids: Optional[Collection[str]] = None,
is_guest: bool = False,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index d8ff5289b5..4bf9a047a3 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -307,7 +307,11 @@ class ProfileHandler:
if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
return True
- server_name, _, media_id = parse_and_validate_mxc_uri(mxc)
+ host, port, media_id = parse_and_validate_mxc_uri(mxc)
+ if port is not None:
+ server_name = host + ":" + str(port)
+ else:
+ server_name = host
if server_name == self.server_name:
media_info = await self.store.get_local_media(media_id)
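Reviewer note: a runnable sketch of why the port must be re-attached; `parse_and_validate_mxc_uri` splits the authority into host and optional port:

    # Hypothetical return values for "mxc://example.org:8448/SomeMediaId":
    host, port, media_id = "example.org", 8448, "SomeMediaId"
    server_name = host + ":" + str(port) if port is not None else host
    assert server_name == "example.org:8448"

    # And without an explicit port, e.g. "mxc://example.org/SomeMediaId":
    host, port = "example.org", None
    server_name = host + ":" + str(port) if port is not None else host
    assert server_name == "example.org"
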
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 4768a34c07..ac01582442 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -63,8 +63,6 @@ class ReceiptsHandler:
self.clock = self.hs.get_clock()
self.state = hs.get_state_handler()
- self._msc3771_enabled = hs.config.experimental.msc3771_enabled
-
async def _received_remote_receipt(self, origin: str, content: JsonDict) -> None:
"""Called when we receive an EDU of type m.receipt from a remote HS."""
receipts = []
@@ -96,11 +94,10 @@ class ReceiptsHandler:
# Check if these receipts apply to a thread.
thread_id = None
data = user_values.get("data", {})
- if self._msc3771_enabled and isinstance(data, dict):
- thread_id = data.get("thread_id")
- # If the thread ID is invalid, consider it missing.
- if not isinstance(thread_id, str):
- thread_id = None
+ thread_id = data.get("thread_id")
+ # If the thread ID is invalid, consider it missing.
+ if not isinstance(thread_id, str):
+ thread_id = None
receipts.append(
ReadReceipt(
@@ -260,7 +257,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
self,
user: UserID,
from_key: int,
- limit: Optional[int],
+ limit: int,
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
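Reviewer note: with the MSC3771 flag gone, the thread ID is always read from the receipt data above, and non-string values are still discarded; a runnable sketch:

    for data in ({"thread_id": "$thread_root"}, {"thread_id": 42}, {}):
        thread_id = data.get("thread_id")
        if not isinstance(thread_id, str):
            # If the thread ID is invalid or absent, consider it missing.
            thread_id = None
        print(data, "->", thread_id)
    # {'thread_id': '$thread_root'} -> $thread_root; the other two -> None
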
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index cfcadb34db..ca1c7a1866 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -220,6 +220,7 @@ class RegistrationHandler:
by_admin: bool = False,
user_agent_ips: Optional[List[Tuple[str, str]]] = None,
auth_provider_id: Optional[str] = None,
+ approved: bool = False,
) -> str:
"""Registers a new client on the server.
@@ -246,6 +247,8 @@ class RegistrationHandler:
user_agent_ips: Tuples of user-agents and IP addresses used
during the registration process.
auth_provider_id: The SSO IdP the user used, if any.
+ approved: True if the new user should be considered already
+ approved by an administrator.
Returns:
The registered user_id.
Raises:
@@ -307,6 +310,7 @@ class RegistrationHandler:
user_type=user_type,
address=address,
shadow_banned=shadow_banned,
+ approved=approved,
)
profile = await self.store.get_profileinfo(localpart)
@@ -695,6 +699,7 @@ class RegistrationHandler:
user_type: Optional[str] = None,
address: Optional[str] = None,
shadow_banned: bool = False,
+ approved: bool = False,
) -> None:
"""Register user in the datastore.
@@ -713,6 +718,7 @@ class RegistrationHandler:
api.constants.UserTypes, or None for a normal user.
address: the IP address used to perform the registration.
shadow_banned: Whether to shadow-ban the user
+ approved: Whether to mark the user as approved by an administrator
"""
if self.hs.config.worker.worker_app:
await self._register_client(
@@ -726,6 +732,7 @@ class RegistrationHandler:
user_type=user_type,
address=address,
shadow_banned=shadow_banned,
+ approved=approved,
)
else:
await self.store.register_user(
@@ -738,6 +745,7 @@ class RegistrationHandler:
admin=admin,
user_type=user_type,
shadow_banned=shadow_banned,
+ approved=approved,
)
# Only call the account validity module(s) on the main process, to avoid
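Reviewer note: the new `approved` flag simply threads through to the datastore; a hypothetical admin-initiated registration that pre-approves the account:

    user_id = await registration_handler.register_user(
        localpart="alice",
        by_admin=True,
        approved=True,  # skip the admin-approval queue for this account
    )
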
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index 48cc9c1ac5..64d373e9d7 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -11,16 +11,18 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import enum
import logging
from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tuple
import attr
-from synapse.api.constants import RelationTypes
+from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
from synapse.logging.tracing import SynapseTags, set_attribute, trace
-from synapse.storage.databases.main.relations import _RelatedEvent
+from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent
+from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamToken, UserID
from synapse.visibility import filter_events_for_client
@@ -31,6 +33,13 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+class ThreadsListInclude(str, enum.Enum):
+ """Valid values for the 'include' flag of /threads."""
+
+ all = "all"
+ participated = "participated"
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _ThreadAggregation:
# The latest event in the thread.
@@ -66,18 +75,17 @@ class RelationsHandler:
self._clock = hs.get_clock()
self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
+ self._event_creation_handler = hs.get_event_creation_handler()
async def get_relations(
self,
requester: Requester,
event_id: str,
room_id: str,
+ pagin_config: PaginationConfig,
+ include_original_event: bool,
relation_type: Optional[str] = None,
event_type: Optional[str] = None,
- limit: int = 5,
- direction: str = "b",
- from_token: Optional[StreamToken] = None,
- to_token: Optional[StreamToken] = None,
) -> JsonDict:
"""Get related events of a event, ordered by topological ordering.
@@ -87,13 +95,10 @@ class RelationsHandler:
requester: The user requesting the relations.
event_id: Fetch events that relate to this event ID.
room_id: The room the event belongs to.
+ pagin_config: The pagination config rules to apply, if any.
+ include_original_event: Whether to include the parent event.
relation_type: Only fetch events with this relation type, if given.
event_type: Only fetch events with this event type, if given.
- limit: Only fetch the most recent `limit` events.
- direction: Whether to fetch the most recent first (`"b"`) or the
- oldest first (`"f"`).
- from_token: Fetch rows from the given token, or from the start if None.
- to_token: Fetch rows up to the given token, or up to the end if None.
Returns:
The pagination chunk.
@@ -121,10 +126,10 @@ class RelationsHandler:
room_id=room_id,
relation_type=relation_type,
event_type=event_type,
- limit=limit,
- direction=direction,
- from_token=from_token,
- to_token=to_token,
+ limit=pagin_config.limit,
+ direction=pagin_config.direction,
+ from_token=pagin_config.from_token,
+ to_token=pagin_config.to_token,
)
events = await self._main_store.get_events_as_list(
@@ -138,31 +143,32 @@ class RelationsHandler:
is_peeking=(member_event_id is None),
)
- now = self._clock.time_msec()
- # Do not bundle aggregations when retrieving the original event because
- # we want the content before relations are applied to it.
- original_event = self._event_serializer.serialize_event(
- event, now, bundle_aggregations=None
- )
# The relations returned for the requested event do include their
# bundled aggregations.
aggregations = await self.get_bundled_aggregations(
events, requester.user.to_string()
)
- serialized_events = self._event_serializer.serialize_events(
- events, now, bundle_aggregations=aggregations
- )
- return_value = {
- "chunk": serialized_events,
- "original_event": original_event,
+ now = self._clock.time_msec()
+ return_value: JsonDict = {
+ "chunk": self._event_serializer.serialize_events(
+ events, now, bundle_aggregations=aggregations
+ ),
}
+ if include_original_event:
+ # Do not bundle aggregations when retrieving the original event because
+ # we want the content before relations are applied to it.
+ return_value["original_event"] = self._event_serializer.serialize_event(
+ event, now, bundle_aggregations=None
+ )
if next_token:
return_value["next_batch"] = await next_token.to_string(self._main_store)
- if from_token:
- return_value["prev_batch"] = await from_token.to_string(self._main_store)
+ if pagin_config.from_token:
+ return_value["prev_batch"] = await pagin_config.from_token.to_string(
+ self._main_store
+ )
return return_value
@@ -201,6 +207,59 @@ class RelationsHandler:
return related_events, next_token
+ async def redact_events_related_to(
+ self,
+ requester: Requester,
+ event_id: str,
+ initial_redaction_event: EventBase,
+ relation_types: List[str],
+ ) -> None:
+ """Redacts all events related to the given event ID with one of the given
+ relation types.
+
+ This method is expected to be called when redacting the event referred to by
+ the given event ID.
+
+ If an event cannot be redacted (e.g. because of insufficient permissions), log
+ the error and try to redact the next one.
+
+ Args:
+ requester: The requester to redact events on behalf of.
+ event_id: The event ID whose related events should be looked up and redacted.
+ initial_redaction_event: The redaction for the event referred to by
+ event_id.
+ relation_types: The types of relations to look for.
+
+ Raises:
+ ShadowBanError if the requester is shadow-banned
+ """
+ related_event_ids = (
+ await self._main_store.get_all_relations_for_event_with_types(
+ event_id, relation_types
+ )
+ )
+
+ for related_event_id in related_event_ids:
+ try:
+ await self._event_creation_handler.create_and_send_nonmember_event(
+ requester,
+ {
+ "type": EventTypes.Redaction,
+ "content": initial_redaction_event.content,
+ "room_id": initial_redaction_event.room_id,
+ "sender": requester.user.to_string(),
+ "redacts": related_event_id,
+ },
+ ratelimit=False,
+ )
+ except SynapseError as e:
+ logger.warning(
+ "Failed to redact event %s (related to event %s): %s",
+ related_event_id,
+ event_id,
+ e.msg,
+ )
+
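Reviewer note: a hypothetical call site, fanning a redaction out to thread replies and annotations of the redacted event (relation type constants from synapse.api.constants.RelationTypes):

    await relations_handler.redact_events_related_to(
        requester,
        event_id=redacted_event_id,
        initial_redaction_event=redaction_event,
        relation_types=[RelationTypes.THREAD, RelationTypes.ANNOTATION],
    )
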
@trace
async def get_annotations_for_event(
self,
@@ -490,3 +549,79 @@ class RelationsHandler:
results.setdefault(event_id, BundledAggregations()).replace = edit
return results
+
+ async def get_threads(
+ self,
+ requester: Requester,
+ room_id: str,
+ include: ThreadsListInclude,
+ limit: int = 5,
+ from_token: Optional[ThreadsNextBatch] = None,
+ ) -> JsonDict:
+ """Get related events of a event, ordered by topological ordering.
+
+ Args:
+ requester: The user requesting the relations.
+ room_id: The room the event belongs to.
+ include: One of "all" or "participated" to indicate which threads should
+ be returned.
+ limit: Only fetch the most recent `limit` events.
+ from_token: Fetch rows from the given token, or from the start if None.
+
+ Returns:
+ The pagination chunk.
+ """
+
+ user_id = requester.user.to_string()
+
+ # TODO Properly handle a user leaving a room.
+ (_, member_event_id) = await self._auth.check_user_in_room_or_world_readable(
+ room_id, requester, allow_departed_users=True
+ )
+
+ # Note that ignored users are not passed into get_relations_for_event
+ # below. Ignored users are handled in filter_events_for_client (and by
+ # not passing them in here we should get a better cache hit rate).
+ thread_roots, next_batch = await self._main_store.get_threads(
+ room_id=room_id, limit=limit, from_token=from_token
+ )
+
+ events = await self._main_store.get_events_as_list(thread_roots)
+
+ if include == ThreadsListInclude.participated:
+ # Pre-seed thread participation with whether the requester sent the event.
+ participated = {event.event_id: event.sender == user_id for event in events}
+ # For events the requester did not send, check the database for whether
+ # the requester sent a threaded reply.
+ participated.update(
+ await self._main_store.get_threads_participated(
+ [eid for eid, p in participated.items() if not p],
+ user_id,
+ )
+ )
+
+ # Limit the returned threads to those the user has participated in.
+ events = [event for event in events if participated[event.event_id]]
+
+ events = await filter_events_for_client(
+ self._storage_controllers,
+ user_id,
+ events,
+ is_peeking=(member_event_id is None),
+ )
+
+ aggregations = await self.get_bundled_aggregations(
+ events, requester.user.to_string()
+ )
+
+ now = self._clock.time_msec()
+ serialized_events = self._event_serializer.serialize_events(
+ events, now, bundle_aggregations=aggregations
+ )
+
+ return_value: JsonDict = {"chunk": serialized_events}
+
+ if next_batch:
+ return_value["next_batch"] = str(next_batch)
+
+ return return_value
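
Reviewer note: a runnable sketch of the participation pre-seeding above, with made-up events; only roots the requester did not send need a database lookup:

    user_id = "@alice:test"
    thread_roots = [("$root1", "@alice:test"), ("$root2", "@bob:test")]
    participated = {eid: sender == user_id for eid, sender in thread_roots}
    # Only "$root2" needs the store lookup; suppose alice replied in it:
    needs_lookup = [eid for eid, p in participated.items() if not p]
    assert needs_lookup == ["$root2"]
    participated.update({"$root2": True})
    kept = [eid for eid, p in participated.items() if p]
    assert kept == ["$root1", "$root2"]
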
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 33e9a87002..6dcfd86fdf 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -49,7 +49,6 @@ from synapse.api.constants import (
from synapse.api.errors import (
AuthError,
Codes,
- HttpResponseException,
LimitExceededError,
NotFoundError,
StoreError,
@@ -60,7 +59,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_and_fixup_power_levels_contents
-from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin
@@ -229,9 +227,7 @@ class RoomCreationHandler:
},
)
validate_event_for_room_version(tombstone_event)
- await self._event_auth_handler.check_auth_rules_from_context(
- tombstone_event, tombstone_context
- )
+ await self._event_auth_handler.check_auth_rules_from_context(tombstone_event)
# Upgrade the room
#
@@ -301,8 +297,7 @@ class RoomCreationHandler:
# now send the tombstone
await self.event_creation_handler.handle_new_client_event(
requester=requester,
- event=tombstone_event,
- context=tombstone_context,
+ events_and_context=[(tombstone_event, tombstone_context)],
)
state_filter = StateFilter.from_types(
@@ -562,7 +557,6 @@ class RoomCreationHandler:
invite_list=[],
initial_state=initial_state,
creation_content=creation_content,
- ratelimit=False,
)
# Transfer membership events
@@ -716,7 +710,7 @@ class RoomCreationHandler:
if (
self._server_notices_mxid is not None
- and requester.user.to_string() == self._server_notices_mxid
+ and user_id == self._server_notices_mxid
):
# allow the server notices mxid to create rooms
is_requester_admin = True
@@ -756,6 +750,10 @@ class RoomCreationHandler:
)
if ratelimit:
+ # Rate limit once in advance, but don't rate limit the individual
+ # events in the room — room creation isn't atomic and it's very
+ # janky if half the events in the initial state don't make it because
+ # of rate limiting.
await self.request_ratelimiter.ratelimit(requester)
room_version_id = config.get(
@@ -916,7 +914,6 @@ class RoomCreationHandler:
room_alias=room_alias,
power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
- ratelimit=ratelimit,
)
if "name" in config:
@@ -1040,26 +1037,36 @@ class RoomCreationHandler:
room_alias: Optional[RoomAlias] = None,
power_level_content_override: Optional[JsonDict] = None,
creator_join_profile: Optional[JsonDict] = None,
- ratelimit: bool = True,
) -> Tuple[int, str, int]:
- """Sends the initial events into a new room.
+ """Sends the initial events into a new room. Sends the room creation, membership,
+ and power level events into the room sequentially, then creates and batches up the
+ rest of the events to persist as a batch to the DB.
`power_level_content_override` doesn't apply when initial state has
power level state event content.
+ Rate limiting should already have been applied by this point.
+
Returns:
A tuple containing the stream ID, event ID and depth of the last
event sent to the room.
"""
creator_id = creator.user.to_string()
-
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
-
depth = 1
- last_sent_event_id: Optional[str] = None
- def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
+ # the ID of the most recently created event, used as the prev_event of the next
+ prev_event: List[str] = []
+ # a map of (event type, state key) -> event_id. We collect these mappings as
+ # events are created (but not yet persisted to the db) to determine the state
+ # for events created later (as this info can't be pulled from the db yet)
+ state_map: MutableStateMap[str] = {}
+ # current_state_group of last event created. Used for computing event context of
+ # events to be batched
+ current_state_group = None
+
+ def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
e = {"type": etype, "content": content}
e.update(event_keys)
@@ -1067,32 +1074,44 @@ class RoomCreationHandler:
return e
- async def send(etype: str, content: JsonDict, **kwargs: Any) -> int:
- nonlocal last_sent_event_id
+ async def create_event(
+ etype: str,
+ content: JsonDict,
+ for_batch: bool,
+ **kwargs: Any,
+ ) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
+ """
+ Creates an event and associated event context.
+ Args:
+ etype: the type of event to be created
+ content: content of the event
+ for_batch: whether the event is being created for batch persisting. If
+ True, the event is created using the collected prev_event IDs, and its
+ event context is built from the state_map and current_state_group
+ parameters, so both must be populated when for_batch is True. The
+ resulting event and context are suitable for being batched up and bulk
+ persisted to the database with other similarly created events.
+ """
nonlocal depth
+ nonlocal prev_event
- event = create(etype, content, **kwargs)
- logger.debug("Sending %s in new room", etype)
- # Allow these events to be sent even if the user is shadow-banned to
- # allow the room creation to complete.
- (
- sent_event,
- last_stream_id,
- ) = await self.event_creation_handler.create_and_send_nonmember_event(
+ event_dict = create_event_dict(etype, content, **kwargs)
+
+ new_event, new_context = await self.event_creation_handler.create_event(
creator,
- event,
- ratelimit=False,
- ignore_shadow_ban=True,
- # Note: we don't pass state_event_ids here because this triggers
- # an additional query per event to look them up from the events table.
- prev_event_ids=[last_sent_event_id] if last_sent_event_id else [],
+ event_dict,
+ prev_event_ids=prev_event,
depth=depth,
+ state_map=state_map,
+ for_batch=for_batch,
+ current_state_group=current_state_group,
)
-
- last_sent_event_id = sent_event.event_id
depth += 1
+ prev_event = [new_event.event_id]
+ state_map[(new_event.type, new_event.state_key)] = new_event.event_id
- return last_stream_id
+ return new_event, new_context
try:
config = self._presets_dict[preset_config]
@@ -1102,31 +1121,55 @@ class RoomCreationHandler:
)
creation_content.update({"creator": creator_id})
- await send(etype=EventTypes.Create, content=creation_content)
+ creation_event, creation_context = await create_event(
+ EventTypes.Create, creation_content, False
+ )
logger.debug("Sending %s in new room", EventTypes.Member)
- # Room create event must exist at this point
- assert last_sent_event_id is not None
+ ev = await self.event_creation_handler.handle_new_client_event(
+ requester=creator,
+ events_and_context=[(creation_event, creation_context)],
+ ratelimit=False,
+ ignore_shadow_ban=True,
+ )
+ last_sent_event_id = ev.event_id
+
member_event_id, _ = await self.room_member_handler.update_membership(
creator,
creator.user,
room_id,
"join",
- ratelimit=ratelimit,
+ ratelimit=False,
content=creator_join_profile,
new_room=True,
prev_event_ids=[last_sent_event_id],
depth=depth,
)
- last_sent_event_id = member_event_id
+ prev_event = [member_event_id]
+ # update the depth and state map here as the membership event has been created
+ # through a different code path
+ depth += 1
+ state_map[(EventTypes.Member, creator.user.to_string())] = member_event_id
+
+ # we need the state group of the membership event as it is the current state group
+ event_to_state = (
+ await self._storage_controllers.state.get_state_group_for_events(
+ [member_event_id]
+ )
+ )
+ current_state_group = event_to_state[member_event_id]
+
+ events_to_send = []
# We treat the power levels override specially as this needs to be one
# of the first events that get sent into a room.
pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None)
if pl_content is not None:
- last_sent_stream_id = await send(
- etype=EventTypes.PowerLevels, content=pl_content
+ power_event, power_context = await create_event(
+ EventTypes.PowerLevels, pl_content, True
)
+ current_state_group = power_context._state_group
+ events_to_send.append((power_event, power_context))
else:
power_level_content: JsonDict = {
"users": {creator_id: 100},
@@ -1169,48 +1212,73 @@ class RoomCreationHandler:
# apply those.
if power_level_content_override:
power_level_content.update(power_level_content_override)
-
- last_sent_stream_id = await send(
- etype=EventTypes.PowerLevels, content=power_level_content
+ pl_event, pl_context = await create_event(
+ EventTypes.PowerLevels,
+ power_level_content,
+ True,
)
+ current_state_group = pl_context._state_group
+ events_to_send.append((pl_event, pl_context))
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
- last_sent_stream_id = await send(
- etype=EventTypes.CanonicalAlias,
- content={"alias": room_alias.to_string()},
+ room_alias_event, room_alias_context = await create_event(
+ EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True
)
+ current_state_group = room_alias_context._state_group
+ events_to_send.append((room_alias_event, room_alias_context))
if (EventTypes.JoinRules, "") not in initial_state:
- last_sent_stream_id = await send(
- etype=EventTypes.JoinRules, content={"join_rule": config["join_rules"]}
+ join_rules_event, join_rules_context = await create_event(
+ EventTypes.JoinRules,
+ {"join_rule": config["join_rules"]},
+ True,
)
+ current_state_group = join_rules_context._state_group
+ events_to_send.append((join_rules_event, join_rules_context))
if (EventTypes.RoomHistoryVisibility, "") not in initial_state:
- last_sent_stream_id = await send(
- etype=EventTypes.RoomHistoryVisibility,
- content={"history_visibility": config["history_visibility"]},
+ visibility_event, visibility_context = await create_event(
+ EventTypes.RoomHistoryVisibility,
+ {"history_visibility": config["history_visibility"]},
+ True,
)
+ current_state_group = visibility_context._state_group
+ events_to_send.append((visibility_event, visibility_context))
if config["guest_can_join"]:
if (EventTypes.GuestAccess, "") not in initial_state:
- last_sent_stream_id = await send(
- etype=EventTypes.GuestAccess,
- content={EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
+ guest_access_event, guest_access_context = await create_event(
+ EventTypes.GuestAccess,
+ {EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
+ True,
)
+ current_state_group = guest_access_context._state_group
+ events_to_send.append((guest_access_event, guest_access_context))
for (etype, state_key), content in initial_state.items():
- last_sent_stream_id = await send(
- etype=etype, state_key=state_key, content=content
+ event, context = await create_event(
+ etype, content, True, state_key=state_key
)
+ current_state_group = context._state_group
+ events_to_send.append((event, context))
if config["encrypted"]:
- last_sent_stream_id = await send(
- etype=EventTypes.RoomEncryption,
+ encryption_event, encryption_context = await create_event(
+ EventTypes.RoomEncryption,
+ {"algorithm": RoomEncryptionAlgorithms.DEFAULT},
+ True,
state_key="",
- content={"algorithm": RoomEncryptionAlgorithms.DEFAULT},
)
+ events_to_send.append((encryption_event, encryption_context))
- return last_sent_stream_id, last_sent_event_id, depth
+ last_event = await self.event_creation_handler.handle_new_client_event(
+ creator,
+ events_to_send,
+ ignore_shadow_ban=True,
+ ratelimit=False,
+ )
+ assert last_event.internal_metadata.stream_ordering is not None
+ return last_event.internal_metadata.stream_ordering, last_event.event_id, depth
def _generate_room_id(self) -> str:
"""Generates a random room ID.
@@ -1383,7 +1451,7 @@ class RoomContextHandler:
events_before=events_before,
event=event,
events_after=events_after,
- state=await filter_evts(state_events),
+ state=state_events,
aggregations=aggregations,
start=await token.copy_and_replace(
StreamKeyType.ROOM, results.start
@@ -1429,7 +1497,12 @@ class TimestampLookupHandler:
Raises:
SynapseError if unable to find any event locally in the given direction
"""
-
+ logger.debug(
+ "get_event_for_timestamp(room_id=%s, timestamp=%s, direction=%s) Finding closest event...",
+ room_id,
+ timestamp,
+ direction,
+ )
local_event_id = await self.store.get_event_id_for_timestamp(
room_id, timestamp, direction
)
@@ -1476,88 +1549,59 @@ class TimestampLookupHandler:
)
likely_domains = (
- await self._storage_controllers.state.get_current_hosts_in_room(room_id)
+ await self._storage_controllers.state.get_current_hosts_in_room_ordered(
+ room_id
+ )
)
- # Loop through each homeserver candidate until we get a succesful response
- for domain in likely_domains:
- # We don't want to ask our own server for information we don't have
- if domain == self.server_name:
- continue
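+ # `timestamp_to_event` now tries each candidate destination itself, in
+ # the order given, and returns None if no destination produced a usable
+ # response - so the per-domain loop and error handling that used to
+ # live here are no longer needed.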
+ remote_response = await self.federation_client.timestamp_to_event(
+ destinations=likely_domains,
+ room_id=room_id,
+ timestamp=timestamp,
+ direction=direction,
+ )
+ if remote_response is not None:
+ logger.debug(
+ "get_event_for_timestamp: remote_response=%s",
+ remote_response,
+ )
- try:
- remote_response = await self.federation_client.timestamp_to_event(
- domain, room_id, timestamp, direction
- )
- logger.debug(
- "get_event_for_timestamp: response from domain(%s)=%s",
- domain,
- remote_response,
+ remote_event_id = remote_response.event_id
+ remote_origin_server_ts = remote_response.origin_server_ts
+
+ # Backfill this event so we can get a pagination token for
+ # it with `/context` and paginate `/messages` from this
+ # point.
+ pulled_pdu_info = await self.federation_event_handler.backfill_event_id(
+ likely_domains, room_id, remote_event_id
+ )
+ remote_event = pulled_pdu_info.pdu
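+ # `backfill_event_id` also tells us which server the event was actually
+ # pulled from (`pull_origin`), which we use when logging below.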
+
+ # XXX: When we see that the remote server is not trustworthy,
+ # maybe we should not ask them first in the future.
+ if remote_origin_server_ts != remote_event.origin_server_ts:
+ logger.info(
+ "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
+ pulled_pdu_info.pull_origin,
+ remote_event_id,
+ remote_origin_server_ts,
+ remote_event.origin_server_ts,
)
- remote_event_id = remote_response.event_id
- remote_origin_server_ts = remote_response.origin_server_ts
-
- # Backfill this event so we can get a pagination token for
- # it with `/context` and paginate `/messages` from this
- # point.
- #
- # TODO: The requested timestamp may lie in a part of the
- # event graph that the remote server *also* didn't have,
- # in which case they will have returned another event
- # which may be nowhere near the requested timestamp. In
- # the future, we may need to reconcile that gap and ask
- # other homeservers, and/or extend `/timestamp_to_event`
- # to return events on *both* sides of the timestamp to
- # help reconcile the gap faster.
- remote_event = (
- await self.federation_event_handler.backfill_event_id(
- domain, room_id, remote_event_id
- )
- )
-
- # XXX: When we see that the remote server is not trustworthy,
- # maybe we should not ask them first in the future.
- if remote_origin_server_ts != remote_event.origin_server_ts:
- logger.info(
- "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
- domain,
- remote_event_id,
- remote_origin_server_ts,
- remote_event.origin_server_ts,
- )
-
- # Only return the remote event if it's closer than the local event
- if not local_event or (
- abs(remote_event.origin_server_ts - timestamp)
- < abs(local_event.origin_server_ts - timestamp)
- ):
- logger.info(
- "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
- remote_event_id,
- remote_event.origin_server_ts,
- timestamp,
- local_event.event_id if local_event else None,
- local_event.origin_server_ts if local_event else None,
- )
- return remote_event_id, remote_origin_server_ts
- except (HttpResponseException, InvalidResponseError) as ex:
- # Let's not put a high priority on some other homeserver
- # failing to respond or giving a random response
- logger.debug(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
- domain,
- type(ex).__name__,
- ex,
- ex.args,
- )
- except Exception:
- # But we do want to see some exceptions in our code
- logger.warning(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception",
- domain,
- exc_info=True,
+ # Only return the remote event if it's closer than the local event
+ if not local_event or (
+ abs(remote_event.origin_server_ts - timestamp)
+ < abs(local_event.origin_server_ts - timestamp)
+ ):
+ logger.info(
+ "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
+ remote_event_id,
+ remote_event.origin_server_ts,
+ timestamp,
+ local_event.event_id if local_event else None,
+ local_event.origin_server_ts if local_event else None,
)
+ return remote_event_id, remote_origin_server_ts
# To appease mypy, we have to add both of these conditions to check for
# `None`. We only expect `local_event` to be `None` when
@@ -1580,7 +1624,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
self,
user: UserID,
from_key: RoomStreamToken,
- limit: Optional[int],
+ limit: int,
room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py
index 1414e575d6..411a6fb22f 100644
--- a/synapse/handlers/room_batch.py
+++ b/synapse/handlers/room_batch.py
@@ -379,8 +379,7 @@ class RoomBatchHandler:
await self.create_requester_for_user_id_from_app_service(
event.sender, app_service_requester.app_service
),
- event=event,
- context=context,
+ events_and_context=[(event, context)],
)
return event_ids
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index e0d0a8941c..2ebd2e6eb7 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -322,6 +322,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
+ origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""
Internal membership update function to get an existing event or create
@@ -361,6 +362,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
+ origin_server_ts: The origin_server_ts to use if a new event is created. Uses
+ the current timestamp if set to None.
Returns:
Tuple of event ID and stream ordering position
@@ -399,6 +402,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
"state_key": user_id,
# For backwards compatibility:
"membership": membership,
+ "origin_server_ts": origin_server_ts,
},
txn_id=txn_id,
allow_no_prev_events=allow_no_prev_events,
@@ -432,8 +436,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
with tracing.start_active_span("handle_new_client_event"):
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
- event,
- context,
+ events_and_context=[(event, context)],
extra_users=[target],
ratelimit=ratelimit,
)
@@ -505,6 +508,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
+ origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""Update a user's membership in a room.
@@ -543,6 +547,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
+ origin_server_ts: The origin_server_ts to use if a new event is created. Uses
+ the current timestamp if set to None.
Returns:
A tuple of the new event ID and stream ID.
@@ -584,6 +590,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
depth=depth,
+ origin_server_ts=origin_server_ts,
)
return result
@@ -607,6 +614,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
+ origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""Helper for update_membership.
@@ -647,6 +655,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
+ origin_server_ts: The origin_server_ts to use if a new event is created. Uses
+ the current timestamp if set to None.
Returns:
A tuple of the new event ID and stream ID.
@@ -786,6 +796,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
require_consent=require_consent,
outlier=outlier,
historical=historical,
+ origin_server_ts=origin_server_ts,
)
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
@@ -1031,6 +1042,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content=content,
require_consent=require_consent,
outlier=outlier,
+ origin_server_ts=origin_server_ts,
)
async def _should_perform_remote_join(
@@ -1151,8 +1163,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
logger.info("Transferring room state from %s to %s", old_room_id, room_id)
# Find all local users that were in the old room and copy over each user's state
- users = await self.store.get_users_in_room(old_room_id)
- await self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
+ local_users = await self.store.get_local_users_in_room(old_room_id)
+ await self.copy_user_state_on_room_upgrade(old_room_id, room_id, local_users)
# Add new room to the room directory if the old room was there
# Remove old room from the room directory
@@ -1252,7 +1264,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
raise SynapseError(403, "This room has been blocked on this server")
event = await self.event_creation_handler.handle_new_client_event(
- requester, event, context, extra_users=[target_user], ratelimit=ratelimit
+ requester,
+ events_and_context=[(event, context)],
+ extra_users=[target_user],
+ ratelimit=ratelimit,
)
prev_member_event_id = prev_state_ids.get(
@@ -1860,8 +1875,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
- event,
- context,
+ events_and_context=[(event, context)],
extra_users=[UserID.from_string(target_user)],
)
# we know it was persisted, so must have a stream ordering
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py
index 9602f0d0bb..874860d461 100644
--- a/synapse/handlers/saml.py
+++ b/synapse/handlers/saml.py
@@ -441,7 +441,7 @@ class DefaultSamlMappingProvider:
client_redirect_url: where the client wants to redirect to
Returns:
- dict: A dict containing new user attributes. Possible keys:
+ A dict containing new user attributes. Possible keys:
* mxid_localpart (str): Required. The localpart of the user's mxid
* displayname (str): The displayname of the user
* emails (list[str]): Any emails for the user
@@ -483,7 +483,7 @@ class DefaultSamlMappingProvider:
Args:
config: A dictionary containing configuration options for this provider
Returns:
- SamlConfig: A custom config object for this module
+ A custom config object for this module
"""
# Parse config options and use defaults where necessary
mxid_source_attribute = config.get("mxid_source_attribute", "uid")
diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py
index e2844799e8..804cc6e81e 100644
--- a/synapse/handlers/send_email.py
+++ b/synapse/handlers/send_email.py
@@ -187,6 +187,19 @@ class SendEmailHandler:
multipart_msg["To"] = email_address
multipart_msg["Date"] = email.utils.formatdate()
multipart_msg["Message-ID"] = email.utils.make_msgid()
+ # Discourage automatic responses to Synapse's emails.
+ # Per RFC 3834, automatic responses should not be sent if the "Auto-Submitted"
+ # header is present with any value other than "no". See
+ # https://www.rfc-editor.org/rfc/rfc3834.html#section-5.1
+ multipart_msg["Auto-Submitted"] = "auto-generated"
+ # Also include a Microsoft-Exchange specific header:
+ # https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxcmail/ced68690-498a-4567-9d14-5c01f974d8b1
+ # which suggests it can take the value "All" to "suppress all auto-replies",
+ # or a comma separated list of auto-reply classes to suppress.
+ # The following Stack Overflow answers give a little more context:
+ # https://stackoverflow.com/a/25324691/5252017
+ # https://stackoverflow.com/a/61646381/5252017
+ multipart_msg["X-Auto-Response-Suppress"] = "All"
multipart_msg.attach(text_part)
multipart_msg.attach(html_part)
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 6bc1cbd787..749d7e93b0 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -147,6 +147,9 @@ class UsernameMappingSession:
# A unique identifier for this SSO provider, e.g. "oidc" or "saml".
auth_provider_id: str
+ # An optional session ID from the IdP.
+ auth_provider_session_id: Optional[str]
+
# user ID on the IdP server
remote_user_id: str
@@ -188,6 +191,7 @@ class SsoHandler:
self._server_name = hs.hostname
self._registration_handler = hs.get_registration_handler()
self._auth_handler = hs.get_auth_handler()
+ self._device_handler = hs.get_device_handler()
self._error_template = hs.config.sso.sso_error_template
self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
self._profile_handler = hs.get_profile_handler()
@@ -464,6 +468,7 @@ class SsoHandler:
client_redirect_url,
next_step_url,
extra_login_attributes,
+ auth_provider_session_id,
)
user_id = await self._register_mapped_user(
@@ -585,6 +590,7 @@ class SsoHandler:
client_redirect_url: str,
next_step_url: bytes,
extra_login_attributes: Optional[JsonDict],
+ auth_provider_session_id: Optional[str],
) -> NoReturn:
"""Creates a UsernameMappingSession and redirects the browser
@@ -607,6 +613,8 @@ class SsoHandler:
extra_login_attributes: An optional dictionary of extra
attributes to be provided to the client in the login response.
+ auth_provider_session_id: An optional session ID from the IdP.
+
Raises:
RedirectException
"""
@@ -615,6 +623,7 @@ class SsoHandler:
now = self._clock.time_msec()
session = UsernameMappingSession(
auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
remote_user_id=remote_user_id,
display_name=attributes.display_name,
emails=attributes.emails,
@@ -866,7 +875,7 @@ class SsoHandler:
)
async def handle_terms_accepted(
- self, request: Request, session_id: str, terms_version: str
+ self, request: SynapseRequest, session_id: str, terms_version: str
) -> None:
"""Handle a request to the new-user 'consent' endpoint
@@ -968,6 +977,7 @@ class SsoHandler:
session.client_redirect_url,
session.extra_login_attributes,
new_user=True,
+ auth_provider_session_id=session.auth_provider_session_id,
)
def _expire_old_sessions(self) -> None:
@@ -1017,6 +1027,76 @@ class SsoHandler:
return True
+ async def revoke_sessions_for_provider_session_id(
+ self,
+ auth_provider_id: str,
+ auth_provider_session_id: str,
+ expected_user_id: Optional[str] = None,
+ ) -> None:
+ """Revoke any devices and in-flight logins tied to a provider session.
+
+ Args:
+ auth_provider_id: A unique identifier for this SSO provider, e.g.
+ "oidc" or "saml".
+ auth_provider_session_id: The session ID from the provider to log out.
+ expected_user_id: The user we expect to be logging out. If set, sessions
+ belonging to other users are ignored and an error is logged.
+ """
+ # Invalidate any running user-mapping sessions
+ to_delete = []
+ for session_id, session in self._username_mapping_sessions.items():
+ if (
+ session.auth_provider_id == auth_provider_id
+ and session.auth_provider_session_id == auth_provider_session_id
+ ):
+ to_delete.append(session_id)
+
+ for session_id in to_delete:
+ logger.info("Revoking mapping session %s", session_id)
+ del self._username_mapping_sessions[session_id]
+
+ # Invalidate any in-flight login tokens
+ await self._store.invalidate_login_tokens_by_session_id(
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+
+ # Fetch any device(s) in the store associated with the session ID.
+ devices = await self._store.get_devices_by_auth_provider_session_id(
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+
+ # We have no guarantee that all the devices of that session are for the same
+ # `user_id`. Hence, we have to iterate over the list of devices and log them out
+ # one by one.
+ for device in devices:
+ user_id = device["user_id"]
+ device_id = device["device_id"]
+
+ # If the user_id associated with that device/session is not the one we got
+ # out of the `sub` claim, skip that device and log an error.
+ if expected_user_id is not None and user_id != expected_user_id:
+ logger.error(
+ "Received a logout notification from SSO provider "
+ f"{auth_provider_id!r} for the user {expected_user_id!r}, but with "
+ f"a session ID ({auth_provider_session_id!r}) which belongs to "
+ f"{user_id!r}. This may happen when the SSO provider user mapper "
+ "uses something else than the standard attribute as mapping ID. "
+ "For OIDC providers, set `backchannel_logout_ignore_sub` to `true` "
+ "in the provider config if that is the case."
+ )
+ continue
+
+ logger.info(
+ "Logging out %r (device %r) via SSO (%r) logout notification (session %r).",
+ user_id,
+ device_id,
+ auth_provider_id,
+ auth_provider_session_id,
+ )
+ await self._device_handler.delete_devices(user_id, [device_id])
+
def get_username_mapping_session_cookie_from_request(request: IRequest) -> str:
"""Extract the session ID from the cookie
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index f7fd6d7933..c00f30518a 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -45,7 +45,8 @@ from synapse.logging.tracing import (
start_active_span,
)
from synapse.push.clientformat import format_push_rules_for_user
-from synapse.storage.databases.main.event_push_actions import NotifCounts
+from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
from synapse.storage.state import StateFilter
from synapse.types import (
@@ -133,6 +134,7 @@ class JoinedSyncResult:
ephemeral: List[JsonDict]
account_data: List[JsonDict]
unread_notifications: JsonDict
+ unread_thread_notifications: JsonDict
summary: Optional[JsonDict]
unread_count: int
@@ -809,18 +811,6 @@ class SyncHandler:
if canonical_alias and canonical_alias.content.get("alias"):
return summary
- me = sync_config.user.to_string()
-
- joined_user_ids = [
- r[0] for r in details.get(Membership.JOIN, empty_ms).members if r[0] != me
- ]
- invited_user_ids = [
- r[0] for r in details.get(Membership.INVITE, empty_ms).members if r[0] != me
- ]
- gone_user_ids = [
- r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
- ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]
-
# FIXME: only build up a member_ids list for our heroes
member_ids = {}
for membership in (
@@ -832,11 +822,8 @@ class SyncHandler:
for user_id, event_id in details.get(membership, empty_ms).members:
member_ids[user_id] = event_id
- # FIXME: order by stream ordering rather than as returned by SQL
- if joined_user_ids or invited_user_ids:
- summary["m.heroes"] = sorted(joined_user_ids + invited_user_ids)[0:5]
- else:
- summary["m.heroes"] = sorted(gone_user_ids)[0:5]
+ me = sync_config.user.to_string()
+ summary["m.heroes"] = extract_heroes_from_room_summary(details, me)
if not sync_config.filter_collection.lazy_load_members():
return summary
@@ -1196,7 +1183,9 @@ class SyncHandler:
room_id: The partial state room to find the remaining memberships for.
members_to_fetch: The memberships to find.
events_with_membership_auth: A mapping from user IDs to events whose auth
- events are known to contain their membership.
+ events would contain their prior membership, if one exists.
+ Note that join events will not cite a prior membership if a user has
+ never been in the room before.
found_state_ids: A dict from (type, state_key) -> state_event_id, containing
memberships that have been previously found. Entries in
`members_to_fetch` that have a membership in `found_state_ids` are
@@ -1206,6 +1195,10 @@ class SyncHandler:
A dict from ("m.room.member", state_key) -> state_event_id, containing the
memberships missing from `found_state_ids`.
+ When `events_with_membership_auth` contains a join event for a given user
+ which does not cite a prior membership, no membership is returned for that
+ user.
+
Raises:
KeyError: if `events_with_membership_auth` does not have an entry for a
missing membership. Memberships in `found_state_ids` do not need an
@@ -1223,8 +1216,18 @@ class SyncHandler:
if (EventTypes.Member, member) in found_state_ids:
continue
- missing_members.add(member)
event_with_membership_auth = events_with_membership_auth[member]
+ is_join = (
+ event_with_membership_auth.is_state()
+ and event_with_membership_auth.type == EventTypes.Member
+ and event_with_membership_auth.state_key == member
+ and event_with_membership_auth.content.get("membership")
+ == Membership.JOIN
+ )
+ if not is_join:
+ # The event must include the desired membership as an auth event, unless
+ # it's the first join event for a given user.
+ missing_members.add(member)
auth_event_ids.update(event_with_membership_auth.auth_event_ids())
auth_events = await self.store.get_events(auth_event_ids)
@@ -1248,7 +1251,7 @@ class SyncHandler:
auth_event.type == EventTypes.Member
and auth_event.state_key == member
):
- missing_members.remove(member)
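+ # Use `discard` rather than `remove`: first-join members are never
+ # added to `missing_members` above, so they may not be present here.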
+ missing_members.discard(member)
additional_state_ids[
(EventTypes.Member, member)
] = auth_event.event_id
@@ -1277,7 +1280,7 @@ class SyncHandler:
async def unread_notifs_for_room_id(
self, room_id: str, sync_config: SyncConfig
- ) -> NotifCounts:
+ ) -> RoomNotifCounts:
with Measure(self.clock, "unread_notifs_for_room_id"):
return await self.store.get_unread_event_push_actions_by_room_for_user(
@@ -1303,6 +1306,19 @@ class SyncHandler:
At the end, we transfer data from the `sync_result_builder` to a new `SyncResult`
instance to signify that the sync calculation is complete.
"""
+
+ user_id = sync_config.user.to_string()
+ app_service = self.store.get_app_service_by_user_id(user_id)
+ if app_service:
+ # We no longer support AS users using /sync directly.
+ # See https://github.com/matrix-org/matrix-doc/issues/1144
+ raise NotImplementedError()
+
+ # Note: we get the user's room list *before* we get the current token; this
+ # avoids checking back in history if rooms are joined after the token is fetched.
+ token_before_rooms = self.event_sources.get_current_token()
+ mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id))
+
# NB: The now_token gets changed by some of the generate_sync_* methods,
# this is due to some of the underlying streams not supporting the ability
# to query up to a given point.
@@ -1310,6 +1326,57 @@ class SyncHandler:
now_token = self.event_sources.get_current_token()
log_kv({"now_token": str(now_token)})
+ # Since we fetched the user's room list before the token, there's a small window
+ # during which membership events may have been persisted, so we fetch these now
+ # and modify the joined room list for any changes between the get_rooms_for_user
+ # call and the get_current_token call.
+ membership_change_events = []
+ if since_token:
+ membership_change_events = await self.store.get_membership_changes_for_user(
+ user_id, since_token.room_key, now_token.room_key, self.rooms_to_exclude
+ )
+
+ mem_last_change_by_room_id: Dict[str, EventBase] = {}
+ for event in membership_change_events:
+ mem_last_change_by_room_id[event.room_id] = event
+
+ # For the latest membership event in each room found, add/remove the room ID
+ # from the joined room list accordingly. In this case we only care if the
+ # latest change is JOIN.
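+ # (Changes persisted before `token_before_rooms` are already reflected
+ # in the `get_rooms_for_user` result, so they're skipped below.)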
+
+ for room_id, event in mem_last_change_by_room_id.items():
+ assert event.internal_metadata.stream_ordering
+ if (
+ event.internal_metadata.stream_ordering
+ < token_before_rooms.room_key.stream
+ ):
+ continue
+
+ logger.info(
+ "User membership change between getting rooms and current token: %s %s %s",
+ user_id,
+ event.membership,
+ room_id,
+ )
+ # The user joined a room - we then have to check the room state to ensure we
+ # respect any bans if there's a race between the join and ban events.
+ if event.membership == Membership.JOIN:
+ user_ids_in_room = await self.store.get_users_in_room(room_id)
+ if user_id in user_ids_in_room:
+ mutable_joined_room_ids.add(room_id)
+ # The user left the room, or left and was re-invited but has not joined yet
+ else:
+ mutable_joined_room_ids.discard(room_id)
+
+ # Now that we have our list of joined room IDs, exclude as configured and freeze
+ joined_room_ids = frozenset(
+ (
+ room_id
+ for room_id in mutable_joined_room_ids
+ if room_id not in self.rooms_to_exclude
+ )
+ )
+
logger.debug(
"Calculating sync response for %r between %s and %s",
sync_config.user,
@@ -1317,22 +1384,13 @@ class SyncHandler:
now_token,
)
- user_id = sync_config.user.to_string()
- app_service = self.store.get_app_service_by_user_id(user_id)
- if app_service:
- # We no longer support AS users using /sync directly.
- # See https://github.com/matrix-org/matrix-doc/issues/1144
- raise NotImplementedError()
- else:
- joined_room_ids = await self.get_rooms_for_user_at(
- user_id, now_token.room_key
- )
sync_result_builder = SyncResultBuilder(
sync_config,
full_state,
since_token=since_token,
now_token=now_token,
joined_room_ids=joined_room_ids,
+ membership_change_events=membership_change_events,
)
logger.debug("Fetching account data")
@@ -1479,16 +1537,14 @@ class SyncHandler:
since_token.device_list_key
)
if changed_users is not None:
- result = await self.store.get_rooms_for_users_with_stream_ordering(
- changed_users
- )
+ result = await self.store.get_rooms_for_users(changed_users)
for changed_user_id, entries in result.items():
# Check if the changed user shares any rooms with the user,
# or if the changed user is the syncing user (as we always
# want to include device list updates of their own devices).
if user_id == changed_user_id or any(
- e.room_id in joined_rooms for e in entries
+ rid in joined_rooms for rid in entries
):
users_that_have_changed.add(changed_user_id)
else:
@@ -1522,13 +1578,9 @@ class SyncHandler:
newly_left_users.update(left_users)
# Remove any users that we still share a room with.
- left_users_rooms = (
- await self.store.get_rooms_for_users_with_stream_ordering(
- newly_left_users
- )
- )
+ left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
for user_id, entries in left_users_rooms.items():
- if any(e.room_id in joined_rooms for e in entries):
+ if any(rid in joined_rooms for rid in entries):
newly_left_users.discard(user_id)
return DeviceListUpdates(
@@ -1819,19 +1871,12 @@ class SyncHandler:
Does not modify the `sync_result_builder`.
"""
- user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
- now_token = sync_result_builder.now_token
+ membership_change_events = sync_result_builder.membership_change_events
assert since_token
- # Get a list of membership change events that have happened to the user
- # requesting the sync.
- membership_changes = await self.store.get_membership_changes_for_user(
- user_id, since_token.room_key, now_token.room_key
- )
-
- if membership_changes:
+ if membership_change_events:
return True
stream_id = since_token.room_key.stream
@@ -1870,16 +1915,10 @@ class SyncHandler:
since_token = sync_result_builder.since_token
now_token = sync_result_builder.now_token
sync_config = sync_result_builder.sync_config
+ membership_change_events = sync_result_builder.membership_change_events
assert since_token
- # TODO: we've already called this function and ran this query in
- # _have_rooms_changed. We could keep the results in memory to avoid a
- # second query, at the cost of more complicated source code.
- membership_change_events = await self.store.get_membership_changes_for_user(
- user_id, since_token.room_key, now_token.room_key, self.rooms_to_exclude
- )
-
mem_change_events_by_room_id: Dict[str, List[EventBase]] = {}
for event in membership_change_events:
mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)
@@ -2348,6 +2387,7 @@ class SyncHandler:
ephemeral=ephemeral,
account_data=account_data_events,
unread_notifications=unread_notifications,
+ unread_thread_notifications={},
summary=summary,
unread_count=0,
)
@@ -2355,10 +2395,33 @@ class SyncHandler:
if room_sync or always_include:
notifs = await self.unread_notifs_for_room_id(room_id, sync_config)
- unread_notifications["notification_count"] = notifs.notify_count
- unread_notifications["highlight_count"] = notifs.highlight_count
+ # Notifications for the main timeline.
+ notify_count = notifs.main_timeline.notify_count
+ highlight_count = notifs.main_timeline.highlight_count
+ unread_count = notifs.main_timeline.unread_count
- room_sync.unread_count = notifs.unread_count
+ # Check the sync configuration.
+ if sync_config.filter_collection.unread_thread_notifications():
+ # And add info for each thread.
+ room_sync.unread_thread_notifications = {
+ thread_id: {
+ "notification_count": thread_notifs.notify_count,
+ "highlight_count": thread_notifs.highlight_count,
+ }
+ for thread_id, thread_notifs in notifs.threads.items()
+ if thread_id is not None
+ }
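+ # (Only the per-thread counts are broken out here; the counts for
+ # the main timeline still go out via `unread_notifications` below.)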
+
+ else:
+ # Combine the unread counts for all threads and main timeline.
+ for thread_notifs in notifs.threads.values():
+ notify_count += thread_notifs.notify_count
+ highlight_count += thread_notifs.highlight_count
+ unread_count += thread_notifs.unread_count
+
+ unread_notifications["notification_count"] = notify_count
+ unread_notifications["highlight_count"] = highlight_count
+ room_sync.unread_count = unread_count
sync_result_builder.joined.append(room_sync)
@@ -2380,60 +2443,6 @@ class SyncHandler:
else:
raise Exception("Unrecognized rtype: %r", room_builder.rtype)
- async def get_rooms_for_user_at(
- self,
- user_id: str,
- room_key: RoomStreamToken,
- ) -> FrozenSet[str]:
- """Get set of joined rooms for a user at the given stream ordering.
-
- The stream ordering *must* be recent, otherwise this may throw an
- exception if older than a month. (This function is called with the
- current token, which should be perfectly fine).
-
- Args:
- user_id
- stream_ordering
-
- ReturnValue:
- Set of room_ids the user is in at given stream_ordering.
- """
- joined_rooms = await self.store.get_rooms_for_user_with_stream_ordering(user_id)
-
- joined_room_ids = set()
-
- # We need to check that the stream ordering of the join for each room
- # is before the stream_ordering asked for. This might not be the case
- # if the user joins a room between us getting the current token and
- # calling `get_rooms_for_user_with_stream_ordering`.
- # If the membership's stream ordering is after the given stream
- # ordering, we need to go and work out if the user was in the room
- # before.
- # We also need to check whether the room should be excluded from sync
- # responses as per the homeserver config.
- for joined_room in joined_rooms:
- if joined_room.room_id in self.rooms_to_exclude:
- continue
-
- if not joined_room.event_pos.persisted_after(room_key):
- joined_room_ids.add(joined_room.room_id)
- continue
-
- logger.info("User joined room after current token: %s", joined_room.room_id)
-
- extrems = (
- await self.store.get_forward_extremities_for_room_at_stream_ordering(
- joined_room.room_id, joined_room.event_pos.stream
- )
- )
- user_ids_in_room = await self.state.get_current_user_ids_in_room(
- joined_room.room_id, extrems
- )
- if user_id in user_ids_in_room:
- joined_room_ids.add(joined_room.room_id)
-
- return frozenset(joined_room_ids)
-
def _action_has_highlight(actions: List[JsonDict]) -> bool:
for action in actions:
@@ -2530,6 +2539,7 @@ class SyncResultBuilder:
since_token: Optional[StreamToken]
now_token: StreamToken
joined_room_ids: FrozenSet[str]
+ membership_change_events: List[EventBase]
presence: List[UserPresenceState] = attr.Factory(list)
account_data: List[JsonDict] = attr.Factory(list)
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index f953691669..a0ea719430 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -513,7 +513,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]):
self,
user: UserID,
from_key: int,
- limit: Optional[int],
+ limit: int,
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index a744d68c64..332edcca24 100644
--- a/synapse/handlers/ui_auth/checkers.py
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -119,6 +119,9 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
except PartialDownloadError as pde:
# Twisted is silly
data = pde.response
+ # For mypy's benefit. A general Error.response is Optional[bytes], but
+ # a PartialDownloadError.response should be bytes AFAICS.
+ assert data is not None
resp_body = json_decoder.decode(data.decode("utf-8"))
if "success" in resp_body:
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 8c3c52e1ca..3610b6bf78 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -13,7 +13,7 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
import synapse.metrics
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
@@ -379,7 +379,7 @@ class UserDirectoryHandler(StateDeltasHandler):
user_id, event.content.get("displayname"), event.content.get("avatar_url")
)
- async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
+ async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
"""Someone's just joined a room. Update `users_in_public_rooms` or
`users_who_share_private_rooms` as appropriate.
@@ -390,32 +390,44 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id
)
if is_public:
- await self.store.add_users_in_public_rooms(room_id, (user_id,))
+ await self.store.add_users_in_public_rooms(room_id, (joining_user_id,))
else:
users_in_room = await self.store.get_users_in_room(room_id)
other_users_in_room = [
other
for other in users_in_room
- if other != user_id
+ if other != joining_user_id
and (
+ # We can't apply any special rules to remote users, so
+ # they're always included.
not self.is_mine_id(other)
+ # Check the special rules to decide whether the local user
+ # should be included in the user directory.
or await self.store.should_include_local_user_in_dir(other)
)
]
- to_insert = set()
+ updates_to_users_who_share_rooms: Set[Tuple[str, str]] = set()
- # First, if they're our user then we need to update for every user
- if self.is_mine_id(user_id):
+ # First, if the joining user is our local user then we need an
+ # update for every other user in the room.
+ if self.is_mine_id(joining_user_id):
for other_user_id in other_users_in_room:
- to_insert.add((user_id, other_user_id))
+ updates_to_users_who_share_rooms.add(
+ (joining_user_id, other_user_id)
+ )
- # Next we need to update for every local user in the room
+ # Next, for every other local user in the room, we need an update
+ # noting that they now share a room with the joining user.
for other_user_id in other_users_in_room:
if self.is_mine_id(other_user_id):
- to_insert.add((other_user_id, user_id))
+ updates_to_users_who_share_rooms.add(
+ (other_user_id, joining_user_id)
+ )
- if to_insert:
- await self.store.add_users_who_share_private_room(room_id, to_insert)
+ if updates_to_users_who_share_rooms:
+ await self.store.add_users_who_share_private_room(
+ room_id, updates_to_users_who_share_rooms
+ )
async def _handle_remove_user(self, room_id: str, user_id: str) -> None:
"""Called when when someone leaves a room. The user may be local or remote.
diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py
index 6a9f6635d2..8729630581 100644
--- a/synapse/http/additional_resource.py
+++ b/synapse/http/additional_resource.py
@@ -45,8 +45,7 @@ class AdditionalResource(DirectServeJsonResource):
Args:
hs: homeserver
- handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred):
- function to be called to handle the request.
+ handler: function to be called to handle the request.
"""
super().__init__()
self._handler = handler
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 89bd403312..a7f93a2989 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -25,7 +25,6 @@ from typing import (
List,
Mapping,
Optional,
- Sequence,
Tuple,
Union,
)
@@ -96,14 +95,29 @@ incoming_responses_counter = Counter(
"synapse_http_client_responses", "", ["method", "code"]
)
-# the type of the headers list, to be passed to the t.w.h.Headers.
-# Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so
-# we simplify.
+# the type of the headers map, to be passed to the t.w.h.Headers.
+#
+# The actual type accepted by Twisted is
+# Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
+# allowing us to mix and match str and bytes freely. However, any str is also a
+# Sequence[str], so a header value passed as a standalone str would be
+# interpreted as a sequence of 1-codepoint strings. This is a disastrous
+# footgun, so we use a narrower value type (RawHeaderValue) to avoid it.
+#
+# We also simplify the keys to be either all str or all bytes. This helps because
+# Dict[K, V] is invariant in K (and indeed V).
RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]]
# the value actually has to be a List, but List is invariant so we can't specify that
# the entries can either be Lists or bytes.
-RawHeaderValue = Sequence[Union[str, bytes]]
+RawHeaderValue = Union[
+ List[str],
+ List[bytes],
+ List[Union[str, bytes]],
+ Tuple[str, ...],
+ Tuple[bytes, ...],
+ Tuple[Union[str, bytes], ...],
+]
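+# For example, {"User-Agent": "Synapse"} type-checks against the broader
+# Sequence-based type, but Twisted would treat the value as the sequence
+# "S", "y", "n", ... of 1-codepoint header values; restricting values to
+# List/Tuple forces callers to write {"User-Agent": ["Synapse"]} instead.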
def check_against_blacklist(
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 2f0177f1e2..0359231e7d 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -155,11 +155,10 @@ class MatrixFederationAgent:
a file for a file upload). Or None if the request is to have
no body.
Returns:
- Deferred[twisted.web.iweb.IResponse]:
- fires when the header of the response has been received (regardless of the
- response status code). Fails if there is any problem which prevents that
- response from being received (including problems that prevent the request
- from being sent).
+ A deferred which fires when the header of the response has been received
+ (regardless of the response status code). Fails if there is any problem
+ which prevents that response from being received (including problems that
+ prevent the request from being sent).
"""
# We use urlparse as that will set `port` to None if there is no
# explicit port.
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 00704a6a7c..c35a5a12d5 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -957,8 +957,7 @@ class MatrixFederationHttpClient:
args: query params
Returns:
- dict|list: Succeeds when we get a 2xx HTTP response. The
- result will be the decoded JSON body.
+ Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index 1f8227896f..18899bc6d1 100644
--- a/synapse/http/proxyagent.py
+++ b/synapse/http/proxyagent.py
@@ -34,7 +34,7 @@ from twisted.web.client import (
)
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
+from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse
from synapse.http import redact_uri
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
@@ -134,7 +134,7 @@ class ProxyAgent(_AgentBase):
uri: bytes,
headers: Optional[Headers] = None,
bodyProducer: Optional[IBodyProducer] = None,
- ) -> defer.Deferred:
+ ) -> "defer.Deferred[IResponse]":
"""
Issue a request to the server indicated by the given uri.
@@ -157,17 +157,17 @@ class ProxyAgent(_AgentBase):
a file upload). Or, None if the request is to have no body.
Returns:
- Deferred[IResponse]: completes when the header of the response has
- been received (regardless of the response status code).
+ A deferred which completes when the header of the response has
+ been received (regardless of the response status code).
- Can fail with:
- SchemeNotSupported: if the uri is not http or https
+ Can fail with:
+ SchemeNotSupported: if the uri is not http or https
- twisted.internet.error.TimeoutError if the server we are connecting
- to (proxy or destination) does not accept a connection before
- connectTimeout.
+ twisted.internet.error.TimeoutError if the server we are connecting
+ to (proxy or destination) does not accept a connection before
+ connectTimeout.
- ... other things too.
+ ... other things too.
"""
uri = uri.strip()
if not _VALID_URI.match(uri):
diff --git a/synapse/http/server.py b/synapse/http/server.py
index be551483bb..69e7147a2d 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -19,6 +19,7 @@ import logging
import types
import urllib
from http import HTTPStatus
+from http.client import FOUND
from inspect import isawaitable
from typing import (
TYPE_CHECKING,
@@ -266,7 +267,7 @@ class HttpServer(Protocol):
request. The first argument will be the request object and
subsequent arguments will be any matched groups from the regex.
This should return either tuple of (code, response), or None.
- servlet_classname (str): The name of the handler to be used in prometheus
+ servlet_classname: The name of the handler to be used in prometheus
and tracing logs.
"""
@@ -339,7 +340,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
return callback_return
- _unrecognised_request_handler(request)
+ return _unrecognised_request_handler(request)
@abc.abstractmethod
def _send_response(
@@ -598,7 +599,7 @@ class RootRedirect(resource.Resource):
class OptionsResource(resource.Resource):
"""Responds to OPTION requests for itself and all children."""
- def render_OPTIONS(self, request: Request) -> bytes:
+ def render_OPTIONS(self, request: SynapseRequest) -> bytes:
request.setResponseCode(204)
request.setHeader(b"Content-Length", b"0")
@@ -763,7 +764,7 @@ def respond_with_json(
def respond_with_json_bytes(
- request: Request,
+ request: SynapseRequest,
code: int,
json_bytes: bytes,
send_cors: bool = False,
@@ -859,7 +860,7 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
_ByteProducer(request, bytes_generator)
-def set_cors_headers(request: Request) -> None:
+def set_cors_headers(request: SynapseRequest) -> None:
"""Set the CORS headers so that javascript running in a web browsers can
use this API
@@ -870,10 +871,20 @@ def set_cors_headers(request: Request) -> None:
request.setHeader(
b"Access-Control-Allow-Methods", b"GET, HEAD, POST, PUT, DELETE, OPTIONS"
)
- request.setHeader(
- b"Access-Control-Allow-Headers",
- b"X-Requested-With, Content-Type, Authorization, Date",
- )
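+ # MSC3886 (simple client rendezvous) clients need the conditional
+ # request headers (If-Match / If-None-Match) and read the ETag,
+ # Location and X-Max-Bytes response headers.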
+ if request.experimental_cors_msc3886:
+ request.setHeader(
+ b"Access-Control-Allow-Headers",
+ b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match",
+ )
+ request.setHeader(
+ b"Access-Control-Expose-Headers",
+ b"ETag, Location, X-Max-Bytes",
+ )
+ else:
+ request.setHeader(
+ b"Access-Control-Allow-Headers",
+ b"X-Requested-With, Content-Type, Authorization, Date",
+ )
def set_corp_headers(request: Request) -> None:
@@ -942,10 +953,25 @@ def set_clickjacking_protection_headers(request: Request) -> None:
request.setHeader(b"Content-Security-Policy", b"frame-ancestors 'none';")
-def respond_with_redirect(request: Request, url: bytes) -> None:
- """Write a 302 response to the request, if it is still alive."""
+def respond_with_redirect(
+ request: SynapseRequest, url: bytes, statusCode: int = FOUND, cors: bool = False
+) -> None:
+ """
+ Write a 302 (or other specified status code) response to the request, if it is still alive.
+
+ Args:
+ request: The http request to respond to.
+ url: The URL to redirect to.
+ statusCode: The HTTP status code to use for the redirect (defaults to 302).
+ cors: Whether to set CORS headers on the response.
+ """
logger.debug("Redirect to %s", url.decode("utf-8"))
- request.redirect(url)
+
+ if cors:
+ set_cors_headers(request)
+
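+ # `Request.redirect` hard-codes a 302 response, so set the status code
+ # and Location header by hand to support other redirect codes.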
+ request.setResponseCode(statusCode)
+ request.setHeader(b"location", url)
finish_request(request)
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 80acbdcf3c..dead02cd5c 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -35,6 +35,7 @@ from typing_extensions import Literal
from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError
+from synapse.http import redact_uri
from synapse.http.server import HttpServer
from synapse.types import JsonDict, RoomAlias, RoomID
from synapse.util import json_decoder
@@ -664,7 +665,13 @@ def parse_json_value_from_request(
try:
content = json_decoder.decode(content_bytes.decode("utf-8"))
except Exception as e:
- logger.warning("Unable to parse JSON: %s (%s)", e, content_bytes)
+ logger.warning(
+ "Unable to parse JSON from %s %s response: %s (%s)",
+ request.method.decode("ascii", errors="replace"),
+ redact_uri(request.uri.decode("ascii", errors="replace")),
+ e,
+ content_bytes,
+ )
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Content not JSON.", errcode=Codes.NOT_JSON
)
diff --git a/synapse/http/site.py b/synapse/http/site.py
index d9cd0aab83..2de618e46a 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -82,6 +82,7 @@ class SynapseRequest(Request):
self.reactor = site.reactor
self._channel = channel # this is used by the tests
self.start_time = 0.0
+ self.experimental_cors_msc3886 = site.experimental_cors_msc3886
# The requester, if authenticated. For federation requests this is the
# server name, for client requests this is the Requester object.
@@ -407,7 +408,7 @@ class SynapseRequest(Request):
be sure to call finished_processing.
Args:
- servlet_name (str): the name of the servlet which will be
+ servlet_name: the name of the servlet which will be
processing this request. This is used in the metrics.
It is possible to update this afterwards by updating
@@ -630,6 +631,8 @@ class SynapseSite(Site):
request_id_header = config.http_options.request_id_header
+ self.experimental_cors_msc3886 = config.http_options.experimental_cors_msc3886
+
def request_factory(channel: HTTPChannel, queued: bool) -> Request:
return request_class(
channel,
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index a417b13ffd..56243aa5e7 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -116,8 +116,7 @@ class ContextResourceUsage:
"""Create a new ContextResourceUsage
Args:
- copy_from (ContextResourceUsage|None): if not None, an object to
- copy stats from
+ copy_from: if not None, an object to copy stats from
"""
if copy_from is None:
self.reset()
@@ -161,7 +160,7 @@ class ContextResourceUsage:
"""Add another ContextResourceUsage's stats to this one's.
Args:
- other (ContextResourceUsage): the other resource usage object
+ other: the other resource usage object
"""
self.ru_utime += other.ru_utime
self.ru_stime += other.ru_stime
@@ -335,7 +334,7 @@ class LoggingContext:
called directly.
Returns:
- LoggingContext: the current logging context
+ The current logging context
"""
warnings.warn(
"synapse.logging.context.LoggingContext.current_context() is deprecated "
@@ -355,7 +354,8 @@ class LoggingContext:
called directly.
Args:
- context(LoggingContext): The context to activate.
+ context: The context to activate.
+
Returns:
The context that was previously active
"""
@@ -467,8 +467,7 @@ class LoggingContext:
"""Get resources used by this logcontext so far.
Returns:
- ContextResourceUsage: a *copy* of the object tracking resource
- usage so far
+ A *copy* of the object tracking resource usage so far
"""
# we always return a copy, for consistency
res = self._resource_usage.copy()
@@ -579,7 +578,7 @@ class LoggingContextFilter(logging.Filter):
True to include the record in the log output.
"""
context = current_context()
- record.request = self._default_request # type: ignore
+ record.request = self._default_request
# context should never be None, but if it somehow ends up being, then
# we end up in a death spiral of infinite loops, so let's check, for
@@ -587,21 +586,21 @@ class LoggingContextFilter(logging.Filter):
if context is not None:
# Logging is interested in the request ID. Note that for backwards
# compatibility this is stored as the "request" on the record.
- record.request = str(context) # type: ignore
+ record.request = str(context)
# Add some data from the HTTP request.
request = context.request
if request is None:
return True
- record.ip_address = request.ip_address # type: ignore
- record.site_tag = request.site_tag # type: ignore
- record.requester = request.requester # type: ignore
- record.authenticated_entity = request.authenticated_entity # type: ignore
- record.method = request.method # type: ignore
- record.url = request.url # type: ignore
- record.protocol = request.protocol # type: ignore
- record.user_agent = request.user_agent # type: ignore
+ record.ip_address = request.ip_address
+ record.site_tag = request.site_tag
+ record.requester = request.requester
+ record.authenticated_entity = request.authenticated_entity
+ record.method = request.method
+ record.url = request.url
+ record.protocol = request.protocol
+ record.user_agent = request.user_agent
return True
@@ -656,7 +655,8 @@ def current_context() -> LoggingContextOrSentinel:
def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
"""Set the current logging context in thread local storage
Args:
- context(LoggingContext): The context to activate.
+ context: The context to activate.
+
Returns:
The context that was previously active
"""
@@ -693,7 +693,7 @@ def nested_logging_context(suffix: str) -> LoggingContext:
suffix: suffix to add to the parent context's 'name'.
Returns:
- LoggingContext: new logging context.
+ A new logging context.
"""
curr_context = current_context()
if not curr_context:
@@ -891,20 +891,19 @@ def defer_to_thread(
on it.
Args:
- reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
- the Deferred will be invoked, and whose threadpool we should use for the
- function.
+ reactor: The reactor in whose main thread the Deferred will be invoked,
+ and whose threadpool we should use for the function.
Normally this will be hs.get_reactor().
- f (callable): The function to call.
+ f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
- Deferred: A Deferred which fires a callback with the result of `f`, or an
+ A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
@@ -932,20 +931,20 @@ def defer_to_threadpool(
on it.
Args:
- reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
- the Deferred will be invoked. Normally this will be hs.get_reactor().
+ reactor: The reactor in whose main thread the Deferred will be invoked.
+ Normally this will be hs.get_reactor().
- threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
- running `f`. Normally this will be hs.get_reactor().getThreadPool().
+ threadpool: The threadpool to use for running `f`. Normally this will be
+ hs.get_reactor().getThreadPool().
- f (callable): The function to call.
+ f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
- Deferred: A Deferred which fires a callback with the result of `f`, or an
+ A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
curr_context = current_context()
diff --git a/synapse/logging/tracing.py b/synapse/logging/tracing.py
index 7e767038df..c92769e1a0 100644
--- a/synapse/logging/tracing.py
+++ b/synapse/logging/tracing.py
@@ -209,7 +209,10 @@ class _DummyLookup(object):
def __init__(self, value: T) -> None:
self.value = value
- def __getattribute__(self, name: str) -> T:
+ # type-ignore: Because mypy says "A function returning TypeVar should receive at
+ # least one argument containing the same TypeVar" but this is just a dummy
+ # stand-in that doesn't need any input.
+ def __getattribute__(self, name: str) -> T: # type: ignore[type-var]
return object.__getattribute__(self, "value")
@@ -949,9 +952,9 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]:
# FIXME: We could update this to handle any type of function by ignoring the
# first argument only if it's named `self` or `cls`. This isn't fool-proof
# but handles the idiomatic cases.
- for i, arg in enumerate(args[1:], start=1): # type: ignore[index]
+ for i, arg in enumerate(args[1:], start=1):
set_attribute(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg))
- set_attribute(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :])) # type: ignore[index]
+ set_attribute(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :]))
set_attribute(SynapseTags.FUNC_KWARGS, str(kwargs))
yield
diff --git a/synapse/metrics/_legacy_exposition.py b/synapse/metrics/_legacy_exposition.py
index 563d8cc2c6..1459f9d224 100644
--- a/synapse/metrics/_legacy_exposition.py
+++ b/synapse/metrics/_legacy_exposition.py
@@ -20,7 +20,7 @@ Due to the renaming of metrics in prometheus_client 0.4.0, this customised
vendoring of the code will emit both the old versions that Synapse dashboards
expect, and the newer "best practice" version of the up-to-date official client.
"""
-
+import logging
import math
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
@@ -34,6 +34,7 @@ from prometheus_client.core import Sample
from twisted.web.resource import Resource
from twisted.web.server import Request
+logger = logging.getLogger(__name__)
CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
@@ -219,11 +220,16 @@ class MetricsHandler(BaseHTTPRequestHandler):
except Exception:
self.send_error(500, "error generating metric output")
raise
- self.send_response(200)
- self.send_header("Content-Type", CONTENT_TYPE_LATEST)
- self.send_header("Content-Length", str(len(output)))
- self.end_headers()
- self.wfile.write(output)
+ try:
+ self.send_response(200)
+ self.send_header("Content-Type", CONTENT_TYPE_LATEST)
+ self.send_header("Content-Length", str(len(output)))
+ self.end_headers()
+ self.wfile.write(output)
+ except BrokenPipeError as e:
+ logger.warning(
+ "BrokenPipeError when serving metrics (%s). Did Prometheus restart?", e
+ )
def log_message(self, format: str, *args: Any) -> None:
"""Log nothing."""
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index ad639fe223..1de06c3850 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -174,8 +174,10 @@ class _BackgroundProcess:
diff = new_stats - self._reported_stats
self._reported_stats = new_stats
- _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
- _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
+ # For unknown reasons, the difference in times can be negative. See comment in
+ # synapse.http.request_metrics.RequestMetrics.update_metrics.
+ _background_process_ru_utime.labels(self.desc).inc(max(diff.ru_utime, 0))
+ _background_process_ru_stime.labels(self.desc).inc(max(diff.ru_stime, 0))
_background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)
_background_process_db_txn_duration.labels(self.desc).inc(
diff.db_txn_duration_sec
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 59755bff6d..1adc1fd64f 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -748,16 +748,16 @@ class ModuleApi:
)
)
- def generate_short_term_login_token(
+ async def create_login_token(
self,
user_id: str,
duration_in_ms: int = (2 * 60 * 1000),
- auth_provider_id: str = "",
+ auth_provider_id: Optional[str] = None,
auth_provider_session_id: Optional[str] = None,
) -> str:
- """Generate a login token suitable for m.login.token authentication
+ """Create a login token suitable for m.login.token authentication
- Added in Synapse v1.9.0.
+ Added in Synapse v1.69.0.
Args:
user_id: gives the ID of the user that the token is for
@@ -765,14 +765,17 @@ class ModuleApi:
duration_in_ms: the time that the token will be valid for
auth_provider_id: the ID of the SSO IdP that the user used to authenticate
- to get this token, if any. This is encoded in the token so that
- /login can report stats on number of successful logins by IdP.
+ to get this token, if any. This is encoded in the token so that
+ /login can report stats on number of successful logins by IdP.
+
+ auth_provider_session_id: The session ID obtained during login from the SSO IdP,
+ if any.
"""
- return self._hs.get_macaroon_generator().generate_short_term_login_token(
+ return await self._hs.get_auth_handler().create_login_token_for_user_id(
user_id,
+ duration_in_ms,
auth_provider_id,
auth_provider_session_id,
- duration_in_ms,
)
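
Since the method is now a coroutine, module code must await it. A hedged usage sketch, where module_api is the ModuleApi instance handed to a module at start-up:

async def issue_login_token(module_api, user_id: str) -> str:
    # Two-minute token, matching the default duration above.
    return await module_api.create_login_token(user_id, duration_in_ms=2 * 60 * 1000)

Modules that called the old synchronous generate_short_term_login_token will need to switch to awaiting this replacement.
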
@defer.inlineCallbacks
@@ -784,7 +787,7 @@ class ModuleApi:
Added in Synapse v0.25.0.
Args:
- access_token(str): access token
+ access_token: access token
Returns:
twisted.internet.defer.Deferred - resolves once the access token
@@ -829,7 +832,7 @@ class ModuleApi:
**kwargs: named args to be passed to func
Returns:
- Deferred[object]: result of func
+ Result of func
"""
# type-ignore: See https://github.com/python/mypy/issues/8862
return defer.ensureDeferred(
@@ -842,6 +845,8 @@ class ModuleApi:
however invalidation that needs to go to other workers needs to call `invalidate_cache`
on the module API instead.
+ Added in Synapse v1.69.0.
+
Args:
cached_function: The cached function that will be registered to receive invalidation
locally and from other workers.
@@ -856,6 +861,8 @@ class ModuleApi:
"""Invalidate a cache entry of a cached function across workers. The cached function
needs to be registered on all workers first with `register_cached_function`.
+ Added in Synapse v1.69.0.
+
Args:
cached_function: The cached function that needs an invalidation
keys: keys of the entry to invalidate, usually matching the arguments of the
@@ -917,8 +924,7 @@ class ModuleApi:
to represent 'any') of the room state to acquire.
Returns:
- twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
- The filtered state events in the room.
+ The filtered state events in the room.
"""
state_ids = yield defer.ensureDeferred(
self._storage_controllers.state.get_current_state_ids(
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 8fd8cb8100..fe7d154659 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -294,35 +294,31 @@ class Notifier:
"""
self._new_join_in_room_callbacks.append(cb)
- async def on_new_room_event(
+ async def on_new_room_events(
self,
- event: EventBase,
- event_pos: PersistedEventPosition,
+ events_and_pos: List[Tuple[EventBase, PersistedEventPosition]],
max_room_stream_token: RoomStreamToken,
extra_users: Optional[Collection[UserID]] = None,
) -> None:
- """Unwraps event and calls `on_new_room_event_args`."""
- await self.on_new_room_event_args(
- event_pos=event_pos,
- room_id=event.room_id,
- event_id=event.event_id,
- event_type=event.type,
- state_key=event.get("state_key"),
- membership=event.content.get("membership"),
- max_room_stream_token=max_room_stream_token,
- extra_users=extra_users or [],
- )
+ """Creates a _PendingRoomEventEntry for each of the listed events and calls
+ notify_new_room_events with the results."""
+ event_entries = []
+ for event, pos in events_and_pos:
+ entry = self.create_pending_room_event_entry(
+ pos,
+ extra_users,
+ event.room_id,
+ event.type,
+ event.get("state_key"),
+ event.content.get("membership"),
+ )
+ event_entries.append((entry, event.event_id))
+ await self.notify_new_room_events(event_entries, max_room_stream_token)
- async def on_new_room_event_args(
+ async def notify_new_room_events(
self,
- room_id: str,
- event_id: str,
- event_type: str,
- state_key: Optional[str],
- membership: Optional[str],
- event_pos: PersistedEventPosition,
+ event_entries: List[Tuple[_PendingRoomEventEntry, str]],
max_room_stream_token: RoomStreamToken,
- extra_users: Optional[Collection[UserID]] = None,
) -> None:
"""Used by handlers to inform the notifier something has happened
in the room, room event wise.
@@ -338,22 +334,33 @@ class Notifier:
until all previous events have been persisted before notifying
the client streams.
"""
- self.pending_new_room_events.append(
- _PendingRoomEventEntry(
- event_pos=event_pos,
- extra_users=extra_users or [],
- room_id=room_id,
- type=event_type,
- state_key=state_key,
- membership=membership,
- )
- )
+ for event_entry, event_id in event_entries:
+ self.pending_new_room_events.append(event_entry)
+ await self._third_party_rules.on_new_event(event_id)
+
self._notify_pending_new_room_events(max_room_stream_token)
- await self._third_party_rules.on_new_event(event_id)
-
self.notify_replication()
+ def create_pending_room_event_entry(
+ self,
+ event_pos: PersistedEventPosition,
+ extra_users: Optional[Collection[UserID]],
+ room_id: str,
+ event_type: str,
+ state_key: Optional[str],
+ membership: Optional[str],
+ ) -> _PendingRoomEventEntry:
+ """Creates and returns a _PendingRoomEventEntry"""
+ return _PendingRoomEventEntry(
+ event_pos=event_pos,
+ extra_users=extra_users or [],
+ room_id=room_id,
+ type=event_type,
+ state_key=state_key,
+ membership=membership,
+ )
+
def _notify_pending_new_room_events(
self, max_room_stream_token: RoomStreamToken
) -> None:
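
A sketch of the new batched entry point, assuming events_and_pos is the list of (event, position) pairs produced when a batch of events is persisted:

async def notify_batch(notifier, events_and_pos, max_room_stream_token) -> None:
    # One call for the whole batch, replacing a per-event
    # on_new_room_event() call.
    await notifier.on_new_room_events(
        events_and_pos, max_room_stream_token, extra_users=[]
    )
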
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 404379ef67..75b7e126ca 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -13,43 +13,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import itertools
import logging
from typing import (
TYPE_CHECKING,
+ Any,
Collection,
Dict,
- Iterable,
List,
Mapping,
Optional,
- Set,
Tuple,
Union,
)
from prometheus_client import Counter
-from synapse.api.constants import EventTypes, Membership, RelationTypes
+from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes
from synapse.event_auth import auth_types_for_event, get_user_power_level
from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
from synapse.state import POWER_KEY
from synapse.storage.databases.main.roommember import EventIdMembership
from synapse.storage.state import StateFilter
-from synapse.synapse_rust.push import FilteredPushRules, PushRule
+from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator
from synapse.util.caches import register_cache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_event_for_clients_with_state
-from .push_rule_evaluator import PushRuleEvaluatorForEvent
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-
push_rules_invalidation_counter = Counter(
"synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", ""
)
@@ -111,6 +106,8 @@ class BulkPushRuleEvaluator:
self.clock = hs.get_clock()
self._event_auth_handler = hs.get_event_auth_handler()
+ self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled
+
self.room_push_rule_cache_metrics = register_cache(
"cache",
"room_push_rule_cache",
@@ -118,9 +115,6 @@ class BulkPushRuleEvaluator:
resizable=False,
)
- # Whether to support MSC3772 is supported.
- self._relations_match_enabled = self.hs.config.experimental.msc3772_enabled
-
async def _get_rules_for_event(
self,
event: EventBase,
@@ -172,23 +166,51 @@ class BulkPushRuleEvaluator:
return rules_by_user
async def _get_power_levels_and_sender_level(
- self, event: EventBase, context: EventContext
- ) -> Tuple[dict, int]:
+ self,
+ event: EventBase,
+ context: EventContext,
+ event_id_to_event: Mapping[str, EventBase],
+ ) -> Tuple[dict, Optional[int]]:
+ """
+ Given an event and an event context, get the power level event relevant to the event
+ and the power level of the sender of the event.
+ Args:
+ event: event to check
+ context: context of event to check
+ event_id_to_event: a mapping of event_id to event for a set of events being
+ batch persisted. This is needed as the sought-after power level event may
+ be in this batch rather than the DB
+ """
+ # It is not possible to get the power levels or sender level from an outlier.
+ if event.internal_metadata.is_outlier():
+ return {}, None
+
event_types = auth_types_for_event(event.room_version, event)
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types(event_types)
)
pl_event_id = prev_state_ids.get(POWER_KEY)
+ # fastpath: if there's a power level event, that's all we need, and
+ # not having a power level event is an extreme edge case
if pl_event_id:
- # fastpath: if there's a power level event, that's all we need, and
- # not having a power level event is an extreme edge case
- auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
+ # Get the power level event from the batch, or fall back to the database.
+ pl_event = event_id_to_event.get(pl_event_id)
+ if pl_event:
+ auth_events = {POWER_KEY: pl_event}
+ else:
+ auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
else:
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=False
)
auth_events_dict = await self.store.get_events(auth_events_ids)
+ # Some needed auth events might be in the batch, combine them with those
+ # fetched from the database.
+ for auth_event_id in auth_events_ids:
+ auth_event = event_id_to_event.get(auth_event_id)
+ if auth_event:
+ auth_events_dict[auth_event_id] = auth_event
auth_events = {(e.type, e.state_key): e for e in auth_events_dict.values()}
sender_level = get_user_power_level(event.sender, auth_events)
@@ -197,61 +219,80 @@ class BulkPushRuleEvaluator:
return pl_event.content if pl_event else {}, sender_level
- async def _get_mutual_relations(
- self, parent_id: str, rules: Iterable[Tuple[PushRule, bool]]
- ) -> Dict[str, Set[Tuple[str, str]]]:
- """
- Fetch event metadata for events which related to the same event as the given event.
-
- If the given event has no relation information, returns an empty dictionary.
-
- Args:
- parent_id: The event ID which is targeted by relations.
- rules: The push rules which will be processed for this event.
+ async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]:
+ """Fetches the related events for 'event'. Sets the im.vector.is_falling_back key if the event is from a fallback relation
Returns:
- A dictionary of relation type to:
- A set of tuples of:
- The sender
- The event type
+ Mapping of relation type to flattened events.
"""
+ related_events: Dict[str, Dict[str, str]] = {}
+ if self._related_event_match_enabled:
+ related_event_id = event.content.get("m.relates_to", {}).get("event_id")
+ relation_type = event.content.get("m.relates_to", {}).get("rel_type")
+ if related_event_id is not None and relation_type is not None:
+ related_event = await self.store.get_event(
+ related_event_id, allow_none=True
+ )
+ if related_event is not None:
+ related_events[relation_type] = _flatten_dict(related_event)
- # If the experimental feature is not enabled, skip fetching relations.
- if not self._relations_match_enabled:
- return {}
+ reply_event_id = (
+ event.content.get("m.relates_to", {})
+ .get("m.in_reply_to", {})
+ .get("event_id")
+ )
- # Pre-filter to figure out which relation types are interesting.
- rel_types = set()
- for rule, enabled in rules:
- if not enabled:
- continue
+ # convert replies to pseudo relations
+ if reply_event_id is not None:
+ related_event = await self.store.get_event(
+ reply_event_id, allow_none=True
+ )
- for condition in rule.conditions:
- if condition["kind"] != "org.matrix.msc3772.relation_match":
- continue
+ if related_event is not None:
+ related_events["m.in_reply_to"] = _flatten_dict(related_event)
- # rel_type is required.
- rel_type = condition.get("rel_type")
- if rel_type:
- rel_types.add(rel_type)
+ # indicate that this is from a fallback relation.
+ if relation_type == "m.thread" and event.content.get(
+ "m.relates_to", {}
+ ).get("is_falling_back", False):
+ related_events["m.in_reply_to"][
+ "im.vector.is_falling_back"
+ ] = ""
- # If no valid rules were found, no mutual relations.
- if not rel_types:
- return {}
+ return related_events
- # If any valid rules were found, fetch the mutual relations.
- return await self.store.get_mutual_event_relations(parent_id, rel_types)
+ async def action_for_events_by_user(
+ self, events_and_context: List[Tuple[EventBase, EventContext]]
+ ) -> None:
+ """Given a list of events and their associated contexts, evaluate the push rules
+ for each event, check if the message should increment the unread count, and
+ insert the results into the event_push_actions_staging table.
+ """
+ # For batched events the power level events may not have been persisted yet,
+ # so we pass in the batched events. Thus if the event cannot be found in the
+ # database we can check in the batch.
+ event_id_to_event = {e.event_id: e for e, _ in events_and_context}
+ for event, context in events_and_context:
+ await self._action_for_event_by_user(event, context, event_id_to_event)
@measure_func("action_for_event_by_user")
- async def action_for_event_by_user(
- self, event: EventBase, context: EventContext
+ async def _action_for_event_by_user(
+ self,
+ event: EventBase,
+ context: EventContext,
+ event_id_to_event: Mapping[str, EventBase],
) -> None:
- """Given an event and context, evaluate the push rules, check if the message
- should increment the unread count, and insert the results into the
- event_push_actions_staging table.
- """
- if event.internal_metadata.is_outlier():
- # This can happen due to out of band memberships
+
+ if (
+ not event.internal_metadata.is_notifiable()
+ or event.internal_metadata.is_historical()
+ ):
+ # Push rules cannot be processed for events that aren't notifiable, and we
+ # want to skip push notification actions for historical messages because we
+ # don't want to notify people about old history back in time. Historical
+ # messages also do not have the proper `context.current_state_ids` and
+ # `state_groups` because they have `prev_events` that aren't persisted yet
+ # (historical messages are persisted in reverse-chronological order).
return
# Disable counting as unread unless the experimental configuration is
@@ -271,28 +312,39 @@ class BulkPushRuleEvaluator:
(
power_levels,
sender_power_level,
- ) = await self._get_power_levels_and_sender_level(event, context)
+ ) = await self._get_power_levels_and_sender_level(
+ event, context, event_id_to_event
+ )
+ # Find the event's thread ID.
relation = relation_from_event(event)
- # If the event does not have a relation, then cannot have any mutual
- # relations or thread ID.
- relations = {}
- thread_id = "main"
+ # If the event does not have a relation, then it cannot have a thread ID.
+ thread_id = MAIN_TIMELINE
if relation:
- relations = await self._get_mutual_relations(
- relation.parent_id,
- itertools.chain(*(r.rules() for r in rules_by_user.values())),
- )
+ # Recursively attempt to find the thread this event relates to.
if relation.rel_type == RelationTypes.THREAD:
thread_id = relation.parent_id
+ else:
+ # Since the event has not yet been persisted we check whether
+ # the parent is part of a thread.
+ thread_id = await self.store.get_thread_id(relation.parent_id)
- evaluator = PushRuleEvaluatorForEvent(
- event,
+ related_events = await self._related_events(event)
+
+ # It's possible that old room versions have non-integer power levels (floats or
+ # strings). Work around this by explicitly converting to int.
+ notification_levels = power_levels.get("notifications", {})
+ if not event.room_version.msc3667_int_only_power_levels:
+ for user_id, level in notification_levels.items():
+ notification_levels[user_id] = int(level)
+
+ evaluator = PushRuleEvaluator(
+ _flatten_dict(event),
room_member_count,
sender_power_level,
- power_levels,
- relations,
- self._relations_match_enabled,
+ notification_levels,
+ related_events,
+ self._related_event_match_enabled,
)
users = rules_by_user.keys()
@@ -300,20 +352,10 @@ class BulkPushRuleEvaluator:
event.room_id, users
)
- # This is a check for the case where user joins a room without being
- # allowed to see history, and then the server receives a delayed event
- # from before the user joined, which they should not be pushed for
- uids_with_visibility = await filter_event_for_clients_with_state(
- self.store, users, event, context
- )
-
for uid, rules in rules_by_user.items():
if event.sender == uid:
continue
- if uid not in uids_with_visibility:
- continue
-
display_name = None
profile = profiles.get(uid)
if profile:
@@ -334,17 +376,30 @@ class BulkPushRuleEvaluator:
# current user, it'll be added to the dict later.
actions_by_user[uid] = []
- for rule, enabled in rules.rules():
- if not enabled:
- continue
+ actions = evaluator.run(rules, uid, display_name)
+ if "notify" in actions:
+ # Push rules say we should notify the user of this event
+ actions_by_user[uid] = actions
- matches = evaluator.check_conditions(rule.conditions, uid, display_name)
- if matches:
- actions = [x for x in rule.actions if x != "dont_notify"]
- if actions and "notify" in actions:
- # Push rules say we should notify the user of this event
- actions_by_user[uid] = actions
- break
+ # If there aren't any actions then we can skip the rest of the
+ # processing.
+ if not actions_by_user:
+ return
+
+ # This is a check for the case where user joins a room without being
+ # allowed to see history, and then the server receives a delayed event
+ # from before the user joined, which they should not be pushed for
+ #
+ # We do this *after* calculating the push actions as a) it's unlikely
+ # that we'll filter anyone out and b) for large rooms it's likely that
+ # most users will have push disabled and so the set of users to check is
+ # much smaller.
+ uids_with_visibility = await filter_event_for_clients_with_state(
+ self.store, actions_by_user.keys(), event, context
+ )
+
+ for user_id in set(actions_by_user).difference(uids_with_visibility):
+ actions_by_user.pop(user_id, None)
# Mark in the DB staging area the push actions for users who should be
# notified for this event. (This will then get handled when we persist
@@ -361,3 +416,21 @@ MemberMap = Dict[str, Optional[EventIdMembership]]
Rule = Dict[str, dict]
RulesByUser = Dict[str, List[Rule]]
StateGroup = Union[object, int]
+
+
+def _flatten_dict(
+ d: Union[EventBase, Mapping[str, Any]],
+ prefix: Optional[List[str]] = None,
+ result: Optional[Dict[str, str]] = None,
+) -> Dict[str, str]:
+ if prefix is None:
+ prefix = []
+ if result is None:
+ result = {}
+ for key, value in d.items():
+ if isinstance(value, str):
+ result[".".join(prefix + [key])] = value.lower()
+ elif isinstance(value, Mapping):
+ _flatten_dict(value, prefix=(prefix + [key]), result=result)
+
+ return result
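
For illustration, the flattening above turns nested event content into dotted keys with lower-cased string values:

flattened = _flatten_dict(
    {"type": "m.room.message", "content": {"msgtype": "m.text", "body": "Hello!"}}
)
assert flattened == {
    "type": "m.room.message",
    "content.msgtype": "m.text",
    "content.body": "hello!",
}
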
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index ebc13beda1..622a1e35c5 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -44,6 +44,12 @@ def format_push_rules_for_user(
rulearray.append(template_rule)
+ pattern_type = template_rule.pop("pattern_type", None)
+ if pattern_type == "user_id":
+ template_rule["pattern"] = user.to_string()
+ elif pattern_type == "user_localpart":
+ template_rule["pattern"] = user.localpart
+
template_rule["enabled"] = enabled
if "conditions" not in template_rule:
@@ -93,19 +99,21 @@ def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]:
if len(rule.conditions) != 1:
return None
thecond = rule.conditions[0]
- if "pattern" not in thecond:
- return None
+
templaterule = {"actions": rule.actions}
- templaterule["pattern"] = thecond["pattern"]
+ if "pattern" in thecond:
+ templaterule["pattern"] = thecond["pattern"]
+ elif "pattern_type" in thecond:
+ templaterule["pattern_type"] = thecond["pattern_type"]
+ else:
+ return None
else:
# This should not be reached unless this function is not kept in sync
# with PRIORITY_CLASS_INVERSE_MAP.
raise ValueError("Unexpected template_name: %s" % (template_name,))
- if unscoped_rule_id:
- templaterule["rule_id"] = unscoped_rule_id
- if rule.default:
- templaterule["default"] = True
+ templaterule["rule_id"] = unscoped_rule_id
+ templaterule["default"] = rule.default
return templaterule
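
A worked example of the pattern_type substitution performed in format_push_rules_for_user above, assuming the user is @alice:example.com:

template_rule = {"actions": ["notify"], "pattern_type": "user_localpart"}
pattern_type = template_rule.pop("pattern_type", None)
if pattern_type == "user_id":
    template_rule["pattern"] = "@alice:example.com"  # user.to_string()
elif pattern_type == "user_localpart":
    template_rule["pattern"] = "alice"               # user.localpart
assert template_rule == {"actions": ["notify"], "pattern": "alice"}
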
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 11299367d2..3149492ede 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -14,7 +14,7 @@
# limitations under the License.
import logging
import urllib.parse
-from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
from prometheus_client import Counter
@@ -28,7 +28,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import Pusher, PusherConfig, PusherConfigException
from synapse.storage.databases.main.event_push_actions import HttpPushAction
-from . import push_rule_evaluator, push_tools
+from . import push_tools
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -56,6 +56,39 @@ http_badges_failed_counter = Counter(
)
+def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]:
+ """
+ Converts a list of actions into a `tweaks` dict (which can then be passed to
+ the push gateway).
+
+ This function ignores all actions other than `set_tweak` actions, and treats
+ absent `value`s as `True`, which agrees with the only spec-defined treatment
+ of absent `value`s (namely, for `highlight` tweaks).
+
+ Args:
+ actions: list of actions
+ e.g. [
+ {"set_tweak": "a", "value": "AAA"},
+ {"set_tweak": "b", "value": "BBB"},
+ {"set_tweak": "highlight"},
+ "notify"
+ ]
+
+ Returns:
+ dictionary of tweaks for those actions
+ e.g. {"a": "AAA", "b": "BBB", "highlight": True}
+ """
+ tweaks = {}
+ for a in actions:
+ if not isinstance(a, dict):
+ continue
+ if "set_tweak" in a:
+ # value is allowed to be absent, in which case it is assumed
+ # to be True.
+ tweaks[a["set_tweak"]] = a.get("value", True)
+ return tweaks
+
+
class HttpPusher(Pusher):
INITIAL_BACKOFF_SEC = 1 # in seconds because that's what Twisted takes
MAX_BACKOFF_SEC = 60 * 60
@@ -281,7 +314,7 @@ class HttpPusher(Pusher):
if "notify" not in push_action.actions:
return True
- tweaks = push_rule_evaluator.tweaks_for_actions(push_action.actions)
+ tweaks = tweaks_for_actions(push_action.actions)
badge = await push_tools.get_badge_count(
self.hs.get_datastores().main,
self.user_id,
diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
deleted file mode 100644
index 3c5632cd91..0000000000
--- a/synapse/push/push_rule_evaluator.py
+++ /dev/null
@@ -1,361 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import re
-from typing import (
- Any,
- Dict,
- List,
- Mapping,
- Optional,
- Pattern,
- Sequence,
- Set,
- Tuple,
- Union,
-)
-
-from matrix_common.regex import glob_to_regex, to_word_pattern
-
-from synapse.events import EventBase
-from synapse.types import UserID
-from synapse.util.caches.lrucache import LruCache
-
-logger = logging.getLogger(__name__)
-
-
-GLOB_REGEX = re.compile(r"\\\[(\\\!|)(.*)\\\]")
-IS_GLOB = re.compile(r"[\?\*\[\]]")
-INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")
-
-
-def _room_member_count(
- ev: EventBase, condition: Mapping[str, Any], room_member_count: int
-) -> bool:
- return _test_ineq_condition(condition, room_member_count)
-
-
-def _sender_notification_permission(
- ev: EventBase,
- condition: Mapping[str, Any],
- sender_power_level: int,
- power_levels: Dict[str, Union[int, Dict[str, int]]],
-) -> bool:
- notif_level_key = condition.get("key")
- if notif_level_key is None:
- return False
-
- notif_levels = power_levels.get("notifications", {})
- assert isinstance(notif_levels, dict)
- room_notif_level = notif_levels.get(notif_level_key, 50)
-
- return sender_power_level >= room_notif_level
-
-
-def _test_ineq_condition(condition: Mapping[str, Any], number: int) -> bool:
- if "is" not in condition:
- return False
- m = INEQUALITY_EXPR.match(condition["is"])
- if not m:
- return False
- ineq = m.group(1)
- rhs = m.group(2)
- if not rhs.isdigit():
- return False
- rhs_int = int(rhs)
-
- if ineq == "" or ineq == "==":
- return number == rhs_int
- elif ineq == "<":
- return number < rhs_int
- elif ineq == ">":
- return number > rhs_int
- elif ineq == ">=":
- return number >= rhs_int
- elif ineq == "<=":
- return number <= rhs_int
- else:
- return False
-
-
-def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]:
- """
- Converts a list of actions into a `tweaks` dict (which can then be passed to
- the push gateway).
-
- This function ignores all actions other than `set_tweak` actions, and treats
- absent `value`s as `True`, which agrees with the only spec-defined treatment
- of absent `value`s (namely, for `highlight` tweaks).
-
- Args:
- actions: list of actions
- e.g. [
- {"set_tweak": "a", "value": "AAA"},
- {"set_tweak": "b", "value": "BBB"},
- {"set_tweak": "highlight"},
- "notify"
- ]
-
- Returns:
- dictionary of tweaks for those actions
- e.g. {"a": "AAA", "b": "BBB", "highlight": True}
- """
- tweaks = {}
- for a in actions:
- if not isinstance(a, dict):
- continue
- if "set_tweak" in a:
- # value is allowed to be absent in which case the value assumed
- # should be True.
- tweaks[a["set_tweak"]] = a.get("value", True)
- return tweaks
-
-
-class PushRuleEvaluatorForEvent:
- def __init__(
- self,
- event: EventBase,
- room_member_count: int,
- sender_power_level: int,
- power_levels: Dict[str, Union[int, Dict[str, int]]],
- relations: Dict[str, Set[Tuple[str, str]]],
- relations_match_enabled: bool,
- ):
- self._event = event
- self._room_member_count = room_member_count
- self._sender_power_level = sender_power_level
- self._power_levels = power_levels
- self._relations = relations
- self._relations_match_enabled = relations_match_enabled
-
- # Maps strings of e.g. 'content.body' -> event["content"]["body"]
- self._value_cache = _flatten_dict(event)
-
- # Maps cache keys to final values.
- self._condition_cache: Dict[str, bool] = {}
-
- def check_conditions(
- self, conditions: Sequence[Mapping], uid: str, display_name: Optional[str]
- ) -> bool:
- """
- Returns true if a user's conditions/user ID/display name match the event.
-
- Args:
- conditions: The user's conditions to match.
- uid: The user's MXID.
- display_name: The display name.
-
- Returns:
- True if all conditions match the event, False otherwise.
- """
- for cond in conditions:
- _cache_key = cond.get("_cache_key", None)
- if _cache_key:
- res = self._condition_cache.get(_cache_key, None)
- if res is False:
- return False
- elif res is True:
- continue
-
- res = self.matches(cond, uid, display_name)
- if _cache_key:
- self._condition_cache[_cache_key] = bool(res)
-
- if not res:
- return False
-
- return True
-
- def matches(
- self, condition: Mapping[str, Any], user_id: str, display_name: Optional[str]
- ) -> bool:
- """
- Returns true if a user's condition/user ID/display name match the event.
-
- Args:
- condition: The user's condition to match.
- uid: The user's MXID.
- display_name: The display name, or None if there is not one.
-
- Returns:
- True if the condition matches the event, False otherwise.
- """
- if condition["kind"] == "event_match":
- return self._event_match(condition, user_id)
- elif condition["kind"] == "contains_display_name":
- return self._contains_display_name(display_name)
- elif condition["kind"] == "room_member_count":
- return _room_member_count(self._event, condition, self._room_member_count)
- elif condition["kind"] == "sender_notification_permission":
- return _sender_notification_permission(
- self._event, condition, self._sender_power_level, self._power_levels
- )
- elif (
- condition["kind"] == "org.matrix.msc3772.relation_match"
- and self._relations_match_enabled
- ):
- return self._relation_match(condition, user_id)
- else:
- # XXX This looks incorrect -- we have reached an unknown condition
- # kind and are unconditionally returning that it matches. Note
- # that it seems possible to provide a condition to the /pushrules
- # endpoint with an unknown kind, see _rule_tuple_from_request_object.
- return True
-
- def _event_match(self, condition: Mapping, user_id: str) -> bool:
- """
- Check an "event_match" push rule condition.
-
- Args:
- condition: The "event_match" push rule condition to match.
- user_id: The user's MXID.
-
- Returns:
- True if the condition matches the event, False otherwise.
- """
- pattern = condition.get("pattern", None)
-
- if not pattern:
- pattern_type = condition.get("pattern_type", None)
- if pattern_type == "user_id":
- pattern = user_id
- elif pattern_type == "user_localpart":
- pattern = UserID.from_string(user_id).localpart
-
- if not pattern:
- logger.warning("event_match condition with no pattern")
- return False
-
- # XXX: optimisation: cache our pattern regexps
- if condition["key"] == "content.body":
- body = self._event.content.get("body", None)
- if not body or not isinstance(body, str):
- return False
-
- return _glob_matches(pattern, body, word_boundary=True)
- else:
- haystack = self._value_cache.get(condition["key"], None)
- if haystack is None:
- return False
-
- return _glob_matches(pattern, haystack)
-
- def _contains_display_name(self, display_name: Optional[str]) -> bool:
- """
- Check an "event_match" push rule condition.
-
- Args:
- display_name: The display name, or None if there is not one.
-
- Returns:
- True if the display name is found in the event body, False otherwise.
- """
- if not display_name:
- return False
-
- body = self._event.content.get("body", None)
- if not body or not isinstance(body, str):
- return False
-
- # Similar to _glob_matches, but do not treat display_name as a glob.
- r = regex_cache.get((display_name, False, True), None)
- if not r:
- r1 = re.escape(display_name)
- r1 = to_word_pattern(r1)
- r = re.compile(r1, flags=re.IGNORECASE)
- regex_cache[(display_name, False, True)] = r
-
- return bool(r.search(body))
-
- def _relation_match(self, condition: Mapping, user_id: str) -> bool:
- """
- Check an "relation_match" push rule condition.
-
- Args:
- condition: The "event_match" push rule condition to match.
- user_id: The user's MXID.
-
- Returns:
- True if the condition matches the event, False otherwise.
- """
- rel_type = condition.get("rel_type")
- if not rel_type:
- logger.warning("relation_match condition missing rel_type")
- return False
-
- sender_pattern = condition.get("sender")
- if sender_pattern is None:
- sender_type = condition.get("sender_type")
- if sender_type == "user_id":
- sender_pattern = user_id
- type_pattern = condition.get("type")
-
- # If any other relations matches, return True.
- for sender, event_type in self._relations.get(rel_type, ()):
- if sender_pattern and not _glob_matches(sender_pattern, sender):
- continue
- if type_pattern and not _glob_matches(type_pattern, event_type):
- continue
- # All values must have matched.
- return True
-
- # No relations matched.
- return False
-
-
-# Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches
-regex_cache: LruCache[Tuple[str, bool, bool], Pattern] = LruCache(
- 50000, "regex_push_cache"
-)
-
-
-def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool:
- """Tests if value matches glob.
-
- Args:
- glob
- value: String to test against glob.
- word_boundary: Whether to match against word boundaries or entire
- string. Defaults to False.
- """
-
- try:
- r = regex_cache.get((glob, True, word_boundary), None)
- if not r:
- r = glob_to_regex(glob, word_boundary=word_boundary)
- regex_cache[(glob, True, word_boundary)] = r
- return bool(r.search(value))
- except re.error:
- logger.warning("Failed to parse glob to regex: %r", glob)
- return False
-
-
-def _flatten_dict(
- d: Union[EventBase, Mapping[str, Any]],
- prefix: Optional[List[str]] = None,
- result: Optional[Dict[str, str]] = None,
-) -> Dict[str, str]:
- if prefix is None:
- prefix = []
- if result is None:
- result = {}
- for key, value in d.items():
- if isinstance(value, str):
- result[".".join(prefix + [key])] = value.lower()
- elif isinstance(value, Mapping):
- _flatten_dict(value, prefix=(prefix + [key]), result=result)
-
- return result
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 658bf373b7..edeba27a45 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -39,7 +39,12 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
await concurrently_execute(get_room_unread_count, joins, 10)
for notifs in room_notifs:
- if notifs.notify_count == 0:
+ # Combine the counts from all the threads.
+ notify_count = notifs.main_timeline.notify_count + sum(
+ n.notify_count for n in notifs.threads.values()
+ )
+
+ if notify_count == 0:
continue
if group_by_room:
@@ -47,7 +52,7 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
badge += 1
else:
# increment the badge count by the number of unread messages in the room
- badge += notifs.notify_count
+ badge += notify_count
return badge
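
A self-contained sketch of the per-room combination above, using hypothetical stand-in types for the notification counts:

from dataclasses import dataclass, field
from typing import Dict

@dataclass
class Counts:          # hypothetical stand-in for the per-thread counts
    notify_count: int

@dataclass
class RoomNotifs:      # hypothetical stand-in for the per-room result
    main_timeline: Counts
    threads: Dict[str, Counts] = field(default_factory=dict)

notifs = RoomNotifs(Counts(2), {"$thread1": Counts(1), "$thread2": Counts(3)})
notify_count = notifs.main_timeline.notify_count + sum(
    n.notify_count for n in notifs.threads.values()
)
assert notify_count == 6
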
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
index 53aa7fa4c6..ac9a92240a 100644
--- a/synapse/replication/http/__init__.py
+++ b/synapse/replication/http/__init__.py
@@ -25,6 +25,7 @@ from synapse.replication.http import (
push,
register,
send_event,
+ send_events,
state,
streams,
)
@@ -43,6 +44,7 @@ class ReplicationRestResource(JsonResource):
def register_servlets(self, hs: "HomeServer") -> None:
send_event.register_servlets(hs, self)
+ send_events.register_servlets(hs, self)
federation.register_servlets(hs, self)
presence.register_servlets(hs, self)
membership.register_servlets(hs, self)
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index aff7976165..3f012f1b5e 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -153,7 +153,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
argument list.
Returns:
- dict: If POST/PUT request then dictionary must be JSON serialisable,
+ If POST/PUT request then dictionary must be JSON serialisable,
otherwise must be appropriate for adding as query args.
"""
return {}
@@ -184,8 +184,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
client = hs.get_simple_http_client()
local_instance_name = hs.get_instance_name()
+ # The values of these options should match the replication listener settings.
master_host = hs.config.worker.worker_replication_host
master_port = hs.config.worker.worker_replication_http_port
+ master_tls = hs.config.worker.worker_replication_http_tls
instance_map = hs.config.worker.instance_map
@@ -205,9 +207,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
if instance_name == "master":
host = master_host
port = master_port
+ tls = master_tls
elif instance_name in instance_map:
host = instance_map[instance_name].host
port = instance_map[instance_name].port
+ tls = instance_map[instance_name].tls
else:
raise Exception(
"Instance %r not in 'instance_map' config" % (instance_name,)
@@ -238,7 +242,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
"Unknown METHOD on %s replication endpoint" % (cls.NAME,)
)
- uri = "http://%s:%s/_synapse/replication/%s/%s" % (
+ # The scheme is hard-coded to http by default, or https if the
+ # replication port is configured with TLS enabled.
+ scheme = "https" if tls else "http"
+ uri = "%s://%s:%s/_synapse/replication/%s/%s" % (
+ scheme,
host,
port,
cls.NAME,
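
The scheme selection reduces to a one-liner; a sketch of the resulting URI construction (names here are illustrative):

def replication_uri(tls: bool, host: str, port: int, name: str, txn_id: str) -> str:
    scheme = "https" if tls else "http"
    return "%s://%s:%s/_synapse/replication/%s/%s" % (scheme, host, port, name, txn_id)

assert replication_uri(True, "master", 9093, "send_event", "txn1").startswith("https://")
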
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index 3d63645726..c21629def8 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request
from synapse.http.server import HttpServer
+from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict
@@ -78,5 +79,71 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
return 200, user_devices
+class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
+ """Ask master to upload keys for the user and send them out over federation to
+ update other servers.
+
+ For now, only the master is permitted to handle key upload requests;
+ any worker can handle key query requests (since they're read-only).
+
+ Calls e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on
+ the main process to accomplish this.
+
+ Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
+ Request format (borrowed and expanded from KeyUploadServlet):
+
+ POST /_synapse/replication/upload_keys_for_user
+
+ {
+ "user_id": "",
+ "device_id": "",
+ "keys": {
+ ....this part can be found in KeyUploadServlet in rest/client/keys.py....
+ }
+ }
+
+ Response is equivalent to `/_matrix/client/v3/keys/upload` as found in KeyUploadServlet.
+
+ """
+
+ NAME = "upload_keys_for_user"
+ PATH_ARGS = ()
+ CACHE = False
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
+ self.e2e_keys_handler = hs.get_e2e_keys_handler()
+ self.store = hs.get_datastores().main
+ self.clock = hs.get_clock()
+
+ @staticmethod
+ async def _serialize_payload( # type: ignore[override]
+ user_id: str, device_id: str, keys: JsonDict
+ ) -> JsonDict:
+
+ return {
+ "user_id": user_id,
+ "device_id": device_id,
+ "keys": keys,
+ }
+
+ async def _handle_request( # type: ignore[override]
+ self, request: Request
+ ) -> Tuple[int, JsonDict]:
+ content = parse_json_object_from_request(request)
+
+ user_id = content["user_id"]
+ device_id = content["device_id"]
+ keys = content["keys"]
+
+ results = await self.e2e_keys_handler.upload_keys_for_user(
+ user_id, device_id, keys
+ )
+
+ return 200, results
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
+ ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
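
A hedged sketch of the worker side, using the ReplicationEndpoint.make_client helper that the other replication servlets use (hs and JsonDict are assumed to be in scope):

upload_keys = ReplicationUploadKeysForUserRestServlet.make_client(hs)

async def upload(user_id: str, device_id: str, keys: JsonDict) -> JsonDict:
    # Proxies the upload to the main process, since only it may handle
    # key upload requests.
    return await upload_keys(user_id=user_id, device_id=device_id, keys=keys)
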
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
index 6c8f8388fd..976c283360 100644
--- a/synapse/replication/http/register.py
+++ b/synapse/replication/http/register.py
@@ -39,6 +39,16 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
self.store = hs.get_datastores().main
self.registration_handler = hs.get_registration_handler()
+ # Default value if the worker that sent the replication request did not include
+ # an 'approved' property.
+ if (
+ hs.config.experimental.msc3866.enabled
+ and hs.config.experimental.msc3866.require_approval_for_new_accounts
+ ):
+ self._approval_default = False
+ else:
+ self._approval_default = True
+
@staticmethod
async def _serialize_payload( # type: ignore[override]
user_id: str,
@@ -51,6 +61,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
user_type: Optional[str],
address: Optional[str],
shadow_banned: bool,
+ approved: bool,
) -> JsonDict:
"""
Args:
@@ -68,6 +79,8 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
or None for a normal user.
address: the IP address used to perform the registration.
shadow_banned: Whether to shadow-ban the user
+ approved: Whether the user should be considered already approved by an
+ administrator.
"""
return {
"password_hash": password_hash,
@@ -79,6 +92,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
"user_type": user_type,
"address": address,
"shadow_banned": shadow_banned,
+ "approved": approved,
}
async def _handle_request( # type: ignore[override]
@@ -88,6 +102,12 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
await self.registration_handler.check_registration_ratelimit(content["address"])
+ # Always default admin users to approved (since it means they were created by
+ # an admin).
+ approved_default = self._approval_default
+ if content["admin"]:
+ approved_default = True
+
await self.registration_handler.register_with_store(
user_id=user_id,
password_hash=content["password_hash"],
@@ -99,6 +119,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
user_type=content["user_type"],
address=content["address"],
shadow_banned=content["shadow_banned"],
+ approved=content.get("approved", approved_default),
)
return 200, {}
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index 486f04723c..4215a1c1bc 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -141,8 +141,8 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
"Got event to send with ID: %s into room: %s", event.event_id, event.room_id
)
- event = await self.event_creation_handler.persist_and_notify_client_event(
- requester, event, context, ratelimit=ratelimit, extra_users=extra_users
+ event = await self.event_creation_handler.persist_and_notify_client_events(
+ requester, [(event, context)], ratelimit=ratelimit, extra_users=extra_users
)
return (
diff --git a/synapse/replication/http/send_events.py b/synapse/replication/http/send_events.py
new file mode 100644
index 0000000000..8889bbb644
--- /dev/null
+++ b/synapse/replication/http/send_events.py
@@ -0,0 +1,171 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import TYPE_CHECKING, List, Tuple
+
+from twisted.web.server import Request
+
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.events import EventBase, make_event_from_dict
+from synapse.events.snapshot import EventContext
+from synapse.http.server import HttpServer
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.replication.http._base import ReplicationEndpoint
+from synapse.types import JsonDict, Requester, UserID
+from synapse.util.metrics import Measure
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+ from synapse.storage.databases.main import DataStore
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationSendEventsRestServlet(ReplicationEndpoint):
+ """Handles batches of newly created events on workers, including persisting and
+ notifying.
+
+ The API looks like:
+
+ POST /_synapse/replication/send_events/:txn_id
+
+ {
+ "events": [{
+ "event": { .. serialized event .. },
+ "room_version": .., // "1", "2", "3", etc: the version of the room
+ // containing the event
+ "event_format_version": .., // 1,2,3 etc: the event format version
+ "internal_metadata": { .. serialized internal_metadata .. },
+ "outlier": true|false,
+ "rejected_reason": .., // The event.rejected_reason field
+ "context": { .. serialized event context .. },
+ "requester": { .. serialized requester .. },
+ "ratelimit": true,
+ }]
+ }
+
+ 200 OK
+
+ { "stream_id": 12345, "event_id": "$abcdef..." }
+
+ Responds with a 409 when a `PartialStateConflictError` is raised because an
+ event context needs to be recomputed due to the un-partial stating of a room.
+
+ """
+
+ NAME = "send_events"
+ PATH_ARGS = ()
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
+ self.event_creation_handler = hs.get_event_creation_handler()
+ self.store = hs.get_datastores().main
+ self._storage_controllers = hs.get_storage_controllers()
+ self.clock = hs.get_clock()
+
+ @staticmethod
+ async def _serialize_payload( # type: ignore[override]
+ events_and_context: List[Tuple[EventBase, EventContext]],
+ store: "DataStore",
+ requester: Requester,
+ ratelimit: bool,
+ extra_users: List[UserID],
+ ) -> JsonDict:
+ """
+ Args:
+ events_and_context
+ store
+ requester
+ ratelimit
+ extra_users
+ """
+ serialized_events = []
+
+ for event, context in events_and_context:
+ serialized_context = await context.serialize(event, store)
+ serialized_event = {
+ "event": event.get_pdu_json(),
+ "room_version": event.room_version.identifier,
+ "event_format_version": event.format_version,
+ "internal_metadata": event.internal_metadata.get_dict(),
+ "outlier": event.internal_metadata.is_outlier(),
+ "rejected_reason": event.rejected_reason,
+ "context": serialized_context,
+ "requester": requester.serialize(),
+ "ratelimit": ratelimit,
+ "extra_users": [u.to_string() for u in extra_users],
+ }
+ serialized_events.append(serialized_event)
+
+ payload = {"events": serialized_events}
+
+ return payload
+
+ async def _handle_request( # type: ignore[override]
+ self, request: Request
+ ) -> Tuple[int, JsonDict]:
+ with Measure(self.clock, "repl_send_events_parse"):
+ payload = parse_json_object_from_request(request)
+ events_and_context = []
+ events = payload["events"]
+
+ for event_payload in events:
+ event_dict = event_payload["event"]
+ room_ver = KNOWN_ROOM_VERSIONS[event_payload["room_version"]]
+ internal_metadata = event_payload["internal_metadata"]
+ rejected_reason = event_payload["rejected_reason"]
+
+ event = make_event_from_dict(
+ event_dict, room_ver, internal_metadata, rejected_reason
+ )
+ event.internal_metadata.outlier = event_payload["outlier"]
+
+ requester = Requester.deserialize(
+ self.store, event_payload["requester"]
+ )
+ context = EventContext.deserialize(
+ self._storage_controllers, event_payload["context"]
+ )
+
+ ratelimit = event_payload["ratelimit"]
+ events_and_context.append((event, context))
+
+ extra_users = [
+ UserID.from_string(u) for u in event_payload["extra_users"]
+ ]
+
+ logger.info(
+ "Got batch of events to send, last ID of batch is: %s, sending into room: %s",
+ event.event_id,
+ event.room_id,
+ )
+
+ last_event = (
+ await self.event_creation_handler.persist_and_notify_client_events(
+ requester, events_and_context, ratelimit, extra_users
+ )
+ )
+
+ return (
+ 200,
+ {
+ "stream_id": last_event.internal_metadata.stream_ordering,
+ "event_id": last_event.event_id,
+ },
+ )
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ ReplicationSendEventsRestServlet(hs).register(http_server)
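
A hedged sketch of the worker side, mirroring how the single-event endpoint is invoked via make_client (hs, store, requester and events_and_context are assumed to be in scope):

send_events = ReplicationSendEventsRestServlet.make_client(hs)

async def send_batch(events_and_context) -> dict:
    # The payload mirrors _serialize_payload above; the response carries
    # the stream_id and event_id of the last event in the batch.
    return await send_events(
        events_and_context=events_and_context,
        store=store,
        requester=requester,
        ratelimit=True,
        extra_users=[],
    )
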
diff --git a/synapse/replication/slave/__init__.py b/synapse/replication/slave/__init__.py
deleted file mode 100644
index f43a360a80..0000000000
--- a/synapse/replication/slave/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/replication/slave/storage/__init__.py b/synapse/replication/slave/storage/__init__.py
deleted file mode 100644
index f43a360a80..0000000000
--- a/synapse/replication/slave/storage/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py
deleted file mode 100644
index 8f3f953ed4..0000000000
--- a/synapse/replication/slave/storage/_slaved_id_tracker.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import List, Optional, Tuple
-
-from synapse.storage.database import LoggingDatabaseConnection
-from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id
-
-
-class SlavedIdTracker(AbstractStreamIdTracker):
- """Tracks the "current" stream ID of a stream with a single writer.
-
- See `AbstractStreamIdTracker` for more details.
-
- Note that this class does not work correctly when there are multiple
- writers.
- """
-
- def __init__(
- self,
- db_conn: LoggingDatabaseConnection,
- table: str,
- column: str,
- extra_tables: Optional[List[Tuple[str, str]]] = None,
- step: int = 1,
- ):
- self.step = step
- self._current = _load_current_id(db_conn, table, column, step)
- if extra_tables:
- for table, column in extra_tables:
- self.advance(None, _load_current_id(db_conn, table, column))
-
- def advance(self, instance_name: Optional[str], new_id: int) -> None:
- self._current = (max if self.step > 0 else min)(self._current, new_id)
-
- def get_current_token(self) -> int:
- return self._current
-
- def get_current_token_for_writer(self, instance_name: str) -> int:
- return self.get_current_token()
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
deleted file mode 100644
index 6fcade510a..0000000000
--- a/synapse/replication/slave/storage/devices.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import TYPE_CHECKING, Any, Iterable
-
-from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
-from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.devices import DeviceWorkerStore
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-
-class SlavedDeviceStore(DeviceWorkerStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- self.hs = hs
-
- self._device_list_id_gen = SlavedIdTracker(
- db_conn,
- "device_lists_stream",
- "stream_id",
- extra_tables=[
- ("user_signature_stream", "stream_id"),
- ("device_lists_outbound_pokes", "stream_id"),
- ("device_lists_changes_in_room", "stream_id"),
- ],
- )
-
- super().__init__(database, db_conn, hs)
-
- def get_device_stream_token(self) -> int:
- return self._device_list_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == DeviceListsStream.NAME:
- self._device_list_id_gen.advance(instance_name, token)
- self._invalidate_caches_for_devices(token, rows)
- elif stream_name == UserSignatureStream.NAME:
- self._device_list_id_gen.advance(instance_name, token)
- for row in rows:
- self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
-
- def _invalidate_caches_for_devices(
- self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow]
- ) -> None:
- for row in rows:
- # The entities are either user IDs (starting with '@') whose devices
- # have changed, or remote servers that we need to tell about
- # changes.
- if row.entity.startswith("@"):
- self._device_list_stream_cache.entity_has_changed(row.entity, token)
- self.get_cached_devices_for_user.invalidate((row.entity,))
- self._get_cached_user_device.invalidate((row.entity,))
- self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
-
- else:
- self._device_list_federation_stream_cache.entity_has_changed(
- row.entity, token
- )
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
deleted file mode 100644
index fe47778cb1..0000000000
--- a/synapse/replication/slave/storage/events.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-from typing import TYPE_CHECKING
-
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
-from synapse.storage.databases.main.event_push_actions import (
- EventPushActionsWorkerStore,
-)
-from synapse.storage.databases.main.events_worker import EventsWorkerStore
-from synapse.storage.databases.main.relations import RelationsWorkerStore
-from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
-from synapse.storage.databases.main.signatures import SignatureWorkerStore
-from synapse.storage.databases.main.state import StateGroupWorkerStore
-from synapse.storage.databases.main.stream import StreamWorkerStore
-from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
-from synapse.util.caches.stream_change_cache import StreamChangeCache
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-# So, um, we want to borrow a load of functions intended for reading from
-# a DataStore, but we don't want to take functions that either write to the
-# DataStore or are cached and don't have cache invalidation logic.
-#
-# Rather than write duplicate versions of those functions, or lift them to
-# a common base class, we going to grab the underlying __func__ object from
-# the method descriptor on the DataStore and chuck them into our class.
-
-
-class SlavedEventStore(
- EventFederationWorkerStore,
- RoomMemberWorkerStore,
- EventPushActionsWorkerStore,
- StreamWorkerStore,
- StateGroupWorkerStore,
- SignatureWorkerStore,
- EventsWorkerStore,
- UserErasureWorkerStore,
- RelationsWorkerStore,
-):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
-
- events_max = self._stream_id_gen.get_current_token()
- curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
- db_conn,
- "current_state_delta_stream",
- entity_column="room_id",
- stream_column="stream_id",
- max_value=events_max, # As we share the stream id with events token
- limit=1000,
- )
- self._curr_state_delta_stream_cache = StreamChangeCache(
- "_curr_state_delta_stream_cache",
- min_curr_state_delta_id,
- prefilled_cache=curr_state_delta_prefill,
- )
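
The constructor above pre-fills the change cache from the database so a freshly started worker does not have to treat every room as potentially changed. A rough sketch of what a get_cache_dict-style prefill does, written against a hypothetical SQLite copy of the table (Synapse's real helper is more general):

    import sqlite3
    from typing import Dict, Tuple


    def prefill_change_cache(
        conn: sqlite3.Connection, max_value: int, limit: int = 1000
    ) -> Tuple[Dict[str, int], int]:
        # Load the most recently changed rooms and their latest stream IDs.
        rows = conn.execute(
            "SELECT room_id, MAX(stream_id) FROM current_state_delta_stream"
            " GROUP BY room_id ORDER BY MAX(stream_id) DESC LIMIT ?",
            (limit,),
        ).fetchall()
        prefill = {room_id: stream_id for room_id, stream_id in rows}
        # Anything older than the smallest prefetched ID is unknown, so the
        # cache must treat positions below this horizon as "maybe changed".
        min_id = min(prefill.values(), default=max_value)
        return prefill, min_id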
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
deleted file mode 100644
index a00b38c512..0000000000
--- a/synapse/replication/slave/storage/keys.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from synapse.storage.databases.main.keys import KeyStore
-
-# KeyStore isn't really safe to use from a worker, but for now we do so and hope that
-# the races it creates aren't too bad.
-
-SlavedKeyStore = KeyStore
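
The deleted module was nothing but a compatibility alias. For comparison, a hypothetical way to keep such an alias importable while warning callers off it, using PEP 562's module-level __getattr__ (the PR simply removes the module outright):

    import warnings

    from synapse.storage.databases.main.keys import KeyStore


    def __getattr__(name: str):
        # PEP 562: called for attributes not found in the module, so the old
        # import path keeps working while emitting a deprecation warning.
        if name == "SlavedKeyStore":
            warnings.warn(
                "SlavedKeyStore is deprecated, import KeyStore directly",
                DeprecationWarning,
                stacklevel=2,
            )
            return KeyStore
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")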
diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py
deleted file mode 100644
index 5e65eaf1e0..0000000000
--- a/synapse/replication/slave/storage/push_rule.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Iterable
-
-from synapse.replication.tcp.streams import PushRulesStream
-from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
-
-from .events import SlavedEventStore
-
-
-class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore):
- def get_max_push_rules_stream_id(self) -> int:
- return self._push_rules_stream_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == PushRulesStream.NAME:
- self._push_rules_stream_id_gen.advance(instance_name, token)
- for row in rows:
- self.get_push_rules_for_user.invalidate((row.user_id,))
- self.push_rules_stream_cache.entity_has_changed(row.user_id, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
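
Note how every store in this hierarchy handles the streams it cares about and then unconditionally calls super().process_replication_rows, so a single dispatch walks the whole method resolution order. A runnable toy version of that cooperative chain (names are illustrative):

    from typing import Any, Iterable


    class BaseStore:
        def process_replication_rows(
            self, stream_name: str, token: int, rows: Iterable[Any]
        ) -> None:
            pass  # end of the cooperative chain


    class PushRuleMixin(BaseStore):
        def process_replication_rows(self, stream_name, token, rows):
            if stream_name == "push_rules":
                print(f"push rules advanced to {token}")
            super().process_replication_rows(stream_name, token, rows)


    class DeviceListMixin(BaseStore):
        def process_replication_rows(self, stream_name, token, rows):
            if stream_name == "device_lists":
                print(f"device lists advanced to {token}")
            super().process_replication_rows(stream_name, token, rows)


    class WorkerStore(PushRuleMixin, DeviceListMixin):
        pass


    # Every mixin sees the call; only the one owning the stream reacts.
    WorkerStore().process_replication_rows("device_lists", 42, [])
    # prints: device lists advanced to 42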
diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py
deleted file mode 100644
index 44ed20e424..0000000000
--- a/synapse/replication/slave/storage/pushers.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import TYPE_CHECKING, Any, Iterable
-
-from synapse.replication.tcp.streams import PushersStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.pusher import PusherWorkerStore
-
-from ._slaved_id_tracker import SlavedIdTracker
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-
-class SlavedPusherStore(PusherWorkerStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
- self._pushers_id_gen = SlavedIdTracker( # type: ignore
- db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
- )
-
- def get_pushers_stream_token(self) -> int:
- return self._pushers_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == PushersStream.NAME:
- self._pushers_id_gen.advance(instance_name, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
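
SlavedIdTracker, used above, is essentially a read-only stream ID tracker: it seeds itself from the maximum ID across one or more tables and afterwards only moves forward. A sketch of that idea against a hypothetical SQLite schema (the real tracker lives in _slaved_id_tracker and handles more cases):

    import sqlite3
    from typing import Tuple


    class ReadOnlyIdTracker:
        """Seeds itself from the max ID across the given tables, then only
        moves forward when replication reports a newer position."""

        def __init__(
            self,
            conn: sqlite3.Connection,
            table: str,
            column: str,
            extra_tables: Tuple[Tuple[str, str], ...] = (),
        ) -> None:
            current = 0
            for t, c in ((table, column),) + tuple(extra_tables):
                (max_id,) = conn.execute(
                    f"SELECT COALESCE(MAX({c}), 0) FROM {t}"
                ).fetchone()
                current = max(current, max_id)
            self._current = current

        def advance(self, new_id: int) -> None:
            # Positions can arrive out of order; never move backwards.
            self._current = max(self._current, new_id)

        def get_current_token(self) -> int:
            return self._current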
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index b2522f98ca..18252a2958 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -210,15 +210,16 @@ class ReplicationDataHandler:
max_token = self.store.get_room_max_token()
event_pos = PersistedEventPosition(instance_name, token)
- await self.notifier.on_new_room_event_args(
- event_pos=event_pos,
- max_room_stream_token=max_token,
- extra_users=extra_users,
- room_id=row.data.room_id,
- event_id=row.data.event_id,
- event_type=row.data.type,
- state_key=row.data.state_key,
- membership=row.data.membership,
+ event_entry = self.notifier.create_pending_room_event_entry(
+ event_pos,
+ extra_users,
+ row.data.room_id,
+ row.data.type,
+ row.data.state_key,
+ row.data.membership,
+ )
+ await self.notifier.notify_new_room_events(
+ [(event_entry, row.data.event_id)], max_token
)
# If this event is a join, make a note of it so we have an accurate
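
The new notifier shape separates building an event entry from delivering it, so callers can accumulate several entries and wake the notifier once per batch rather than once per event. A hedged sketch of a batched caller, reusing the method names from the hunk above (the helper itself and the row shape are illustrative):

    from synapse.types import PersistedEventPosition


    async def notify_batch(notifier, instance_name, rows, max_token, extra_users=()):
        """Hypothetical helper: build one pending entry per row, then wake
        the notifier a single time for the whole batch."""
        entries = []
        for row in rows:
            event_pos = PersistedEventPosition(instance_name, row.token)
            entry = notifier.create_pending_room_event_entry(
                event_pos,
                extra_users,
                row.data.room_id,
                row.data.type,
                row.data.state_key,
                row.data.membership,
            )
            entries.append((entry, row.data.event_id))
        await notifier.notify_new_room_events(entries, max_token)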
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 7763ffb2d0..56a5c21910 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -245,7 +245,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
self._parse_and_dispatch_line(line)
def _parse_and_dispatch_line(self, line: bytes) -> None:
- if line.strip() == "":
+ if line.strip() == b"":
# Ignore blank lines
return
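
This one-character fix matters because in Python 3 bytes and str never compare equal, so the old guard was dead code and blank lines were not actually ignored:

    line = b"  \r\n"
    print(line.strip() == "")   # False: a bytes value is never equal to a str
    print(line.strip() == b"")  # True: the corrected comparison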
diff --git a/synapse/res/templates/_base.html b/synapse/res/templates/_base.html
new file mode 100644
index 0000000000..46439fce6a
--- /dev/null
+++ b/synapse/res/templates/_base.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>{% block title %}{% endblock %}</title>
+
+    {% block header %}{% endblock %}
+</head>
+<body>
+<header>
+    {% if app_name == "Riot" %}
+        <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+    {% elif app_name == "Vector" %}
+        <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+    {% elif app_name == "Element" %}
+        <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
+    {% else %}
+        <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+    {% endif %}
+</header>
+
+{% block body %}{% endblock %}
+
+</body>
+</html>
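
With the base template in place, each child template only overrides the title, header, and body blocks. A self-contained demonstration of that inheritance mechanism using jinja2's DictLoader with stand-in templates (not Synapse's real ones):

    from jinja2 import DictLoader, Environment

    env = Environment(
        loader=DictLoader(
            {
                "_base.html": (
                    "<title>{% block title %}{% endblock %}</title>\n"
                    "<body>{% block body %}{% endblock %}</body>"
                ),
                "child.html": (
                    '{% extends "_base.html" %}\n'
                    "{% block title %}Hello{% endblock %}\n"
                    "{% block body %}<p>Hi {{ name }}</p>{% endblock %}"
                ),
            }
        )
    )

    print(env.get_template("child.html").render(name="Alice"))
    # <title>Hello</title>
    # <body><p>Hi Alice</p></body>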
diff --git a/synapse/res/templates/account_previously_renewed.html b/synapse/res/templates/account_previously_renewed.html
index bd4f7cea97..91582a8af0 100644
--- a/synapse/res/templates/account_previously_renewed.html
+++ b/synapse/res/templates/account_previously_renewed.html
@@ -1,12 +1,6 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <title>Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</title>
-</head>
-<body>
-    Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
-</body>
-</html>
\ No newline at end of file
+{% extends "_base.html" %}
+{% block title %}Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %}
+
+{% block body %}
+    Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
+{% endblock %}
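
These templates lean on a format_ts filter that renders a millisecond timestamp with a strftime pattern. A sketch of such a filter registered on a jinja2 environment; Synapse registers its own version elsewhere, so treat this as an illustrative re-implementation:

    import time

    from jinja2 import Environment


    def format_ts(value: int, format: str) -> str:
        # The templates pass timestamps in milliseconds.
        return time.strftime(format, time.localtime(value / 1000))


    env = Environment()
    env.filters["format_ts"] = format_ts

    tmpl = env.from_string('Valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.')
    print(tmpl.render(expiration_ts=1700000000000))
    # e.g. "Valid until 14-11-2023." (exact date depends on the local timezone)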
diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html
index 57b319f375..18a57833f1 100644
--- a/synapse/res/templates/account_renewed.html
+++ b/synapse/res/templates/account_renewed.html
@@ -1,12 +1,6 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <title>Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</title>
-</head>
-<body>
-    Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
-</body>
-</html>
\ No newline at end of file
+{% extends "_base.html" %}
+{% block title %}Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %}
+
+{% block body %}
+    Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
+{% endblock %}
diff --git a/synapse/res/templates/add_threepid.html b/synapse/res/templates/add_threepid.html
index 71f2215b7a..33c883936a 100644
--- a/synapse/res/templates/add_threepid.html
+++ b/synapse/res/templates/add_threepid.html
@@ -1,14 +1,8 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <title>Request to add an email address to your Matrix account</title>
-</head>
-<body>
-    <p>A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:</p>
-    <a href="{{ link }}">{{ link }}</a>
-    <p>If this was not you, you can safely ignore this email. Thank you.</p>
-</body>
-</html>
+{% extends "_base.html" %}
+{% block title %}Request to add an email address to your Matrix account{% endblock %}
+
+{% block body %}
+    <p>A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:</p>
+    <a href="{{ link }}">{{ link }}</a>
+    <p>If this was not you, you can safely ignore this email. Thank you.</p>
+{% endblock %}
diff --git a/synapse/res/templates/sso_account_deactivated.html b/synapse/res/templates/sso_account_deactivated.html
--- a/synapse/res/templates/sso_account_deactivated.html
+++ b/synapse/res/templates/sso_account_deactivated.html
-            Your account might have been deactivated by the server administrator.
-            You can either try to create a new account or contact the server’s
-            administrator.
-        </p>
-    </header>
-    {% include "sso_footer.html" without context %}
-</body>
-</html>
+{% extends "_base.html" %}
+{% block title %}SSO account deactivated{% endblock %}
+
+{% block header %}
+<style type="text/css">
+    {% include "sso.css" without context %}
+</style>
+{% endblock %}
+
+{% block body %}
+<header>
+    <h1>Your account has been deactivated</h1>
+    <p>
+        <strong>No account found</strong>
+    </p>
+    <p>
+        Your account might have been deactivated by the server administrator.
+        You can either try to create a new account or contact the server’s
+        administrator.
+    </p>
+</header>
+{% include "sso_footer.html" without context %}
+{% endblock %}
diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html
index 2d1db386e1..11636d7f5d 100644
--- a/synapse/res/templates/sso_auth_account_details.html
+++ b/synapse/res/templates/sso_auth_account_details.html
@@ -1,189 +1,186 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <title>Create your account</title>
-
-
-
-
-
-
-
-
-
-            <h1>Create your account</h1>
-            <p class="subtitle">
-                This is required. Continue to create your account on {{ server_name }}. You can't change this later.