diff --git a/.ci/scripts/auditwheel_wrapper.py b/.ci/scripts/auditwheel_wrapper.py
new file mode 100755
index 0000000000..a33b39314f
--- /dev/null
+++ b/.ci/scripts/auditwheel_wrapper.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Wraps `auditwheel repair` to first check whether we're repairing a potentially
+# abi3-compatible wheel; if so, the wheel is renamed before being repaired.
+
+import argparse
+import os
+import subprocess
+from typing import Optional
+from zipfile import ZipFile
+
+from packaging.tags import Tag
+from packaging.utils import parse_wheel_filename
+from packaging.version import Version
+
+
+def check_is_abi3_compatible(wheel_file: str) -> None:
+    """Check the contents of the built wheel for any `.so` files that are *not*
+    abi3 compatible.
+    """
+
+    with ZipFile(wheel_file, "r") as wheel:
+        for file in wheel.namelist():
+            if not file.endswith(".so"):
+                continue
+
+            if not file.endswith(".abi3.so"):
+                raise Exception(f"Found non-abi3 lib: {file}")
+
+
+def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
+    """Replaces the cpython wheel file with an abi3-compatible wheel"""
+
+    if tag.abi == "abi3":
+        # Nothing to do.
+        return wheel_file
+
+    check_is_abi3_compatible(wheel_file)
+
+    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
+
+    dirname = os.path.dirname(wheel_file)
+    new_wheel_file = os.path.join(
+        dirname,
+        f"{name}-{version}-{abi3_tag}.whl",
+    )
+
+    os.rename(wheel_file, new_wheel_file)
+
+    print("Renamed wheel to", new_wheel_file)
+
+    return new_wheel_file
+
+
+def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
+    """Entry point"""
+
+    # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
+    # normalizes the package name (i.e. it converts matrix_synapse ->
+    # matrix-synapse), which is not what we want.
+    _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
+    name = os.path.basename(wheel_file).split("-")[0]
+
+    if len(tags) != 1:
+        # We expect a wheel file with only a single tag
+        raise Exception(f"Unexpectedly found multiple tags: {tags}")
+
+    tag = next(iter(tags))
+
+    if build:
+        # We don't use build tags in Synapse
+        raise Exception(f"Unexpected build tag: {build}")
+
+    # If the wheel is for cpython then convert it into an abi3 wheel.
+    if tag.interpreter.startswith("cp"):
+        wheel_file = cpython(wheel_file, name, version, tag)
+
+    # Finally, repair the wheel.
+    if archs is not None:
+        # If we are given archs then we are on macOS: list the wheel's
+        # dependencies with `delocate-listdeps`, then repair it with `delocate-wheel`.
+        subprocess.run(["delocate-listdeps", wheel_file], check=True)
+        subprocess.run(
+            ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
+            check=True,
+        )
+    else:
+        subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
+
+    parser.add_argument(
+        "--wheel-dir",
+        "-w",
+        metavar="WHEEL_DIR",
+        help="Directory to store delocated wheels",
+        required=True,
+    )
+
+    parser.add_argument(
+        "--require-archs",
+        metavar="archs",
+        default=None,
+    )
+
+    parser.add_argument(
+        "wheel_file",
+        metavar="WHEEL_FILE",
+    )
+
+    args = parser.parse_args()
+
+    wheel_file = args.wheel_file
+    wheel_dir = args.wheel_dir
+    archs = args.require_archs
+
+    main(wheel_file, wheel_dir, archs)
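For context, here is a quick sketch of the retagging step in isolation. The wheel filename below is a made-up example; `parse_wheel_filename` and `Tag` are the same `packaging` helpers the script imports:

```python
# Minimal sketch of the abi3 retagging above, run outside the wrapper.
# The filename is hypothetical; any single-tag cp3x wheel behaves the same way.
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

filename = "matrix_synapse-1.72.0rc1-cp37-cp37m-manylinux_2_17_x86_64.whl"
_, version, build, tags = parse_wheel_filename(filename)

# parse_wheel_filename normalizes the name (matrix_synapse -> matrix-synapse),
# which is why the script recovers the raw name with split("-") instead.
name = filename.split("-")[0]

tag = next(iter(tags))
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)  # str() -> "cp37-abi3-..."
print(f"{name}-{version}-{abi3_tag}.whl")
# -> matrix_synapse-1.72.0rc1-cp37-abi3-manylinux_2_17_x86_64.whl
```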
diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py
index c2198e0dd4..0cdc20e19c 100755
--- a/.ci/scripts/calculate_jobs.py
+++ b/.ci/scripts/calculate_jobs.py
@@ -46,7 +46,7 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
- for version in ("3.8", "3.9", "3.10")
+ for version in ("3.8", "3.9", "3.10", "3.11")
)
@@ -54,7 +54,7 @@ trial_postgres_tests = [
{
"python-version": "3.7",
"database": "postgres",
- "postgres-version": "10",
+ "postgres-version": "11",
"extras": "all",
}
]
@@ -62,9 +62,9 @@ trial_postgres_tests = [
if not IS_PR:
trial_postgres_tests.append(
{
- "python-version": "3.10",
+ "python-version": "3.11",
"database": "postgres",
- "postgres-version": "14",
+ "postgres-version": "15",
"extras": "all",
}
)
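The job dictionaries above are ultimately serialized to JSON for the `get-matrix` step in `tests.yml` to consume. A hedged sketch of that hand-off, assuming the standard `GITHUB_OUTPUT` mechanism (the `trial_test_matrix` output key is illustrative, not taken from the script):

```python
# Hedged sketch: emit a job matrix for GitHub Actions. Only the GITHUB_OUTPUT
# mechanism itself is standard Actions behaviour; the key name is an assumption.
import json
import os

trial_postgres_tests = [
    {
        "python-version": "3.11",
        "database": "postgres",
        "postgres-version": "15",
        "extras": "all",
    }
]

# Steps append "key=value" lines to the file named by $GITHUB_OUTPUT.
with open(os.environ["GITHUB_OUTPUT"], "a") as f:
    f.write(f"trial_test_matrix={json.dumps(trial_postgres_tests)}\n")
```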
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 9ee62bf539..7ce353ed64 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -18,5 +18,6 @@ updates:
- package-ecosystem: "cargo"
directory: "/"
+ versioning-strategy: "lockfile-only"
schedule:
interval: "weekly"
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
new file mode 100644
index 0000000000..231982f681
--- /dev/null
+++ b/.github/workflows/docs-pr-netlify.yaml
@@ -0,0 +1,34 @@
+name: Deploy documentation PR preview
+
+on:
+  workflow_run:
+    workflows: [ "Prepare documentation PR preview" ]
+    types:
+      - completed
+
+jobs:
+  netlify:
+    if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
+    runs-on: ubuntu-latest
+    steps:
+      # There's a 'download artifact' action, but it hasn't been updated for the workflow_run event
+      # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
+      - name: 📥 Download artifact
+        uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
+        with:
+          workflow: docs-pr.yaml
+          run_id: ${{ github.event.workflow_run.id }}
+          name: book
+          path: book
+
+      - name: 📤 Deploy to Netlify
+        uses: matrix-org/netlify-pr-preview@v1
+        with:
+          path: book
+          owner: ${{ github.event.workflow_run.head_repository.owner.login }}
+          branch: ${{ github.event.workflow_run.head_branch }}
+          revision: ${{ github.event.workflow_run.head_sha }}
+          token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
+          site_id: ${{ secrets.NETLIFY_SITE_ID }}
+          desc: Documentation preview
+          deployment_env: PR Documentation Preview
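The third-party download action works around the fact that `actions/download-artifact` cannot fetch artifacts produced by a different workflow run. Roughly what it does under the hood, sketched against the GitHub REST API (repository, run id, and token are placeholders):

```python
# Hedged sketch of cross-workflow artifact download via the GitHub REST API.
# All concrete values here are placeholders.
import io
import zipfile

import requests

REPO = "matrix-org/synapse"
RUN_ID = 123456789  # github.event.workflow_run.id
HEADERS = {"Authorization": "Bearer <token>"}

resp = requests.get(
    f"https://api.github.com/repos/{REPO}/actions/runs/{RUN_ID}/artifacts",
    headers=HEADERS,
)
book = next(a for a in resp.json()["artifacts"] if a["name"] == "book")

# archive_download_url redirects to a short-lived URL serving a zip of the artifact.
data = requests.get(book["archive_download_url"], headers=HEADERS).content
zipfile.ZipFile(io.BytesIO(data)).extractall("book")
```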
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
new file mode 100644
index 0000000000..cde6cf511e
--- /dev/null
+++ b/.github/workflows/docs-pr.yaml
@@ -0,0 +1,34 @@
+name: Prepare documentation PR preview
+
+on:
+  pull_request:
+    paths:
+      - docs/**
+
+jobs:
+  pages:
+    name: GitHub Pages
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Setup mdbook
+        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+        with:
+          mdbook-version: '0.4.17'
+
+      - name: Build the documentation
+        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
+        # However, we're using docs/README.md for other purposes and need to pick a new page
+        # as the default. Let's opt for the welcome page instead.
+        run: |
+          mdbook build
+          cp book/welcome_and_overview.html book/index.html
+
+      - name: Upload Artifact
+        uses: actions/upload-artifact@v3
+        with:
+          name: book
+          path: book
+          # We'll only use this in a workflow_run, then we're done with it
+          retention-days: 1
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 798e308f20..575412d965 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -20,7 +20,7 @@ jobs:
- uses: actions/checkout@v3
- name: Setup mdbook
- uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
+ uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
@@ -58,7 +58,7 @@ jobs:
# Deploy to the target directory.
- name: Deploy to gh pages
- uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
+ uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index b1e45ee648..c6f481cdaa 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -27,10 +27,9 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -62,10 +61,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- run: sudo apt-get -qq install xmlsec1
@@ -136,10 +134,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Ensure sytest runs `pip install`
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 60db6ad7f0..0601a7dbaf 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -99,7 +99,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-20.04, macos-10.15]
+ os: [ubuntu-20.04, macos-11]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
@@ -109,9 +109,9 @@ jobs:
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
- os: "macos-10.15"
+ os: "macos-11"
# Don't build aarch64 wheels on mac.
- - os: "macos-10.15"
+ - os: "macos-11"
arch: aarch64
# Don't build aarch64 wheels on PR CI.
- is_pr: true
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 1115dcc5f3..ec5ab79f9c 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -33,6 +33,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
@@ -44,6 +46,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
@@ -68,6 +72,8 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
@@ -93,10 +99,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
components: clippy
- uses: Swatinem/rust-cache@v2
@@ -111,11 +119,13 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: 1.58.1
- override: true
- components: rustfmt
+ toolchain: 1.58.1
+ components: rustfmt
- uses: Swatinem/rust-cache@v2
- run: cargo fmt --check
@@ -143,6 +153,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- id: get-matrix
run: .ci/scripts/calculate_jobs.py
outputs:
@@ -167,6 +179,16 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.job.postgres-version }}
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+ with:
+ toolchain: 1.58.1
+ - uses: Swatinem/rust-cache@v2
+
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.job.python-version }}
@@ -203,10 +225,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
@@ -319,10 +343,12 @@ jobs:
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
@@ -383,10 +409,10 @@ jobs:
matrix:
include:
- python-version: "3.7"
- postgres-version: "10"
+ postgres-version: "11"
- - python-version: "3.10"
- postgres-version: "14"
+ - python-version: "3.11"
+ postgres-version: "15"
services:
postgres:
@@ -404,6 +430,15 @@ jobs:
steps:
- uses: actions/checkout@v3
+ - name: Add PostgreSQL apt repository
+ # We need a version of pg_dump that can handle the version of
+ # PostgreSQL being tested against. The Ubuntu package repository lags
+ # behind new releases, so we have to use the PostgreSQL apt repository.
+ # Steps taken from https://www.postgresql.org/download/linux/ubuntu/
+ run: |
+ sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
+ wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
+ sudo apt-get update
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
@@ -451,10 +486,12 @@ jobs:
path: synapse
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
- name: Prepare Complement's Prerequisites
@@ -477,10 +514,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
- run: cargo test
diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml
index fbd55de17f..d1ac4357b1 100644
--- a/.github/workflows/triage_labelled.yml
+++ b/.github/workflows/triage_labelled.yml
@@ -11,34 +11,34 @@ jobs:
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- - uses: octokit/graphql-action@v2.x
- id: add_to_project
+ - uses: actions/add-to-project@main
+ id: add_project
with:
- headers: '{"GraphQL-Features": "projects_next_graphql"}'
- query: |
- mutation {
- updateProjectV2ItemFieldValue(
- input: {
- projectId: $projectid
- itemId: $contentid
- fieldId: $fieldid
- value: {
- singleSelectOptionId: "Todo"
+ project-url: "https://github.com/orgs/matrix-org/projects/67"
+ github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ - name: Set status
+ env:
+ GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ run: |
+ gh api graphql -f query='
+ mutation(
+ $project: ID!
+ $item: ID!
+ $fieldid: ID!
+ $columnid: String!
+ ) {
+ updateProjectV2ItemFieldValue(
+ input: {
+ projectId: $project
+ itemId: $item
+ fieldId: $fieldid
+ value: {
+ singleSelectOptionId: $columnid
}
- }
- ) {
- projectV2Item {
- id
- }
+ }
+ ) {
+ projectV2Item {
+ id
}
}
-
- projectid: ${{ env.PROJECT_ID }}
- contentid: ${{ github.event.issue.node_id }}
- fieldid: ${{ env.FIELD_ID }}
- optionid: ${{ env.OPTION_ID }}
- env:
- PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
- GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
- FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
- OPTION_ID: "ba22e43c"
+ }' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent
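The same mutation can be exercised directly against GitHub's GraphQL endpoint, which may make the two-step flow above easier to follow: the add-to-project action returns an `itemId`, and a second call flips the item's single-select Status field. A hedged sketch (the token is a placeholder; the IDs are the ones hard-coded in the workflow):

```python
# Hedged sketch of the same ProjectV2 field update via GitHub's GraphQL API.
import requests

MUTATION = """
mutation($project: ID!, $item: ID!, $fieldid: ID!, $columnid: String!) {
  updateProjectV2ItemFieldValue(
    input: {
      projectId: $project
      itemId: $item
      fieldId: $fieldid
      value: { singleSelectOptionId: $columnid }
    }
  ) {
    projectV2Item { id }
  }
}
"""

requests.post(
    "https://api.github.com/graphql",
    headers={"Authorization": "Bearer <token>"},
    json={
        "query": MUTATION,
        "variables": {
            "project": "PVT_kwDOAIB0Bs4AFDdZ",
            "item": "<itemId from the add-to-project step>",
            "fieldid": "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4",
            "columnid": "ba22e43c",
        },
    },
)
```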
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index b4e26118c0..6a047193f6 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -18,10 +18,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -44,10 +43,9 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -84,10 +82,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Patch dependencies
@@ -151,12 +148,11 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
- pipx install poetry==1.1.14
+ pipx install poetry==1.2.0
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
- # NOT IN 1.1.14 poetry lock --check
working-directory: synapse
- run: |
diff --git a/CHANGES.md b/CHANGES.md
index da7213cb68..0fa6f7fab7 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,233 @@
+Synapse 1.72.0rc1 (2022-11-16)
+==============================
+
+Please note that we now only support PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life; see our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).
+
+Features
+--------
+
+- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
+- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
+- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
+- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))
+
+
+Bugfixes
+--------
+
+- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
+- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
+- Fix a bug introduced in Synapse 1.66 which caused certain push rules not to be sent to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
+- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
+- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
+- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
+- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
+- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
+- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
+- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))
+
+
+Updates to the Docker image
+---------------------------
+
+- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
+- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))
+
+
+Improved Documentation
+----------------------
+
+- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
+- Add an additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust the general TURN server documentation structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
+- Add an example of how to load balance `/sync` requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
+- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
+- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
+- Add the ability to override which worker types the `complement.sh` command-line script requests. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
+- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in Complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
+- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
+- Use a maintained Github action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
+- Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
+- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
+- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
+- Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
+- Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
+- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
+- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
+- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
+- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))
+
+
+Synapse 1.71.0 (2022-11-08)
+===========================
+
+Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
+They will be removed altogether in Synapse 1.73.0.
+If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
+
+**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.
+
+No significant changes since 1.71.0rc2.
+
+
+Synapse 1.71.0rc2 (2022-11-04)
+==============================
+
+Improved Documentation
+----------------------
+
+- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))
+
+
+Deprecations and Removals
+-------------------------
+
+- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+
+
+Synapse 1.71.0rc1 (2022-11-01)
+==============================
+
+Features
+--------
+
+- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
+- Allow use of Postgres and SQLite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
+- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), push rules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
+- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
+- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
+- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
+- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
+- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
+- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
+- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
+- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
+- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
+- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
+- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
+- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
+- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))
+
+
+Improved Documentation
+----------------------
+
+- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
+- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
+- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110))
+- Update docstrings of `SynapseError` and `FederationError` to better describe what they are used for and what the effects of using them are. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))
+
+
+Internal Changes
+----------------
+
+- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
+- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
+- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
+- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
+- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
+- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
+- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
+- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
+- Build wheels on macos 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
+- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
+- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
+- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
+- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))
+
+
+Dependency updates
+------------------
+
+Runtime:
+
+- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278))
+- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271))
+- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274))
+- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331))
+- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270))
+- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330))
+- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277))
+- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279))
+
+Tooling and CI:
+
+- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328))
+- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042))
+- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276))
+- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275))
+- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273))
+- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332))
+- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133))
+- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272))
+
+
+Synapse 1.70.1 (2022-10-28)
+===========================
+
+This release fixes some regressions that were discovered in 1.70.0.
+
+[#14300](https://github.com/matrix-org/synapse/issues/14300)
+was previously reported to be a regression in 1.70.0 as well. However, we have
+since concluded that it was limited to the reporter and thus have not needed
+to include any fix for it in 1.70.1.
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301))
+- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314))
+
+
+Synapse 1.70.0 (2022-10-26)
+===========================
+
+No significant changes since 1.70.0rc2.
+
+
+Synapse 1.70.0rc2 (2022-10-25)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248))
+- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280))
+- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283))
+
+
+Internal Changes
+----------------
+
+- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253))
+- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259))
+
+
Synapse 1.70.0rc1 (2022-10-19)
==============================
@@ -13,7 +243,7 @@ Features
- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehydration](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
-- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874). ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
+- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))
@@ -21,7 +251,7 @@ Features
Bugfixes
--------
-- Prevent device names from appearing in device list updates when `allow_device_name_lookup_over_federation` is `false`. ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
diff --git a/Cargo.lock b/Cargo.lock
index b3090ad726..8a8099bc6d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.65"
+version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
+checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"
[[package]]
name = "arc-swap"
@@ -37,9 +37,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "blake2"
-version = "0.10.4"
+version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
+checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e"
dependencies = [
"digest",
]
@@ -194,9 +194,9 @@ dependencies = [
[[package]]
name = "pyo3"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "201b6887e5576bf2f945fe65172c1fcbf3fcf285b23e4d71eb171d9736e38d32"
+checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
dependencies = [
"anyhow",
"cfg-if",
@@ -212,9 +212,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf0708c9ed01692635cbf056e286008e5a2927ab1a5e48cdd3aeb1ba5a6fef47"
+checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
dependencies = [
"once_cell",
"target-lexicon",
@@ -222,9 +222,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90352dea4f486932b72ddf776264d293f85b79a1d214de1d023927b41461132d"
+checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
dependencies = [
"libc",
"pyo3-build-config",
@@ -243,9 +243,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7eb24b804a2d9e88bfcc480a5a6dd76f006c1e3edaf064e8250423336e2cd79d"
+checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -255,9 +255,9 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f22bb49f6a7348c253d7ac67a6875f2dc65f36c2ae64a82c381d528972bea6d6"
+checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
"proc-macro2",
"quote",
@@ -294,9 +294,9 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.6.0"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
+checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
dependencies = [
"aho-corasick",
"memchr",
@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.145"
+version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
+checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.145"
+version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
+checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
dependencies = [
"proc-macro2",
"quote",
@@ -343,9 +343,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.86"
+version = "1.0.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41feea4228a6f1cd09ec7a3593a682276702cd67b5273544757dae23c096f074"
+checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45"
dependencies = [
"itoa",
"ryu",
diff --git a/changelog.d/13422.bugfix b/changelog.d/13422.bugfix
deleted file mode 100644
index 3a099acbe6..0000000000
--- a/changelog.d/13422.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper.
\ No newline at end of file
diff --git a/changelog.d/14042.misc b/changelog.d/14042.misc
deleted file mode 100644
index 868d55e76a..0000000000
--- a/changelog.d/14042.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump flake8-bugbear from 21.3.2 to 22.9.23.
diff --git a/changelog.d/14064.bugfix b/changelog.d/14064.bugfix
deleted file mode 100644
index cce6ef3b71..0000000000
--- a/changelog.d/14064.bugfix
+++ /dev/null
@@ -1 +0,0 @@
- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid).
\ No newline at end of file
diff --git a/changelog.d/14110.doc b/changelog.d/14110.doc
deleted file mode 100644
index 0927e31e4f..0000000000
--- a/changelog.d/14110.doc
+++ /dev/null
@@ -1 +0,0 @@
-Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type).
diff --git a/changelog.d/14133.misc b/changelog.d/14133.misc
deleted file mode 100644
index 5180019c68..0000000000
--- a/changelog.d/14133.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-opentracing from 2.4.7 to 2.4.10.
diff --git a/changelog.d/14191.doc b/changelog.d/14191.doc
deleted file mode 100644
index 6b0eeb1ae1..0000000000
--- a/changelog.d/14191.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update docstrings of `SynapseError` and `FederationError` to bettter describe what they are used for and the effects of using them are.
diff --git a/changelog.d/14229.misc b/changelog.d/14229.misc
deleted file mode 100644
index b9cd9a34d5..0000000000
--- a/changelog.d/14229.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor `/key/` endpoints to use `RestServlet` classes.
diff --git a/changelog.d/14230.misc b/changelog.d/14230.misc
deleted file mode 100644
index c9b4b112cb..0000000000
--- a/changelog.d/14230.misc
+++ /dev/null
@@ -1 +0,0 @@
-Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI.
diff --git a/changelog.d/14376.misc b/changelog.d/14376.misc
new file mode 100644
index 0000000000..2ca326fea6
--- /dev/null
+++ b/changelog.d/14376.misc
@@ -0,0 +1 @@
+Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
diff --git a/changelog.d/14393.bugfix b/changelog.d/14393.bugfix
new file mode 100644
index 0000000000..97177bc62f
--- /dev/null
+++ b/changelog.d/14393.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in 1.58.0 where a user with presence state 'org.matrix.msc3026.busy' would mistakenly be set to 'online' when calling `/sync` or `/events` on a worker process.
\ No newline at end of file
diff --git a/changelog.d/14400.misc b/changelog.d/14400.misc
new file mode 100644
index 0000000000..6e025329c4
--- /dev/null
+++ b/changelog.d/14400.misc
@@ -0,0 +1 @@
+Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.
diff --git a/changelog.d/14403.misc b/changelog.d/14403.misc
new file mode 100644
index 0000000000..ff28a2712a
--- /dev/null
+++ b/changelog.d/14403.misc
@@ -0,0 +1 @@
+Faster joins: do not wait for full state when creating events to send.
diff --git a/changelog.d/14412.misc b/changelog.d/14412.misc
new file mode 100644
index 0000000000..4da061d461
--- /dev/null
+++ b/changelog.d/14412.misc
@@ -0,0 +1 @@
+Remove duplicated type information from type hints.
diff --git a/changelog.d/14449.misc b/changelog.d/14449.misc
new file mode 100644
index 0000000000..320c0b6fae
--- /dev/null
+++ b/changelog.d/14449.misc
@@ -0,0 +1 @@
+Fix type logic in TCP replication code that prevented correctly ignoring blank commands.
\ No newline at end of file
diff --git a/changelog.d/14452.misc b/changelog.d/14452.misc
new file mode 100644
index 0000000000..cb190c0823
--- /dev/null
+++ b/changelog.d/14452.misc
@@ -0,0 +1 @@
+Enable mypy's [`strict_equality` check](https://mypy.readthedocs.io/en/stable/command_line.html#cmdoption-mypy-strict-equality) by default.
\ No newline at end of file
diff --git a/changelog.d/14468.misc b/changelog.d/14468.misc
new file mode 100644
index 0000000000..2ca326fea6
--- /dev/null
+++ b/changelog.d/14468.misc
@@ -0,0 +1 @@
+Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
diff --git a/changelog.d/14476.misc b/changelog.d/14476.misc
new file mode 100644
index 0000000000..6e025329c4
--- /dev/null
+++ b/changelog.d/14476.misc
@@ -0,0 +1 @@
+Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.
diff --git a/changelog.d/14487.misc b/changelog.d/14487.misc
new file mode 100644
index 0000000000..f6b47a1d8e
--- /dev/null
+++ b/changelog.d/14487.misc
@@ -0,0 +1 @@
+Reduce default third party invite rate limit to 216 invites per day.
diff --git a/changelog.d/14490.misc b/changelog.d/14490.misc
new file mode 100644
index 0000000000..c0a4daa885
--- /dev/null
+++ b/changelog.d/14490.misc
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 0.9 where it would fail to fetch server keys whose IDs contain a forward slash.
diff --git a/debian/changelog b/debian/changelog
index 60ec92b47d..57d7b18078 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,45 @@
+matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.72.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 16 Nov 2022 15:10:59 +0000
+
+matrix-synapse-py3 (1.71.0) stable; urgency=medium
+
+ * New Synapse release 1.71.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Nov 2022 10:38:10 +0000
+
+matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.71.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 04 Nov 2022 12:00:33 +0000
+
+matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.71.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Nov 2022 12:10:17 +0000
+
+matrix-synapse-py3 (1.70.1) stable; urgency=medium
+
+ * New Synapse release 1.70.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 28 Oct 2022 12:10:21 +0100
+
+matrix-synapse-py3 (1.70.0) stable; urgency=medium
+
+ * New Synapse release 1.70.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Oct 2022 11:11:50 +0100
+
+matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.70.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Oct 2022 10:59:47 +0100
+
matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium
* New Synapse release 1.70.0rc1.
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
index bb85d9fed7..49d79745b0 100755
--- a/docker/complement/conf/start_for_complement.sh
+++ b/docker/complement/conf/start_for_complement.sh
@@ -45,7 +45,12 @@ esac
if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# Specify the workers to test with
- export SYNAPSE_WORKER_TYPES="\
+ # Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
+ # utilizing WORKERS=1 for backwards compatibility.
+ # -n True if the length of string is non-zero.
+ # -z True if the length of string is zero.
+ if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
+ export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
@@ -61,6 +66,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
appservice, \
pusher"
+ fi
+ log "Workers requested: $SYNAPSE_WORKER_TYPES"
# Improve startup times by using a launcher based on fork()
export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index c651645115..883a87159c 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
- # Enable spaces support
- spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
@@ -104,6 +102,8 @@ experimental_features:
{% endif %}
# Enable jump to date endpoint
msc3030_enabled: true
+ # Filtering /messages by relation type.
+ msc3874_enabled: true
server_notices:
system_mxid_localpart: _server
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 1ea456b2f8..c1e1544536 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -20,7 +20,7 @@
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-# below. Leave empty for no workers, or set to '*' for all possible workers.
+# below. Leave empty for no workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -50,13 +50,18 @@ from jinja2 import Environment, FileSystemLoader
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
-
+# Workers with exposed endpoints need either "client", "federation", or "media" listener_resources
+# Watching /_matrix/client needs a "client" listener
+# Watching /_matrix/federation needs a "federation" listener
+# Watching /_matrix/media and related needs a "media" listener
+# Stream Writers require "client" and "replication" listeners because they
+# have to attach via the instance_map to the main process and serve client endpoints.
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"pusher": {
- "app": "synapse.app.pusher",
+ "app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- "shared_extra_conf": {"start_pushers": False},
+ "shared_extra_conf": {},
"worker_extra_conf": "",
},
"user_dir": {
@@ -79,7 +84,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_synapse/admin/v1/media/.*$",
"^/_synapse/admin/v1/quarantine_media/.*$",
],
- "shared_extra_conf": {"enable_media_repo": False},
+ # The first configured media worker will run the media background jobs
+ "shared_extra_conf": {
+ "enable_media_repo": False,
+ "media_instance_running_background_jobs": "media_repository1",
+ },
"worker_extra_conf": "enable_media_repo: true",
},
"appservice": {
@@ -90,10 +99,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"federation_sender": {
- "app": "synapse.app.federation_sender",
+ "app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- "shared_extra_conf": {"send_federation": False},
+ "shared_extra_conf": {},
"worker_extra_conf": "",
},
"synchrotron": {
@@ -200,14 +209,54 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"frontend_proxy": {
- "app": "synapse.app.frontend_proxy",
+ "app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
"shared_extra_conf": {},
- "worker_extra_conf": (
- "worker_main_http_uri: http://127.0.0.1:%d"
- % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
- ),
+ "worker_extra_conf": "",
+ },
+ "account_data": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(r0|v3|unstable)/.*/tags",
+ "^/_matrix/client/(r0|v3|unstable)/.*/account_data",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "presence": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "receipts": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
+ "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "to_device": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "typing": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
},
}
@@ -271,14 +320,14 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
outfile.write(rendered)
-def add_sharding_to_shared_config(
+def add_worker_roles_to_shared_config(
shared_config: dict,
worker_type: str,
worker_name: str,
worker_port: int,
) -> None:
"""Given a dictionary representing a config file shared across all workers,
- append sharded worker information to it for the current worker_type instance.
+ append appropriate worker information to it for the current worker_type instance.
Args:
shared_config: The config dict that all worker instances share (after being converted to YAML)
@@ -309,9 +358,19 @@ def add_sharding_to_shared_config(
"port": worker_port,
}
- elif worker_type == "media_repository":
- # The first configured media worker will run the media background jobs
- shared_config.setdefault("media_instance_running_background_jobs", worker_name)
+ elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
+ # Update the list of stream writers
+ # It's convenient that the name of the worker type is the same as the stream to write
+ shared_config.setdefault("stream_writers", {}).setdefault(
+ worker_type, []
+ ).append(worker_name)
+
+ # Map of stream writer instance names to host/ports combos
+ # For now, all stream writers need http replication ports
+ instance_map[worker_name] = {
+ "host": "localhost",
+ "port": worker_port,
+ }
def generate_base_homeserver_config() -> None:
@@ -421,8 +480,7 @@ def generate_worker_files(
if worker_config:
worker_config = worker_config.copy()
else:
- log(worker_type + " is an unknown worker type! It will be ignored")
- continue
+ error(worker_type + " is an unknown worker type! Please fix!")
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
@@ -441,11 +499,11 @@ def generate_worker_files(
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
- if worker_type_total_count > 1:
- # Update the shared config with sharding-related options if necessary
- add_sharding_to_shared_config(
- shared_config, worker_type, worker_name, worker_port
- )
+
+ # Update the shared config with sharding-related options if necessary
+ add_worker_roles_to_shared_config(
+ shared_config, worker_type, worker_name, worker_port
+ )
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 38849593c6..e398895b36 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -9,6 +9,8 @@
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
+ - [coturn TURN server](setup/turn/coturn.md)
+ - [eturnal TURN server](setup/turn/eturnal.md)
- [Delegation](delegate.md)
# Upgrading
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 3625c7b6c5..880bef4194 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -37,6 +37,7 @@ It returns a JSON body like the following:
"is_guest": 0,
"admin": 0,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"creation_ts": 1560432506,
"appservice_id": null,
@@ -167,6 +168,7 @@ A response body like the following is returned:
"admin": 0,
"user_type": null,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"displayname": "",
"avatar_url": null,
@@ -177,6 +179,7 @@ A response body like the following is returned:
"admin": 1,
"user_type": null,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"displayname": "",
"avatar_url": "",
@@ -247,6 +250,7 @@ The following fields are returned in the JSON response body:
- `user_type` - string - Type of the user. Normal users are type `None`.
This allows user type specific behaviour. There are also types `support` and `bot`.
- `deactivated` - bool - Status if that user has been marked as deactivated.
+ - `erased` - bool - Status if that user has been marked as erased.
- `shadow_banned` - bool - Status if that user has been marked as shadow banned.
- `displayname` - string - The user's display name if they have set one.
- `avatar_url` - string - The user's avatar URL if they have set one.
@@ -1193,3 +1197,42 @@ Returns a `404` HTTP status code if no user was found, with a response body like
```
_Added in Synapse 1.68.0._
+
+
+### Find a user based on their Third Party ID (ThreePID or 3PID)
+
+The API is:
+
+```
+GET /_synapse/admin/v1/threepid/$medium/users/$address
+```
+
+When a user matches the given address for the given medium, an HTTP code `200` with a response body like the following is returned:
+
+```json
+{
+ "user_id": "@hello:example.org"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `medium` - Kind of third-party ID, either `email` or `msisdn`.
+- `address` - Value of the third-party ID.
+
+The `address` may have characters that are not URL-safe, so it is advised to URL-encode those parameters.
+
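+For example, querying by email address with `curl` (the admin access token and
+homeserver URL are placeholders):
+
+```sh
+curl --header "Authorization: Bearer <admin_access_token>" \
+    "https://synapse.example.com/_synapse/admin/v1/threepid/email/users/alice%40example.org"
+```
+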
+**Errors**
+
+Returns a `404` HTTP status code if no user was found, with a response body like this:
+
+```json
+{
+ "errcode":"M_NOT_FOUND",
+ "error":"User not found"
+}
+```
+
+_Added in Synapse 1.72.0._
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 1e52f9808c..342bc1d340 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -324,6 +324,12 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
+ - If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
+ types you wish to test. A simple comma-delimited string containing the worker types
+ defined in the `WORKERS_CONFIG` template
+ [here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
+ A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
+ See the [worker documentation](../workers.md) for additional information on workers.
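+ For example, a workerised Complement run might be invoked like this (a
+ sketch, assuming the `complement.sh` helper used above):
+
+ ```sh
+ WORKERS=1 WORKER_TYPES="federation_inbound, federation_sender, synchrotron" ./scripts-dev/complement.sh
+ ```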
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
diff --git a/docs/metrics-howto.md b/docs/metrics-howto.md
index 8f1f11f2b2..16e4368f35 100644
--- a/docs/metrics-howto.md
+++ b/docs/metrics-howto.md
@@ -209,6 +209,9 @@ altogether in Synapse v1.73.0.**
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
+| synapse_admin_mau_current | synapse_admin_mau:current |
+| synapse_admin_mau_max | synapse_admin_mau:max |
+| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
diff --git a/docs/openid.md b/docs/openid.md
index 87ebea4c29..37c5eb244d 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -49,6 +49,13 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.
+## OIDC Back-Channel Logout
+
+Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.
+
+This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user session.
+This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and by setting the following URL as the destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`
+
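+For example, a provider entry with Back-Channel Logout enabled might look like
+this (a sketch; the issuer and client details are placeholders):
+
+```yaml
+oidc_providers:
+  - idp_id: my_idp
+    idp_name: "My OpenID provider"
+    issuer: "https://idp.example.org/"
+    client_id: "synapse"
+    client_secret: "<client secret>"
+    backchannel_logout_enabled: true
+```
+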
## Sample configs
Here are a few configs for providers that should work with Synapse.
@@ -123,6 +130,9 @@ oidc_providers:
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
+Keycloak supports OIDC Back-Channel Logout, which sends logout notifications to Synapse, so that Synapse users get logged out when they log out from Keycloak.
+This can be optionally enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.
+
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
@@ -144,6 +154,8 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
+| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
+| Backchannel Logout Session Required (optional) | `On` |
5. Click `Save`
6. On the Credentials tab, update the fields:
@@ -167,7 +179,9 @@ oidc_providers:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
+ backchannel_logout_enabled: true # Optional
```
+
### Auth0
[Auth0][auth0] is a hosted SaaS IdP solution.
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index 4e7a1d4435..48dbc1c58e 100644
--- a/docs/reverse_proxy.md
+++ b/docs/reverse_proxy.md
@@ -79,6 +79,9 @@ server {
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
+
+ # Synapse responses may be chunked, which is an HTTP/1.1 feature.
+ proxy_http_version 1.1;
}
}
```
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index 3065a0e2d9..6339160d00 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -6,7 +6,7 @@
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
diff --git a/docs/setup/turn/coturn.md b/docs/setup/turn/coturn.md
new file mode 100644
index 0000000000..a1bb1e934c
--- /dev/null
+++ b/docs/setup/turn/coturn.md
@@ -0,0 +1,188 @@
+# coturn TURN server
+
+The following sections describe how to install [coturn](https://github.com/coturn/coturn) (which implements the TURN REST API).
+
+## `coturn` setup
+
+### Initial installation
+
+The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
+
+#### Debian and Ubuntu based distributions
+
+Just install the debian package:
+
+```sh
+sudo apt install coturn
+```
+
+This will install and start a systemd service called `coturn`.
+
+#### Source installation
+
+1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
+
+1. Configure it:
+
+ ```sh
+ ./configure
+ ```
+
+ You may need to install `libevent2`: if so, you should do so in
+ the way recommended by your operating system. You can ignore
+ warnings about lack of database support: a database is unnecessary
+ for this purpose.
+
+1. Build and install it:
+
+ ```sh
+ make
+ sudo make install
+ ```
+
+### Configuration
+
+1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
+ lines, with example values, are:
+
+ ```
+ use-auth-secret
+ static-auth-secret=[your secret key here]
+ realm=turn.myserver.org
+ ```
+
+ See `turnserver.conf` for explanations of the options. One way to generate
+ the `static-auth-secret` is with `pwgen`:
+
+ ```sh
+ pwgen -s 64 1
+ ```
+
+ A `realm` must be specified, but its value is somewhat arbitrary. (It is
+ sent to clients as part of the authentication flow.) It is conventional to
+ set it to be your server name.
+
+1. You will most likely want to configure `coturn` to write logs somewhere. The
+ easiest way is normally to send them to the syslog:
+
+ ```sh
+ syslog
+ ```
+
+ (in which case, the logs will be available via `journalctl -u coturn` on a
+ systemd system). Alternatively, `coturn` can be configured to write to a
+ logfile - check the example config file supplied with `coturn`.
+
+1. Consider your security settings. TURN lets users request a relay which will
+ connect to arbitrary IP addresses and ports. The following configuration is
+ suggested as a minimum starting point:
+
+ ```
+ # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
+ no-tcp-relay
+
+ # don't let the relay ever try to connect to private IP address ranges within your network (if any)
+ # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
+ denied-peer-ip=10.0.0.0-10.255.255.255
+ denied-peer-ip=192.168.0.0-192.168.255.255
+ denied-peer-ip=172.16.0.0-172.31.255.255
+
+ # recommended additional local peers to block, to mitigate external access to internal services.
+ # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
+ no-multicast-peers
+ denied-peer-ip=0.0.0.0-0.255.255.255
+ denied-peer-ip=100.64.0.0-100.127.255.255
+ denied-peer-ip=127.0.0.0-127.255.255.255
+ denied-peer-ip=169.254.0.0-169.254.255.255
+ denied-peer-ip=192.0.0.0-192.0.0.255
+ denied-peer-ip=192.0.2.0-192.0.2.255
+ denied-peer-ip=192.88.99.0-192.88.99.255
+ denied-peer-ip=198.18.0.0-198.19.255.255
+ denied-peer-ip=198.51.100.0-198.51.100.255
+ denied-peer-ip=203.0.113.0-203.0.113.255
+ denied-peer-ip=240.0.0.0-255.255.255.255
+
+ # special case the turn server itself so that client->TURN->TURN->client flows work
+ # this should be one of the turn server's listening IPs
+ allowed-peer-ip=10.0.0.1
+
+ # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
+ user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
+ total-quota=1200
+ ```
+
+1. Also consider supporting TLS/DTLS. To do this, add the following settings
+ to `turnserver.conf`:
+
+ ```
+ # TLS certificates, including intermediate certs.
+ # For Let's Encrypt certificates, use `fullchain.pem` here.
+ cert=/path/to/fullchain.pem
+
+ # TLS private key file
+ pkey=/path/to/privkey.pem
+
+ # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
+ #no-tls
+ #no-dtls
+ ```
+
+ In this case, replace the `turn:` schemes in the `turn_uris` settings below
+ with `turns:`.
+
+ We recommend that you only try to set up TLS/DTLS once you have set up a
+ basic installation and got it working.
+
+ NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+ not work with any Matrix client that uses Chromium's WebRTC library. This
+ currently includes Element Android & iOS; for more details, see their
+ [respective](https://github.com/vector-im/element-android/issues/1533)
+ [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+ [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+ Consider using a ZeroSSL certificate for your TURN server as a working alternative.
+
+1. Ensure your firewall allows traffic into the TURN server on the ports
+ you've configured it to listen on (By default: 3478 and 5349 for TURN
+ traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
+ for the UDP relay.)
+
+1. If your TURN server is behind NAT, the NAT gateway must have an external,
+ publicly-reachable IP address. You must configure `coturn` to advertise that
+ address to connecting clients:
+
+ ```
+ external-ip=EXTERNAL_NAT_IPv4_ADDRESS
+ ```
+
+ You may optionally limit the TURN server to listen only on the local
+ address that is mapped by NAT to the external address:
+
+ ```
+ listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
+ ```
+
+ If your NAT gateway is reachable over both IPv4 and IPv6, you may
+ configure `coturn` to advertise each available address:
+
+ ```
+ external-ip=EXTERNAL_NAT_IPv4_ADDRESS
+ external-ip=EXTERNAL_NAT_IPv6_ADDRESS
+ ```
+
+ When advertising an external IPv6 address, ensure that the firewall and
+ network settings of the system running your TURN server are configured to
+ accept IPv6 traffic, and that the TURN server is listening on the local
+ IPv6 address that is mapped by NAT to the external IPv6 address.
+
+1. (Re)start the turn server:
+
+ * If you used the Debian package (or have set up a systemd unit yourself):
+ ```sh
+ sudo systemctl restart coturn
+ ```
+
+ * If you built from source:
+
+ ```sh
+ /usr/local/bin/turnserver -o
+ ```
diff --git a/docs/setup/turn/eturnal.md b/docs/setup/turn/eturnal.md
new file mode 100644
index 0000000000..2e5a45673e
--- /dev/null
+++ b/docs/setup/turn/eturnal.md
@@ -0,0 +1,170 @@
+# eturnal TURN server
+
+The following sections describe how to install [eturnal](https://github.com/processone/eturnal)
+(which implements the TURN REST API).
+
+## `eturnal` setup
+
+### Initial installation
+
+The `eturnal` TURN server implementation is available from a variety of sources
+such as native package managers, binary packages, installation from source or
+[container image](https://eturnal.net/documentation/code/docker.html). They are
+all described [here](https://github.com/processone/eturnal#installation).
+
+Quick-Test instructions in a [Linux Shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md)
+or with [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md)
+are available as well.
+
+### Configuration
+
+After installation, `eturnal` usually ships a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml)
+at `/etc/eturnal.yml` (and, if not found there, a fallback at
+`/opt/eturnal/etc/eturnal.yml`). It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML)
+format. The file contains further explanations.
+
+Here are some hints on how to configure `eturnal` on your [host machine](https://github.com/processone/eturnal#configuration)
+or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html).
+You may also take a deeper dive into the [reference documentation](https://eturnal.net/documentation/).
+
+`eturnal` runs out of the box with the default configuration. To enable TURN and
+to integrate it with your homeserver, some aspects in `eturnal`'s default configuration file
+must be edited:
+
+1. Homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
+ and eturnal's shared `secret` for authentication
+
+ Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
+ configuration file:
+
+ ```yaml
+ secret: "long-and-cryptic" # Shared secret, CHANGE THIS.
+ ```
+
+ One way to generate a `secret` is with `pwgen`:
+
+ ```sh
+ pwgen -s 64 1
+ ```
+
+1. Public IP address
+
+ If your TURN server is behind NAT, the NAT gateway must have an external,
+ publicly-reachable IP address. `eturnal` tries to autodetect the public IP address;
+ however, it may also be configured explicitly by uncommenting and adjusting this line, so that
+ `eturnal` advertises that address to connecting clients:
+
+ ```yaml
+ relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+ ```
+
+ If your NAT gateway is reachable over both IPv4 and IPv6, you may
+ configure `eturnal` to advertise each available address:
+
+ ```yaml
+ relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+ relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
+ ```
+
+ When advertising an external IPv6 address, ensure that the firewall and
+ network settings of the system running your TURN server are configured to
+ accept IPv6 traffic, and that the TURN server is listening on the local
+ IPv6 address that is mapped by NAT to the external IPv6 address.
+
+1. Logging
+
+ If `eturnal` was started by systemd, log files are written into the
+ `/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
+ instead, the `log_dir` option can be set to `stdout` in the configuration file.
+
+1. Security considerations
+
+ Consider your security settings. TURN lets users request a relay which will
+ connect to arbitrary IP addresses and ports. The following configuration is
+ suggested as a minimum starting point, [see also the official documentation](https://eturnal.net/documentation/#blacklist):
+
+ ```yaml
+ ## Reject TURN relaying from/to the following addresses/networks:
+ blacklist: # This is the default blacklist.
+ - "127.0.0.0/8" # IPv4 loopback.
+ - "::1" # IPv6 loopback.
+ - recommended # Expands to a number of networks recommended to be
+ # blocked, but includes private networks. Those
+ # would have to be 'whitelist'ed if eturnal serves
+ # local clients/peers within such networks.
+ ```
+
+ To whitelist IP addresses or specific (private) networks, you need to **add** a
+ `whitelist` section to the configuration file, e.g.:
+
+ ```yaml
+ whitelist:
+ - "192.168.0.0/16"
+ - "203.0.113.113"
+ - "2001:db8::/64"
+ ```
+
+ The more specific, the better.
+
+1. TURNS (TURN via TLS/DTLS)
+
+ Also consider supporting TLS/DTLS. To do this, adjust the following settings
+ in the `eturnal.yml` configuration file (the TLS-related parts must no longer be commented out):
+
+ ```yaml
+ listen:
+ - ip: "::"
+ port: 3478
+ transport: udp
+ - ip: "::"
+ port: 3478
+ transport: tcp
+ - ip: "::"
+ port: 5349
+ transport: tls
+
+ ## TLS certificate/key files (must be readable by 'eturnal' user!):
+ tls_crt_file: /etc/eturnal/tls/crt.pem
+ tls_key_file: /etc/eturnal/tls/key.pem
+ ```
+
+ In this case, replace the `turn:` schemes in the homeserver's `turn_uris` settings
+ with `turns:`. This is described further [here](../../usage/configuration/config_documentation.md#turn_uris).
+
+ We recommend that you only try to set up TLS/DTLS once you have set up a
+ basic installation and got it working.
+
+ NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+ not work with any Matrix client that uses Chromium's WebRTC library. This
+ currently includes Element Android & iOS; for more details, see their
+ [respective](https://github.com/vector-im/element-android/issues/1533)
+ [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+ [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+ Consider using a ZeroSSL certificate for your TURN server as a working alternative.
+
+1. Firewall
+
+ Ensure your firewall allows traffic into the TURN server on the ports
+ you've configured it to listen on (By default: 3478 and 5349 for TURN
+ traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
+ for the UDP relay.)
+
+1. Reloading/restarting `eturnal`
+
+ Changes in the configuration file require `eturnal` to be reloaded or restarted. This
+ can be achieved by:
+
+ ```sh
+ eturnalctl reload
+ ```
+
+ `eturnal` performs a configuration check before actually reloading/restarting,
+ and provides hints if something is not configured correctly.
+
+### eturnalctl operations script
+
+`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
+which can be called e.g. to check whether the service is up, to restart the service,
+to query how many active sessions exist, to change logging behaviour, and so on.
+
+Hint: If `eturnalctl` is not part of your `$PATH`, consider either sym-linking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it from the default `eturnal` directory directly: e.g. `/opt/eturnal/bin/eturnalctl info`
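+
+For example (a sketch using only commands mentioned above):
+
+```sh
+eturnalctl info    # query daemon information, including active sessions
+eturnalctl reload  # re-read and apply the configuration file
+```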
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index 37a311ad9c..b466cab40c 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -9,222 +9,28 @@ allows the homeserver to generate credentials that are valid for use on the
TURN server through the use of a secret shared between the homeserver and the
TURN server.
-The following sections describe how to install [coturn]() (which implements the TURN REST API) and integrate it with synapse.
+This documentation provides two TURN server configuration examples:
+
+* [coturn](setup/turn/coturn.md)
+* [eturnal](setup/turn/eturnal.md)
## Requirements
-For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.
+For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.
Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP.
However, even with appropriate configuration, NAT is known to cause issues and to often not work.
-## `coturn` setup
-
-### Initial installation
-
-The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
-
-#### Debian installation
-
-Just install the debian package:
-
-```sh
-apt install coturn
-```
-
-This will install and start a systemd service called `coturn`.
-
-#### Source installation
-
-1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
-
-1. Configure it:
-
- ```sh
- ./configure
- ```
-
- You may need to install `libevent2`: if so, you should do so in
- the way recommended by your operating system. You can ignore
- warnings about lack of database support: a database is unnecessary
- for this purpose.
-
-1. Build and install it:
-
- ```sh
- make
- make install
- ```
-
-### Configuration
-
-1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
- lines, with example values, are:
-
- ```
- use-auth-secret
- static-auth-secret=[your secret key here]
- realm=turn.myserver.org
- ```
-
- See `turnserver.conf` for explanations of the options. One way to generate
- the `static-auth-secret` is with `pwgen`:
-
- ```sh
- pwgen -s 64 1
- ```
-
- A `realm` must be specified, but its value is somewhat arbitrary. (It is
- sent to clients as part of the authentication flow.) It is conventional to
- set it to be your server name.
-
-1. You will most likely want to configure coturn to write logs somewhere. The
- easiest way is normally to send them to the syslog:
-
- ```sh
- syslog
- ```
-
- (in which case, the logs will be available via `journalctl -u coturn` on a
- systemd system). Alternatively, coturn can be configured to write to a
- logfile - check the example config file supplied with coturn.
-
-1. Consider your security settings. TURN lets users request a relay which will
- connect to arbitrary IP addresses and ports. The following configuration is
- suggested as a minimum starting point:
-
- ```
- # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
- no-tcp-relay
-
- # don't let the relay ever try to connect to private IP address ranges within your network (if any)
- # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
- denied-peer-ip=10.0.0.0-10.255.255.255
- denied-peer-ip=192.168.0.0-192.168.255.255
- denied-peer-ip=172.16.0.0-172.31.255.255
-
- # recommended additional local peers to block, to mitigate external access to internal services.
- # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
- no-multicast-peers
- denied-peer-ip=0.0.0.0-0.255.255.255
- denied-peer-ip=100.64.0.0-100.127.255.255
- denied-peer-ip=127.0.0.0-127.255.255.255
- denied-peer-ip=169.254.0.0-169.254.255.255
- denied-peer-ip=192.0.0.0-192.0.0.255
- denied-peer-ip=192.0.2.0-192.0.2.255
- denied-peer-ip=192.88.99.0-192.88.99.255
- denied-peer-ip=198.18.0.0-198.19.255.255
- denied-peer-ip=198.51.100.0-198.51.100.255
- denied-peer-ip=203.0.113.0-203.0.113.255
- denied-peer-ip=240.0.0.0-255.255.255.255
-
- # special case the turn server itself so that client->TURN->TURN->client flows work
- # this should be one of the turn server's listening IPs
- allowed-peer-ip=10.0.0.1
-
- # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
- user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
- total-quota=1200
- ```
-
-1. Also consider supporting TLS/DTLS. To do this, add the following settings
- to `turnserver.conf`:
-
- ```
- # TLS certificates, including intermediate certs.
- # For Let's Encrypt certificates, use `fullchain.pem` here.
- cert=/path/to/fullchain.pem
-
- # TLS private key file
- pkey=/path/to/privkey.pem
-
- # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
- #no-tls
- #no-dtls
- ```
-
- In this case, replace the `turn:` schemes in the `turn_uris` settings below
- with `turns:`.
-
- We recommend that you only try to set up TLS/DTLS once you have set up a
- basic installation and got it working.
-
- NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
- not work with any Matrix client that uses Chromium's WebRTC library. This
- currently includes Element Android & iOS; for more details, see their
- [respective](https://github.com/vector-im/element-android/issues/1533)
- [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
- [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
- Consider using a ZeroSSL certificate for your TURN server as a working alternative.
-
-1. Ensure your firewall allows traffic into the TURN server on the ports
- you've configured it to listen on (By default: 3478 and 5349 for TURN
- traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
- for the UDP relay.)
-
-1. If your TURN server is behind NAT, the NAT gateway must have an external,
- publicly-reachable IP address. You must configure coturn to advertise that
- address to connecting clients:
-
- ```
- external-ip=EXTERNAL_NAT_IPv4_ADDRESS
- ```
-
- You may optionally limit the TURN server to listen only on the local
- address that is mapped by NAT to the external address:
-
- ```
- listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
- ```
-
- If your NAT gateway is reachable over both IPv4 and IPv6, you may
- configure coturn to advertise each available address:
-
- ```
- external-ip=EXTERNAL_NAT_IPv4_ADDRESS
- external-ip=EXTERNAL_NAT_IPv6_ADDRESS
- ```
-
- When advertising an external IPv6 address, ensure that the firewall and
- network settings of the system running your TURN server are configured to
- accept IPv6 traffic, and that the TURN server is listening on the local
- IPv6 address that is mapped by NAT to the external IPv6 address.
-
-1. (Re)start the turn server:
-
- * If you used the Debian package (or have set up a systemd unit yourself):
- ```sh
- systemctl restart coturn
- ```
-
- * If you installed from source:
-
- ```sh
- bin/turnserver -o
- ```
+Afterwards, the homeserver needs some further configuration.
## Synapse setup
Your homeserver configuration file needs the following extra keys:
-1. "`turn_uris`": This needs to be a yaml list of public-facing URIs
- for your TURN server to be given out to your clients. Add separate
- entries for each transport your TURN server supports.
-2. "`turn_shared_secret`": This is the secret shared between your
- homeserver and your TURN server, so you should set it to the same
- string you used in turnserver.conf.
-3. "`turn_user_lifetime`": This is the amount of time credentials
- generated by your homeserver are valid for (in milliseconds).
- Shorter times offer less potential for abuse at the expense of
- increased traffic between web clients and your homeserver to
- refresh credentials. The TURN REST API specification recommends
- one day (86400000).
-4. "`turn_allow_guests`": Whether to allow guest users to use the
- TURN server. This is enabled by default, as otherwise VoIP will
- not work reliably for guests. However, it does introduce a
- security risk as it lets guests connect to arbitrary endpoints
- without having gone through a CAPTCHA or similar to register a
- real account.
+1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
+2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
+3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
+4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)
As an example, here is the relevant section of the config file for `matrix.org`. The
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
@@ -263,7 +69,7 @@ Here are a few things to try:
* Check that you have opened your firewall to allow UDP traffic to the UDP
relay ports (49152-65535 by default).
- * Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted)
+ * Try disabling the TURN server's TLS/DTLS listeners and enabling only its (unencrypted)
TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
voice & video WebRTC traffic is always encrypted.)
@@ -288,12 +94,19 @@ Here are a few things to try:
* ensure that your TURN server uses the NAT gateway as its default route.
- * Enable more verbose logging in coturn via the `verbose` setting:
+ * Enable more verbose logging, in `coturn` via the `verbose` setting:
```
verbose
```
+ or, with `eturnal`, via the shell command `eturnalctl loglevel debug` or in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for this to take effect):
+
+ ```yaml
+ ## Logging configuration:
+ log_level: debug
+ ```
+
... and then see if there are any clues in its logs.
* If you are using a browser-based client under Chrome, check
@@ -317,7 +130,7 @@ Here are a few things to try:
matrix client to your homeserver in your browser's network inspector. In
the response you should see `username` and `password`. Or:
- * Use the following shell commands:
+ * Use the following shell commands for `coturn`:
```sh
secret=staticAuthSecretHere
@@ -327,11 +140,16 @@ Here are a few things to try:
echo -e "username: $u\npassword: $p"
```
- Or:
+ or for `eturnal`:
- * Temporarily configure coturn to accept a static username/password. To do
- this, comment out `use-auth-secret` and `static-auth-secret` and add the
- following:
+ ```sh
+ eturnalctl credentials
+ ```
+
+
+ * Or (**coturn only**): Temporarily configure `coturn` to accept a static
+ username/password. To do this, comment out `use-auth-secret` and
+ `static-auth-secret` and add the following:
```
lt-cred-mech
diff --git a/docs/upgrade.md b/docs/upgrade.md
index b81385b191..2aa353e496 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -88,6 +88,60 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.72.0
+
+## Dropping support for PostgreSQL 10
+
+In line with our [deprecation policy](deprecation_policy.md), we've dropped
+support for PostgreSQL 10, as it is no longer supported upstream.
+
+This release of Synapse requires PostgreSQL 11+.
+
+
+# Upgrading to v1.71.0
+
+## Removal of the `generate_short_term_login_token` module API method
+
+As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed.
+
+Modules relying on it can instead use the `create_login_token` method.
+
+
+## Changes to the events received by application services (interest)
+
+To align with spec (changed in
+[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now
+only considers local users to be interesting. In other words, the `users` namespace
+regex is only applied against local users of the homeserver.
+
+Please note that this probably doesn't affect the expected behavior of your application
+service, since an interesting local user in a room still means all messages in the room
+(from local or remote users) will still be considered interesting. And matching a room
+with the `rooms` or `aliases` namespace regex will still consider all events sent in the
+room to be interesting to the application service.
+
+If one of your application service's `users` regexes was intended to match a remote user,
+it will no longer match as you expect. The behavioral mismatch between matching all
+local users and some remote users is why the spec was changed/clarified and this
+caveat is no longer supported.
+
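+For illustration, a `users` namespace regex like the following (a hypothetical
+registration snippet; the server name is a placeholder) now only ever matches
+users local to the homeserver:
+
+```yaml
+namespaces:
+  users:
+    - exclusive: false
+      regex: "@_bridge_.*:example.org"
+```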
+
+## Legacy Prometheus metric names are now disabled by default
+
+Synapse v1.71.0 disables legacy Prometheus metric names by default.
+For administrators that still rely on them and have not yet had a chance to update their
+uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
+the configuration to re-enable them temporarily.
+
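+For example:
+
+```yaml
+enable_legacy_metrics: true
+```
+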
+Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
+it will no longer be possible to re-enable them.
+
+If you do not use metrics or you have already updated your Grafana dashboard(s),
+Prometheus console(s) and alerting rule(s), there is no action needed.
+
+See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).
+
+
# Upgrading to v1.69.0
## Changes to the receipts replication streams
diff --git a/docs/usage/administration/monthly_active_users.md b/docs/usage/administration/monthly_active_users.md
index d4e9037284..b1da6f17c2 100644
--- a/docs/usage/administration/monthly_active_users.md
+++ b/docs/usage/administration/monthly_active_users.md
@@ -73,12 +73,12 @@ When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMI
Synapse records several different prometheus metrics for MAU.
-`synapse_admin_mau:current` records the current MAU figure for native (non-application-service) users.
+`synapse_admin_mau_current` records the current MAU figure for native (non-application-service) users.
-`synapse_admin_mau:max` records the maximum MAU as dictated by the `max_mau_value` config value.
+`synapse_admin_mau_max` records the maximum MAU as dictated by the `max_mau_value` config value.
`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native` .
-`synapse_admin_mau:registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
+`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
registered accounts on the homeserver.
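+
+For example, the remaining headroom against the configured cap could be
+inspected with a PromQL expression like the following (a sketch; adjust if
+you still use the legacy metric names):
+
+```
+synapse_admin_mau_max - synapse_admin_mau_current
+```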
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 82ffd68c50..ed07e2180a 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -100,7 +100,7 @@ modules:
config: {}
```
---
-## Server ##
+## Server
Define your homeserver name and other base options.
@@ -160,7 +160,7 @@ including _matrix/...). This is the same URL a user might enter into the
'Custom Homeserver URL' field on their client. If you use Synapse with a
reverse proxy, this should be the URL to reach Synapse via the proxy.
Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
-'listeners' below).
+['listeners'](#listeners) below).
Defaults to `https:///`.
@@ -571,7 +571,7 @@ Example configuration:
delete_stale_devices_after: 1y
```
-## Homeserver blocking ##
+## Homeserver blocking
Useful options for Synapse admins.
---
@@ -923,7 +923,7 @@ retention:
interval: 1d
```
---
-## TLS ##
+## TLS
Options related to TLS.
@@ -1013,7 +1013,7 @@ federation_custom_ca_list:
- myCA3.pem
```
---
-## Federation ##
+## Federation
Options related to federation.
@@ -1072,7 +1072,7 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
-## Caching ##
+## Caching
Options related to caching.
@@ -1186,7 +1186,7 @@ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using
`systemctl reload matrix-synapse`.
---
-## Database ##
+## Database
Config options related to database settings.
---
@@ -1333,20 +1333,21 @@ databases:
cp_max: 10
```
---
-## Logging ##
+## Logging
Config options related to logging.
---
### `log_config`
-This option specifies a yaml python logging config file as described [here](https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema).
+This option specifies a yaml python logging config file as described
+[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
Example configuration:
```yaml
log_config: "CONFDIR/SERVERNAME.log.config"
```
---
-## Ratelimiting ##
+## Ratelimiting
Options related to ratelimiting in Synapse.
Each ratelimiting configuration is made of two parameters:
@@ -1577,7 +1578,7 @@ Example configuration:
federation_rr_transactions_per_room_per_second: 40
```
---
-## Media Store ##
+## Media Store
Config options related to Synapse's media store.
---
@@ -1767,7 +1768,7 @@ url_preview_ip_range_blacklist:
- 'ff00::/8'
- 'fec0::/10'
```
-----
+---
### `url_preview_ip_range_whitelist`
This option sets a list of IP address CIDR ranges that the URL preview spider is allowed
@@ -1861,7 +1862,7 @@ Example configuration:
- 'fr;q=0.8'
- '*;q=0.7'
```
-----
+---
### `oembed`
oEmbed allows for easier embedding content from a website. It can be
@@ -1878,7 +1879,7 @@ oembed:
- oembed/my_providers.json
```
---
-## Captcha ##
+## Captcha
See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.
@@ -1927,7 +1928,7 @@ Example configuration:
recaptcha_siteverify_api: "https://my.recaptcha.site"
```
---
-## TURN ##
+## TURN
Options related to adding a TURN server to Synapse.
---
@@ -1948,7 +1949,7 @@ Example configuration:
```yaml
turn_shared_secret: "YOUR_SHARED_SECRET"
```
-----
+---
### `turn_username` and `turn_password`
The Username and password if the TURN server needs them and does not use a token.
@@ -2367,7 +2368,7 @@ Example configuration:
```yaml
session_lifetime: 24h
```
-----
+---
### `refresh_access_token_lifetime`
Time that an access token remains valid for, if the session is using refresh tokens.
@@ -2423,7 +2424,7 @@ nonrefreshable_access_token_lifetime: 24h
```
---
-## Metrics ###
+## Metrics
Config options related to metrics.
---
@@ -2441,8 +2442,8 @@ enable_metrics: true
Set to `true` to publish both legacy and non-legacy Prometheus metric names,
or to `false` to only publish non-legacy Prometheus metric names.
-Defaults to `true`. Has no effect if `enable_metrics` is `false`.
-**In Synapse v1.71.0, this will default to `false` before being removed in Synapse v1.73.0.**
+Defaults to `false`. Has no effect if `enable_metrics` is `false`.
+**In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**
Legacy metric names include:
- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
@@ -2520,7 +2521,7 @@ Example configuration:
report_stats_endpoint: https://example.com/report-usage-stats/push
```
---
-## API Configuration ##
+## API Configuration
Config settings related to the client/server API
---
@@ -2620,7 +2621,7 @@ Example configuration:
form_secret:
```
---
-## Signing Keys ##
+## Signing Keys
Config options relating to signing keys
---
@@ -2681,6 +2682,12 @@ is still supported for backwards-compatibility, but it is deprecated.
warning on start-up. To suppress this warning, set
`suppress_key_server_warning` to true.
+If the use of a trusted key server has to be deactivated, e.g. in a private
+federation or for privacy reasons, this can be achieved by setting
+an empty array (`trusted_key_servers: []`). Synapse will then request the keys
+directly from the server that owns them; if the keys cannot be obtained this
+way, events from that server will be rejected.
+
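+For example, to disable the use of trusted key servers entirely:
+```yaml
+trusted_key_servers: []
+```
+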
Options for each entry in the list include:
* `server_name`: the name of the server. Required.
* `verify_keys`: an optional map from key id to base64-encoded public key.
@@ -2729,7 +2736,7 @@ Example configuration:
key_server_signing_keys_path: "key_server_signing_keys.key"
```
---
-## Single sign-on integration ##
+## Single sign-on integration
The following settings can be used to make Synapse use a single sign-on
provider for authentication, instead of its internal password database.
@@ -3015,6 +3022,15 @@ Options for each entry include:
which is set to the claims returned by the UserInfo Endpoint and/or
in the ID Token.
+* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
+ Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
+ Defaults to `false`.
+
+* `backchannel_logout_ignore_sub`: by default, the OIDC Back-Channel Logout feature checks that the
+ `sub` claim matches the subject claim received during login. This check can be disabled by setting
+ this to `true`. Defaults to `false`.
+
+ You might want to disable this check if the `subject_claim` returned by the mapping provider is not `sub`.
It is possible to configure Synapse to only allow logins if certain attributes
match particular values in the OIDC userinfo. The requirements can be listed under
@@ -3349,7 +3365,7 @@ email:
email_validation: "[%(server_name)s] Validate your email"
```
---
-## Push ##
+## Push
Configuration settings related to push notifications
---
@@ -3382,7 +3398,7 @@ push:
group_unread_count_by_room: false
```
---
-## Rooms ##
+## Rooms
Config options relating to rooms.
---
@@ -3628,7 +3644,7 @@ default_power_level_content_override:
```
---
-## Tracing ##
+## Tracing
Configuration options related to tracing support.
---
@@ -3674,14 +3690,71 @@ tracing:
#collector_endpoint: "http://localhost:14268/api/traces?format=jaeger.thrift"
```
---
-## Workers ##
-Configuration options related to workers.
+## Coordinating workers
+Configuration options related to workers which belong in the main config file
+(usually called `homeserver.yaml`).
+A Synapse deployment can scale horizontally by running multiple Synapse processes
+called _workers_. Incoming requests are distributed between workers to handle higher
+loads. Some workers are privileged and can accept requests from other workers.
+As a result, the worker configuration is divided into two parts.
+
+1. The first part (in this section of the manual) defines which shardable tasks
+ are delegated to privileged workers. This allows unprivileged workers to
+ request that a privileged worker act on their behalf.
+1. [The second part](#individual-worker-configuration)
+ controls the behaviour of individual workers in isolation.
+
+For guidance on setting up workers, see the [worker documentation](../../workers.md).
+
+---
+### `worker_replication_secret`
+
+A shared secret used by the replication APIs on the main process to authenticate
+HTTP requests from workers.
+
+By default, this value is omitted (equivalently `null`), which means that
+traffic between the workers and the main process is not authenticated.
+
+Example configuration:
+```yaml
+worker_replication_secret: "secret_secret"
+```
+---
+### `start_pushers`
+
+Controls sending of push notifications on the main process. Set to `false`
+if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.
+
+Example configuration:
+```yaml
+start_pushers: false
+```
+---
+### `pusher_instances`
+
+It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher),
+in which case the work is balanced across them. Use this setting to list the pushers by
+[`worker_name`](#worker_name). Ensure the main process and all pusher workers are
+restarted after changing this option.
+
+If no or only one pusher worker is configured, this setting is not necessary.
+The main process will send out push notifications by default if you do not disable
+it by setting [`start_pushers: false`](#start_pushers).
+
+Example configuration:
+```yaml
+start_pushers: false
+pusher_instances:
+ - pusher_worker1
+ - pusher_worker2
+```
---
### `send_federation`
Controls sending of outbound federation transactions on the main process.
-Set to false if using a federation sender worker. Defaults to true.
+Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
+Defaults to `true`.
Example configuration:
```yaml
@@ -3690,8 +3763,9 @@ send_federation: false
---
### `federation_sender_instances`
-It is possible to run multiple federation sender workers, in which case the
-work is balanced across them. Use this setting to list the senders.
+It is possible to run multiple
+[federation sender workers](../../workers.md#synapseappfederation_sender), in which
+case the work is balanced across them. Use this setting to list the senders.
This configuration setting must be shared between all federation sender workers, and if
changed all federation sender workers must be stopped at the same time and then
@@ -3700,14 +3774,19 @@ events may be dropped).
Example configuration:
```yaml
+send_federation: false
federation_sender_instances:
- federation_sender1
```
---
### `instance_map`
-When using workers this should be a map from worker name to the
+When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
+Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
+an HTTP replication listener, and that listener should be included in the `instance_map`.
+(The main process also needs an HTTP replication listener, but it should not be
+listed in the `instance_map`.)
Example configuration:
```yaml
@@ -3720,8 +3799,11 @@ instance_map:
### `stream_writers`
Experimental: When using workers you can define which workers should
-handle event persistence and typing notifications. Any worker
-specified here must also be in the `instance_map`.
+handle writing to streams such as event persistence and typing notifications.
+Any worker specified here must also be in the [`instance_map`](#instance_map).
+
+See the list of available streams in the
+[worker documentation](../../workers.md#stream-writers).
Example configuration:
```yaml
@@ -3732,29 +3814,18 @@ stream_writers:
---
### `run_background_tasks_on`
-The worker that is used to run background tasks (e.g. cleaning up expired
-data). If not provided this defaults to the main process.
+The [worker](../../workers.md#background-tasks) that is used to run
+background tasks (e.g. cleaning up expired data). If not provided this
+defaults to the main process.
Example configuration:
```yaml
run_background_tasks_on: worker1
```
---
-### `worker_replication_secret`
-
-A shared secret used by the replication APIs to authenticate HTTP requests
-from workers.
-
-By default this is unused and traffic is not authenticated.
-
-Example configuration:
-```yaml
-worker_replication_secret: "secret_secret"
-```
### `redis`
-Configuration for Redis when using workers. This *must* be enabled when
-using workers (unless using old style direct TCP configuration).
+Configuration for Redis when using workers. This *must* be enabled when using workers.
This setting has the following sub-options:
* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
@@ -3769,7 +3840,143 @@ redis:
port: 6379
password:
```
-## Background Updates ##
+---
+## Individual worker configuration
+These options configure an individual worker, in its worker configuration file.
+They should not be provided when configuring the main process.
+
+Note also the configuration above for
+[coordinating a cluster of workers](#coordinating-workers).
+
+For guidance on setting up workers, see the [worker documentation](../../workers.md).
+
+---
+### `worker_app`
+
+The type of worker. The currently available worker applications are listed
+in the [worker documentation](../../workers.md#available-worker-applications).
+
+The most common worker is the
+[`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker).
+
+Example configuration:
+```yaml
+worker_app: synapse.app.generic_worker
+```
+---
+### `worker_name`
+
+A unique name for the worker. The worker needs a name so that it can be addressed
+in other options and identified in log files. We strongly recommend
+giving each worker a unique `worker_name`.
+
+Example configuration:
+```yaml
+worker_name: generic_worker1
+```
+---
+### `worker_replication_host`
+
+The HTTP replication endpoint that the worker should talk to on the main Synapse process.
+The main Synapse process defines this with a `replication` resource in its
+[`listeners` option](#listeners).
+
+Example configuration:
+```yaml
+worker_replication_host: 127.0.0.1
+```
+---
+### `worker_replication_http_port`
+
+The HTTP replication port that the worker should talk to on the main Synapse process.
+The main Synapse process defines this with a `replication` resource in its
+[`listeners` option](#listeners).
+
+Example configuration:
+```yaml
+worker_replication_http_port: 9093
+```
+---
+### `worker_replication_http_tls`
+
+Whether TLS should be used for talking to the HTTP replication port on the main
+Synapse process.
+The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
+has the `replication` resource enabled.
+
+**Please note:** by default, it is not safe to expose replication ports to the
+public Internet, even with TLS enabled.
+See [`worker_replication_secret`](#worker_replication_secret).
+
+Defaults to `false`.
+
+*Added in Synapse 1.72.0.*
+
+Example configuration:
+```yaml
+worker_replication_http_tls: true
+```
+---
+### `worker_listeners`
+
+A worker can handle HTTP requests. To do so, a `worker_listeners` option
+must be declared, in the same way as the [`listeners` option](#listeners)
+in the shared config.
+
+Workers declared in [`stream_writers`](#stream_writers) will need to include a
+`replication` listener here, in order to accept internal HTTP requests from
+other workers.
+
+Example configuration:
+```yaml
+worker_listeners:
+ - type: http
+ port: 8083
+ resources:
+ - names: [client, federation]
+```
+---
+### `worker_daemonize`
+
+Specifies whether the worker should be started as a daemon process.
+If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
+must be omitted or set to `false`.
+
+Defaults to `false`.
+
+Example configuration:
+```yaml
+worker_daemonize: true
+```
+---
+### `worker_pid_file`
+
+When running a worker as a daemon, we need a place to store the
+[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
+This option defines the location of that "pid file".
+
+This option is required if `worker_daemonize` is `true` and ignored
+otherwise. It has no default.
+
+See also the [`pid_file`](#pid_file) option for the main Synapse process.
+
+Example configuration:
+```yaml
+worker_pid_file: DATADIR/generic_worker1.pid
+```
+---
+### `worker_log_config`
+
+This option specifies a YAML Python logging config file, as described
+[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
+See also the [`log_config`](#log_config) option for the main Synapse process.
+
+Example configuration:
+```yaml
+worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
+```
+---
+## Background Updates
Configuration settings related to background updates.
---
diff --git a/docs/workers.md b/docs/workers.md
index c27b3f8bd5..27e54c5846 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -88,10 +88,12 @@ shared configuration file.
### Shared configuration
Normally, only a couple of changes are needed to make an existing configuration
-file suitable for use with workers. First, you need to enable an "HTTP replication
-listener" for the main process; and secondly, you need to enable redis-based
-replication. Optionally, a shared secret can be used to authenticate HTTP
-traffic between workers. For example:
+file suitable for use with workers. First, you need to enable an
+["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
+for the main process; and secondly, you need to enable
+[redis-based replication](usage/configuration/config_documentation.md#redis).
+Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
+can be used to authenticate HTTP traffic between workers. For example:
```yaml
# extend the existing `listeners` section. This defines the ports that the
@@ -111,27 +113,30 @@ redis:
enabled: true
```
-See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option.
+See the [configuration manual](usage/configuration/config_documentation.md)
+for the full documentation of each option.
Under **no circumstances** should the replication listener be exposed to the
public internet; replication traffic is:
* always unencrypted
-* unauthenticated, unless `worker_replication_secret` is configured
+* unauthenticated, unless [`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret)
+ is configured
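+
+If you do configure a
+[`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret),
+a minimal sketch of the shared configuration (the value is a placeholder) is:
+```yaml
+worker_replication_secret: "<a long random string>"
+```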
### Worker configuration
In the config file for each worker, you must specify:
- * The type of worker (`worker_app`). The currently available worker applications are listed below.
- * A unique name for the worker (`worker_name`).
+ * The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
+ The currently available worker applications are listed [below](#available-worker-applications).
+ * A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
- (`worker_replication_host` and `worker_replication_http_port`)
- * If handling HTTP requests, a `worker_listeners` option with an `http`
- listener, in the same way as the [`listeners`](usage/configuration/config_documentation.md#listeners)
- option in the shared config.
- * If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
- the main process (`worker_main_http_uri`).
+ ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
+ [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
+ * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
+ with an `http` listener.
+ * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
+ the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
For example:
@@ -146,7 +151,6 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.
Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).
-
### Running Synapse with workers
Finally, you need to start your worker processes. This can be done with either
@@ -217,7 +221,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
# Encryption requests
- # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
^/_matrix/client/(r0|v3|unstable)/keys/query$
^/_matrix/client/(r0|v3|unstable)/keys/changes$
^/_matrix/client/(r0|v3|unstable)/keys/claim$
@@ -288,7 +291,8 @@ For multiple workers not handling the SSO endpoints properly, see
[#9427](https://github.com/matrix-org/synapse/issues/9427).
Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners)
-with `client` and `federation` `resources` must be configured in the `worker_listeners`
+with `client` and `federation` `resources` must be configured in the
+[`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners)
option in the worker config.
#### Load balancing
@@ -300,9 +304,11 @@ may wish to run multiple groups of workers handling different endpoints so that
load balancing can be done in different ways.
For `/sync` and `/initialSync` requests it will be more efficient if all
-requests from a particular user are routed to a single instance. Extracting a
-user ID from the access token or `Authorization` header is currently left as an
-exercise for the reader. Admins may additionally wish to separate out `/sync`
+requests from a particular user are routed to a single instance. This can
+be done in nginx, for example, via `hash $http_x_forwarded_for;` (hashing on
+the client IP) or via `hash $http_authorization consistent;` (hashing on the
+`Authorization` header, which contains the user's access token).
+
+Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
`/initialSync`), as requests that don't are known as "initial sync" that happens
when a user logs in on a new device and can be *very* resource intensive, so
@@ -331,9 +337,10 @@ of the main process to a particular worker.
To enable this, the worker must have a
[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
-have a `worker_name` and be listed in the `instance_map` config. The same worker
-can handle multiple streams, but unless otherwise documented, each stream can only
-have a single writer.
+have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
+config. The same worker can handle multiple streams, but unless otherwise documented,
+each stream can only have a single writer.
For example, to move event persistence off to a dedicated worker, the shared
configuration would include:
@@ -360,9 +367,26 @@ streams and the endpoints associated with them:
##### The `events` stream
-The `events` stream experimentally supports having multiple writers, where work
-is sharded between them by room ID. Note that you *must* restart all worker
-instances when adding or removing event persisters. An example `stream_writers`
+The `events` stream experimentally supports having multiple writer workers, where load
+is sharded between them by room ID. Each writer is called an _event persister_. They are
+responsible for
+- receiving new events,
+- linking them to those already in the room [DAG](development/room-dag-concepts.md),
+- persisting them to the DB, and finally
+- updating the events stream.
+
+Because load is sharded in this way, you *must* restart all worker instances when
+adding or removing event persisters.
+
+An `event_persister` should not be mistaken for an `event_creator`.
+An `event_creator` listens for requests from clients to create new events and
+creates them. It then passes those events over HTTP replication to any
+configured event persisters (or the main process if none are configured).
+
+Note that `event_creator`s and `event_persister`s are implemented using the same
+[`synapse.app.generic_worker`](#synapseappgeneric_worker).
+
+An example [`stream_writers`](usage/configuration/config_documentation.md#stream_writers)
configuration with multiple writers:
```yaml
@@ -416,16 +440,18 @@ worker. Background tasks are run periodically or started via replication. Exactl
which tasks are configured to run depends on your Synapse configuration (e.g. if
stats is enabled). This worker doesn't handle any REST endpoints itself.
-To enable this, the worker must have a `worker_name` and can be configured to run
-background tasks. For example, to move background tasks to a dedicated worker,
-the shared configuration would include:
+To enable this, the worker must have a unique
+[`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and can be configured to run background tasks. For example, to move background tasks
+to a dedicated worker, the shared configuration would include:
```yaml
run_background_tasks_on: background_worker
```
-You might also wish to investigate the `update_user_directory_from_worker` and
-`media_instance_running_background_jobs` settings.
+You might also wish to investigate the
+[`update_user_directory_from_worker`](#updating-the-user-directory) and
+[`media_instance_running_background_jobs`](#synapseappmedia_repository) settings.
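+
+Both of those settings take a worker name. A sketch reusing the background
+worker above (the names are illustrative) would be:
+```yaml
+update_user_directory_from_worker: background_worker
+media_instance_running_background_jobs: background_worker
+```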
An example for a dedicated background worker instance:
@@ -478,13 +504,17 @@ worker application type.
### `synapse.app.pusher`
Handles sending push notifications to sygnal and email. Doesn't handle any
-REST endpoints itself, but you should set `start_pushers: False` in the
+REST endpoints itself, but you should set
+[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
shared configuration file to stop the main synapse sending push notifications.
-To run multiple instances at once the `pusher_instances` option should list all
-pusher instances by their worker name, e.g.:
+To run multiple instances at once the
+[`pusher_instances`](usage/configuration/config_documentation.md#pusher_instances)
+option should list all pusher instances by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name), e.g.:
```yaml
+start_pushers: false
pusher_instances:
- pusher_worker1
- pusher_worker2
@@ -512,15 +542,20 @@ Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.federation_sender`
Handles sending federation traffic to other servers. Doesn't handle any
-REST endpoints itself, but you should set `send_federation: False` in the
-shared configuration file to stop the main synapse sending this traffic.
+REST endpoints itself, but you should set
+[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
+in the shared configuration file to stop the main synapse sending this traffic.
If running multiple federation senders then you must list each
-instance in the `federation_sender_instances` option by their `worker_name`.
+instance in the
+[`federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances)
+option by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name).
All instances must be stopped and started when adding or removing instances.
For example:
```yaml
+send_federation: false
federation_sender_instances:
- federation_sender1
- federation_sender2
@@ -547,7 +582,9 @@ Handles the media repository. It can handle all endpoints starting with:
^/_synapse/admin/v1/quarantine_media/.*$
^/_synapse/admin/v1/users/.*/media$
-You should also set `enable_media_repo: False` in the shared configuration
+You should also set
+[`enable_media_repo: False`](usage/configuration/config_documentation.md#enable_media_repo)
+in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository. Note that doing so will prevent the main process from being
able to handle the above endpoints.
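+
+A minimal sketch of that shared configuration change would be:
+```yaml
+enable_media_repo: false
+```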
diff --git a/mypy.ini b/mypy.ini
index f4ce4917d6..72051fc5a0 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -11,6 +11,7 @@ warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
+strict_equality = True
files =
docker/,
@@ -56,7 +57,6 @@ exclude = (?x)
|tests/rest/media/v1/test_media_storage.py
|tests/server.py
|tests/server_notices/test_resource_limits_server_notices.py
- |tests/test_metrics.py
|tests/test_state.py
|tests/test_terms_auth.py
|tests/util/caches/test_cached_call.py
@@ -106,6 +106,9 @@ disallow_untyped_defs = False
[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True
+[mypy-tests.metrics.test_background_process_metrics]
+disallow_untyped_defs = True
+
[mypy-tests.push.test_bulk_push_rule_evaluator]
disallow_untyped_defs = True
@@ -115,6 +118,9 @@ disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True
+[mypy-tests.storage.test_id_generators]
+disallow_untyped_defs = True
+
[mypy-tests.storage.test_profile]
disallow_untyped_defs = True
diff --git a/poetry.lock b/poetry.lock
index 67258702c8..5267ba1b36 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,16 +1,16 @@
[[package]]
name = "attrs"
-version = "21.4.0"
+version = "22.1.0"
description = "Classes Without Boilerplate"
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.5"
[package.extras]
-dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "sphinx", "sphinx-notfound-page", "zope.interface"]
+dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"]
docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
-tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "zope.interface"]
-tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six"]
+tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"]
+tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
[[package]]
name = "Authlib"
@@ -25,7 +25,7 @@ cryptography = ">=3.2"
[[package]]
name = "automat"
-version = "20.2.0"
+version = "22.10.0"
description = "Self-service finite-state machines for the programmer on the go."
category = "main"
optional = false
@@ -52,18 +52,18 @@ typecheck = ["mypy"]
[[package]]
name = "black"
-version = "22.3.0"
+version = "22.10.0"
description = "The uncompromising code formatter."
category = "dev"
optional = false
-python-versions = ">=3.6.2"
+python-versions = ">=3.7"
[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
pathspec = ">=0.9.0"
platformdirs = ">=2"
-tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""}
typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
@@ -91,7 +91,7 @@ dev = ["Sphinx (==4.3.2)", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0
[[package]]
name = "canonicaljson"
-version = "1.6.3"
+version = "1.6.4"
description = "Canonical JSON"
category = "main"
optional = false
@@ -186,7 +186,7 @@ python-versions = "*"
[[package]]
name = "cryptography"
-version = "36.0.1"
+version = "38.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
category = "main"
optional = false
@@ -196,12 +196,12 @@ python-versions = ">=3.6"
cffi = ">=1.12"
[package.extras]
-docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx_rtd_theme"]
+docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"]
docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"]
-sdist = ["setuptools_rust (>=0.11.4)"]
+sdist = ["setuptools-rust (>=0.11.4)"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
+test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
[[package]]
name = "defusedxml"
@@ -246,36 +246,36 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910
[[package]]
name = "flake8"
-version = "4.0.1"
+version = "5.0.4"
description = "the modular source code checker: pep8 pyflakes and co"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.6.1"
[package.dependencies]
-importlib-metadata = {version = "<4.3", markers = "python_version < \"3.8\""}
-mccabe = ">=0.6.0,<0.7.0"
-pycodestyle = ">=2.8.0,<2.9.0"
-pyflakes = ">=2.4.0,<2.5.0"
+importlib-metadata = {version = ">=1.1.0,<4.3", markers = "python_version < \"3.8\""}
+mccabe = ">=0.7.0,<0.8.0"
+pycodestyle = ">=2.9.0,<2.10.0"
+pyflakes = ">=2.5.0,<2.6.0"
[[package]]
name = "flake8-bugbear"
-version = "22.9.23"
+version = "22.10.27"
description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle."
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
attrs = ">=19.2.0"
flake8 = ">=3.0.0"
[package.extras]
-dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit"]
+dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "tox"]
[[package]]
name = "flake8-comprehensions"
-version = "3.8.0"
+version = "3.10.1"
description = "A flake8 plugin to help you write better list/set/dict comprehensions."
category = "dev"
optional = false
@@ -306,7 +306,7 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
-version = "3.1.27"
+version = "3.1.29"
description = "GitPython is a python library used to interact with Git repositories"
category = "dev"
optional = false
@@ -449,11 +449,11 @@ trio = ["async_generator", "trio"]
[[package]]
name = "jinja2"
-version = "3.0.3"
+version = "3.1.2"
description = "A very fast and expressive template engine."
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
MarkupSafe = ">=2.0"
@@ -463,7 +463,7 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
-version = "4.16.0"
+version = "4.17.0"
description = "An implementation of JSON Schema validation for Python"
category = "main"
optional = false
@@ -566,11 +566,11 @@ dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "ma
[[package]]
name = "mccabe"
-version = "0.6.1"
+version = "0.7.0"
description = "McCabe checker, plugin for flake8"
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
[[package]]
name = "msgpack"
@@ -744,12 +744,16 @@ python-versions = "*"
[[package]]
name = "pillow"
-version = "9.0.1"
+version = "9.3.0"
description = "Python Imaging Library (Fork)"
category = "main"
optional = false
python-versions = ">=3.7"
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+
[[package]]
name = "pkginfo"
version = "1.8.2"
@@ -783,7 +787,7 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock
[[package]]
name = "prometheus-client"
-version = "0.14.0"
+version = "0.15.0"
description = "Python client for the Prometheus monitoring system."
category = "main"
optional = false
@@ -802,7 +806,7 @@ python-versions = ">=3.7"
[[package]]
name = "psycopg2"
-version = "2.9.4"
+version = "2.9.5"
description = "psycopg2 - Python-PostgreSQL Database Adapter"
category = "main"
optional = true
@@ -852,11 +856,11 @@ pyasn1 = ">=0.4.6,<0.5.0"
[[package]]
name = "pycodestyle"
-version = "2.8.0"
+version = "2.9.1"
description = "Python style guide checker"
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.6"
[[package]]
name = "pycparser"
@@ -883,11 +887,11 @@ email = ["email-validator (>=1.0.3)"]
[[package]]
name = "pyflakes"
-version = "2.4.0"
+version = "2.5.0"
description = "passive checker of Python programs"
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+python-versions = ">=3.6"
[[package]]
name = "pygithub"
@@ -999,14 +1003,14 @@ python-versions = ">=3.7"
[[package]]
name = "pysaml2"
-version = "7.1.2"
+version = "7.2.1"
description = "Python implementation of SAML Version 2 Standard"
category = "main"
optional = true
python-versions = "<4,>=3.6"
[package.dependencies]
-cryptography = ">=1.4"
+cryptography = ">=3.1"
defusedxml = "*"
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
pyOpenSSL = "*"
@@ -1057,11 +1061,11 @@ python-versions = ">=3.6"
[[package]]
name = "readme-renderer"
-version = "33.0"
+version = "37.2"
description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
bleach = ">=2.1.0"
@@ -1111,6 +1115,22 @@ python-versions = ">=3.7"
[package.extras]
idna2008 = ["idna"]
+[[package]]
+name = "rich"
+version = "12.6.0"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+category = "dev"
+optional = false
+python-versions = ">=3.6.3,<4.0.0"
+
+[package.dependencies]
+commonmark = ">=0.9.0,<0.10.0"
+pygments = ">=2.6.0,<3.0.0"
+typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
+
[[package]]
name = "secretstorage"
version = "3.3.1"
@@ -1137,7 +1157,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "1.5.11"
+version = "1.10.1"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@@ -1145,7 +1165,7 @@ python-versions = "*"
[package.dependencies]
certifi = "*"
-urllib3 = ">=1.10.0"
+urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
[package.extras]
aiohttp = ["aiohttp (>=3.5)"]
@@ -1155,6 +1175,7 @@ celery = ["celery (>=3)"]
chalice = ["chalice (>=1.16.0)"]
django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
+fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)"]
httpx = ["httpx (>=0.16.0)"]
pure_eval = ["asttokens", "executing", "pure-eval"]
@@ -1163,6 +1184,7 @@ quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
rq = ["rq (>=0.6)"]
sanic = ["sanic (>=0.8)"]
sqlalchemy = ["sqlalchemy (>=1.2)"]
+starlette = ["starlette (>=0.19.1)"]
tornado = ["tornado (>=5)"]
[[package]]
@@ -1201,7 +1223,7 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (
[[package]]
name = "setuptools-rust"
-version = "1.5.1"
+version = "1.5.2"
description = "Setuptools Rust extension plugin"
category = "main"
optional = false
@@ -1313,22 +1335,6 @@ tomli = {version = "*", markers = "python_version >= \"3.6\""}
[package.extras]
dev = ["packaging"]
-[[package]]
-name = "tqdm"
-version = "4.63.0"
-description = "Fast, Extensible Progress Meter"
-category = "dev"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-
-[package.extras]
-dev = ["py-make (>=0.1.0)", "twine", "wheel"]
-notebook = ["ipywidgets (>=6)"]
-telegram = ["requests"]
-
[[package]]
name = "treq"
version = "22.2.0"
@@ -1350,27 +1356,26 @@ docs = ["sphinx (>=1.4.8)"]
[[package]]
name = "twine"
-version = "3.8.0"
+version = "4.0.1"
description = "Collection of utilities for publishing packages on PyPI"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-colorama = ">=0.4.3"
importlib-metadata = ">=3.6"
keyring = ">=15.1"
pkginfo = ">=1.8.1"
-readme-renderer = ">=21.0"
+readme-renderer = ">=35.0"
requests = ">=2.20"
requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0"
rfc3986 = ">=1.4.0"
-tqdm = ">=4.14"
+rich = ">=12.0.0"
urllib3 = ">=1.26.0"
[[package]]
name = "twisted"
-version = "22.8.0"
+version = "22.10.0"
description = "An asynchronous networking framework written in Python"
category = "main"
optional = false
@@ -1390,21 +1395,21 @@ typing-extensions = ">=3.6.5"
"zope.interface" = ">=4.4.2"
[package.extras]
-all_non_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+all-non-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
conch_nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
contextvars = ["contextvars (>=2.4,<3)"]
-dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.7.0,<22.8.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)"]
-dev_release = ["pydoctor (>=22.7.0,<22.8.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)"]
-gtk_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)"]
+dev-release = ["pydoctor (>=22.9.0,<22.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)"]
+gtk-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
-macos_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
-mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.7.0,<22.8.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
-osx_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+macos-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
+osx-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
-test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)"]
+test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.0,<7.0)"]
tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
-windows_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
[[package]]
name = "twisted-iocpsupport"
@@ -1480,7 +1485,7 @@ python-versions = "*"
[[package]]
name = "types-jsonschema"
-version = "4.4.6"
+version = "4.17.0.1"
description = "Typing stubs for jsonschema"
category = "dev"
optional = false
@@ -1503,8 +1508,8 @@ optional = false
python-versions = "*"
[[package]]
-name = "types-pyOpenSSL"
-version = "22.0.10"
+name = "types-pyopenssl"
+version = "22.1.0.2"
description = "Typing stubs for pyOpenSSL"
category = "dev"
optional = false
@@ -1514,8 +1519,8 @@ python-versions = "*"
types-cryptography = "*"
[[package]]
-name = "types-PyYAML"
-version = "6.0.12"
+name = "types-pyyaml"
+version = "6.0.12.2"
description = "Typing stubs for PyYAML"
category = "dev"
optional = false
@@ -1523,7 +1528,7 @@ python-versions = "*"
[[package]]
name = "types-requests"
-version = "2.28.11"
+version = "2.28.11.2"
description = "Typing stubs for requests"
category = "dev"
optional = false
@@ -1534,7 +1539,7 @@ types-urllib3 = "<1.27"
[[package]]
name = "types-setuptools"
-version = "65.5.0.1"
+version = "65.5.0.3"
description = "Typing stubs for setuptools"
category = "dev"
optional = false
@@ -1566,15 +1571,15 @@ python-versions = ">=3.6,<4.0"
[[package]]
name = "urllib3"
-version = "1.26.8"
+version = "1.26.12"
description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
[package.extras]
-brotli = ["brotlipy (>=0.6.0)"]
-secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)"]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
@@ -1587,7 +1592,7 @@ python-versions = "*"
[[package]]
name = "wrapt"
-version = "1.13.3"
+version = "1.14.1"
description = "Module for decorators, wrappers and monkey patching."
category = "main"
optional = false
@@ -1687,20 +1692,20 @@ url-preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
-content-hash = "0ec917f0893013c5b93328281ee17d94a9943e2405c0bd635676b389e300b206"
+content-hash = "27811bd21d56ceeb0f68ded5a00375efcd1a004928f0736f5b02927ce8594cb0"
[metadata.files]
attrs = [
- {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
- {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
+ {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"},
+ {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"},
]
Authlib = [
{file = "Authlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:be4b6a1dea51122336c210a6945b27a105b9ac572baffd15b07bcff4376c1523"},
{file = "Authlib-1.1.0.tar.gz", hash = "sha256:0a270c91409fc2b7b0fbee6996e09f2ee3187358762111a9a4225c874b94e891"},
]
automat = [
- {file = "Automat-20.2.0-py2.py3-none-any.whl", hash = "sha256:b6feb6455337df834f6c9962d6ccf771515b7d939bca142b29c20c2376bc6111"},
- {file = "Automat-20.2.0.tar.gz", hash = "sha256:7979803c74610e11ef0c0d68a2942b152df52da55336e0c9d58daf1831cbdf33"},
+ {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"},
+ {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"},
]
bcrypt = [
{file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"},
@@ -1726,37 +1731,35 @@ bcrypt = [
{file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"},
]
black = [
- {file = "black-22.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2497f9c2386572e28921fa8bec7be3e51de6801f7459dffd6e62492531c47e09"},
- {file = "black-22.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5795a0375eb87bfe902e80e0c8cfaedf8af4d49694d69161e5bd3206c18618bb"},
- {file = "black-22.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3556168e2e5c49629f7b0f377070240bd5511e45e25a4497bb0073d9dda776a"},
- {file = "black-22.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67c8301ec94e3bcc8906740fe071391bce40a862b7be0b86fb5382beefecd968"},
- {file = "black-22.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:fd57160949179ec517d32ac2ac898b5f20d68ed1a9c977346efbac9c2f1e779d"},
- {file = "black-22.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc1e1de68c8e5444e8f94c3670bb48a2beef0e91dddfd4fcc29595ebd90bb9ce"},
- {file = "black-22.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2fc92002d44746d3e7db7cf9313cf4452f43e9ea77a2c939defce3b10b5c82"},
- {file = "black-22.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:a6342964b43a99dbc72f72812bf88cad8f0217ae9acb47c0d4f141a6416d2d7b"},
- {file = "black-22.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:328efc0cc70ccb23429d6be184a15ce613f676bdfc85e5fe8ea2a9354b4e9015"},
- {file = "black-22.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06f9d8846f2340dfac80ceb20200ea5d1b3f181dd0556b47af4e8e0b24fa0a6b"},
- {file = "black-22.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4efa5fad66b903b4a5f96d91461d90b9507a812b3c5de657d544215bb7877a"},
- {file = "black-22.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8477ec6bbfe0312c128e74644ac8a02ca06bcdb8982d4ee06f209be28cdf163"},
- {file = "black-22.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:637a4014c63fbf42a692d22b55d8ad6968a946b4a6ebc385c5505d9625b6a464"},
- {file = "black-22.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:863714200ada56cbc366dc9ae5291ceb936573155f8bf8e9de92aef51f3ad0f0"},
- {file = "black-22.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10dbe6e6d2988049b4655b2b739f98785a884d4d6b85bc35133a8fb9a2233176"},
- {file = "black-22.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:cee3e11161dde1b2a33a904b850b0899e0424cc331b7295f2a9698e79f9a69a0"},
- {file = "black-22.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5891ef8abc06576985de8fa88e95ab70641de6c1fca97e2a15820a9b69e51b20"},
- {file = "black-22.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:30d78ba6bf080eeaf0b7b875d924b15cd46fec5fd044ddfbad38c8ea9171043a"},
- {file = "black-22.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee8f1f7228cce7dffc2b464f07ce769f478968bfb3dd1254a4c2eeed84928aad"},
- {file = "black-22.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee227b696ca60dd1c507be80a6bc849a5a6ab57ac7352aad1ffec9e8b805f21"},
- {file = "black-22.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9b542ced1ec0ceeff5b37d69838106a6348e60db7b8fdd245294dc1d26136265"},
- {file = "black-22.3.0-py3-none-any.whl", hash = "sha256:bc58025940a896d7e5356952228b68f793cf5fcb342be703c3a2669a1488cb72"},
- {file = "black-22.3.0.tar.gz", hash = "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79"},
+ {file = "black-22.10.0-1fixedarch-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:5cc42ca67989e9c3cf859e84c2bf014f6633db63d1cbdf8fdb666dcd9e77e3fa"},
+ {file = "black-22.10.0-1fixedarch-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:5d8f74030e67087b219b032aa33a919fae8806d49c867846bfacde57f43972ef"},
+ {file = "black-22.10.0-1fixedarch-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:197df8509263b0b8614e1df1756b1dd41be6738eed2ba9e9769f3880c2b9d7b6"},
+ {file = "black-22.10.0-1fixedarch-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:2644b5d63633702bc2c5f3754b1b475378fbbfb481f62319388235d0cd104c2d"},
+ {file = "black-22.10.0-1fixedarch-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:e41a86c6c650bcecc6633ee3180d80a025db041a8e2398dcc059b3afa8382cd4"},
+ {file = "black-22.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2039230db3c6c639bd84efe3292ec7b06e9214a2992cd9beb293d639c6402edb"},
+ {file = "black-22.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ff67aec0a47c424bc99b71005202045dc09270da44a27848d534600ac64fc7"},
+ {file = "black-22.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:819dc789f4498ecc91438a7de64427c73b45035e2e3680c92e18795a839ebb66"},
+ {file = "black-22.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5b9b29da4f564ba8787c119f37d174f2b69cdfdf9015b7d8c5c16121ddc054ae"},
+ {file = "black-22.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b49776299fece66bffaafe357d929ca9451450f5466e997a7285ab0fe28e3b"},
+ {file = "black-22.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:21199526696b8f09c3997e2b4db8d0b108d801a348414264d2eb8eb2532e540d"},
+ {file = "black-22.10.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e464456d24e23d11fced2bc8c47ef66d471f845c7b7a42f3bd77bf3d1789650"},
+ {file = "black-22.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9311e99228ae10023300ecac05be5a296f60d2fd10fff31cf5c1fa4ca4b1988d"},
+ {file = "black-22.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fba8a281e570adafb79f7755ac8721b6cf1bbf691186a287e990c7929c7692ff"},
+ {file = "black-22.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:915ace4ff03fdfff953962fa672d44be269deb2eaf88499a0f8805221bc68c87"},
+ {file = "black-22.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:444ebfb4e441254e87bad00c661fe32df9969b2bf224373a448d8aca2132b395"},
+ {file = "black-22.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:974308c58d057a651d182208a484ce80a26dac0caef2895836a92dd6ebd725e0"},
+ {file = "black-22.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ef3925f30e12a184889aac03d77d031056860ccae8a1e519f6cbb742736383"},
+ {file = "black-22.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:432247333090c8c5366e69627ccb363bc58514ae3e63f7fc75c54b1ea80fa7de"},
+ {file = "black-22.10.0-py3-none-any.whl", hash = "sha256:c957b2b4ea88587b46cf49d1dc17681c1e672864fd7af32fc1e9664d572b3458"},
+ {file = "black-22.10.0.tar.gz", hash = "sha256:f513588da599943e0cde4e32cc9879e825d58720d6557062d1098c5ad80080e1"},
]
bleach = [
{file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"},
{file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"},
]
canonicaljson = [
- {file = "canonicaljson-1.6.3-py3-none-any.whl", hash = "sha256:6ba3cf1702fa3d209b3e915a4e9a3e4ef194f1e8fca189c1f0b7a2a7686a27e6"},
- {file = "canonicaljson-1.6.3.tar.gz", hash = "sha256:ca59760bc274a899a0da75809d6909ae43e5123381fd6ef040a44d1952c0b448"},
+ {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"},
+ {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"},
]
certifi = [
{file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
@@ -1838,26 +1841,32 @@ constantly = [
{file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"},
]
cryptography = [
- {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:73bc2d3f2444bcfeac67dd130ff2ea598ea5f20b40e36d19821b4df8c9c5037b"},
- {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:2d87cdcb378d3cfed944dac30596da1968f88fb96d7fc34fdae30a99054b2e31"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74d6c7e80609c0f4c2434b97b80c7f8fdfaa072ca4baab7e239a15d6d70ed73a"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:6c0c021f35b421ebf5976abf2daacc47e235f8b6082d3396a2fe3ccd537ab173"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59a9d55027a8b88fd9fd2826c4392bd487d74bf628bb9d39beecc62a644c12"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a817b961b46894c5ca8a66b599c745b9a3d9f822725221f0e0fe49dc043a3a3"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:94ae132f0e40fe48f310bba63f477f14a43116f05ddb69d6fa31e93f05848ae2"},
- {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7be0eec337359c155df191d6ae00a5e8bbb63933883f4f5dffc439dac5348c3f"},
- {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e0344c14c9cb89e76eb6a060e67980c9e35b3f36691e15e1b7a9e58a0a6c6dc3"},
- {file = "cryptography-36.0.1-cp36-abi3-win32.whl", hash = "sha256:4caa4b893d8fad33cf1964d3e51842cd78ba87401ab1d2e44556826df849a8ca"},
- {file = "cryptography-36.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:391432971a66cfaf94b21c24ab465a4cc3e8bf4a939c1ca5c3e3a6e0abebdbcf"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb5829d027ff82aa872d76158919045a7c1e91fbf241aec32cb07956e9ebd3c9"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc15b1c22e55c4d5566e3ca4db8689470a0ca2babef8e3a9ee057a8b82ce4b1"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:596f3cd67e1b950bc372c33f1a28a0692080625592ea6392987dba7f09f17a94"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:30ee1eb3ebe1644d1c3f183d115a8c04e4e603ed6ce8e394ed39eea4a98469ac"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec63da4e7e4a5f924b90af42eddf20b698a70e58d86a72d943857c4c6045b3ee"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca238ceb7ba0bdf6ce88c1b74a87bffcee5afbfa1e41e173b1ceb095b39add46"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:ca28641954f767f9822c24e927ad894d45d5a1e501767599647259cbf030b903"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:39bdf8e70eee6b1c7b289ec6e5d84d49a6bfa11f8b8646b5b3dfe41219153316"},
- {file = "cryptography-36.0.1.tar.gz", hash = "sha256:53e5c1dc3d7a953de055d77bef2ff607ceef7a2aac0353b5d630ab67f7423638"},
+ {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:984fe150f350a3c91e84de405fe49e688aa6092b3525f407a18b9646f6612320"},
+ {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:ed7b00096790213e09eb11c97cc6e2b757f15f3d2f85833cd2d3ec3fe37c1722"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bbf203f1a814007ce24bd4d51362991d5cb90ba0c177a9c08825f2cc304d871f"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554bec92ee7d1e9d10ded2f7e92a5d70c1f74ba9524947c0ba0c850c7b011828"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b52c9e5f8aa2b802d48bd693190341fae201ea51c7a167d69fc48b60e8a959"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:728f2694fa743a996d7784a6194da430f197d5c58e2f4e278612b359f455e4a2"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dfb4f4dd568de1b6af9f4cda334adf7d72cf5bc052516e1b2608b683375dd95c"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5419a127426084933076132d317911e3c6eb77568a1ce23c3ac1e12d111e61e0"},
+ {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9b24bcff7853ed18a63cfb0c2b008936a9554af24af2fb146e16d8e1aed75748"},
+ {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:25c1d1f19729fb09d42e06b4bf9895212292cb27bb50229f5aa64d039ab29146"},
+ {file = "cryptography-38.0.3-cp36-abi3-win32.whl", hash = "sha256:7f836217000342d448e1c9a342e9163149e45d5b5eca76a30e84503a5a96cab0"},
+ {file = "cryptography-38.0.3-cp36-abi3-win_amd64.whl", hash = "sha256:c46837ea467ed1efea562bbeb543994c2d1f6e800785bd5a2c98bc096f5cb220"},
+ {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06fc3cc7b6f6cca87bd56ec80a580c88f1da5306f505876a71c8cfa7050257dd"},
+ {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:65535bc550b70bd6271984d9863a37741352b4aad6fb1b3344a54e6950249b55"},
+ {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5e89468fbd2fcd733b5899333bc54d0d06c80e04cd23d8c6f3e0542358c6060b"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6ab9516b85bebe7aa83f309bacc5f44a61eeb90d0b4ec125d2d003ce41932d36"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:068147f32fa662c81aebab95c74679b401b12b57494872886eb5c1139250ec5d"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:402852a0aea73833d982cabb6d0c3bb582c15483d29fb7085ef2c42bfa7e38d7"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b1b35d9d3a65542ed2e9d90115dfd16bbc027b3f07ee3304fc83580f26e43249"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6addc3b6d593cd980989261dc1cce38263c76954d758c3c94de51f1e010c9a50"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:be243c7e2bfcf6cc4cb350c0d5cdf15ca6383bbcb2a8ef51d3c9411a9d4386f0"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cf5eefac2b52c10398a42765bfa981ce2372cbc0457e6bf9658f41ec3c41d8"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4e269dcd9b102c5a3d72be3c45d8ce20377b8076a43cbed6f660a1afe365e436"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8d41a46251bf0634e21fac50ffd643216ccecfaf3701a063257fe0b2be1b6548"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:785e4056b5a8b28f05a533fab69febf5004458e20dad7e2e13a3120d8ecec75a"},
+ {file = "cryptography-38.0.3.tar.gz", hash = "sha256:bfbe6ee19615b07a98b1d2287d6a6073f734735b49ee45b11324d85efc4d5cbd"},
]
defusedxml = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
@@ -1876,16 +1885,16 @@ elementpath = [
{file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"},
]
flake8 = [
- {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"},
- {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"},
+ {file = "flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"},
+ {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"},
]
flake8-bugbear = [
- {file = "flake8-bugbear-22.9.23.tar.gz", hash = "sha256:17b9623325e6e0dcdcc80ed9e4aa811287fcc81d7e03313b8736ea5733759937"},
- {file = "flake8_bugbear-22.9.23-py3-none-any.whl", hash = "sha256:cd2779b2b7ada212d7a322814a1e5651f1868ab0d3f24cc9da66169ab8fda474"},
+ {file = "flake8-bugbear-22.10.27.tar.gz", hash = "sha256:a6708608965c9e0de5fff13904fed82e0ba21ac929fe4896459226a797e11cd5"},
+ {file = "flake8_bugbear-22.10.27-py3-none-any.whl", hash = "sha256:6ad0ab754507319060695e2f2be80e6d8977cfcea082293089a9226276bd825d"},
]
flake8-comprehensions = [
- {file = "flake8-comprehensions-3.8.0.tar.gz", hash = "sha256:8e108707637b1d13734f38e03435984f6b7854fa6b5a4e34f93e69534be8e521"},
- {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"},
+ {file = "flake8-comprehensions-3.10.1.tar.gz", hash = "sha256:412052ac4a947f36b891143430fef4859705af11b2572fbb689f90d372cf26ab"},
+ {file = "flake8_comprehensions-3.10.1-py3-none-any.whl", hash = "sha256:d763de3c74bc18a79c039a7ec732e0a1985b0c79309ceb51e56401ad0a2cd44e"},
]
frozendict = [
{file = "frozendict-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a3b32d47282ae0098b9239a6d53ec539da720258bd762d62191b46f2f87c5fc"},
@@ -1911,8 +1920,8 @@ gitdb = [
{file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"},
]
gitpython = [
- {file = "GitPython-3.1.27-py3-none-any.whl", hash = "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"},
- {file = "GitPython-3.1.27.tar.gz", hash = "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"},
+ {file = "GitPython-3.1.29-py3-none-any.whl", hash = "sha256:41eea0deec2deea139b459ac03656f0dd28fc4a3387240ec1d3c259a2c47850f"},
+ {file = "GitPython-3.1.29.tar.gz", hash = "sha256:cc36bfc4a3f913e66805a28e84703e419d9c264c1077e537b54f0e1af85dbefd"},
]
googleapis-common-protos = [
{file = "googleapis-common-protos-1.56.2.tar.gz", hash = "sha256:b09b56f5463070c2153753ef123f07d2e49235e89148e9b2459ec8ed2f68d7d3"},
@@ -2101,12 +2110,12 @@ jeepney = [
{file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"},
]
jinja2 = [
- {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"},
- {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"},
+ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
+ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
]
jsonschema = [
- {file = "jsonschema-4.16.0-py3-none-any.whl", hash = "sha256:9e74b8f9738d6a946d70705dc692b74b5429cd0960d58e79ffecfc43b2221eb9"},
- {file = "jsonschema-4.16.0.tar.gz", hash = "sha256:165059f076eff6971bae5b742fc029a7b4ef3f9bcf04c14e4776a7605de14b23"},
+ {file = "jsonschema-4.17.0-py3-none-any.whl", hash = "sha256:f660066c3966db7d6daeaea8a75e0b68237a48e51cf49882087757bb59916248"},
+ {file = "jsonschema-4.17.0.tar.gz", hash = "sha256:5bfcf2bca16a087ade17e02b282d34af7ccd749ef76241e7f9bd7c0cb8a9424d"},
]
keyring = [
{file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"},
@@ -2242,8 +2251,8 @@ matrix-synapse-ldap3 = [
{file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"},
]
mccabe = [
- {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
- {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
]
msgpack = [
{file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"},
@@ -2378,41 +2387,67 @@ phonenumbers = [
{file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"},
]
pillow = [
- {file = "Pillow-9.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4"},
- {file = "Pillow-9.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976"},
- {file = "Pillow-9.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc"},
- {file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"},
- {file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"},
- {file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"},
- {file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"},
- {file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"},
- {file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"},
- {file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"},
- {file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"},
- {file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"},
- {file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"},
- {file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"},
- {file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"},
- {file = "Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"},
- {file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"},
- {file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"},
- {file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"},
- {file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"},
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win_amd64.whl", hash = "sha256:32a44128c4bdca7f31de5be641187367fe2a450ad83b833ef78910397db491aa"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:0b7257127d646ff8676ec8a15520013a698d1fdc48bc2a79ba4e53df792526f2"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b90f7616ea170e92820775ed47e136208e04c967271c9ef615b6fbd08d9af0e3"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68943d632f1f9e3dce98908e873b3a090f6cba1cbb1b892a9e8d97c938871fbe"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be55f8457cd1eac957af0c3f5ece7bc3f033f89b114ef30f710882717670b2a8"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d77adcd56a42d00cc1be30843d3426aa4e660cab4a61021dc84467123f7a00c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:829f97c8e258593b9daa80638aee3789b7df9da5cf1336035016d76f03b8860c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:801ec82e4188e935c7f5e22e006d01611d6b41661bba9fe45b60e7ac1a8f84de"},
+ {file = "Pillow-9.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:871b72c3643e516db4ecf20efe735deb27fe30ca17800e661d769faab45a18d7"},
+ {file = "Pillow-9.3.0-cp310-cp310-win32.whl", hash = "sha256:655a83b0058ba47c7c52e4e2df5ecf484c1b0b0349805896dd350cbc416bdd91"},
+ {file = "Pillow-9.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:9f47eabcd2ded7698106b05c2c338672d16a6f2a485e74481f524e2a23c2794b"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:57751894f6618fd4308ed8e0c36c333e2f5469744c34729a27532b3db106ee20"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7db8b751ad307d7cf238f02101e8e36a128a6cb199326e867d1398067381bff4"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3033fbe1feb1b59394615a1cafaee85e49d01b51d54de0cbf6aa8e64182518a1"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b012ea2d065fd163ca096f4e37e47cd8b59cf4b0fd47bfca6abb93df70b34c"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a65733d103311331875c1dca05cb4606997fd33d6acfed695b1232ba1df193"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:502526a2cbfa431d9fc2a079bdd9061a2397b842bb6bc4239bb176da00993812"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90fb88843d3902fe7c9586d439d1e8c05258f41da473952aa8b328d8b907498c"},
+ {file = "Pillow-9.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:89dca0ce00a2b49024df6325925555d406b14aa3efc2f752dbb5940c52c56b11"},
+ {file = "Pillow-9.3.0-cp311-cp311-win32.whl", hash = "sha256:3168434d303babf495d4ba58fc22d6604f6e2afb97adc6a423e917dab828939c"},
+ {file = "Pillow-9.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:18498994b29e1cf86d505edcb7edbe814d133d2232d256db8c7a8ceb34d18cef"},
+ {file = "Pillow-9.3.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:772a91fc0e03eaf922c63badeca75e91baa80fe2f5f87bdaed4280662aad25c9"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa4107d1b306cdf8953edde0534562607fe8811b6c4d9a486298ad31de733b2"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4012d06c846dc2b80651b120e2cdd787b013deb39c09f407727ba90015c684f"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77ec3e7be99629898c9a6d24a09de089fa5356ee408cdffffe62d67bb75fdd72"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:6c738585d7a9961d8c2821a1eb3dcb978d14e238be3d70f0a706f7fa9316946b"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:828989c45c245518065a110434246c44a56a8b2b2f6347d1409c787e6e4651ee"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win32.whl", hash = "sha256:82409ffe29d70fd733ff3c1025a602abb3e67405d41b9403b00b01debc4c9a29"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:41e0051336807468be450d52b8edd12ac60bebaa97fe10c8b660f116e50b30e4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b03ae6f1a1878233ac620c98f3459f79fd77c7e3c2b20d460284e1fb370557d4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4390e9ce199fc1951fcfa65795f239a8a4944117b5935a9317fb320e7767b40f"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40e1ce476a7804b0fb74bcfa80b0a2206ea6a882938eaba917f7a0f004b42502"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a06a052c5f37b4ed81c613a455a81f9a3a69429b4fd7bb913c3fa98abefc20"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:15c42fb9dea42465dfd902fb0ecf584b8848ceb28b41ee2b58f866411be33f07"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:51e0e543a33ed92db9f5ef69a0356e0b1a7a6b6a71b80df99f1d181ae5875636"},
+ {file = "Pillow-9.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3dd6caf940756101205dffc5367babf288a30043d35f80936f9bfb37f8355b32"},
+ {file = "Pillow-9.3.0-cp38-cp38-win32.whl", hash = "sha256:f1ff2ee69f10f13a9596480335f406dd1f70c3650349e2be67ca3139280cade0"},
+ {file = "Pillow-9.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:276a5ca930c913f714e372b2591a22c4bd3b81a418c0f6635ba832daec1cbcfc"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:73bd195e43f3fadecfc50c682f5055ec32ee2c933243cafbfdec69ab1aa87cad"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c7c8ae3864846fc95f4611c78129301e203aaa2af813b703c55d10cc1628535"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0918e03aa0c72ea56edbb00d4d664294815aa11291a11504a377ea018330d3"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0915e734b33a474d76c28e07292f196cdf2a590a0d25bcc06e64e545f2d146c"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0372acb5d3598f36ec0914deed2a63f6bcdb7b606da04dc19a88d31bf0c05b"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ad58d27a5b0262c0c19b47d54c5802db9b34d38bbf886665b626aff83c74bacd"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:97aabc5c50312afa5e0a2b07c17d4ac5e865b250986f8afe2b02d772567a380c"},
+ {file = "Pillow-9.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9aaa107275d8527e9d6e7670b64aabaaa36e5b6bd71a1015ddd21da0d4e06448"},
+ {file = "Pillow-9.3.0-cp39-cp39-win32.whl", hash = "sha256:bac18ab8d2d1e6b4ce25e3424f709aceef668347db8637c2296bcf41acb7cf48"},
+ {file = "Pillow-9.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b472b5ea442148d1c3e2209f20f1e0bb0eb556538690fa70b5e1f79fa0ba8dc2"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ab388aaa3f6ce52ac1cb8e122c4bd46657c15905904b3120a6248b5b8b0bc228"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbb8e7f2abee51cef77673be97760abff1674ed32847ce04b4af90f610144c7b"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca31dd6014cb8b0b2db1e46081b0ca7d936f856da3b39744aef499db5d84d02"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c7025dce65566eb6e89f56c9509d4f628fddcedb131d9465cacd3d8bac337e7e"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ebf2029c1f464c59b8bdbe5143c79fa2045a581ac53679733d3a91d400ff9efb"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b59430236b8e58840a0dfb4099a0e8717ffb779c952426a69ae435ca1f57210c"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12ce4932caf2ddf3e41d17fc9c02d67126935a44b86df6a206cf0d7161548627"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae5331c23ce118c53b172fa64a4c037eb83c9165aba3a7ba9ddd3ec9fa64a699"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0b07fffc13f474264c336298d1b4ce01d9c5a011415b79d4ee5527bb69ae6f65"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8"},
+ {file = "Pillow-9.3.0.tar.gz", hash = "sha256:c935a22a557a560108d780f9a0fc426dd7459940dc54faa49d83249c8d3e760f"},
]
pkginfo = [
{file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"},
@@ -2427,8 +2462,8 @@ platformdirs = [
{file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"},
]
prometheus-client = [
- {file = "prometheus_client-0.14.0-py3-none-any.whl", hash = "sha256:f4aba3fdd1735852049f537c1f0ab177159b7ab76f271ecc4d2f45aa2a1d01f2"},
- {file = "prometheus_client-0.14.0.tar.gz", hash = "sha256:8f7a922dd5455ad524b6ba212ce8eb2b4b05e073f4ec7218287f88b1cac34750"},
+ {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"},
+ {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"},
]
protobuf = [
{file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"},
@@ -2455,17 +2490,19 @@ protobuf = [
{file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"},
]
psycopg2 = [
- {file = "psycopg2-2.9.4-cp310-cp310-win32.whl", hash = "sha256:8de6a9fc5f42fa52f559e65120dcd7502394692490c98fed1221acf0819d7797"},
- {file = "psycopg2-2.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:1da77c061bdaab450581458932ae5e469cc6e36e0d62f988376e9f513f11cb5c"},
- {file = "psycopg2-2.9.4-cp36-cp36m-win32.whl", hash = "sha256:a11946bad3557ca254f17357d5a4ed63bdca45163e7a7d2bfb8e695df069cc3a"},
- {file = "psycopg2-2.9.4-cp36-cp36m-win_amd64.whl", hash = "sha256:46361c054df612c3cc813fdb343733d56543fb93565cff0f8ace422e4da06acb"},
- {file = "psycopg2-2.9.4-cp37-cp37m-win32.whl", hash = "sha256:aafa96f2da0071d6dd0cbb7633406d99f414b40ab0f918c9d9af7df928a1accb"},
- {file = "psycopg2-2.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:aa184d551a767ad25df3b8d22a0a62ef2962e0e374c04f6cbd1204947f540d61"},
- {file = "psycopg2-2.9.4-cp38-cp38-win32.whl", hash = "sha256:839f9ea8f6098e39966d97fcb8d08548fbc57c523a1e27a1f0609addf40f777c"},
- {file = "psycopg2-2.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:c7fa041b4acb913f6968fce10169105af5200f296028251d817ab37847c30184"},
- {file = "psycopg2-2.9.4-cp39-cp39-win32.whl", hash = "sha256:07b90a24d5056687781ddaef0ea172fd951f2f7293f6ffdd03d4f5077801f426"},
- {file = "psycopg2-2.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:849bd868ae3369932127f0771c08d1109b254f08d48dc42493c3d1b87cb2d308"},
- {file = "psycopg2-2.9.4.tar.gz", hash = "sha256:d529926254e093a1b669f692a3aa50069bc71faf5b0ecd91686a78f62767d52f"},
+ {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"},
+ {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"},
+ {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"},
+ {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"},
+ {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"},
+ {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"},
+ {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"},
+ {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"},
+ {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"},
+ {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"},
+ {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"},
+ {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"},
+ {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"},
]
psycopg2cffi = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
@@ -2504,8 +2541,8 @@ pyasn1-modules = [
{file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"},
]
pycodestyle = [
- {file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"},
- {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"},
+ {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"},
+ {file = "pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"},
]
pycparser = [
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
@@ -2550,8 +2587,8 @@ pydantic = [
{file = "pydantic-1.10.2.tar.gz", hash = "sha256:91b8e218852ef6007c2b98cd861601c6a09f1aa32bbbb74fab5b1c33d4a1e410"},
]
pyflakes = [
- {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"},
- {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"},
+ {file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"},
+ {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"},
]
pygithub = [
{file = "PyGithub-1.56-py3-none-any.whl", hash = "sha256:d15f13d82165306da8a68aefc0f848a6f6432d5febbff13b60a94758ce3ef8b5"},
@@ -2617,8 +2654,8 @@ pyrsistent = [
{file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"},
]
pysaml2 = [
- {file = "pysaml2-7.1.2-py2.py3-none-any.whl", hash = "sha256:d915961aaa4d4d97d952b30fe5d18d64cf053465acf3e38d8090b36c5ff08325"},
- {file = "pysaml2-7.1.2.tar.gz", hash = "sha256:1ec94442306511b93fe7a5710f224e05e0aba948682d506614d1e04f3232f827"},
+ {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"},
+ {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"},
]
python-dateutil = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
@@ -2668,8 +2705,8 @@ pyyaml = [
{file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
]
readme-renderer = [
- {file = "readme_renderer-33.0-py3-none-any.whl", hash = "sha256:f02cee0c4de9636b5a62b6be50c9742427ba1b956aad1d938bfb087d0d72ccdf"},
- {file = "readme_renderer-33.0.tar.gz", hash = "sha256:e3b53bc84bd6af054e4cc1fe3567dc1ae19f554134221043a3f8c674e22209db"},
+ {file = "readme_renderer-37.2-py3-none-any.whl", hash = "sha256:d3f06a69e8c40fca9ab3174eca48f96d9771eddb43517b17d96583418427b106"},
+ {file = "readme_renderer-37.2.tar.gz", hash = "sha256:e8ad25293c98f781dbc2c5a36a309929390009f902f99e1798c761aaf04a7923"},
]
requests = [
{file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"},
@@ -2683,6 +2720,10 @@ rfc3986 = [
{file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"},
{file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"},
]
+rich = [
+ {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"},
+ {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"},
+]
secretstorage = [
{file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
{file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
@@ -2692,8 +2733,8 @@ semantic-version = [
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
]
sentry-sdk = [
- {file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
- {file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
+ {file = "sentry-sdk-1.10.1.tar.gz", hash = "sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691"},
+ {file = "sentry_sdk-1.10.1-py2.py3-none-any.whl", hash = "sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad"},
]
service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@@ -2704,8 +2745,8 @@ setuptools = [
{file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"},
]
setuptools-rust = [
- {file = "setuptools-rust-1.5.1.tar.gz", hash = "sha256:0e05e456645d59429cb1021370aede73c0760e9360bbfdaaefb5bced530eb9d7"},
- {file = "setuptools_rust-1.5.1-py3-none-any.whl", hash = "sha256:306b236ff3aa5229180e58292610d0c2c51bb488191122d2fc559ae4caeb7d5e"},
+ {file = "setuptools-rust-1.5.2.tar.gz", hash = "sha256:d8daccb14dc0eae1b6b6eb3ecef79675bd37b4065369f79c35393dd5c55652c7"},
+ {file = "setuptools_rust-1.5.2-py3-none-any.whl", hash = "sha256:8eb45851e34288f2296cd5ab9e924535ac1757318b730a13fe6836867843f206"},
]
signedjson = [
{file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
@@ -2800,21 +2841,17 @@ towncrier = [
{file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"},
{file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"},
]
-tqdm = [
- {file = "tqdm-4.63.0-py2.py3-none-any.whl", hash = "sha256:e643e071046f17139dea55b880dc9b33822ce21613b4a4f5ea57f202833dbc29"},
- {file = "tqdm-4.63.0.tar.gz", hash = "sha256:1d9835ede8e394bb8c9dcbffbca02d717217113adc679236873eeaac5bc0b3cd"},
-]
treq = [
{file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"},
{file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"},
]
twine = [
- {file = "twine-3.8.0-py3-none-any.whl", hash = "sha256:d0550fca9dc19f3d5e8eadfce0c227294df0a2a951251a4385797c8a6198b7c8"},
- {file = "twine-3.8.0.tar.gz", hash = "sha256:8efa52658e0ae770686a13b675569328f1fba9837e5de1867bfe5f46a9aefe19"},
+ {file = "twine-4.0.1-py3-none-any.whl", hash = "sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e"},
+ {file = "twine-4.0.1.tar.gz", hash = "sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0"},
]
twisted = [
- {file = "Twisted-22.8.0-py3-none-any.whl", hash = "sha256:8d4718d1e48dcc28933f8beb48dc71cfe77a125e37ad1eb7a3d0acc49baf6c99"},
- {file = "Twisted-22.8.0.tar.gz", hash = "sha256:e5b60de39f2d1da153fbe1874d885fe3fcbdb21fcc446fa759a53e8fc3513bed"},
+ {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"},
+ {file = "Twisted-22.10.0.tar.gz", hash = "sha256:32acbd40a94f5f46e7b42c109bfae2b302250945561783a8b7a059048f2d4d31"},
]
twisted-iocpsupport = [
{file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},
@@ -2881,8 +2918,8 @@ types-ipaddress = [
{file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"},
]
types-jsonschema = [
- {file = "types-jsonschema-4.4.6.tar.gz", hash = "sha256:7f2a804618756768c7c0616f8c794b61fcfe3077c7ee1ad47dcf01c5e5f692bb"},
- {file = "types_jsonschema-4.4.6-py3-none-any.whl", hash = "sha256:1db9031ca49a8444d01bd2ce8cf2f89318382b04610953b108321e6f8fb03390"},
+ {file = "types-jsonschema-4.17.0.1.tar.gz", hash = "sha256:62625d492e4930411a431909ac32301aeab6180500e70ee222f81d43204cfb3c"},
+ {file = "types_jsonschema-4.17.0.1-py3-none-any.whl", hash = "sha256:77badbe3881cbf79ac9561be2be2b1f37ab104b13afd2231840e6dd6e94e63c2"},
]
types-pillow = [
{file = "types-Pillow-9.2.2.1.tar.gz", hash = "sha256:85c139e06e1c46ec5f9c634d5c54a156b0958d5d0e8be024ed353db0c804b426"},
@@ -2892,21 +2929,21 @@ types-psycopg2 = [
{file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"},
{file = "types_psycopg2-2.9.21.1-py3-none-any.whl", hash = "sha256:858838f1972f39da2a6e28274201fed8619a40a235dd86e7f66f4548ec474395"},
]
-types-pyOpenSSL = [
- {file = "types-pyOpenSSL-22.0.10.tar.gz", hash = "sha256:f943b834f5b97e5e808764c2f6e37be1a2e226c46792296f61558196acfcc3a1"},
- {file = "types_pyOpenSSL-22.0.10-py3-none-any.whl", hash = "sha256:63baea211768bea580a769ac5c0d637ae8cd3150314aadc5726ca22e4c4f241a"},
+types-pyopenssl = [
+ {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},
+ {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"},
]
-types-PyYAML = [
- {file = "types-PyYAML-6.0.12.tar.gz", hash = "sha256:f6f350418125872f3f0409d96a62a5a5ceb45231af5cc07ee0034ec48a3c82fa"},
- {file = "types_PyYAML-6.0.12-py3-none-any.whl", hash = "sha256:29228db9f82df4f1b7febee06bbfb601677882e98a3da98132e31c6874163e15"},
+types-pyyaml = [
+ {file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"},
+ {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"},
]
types-requests = [
- {file = "types-requests-2.28.11.tar.gz", hash = "sha256:7ee827eb8ce611b02b5117cfec5da6455365b6a575f5e3ff19f655ba603e6b4e"},
- {file = "types_requests-2.28.11-py3-none-any.whl", hash = "sha256:af5f55e803cabcfb836dad752bd6d8a0fc8ef1cd84243061c0e27dee04ccf4fd"},
+ {file = "types-requests-2.28.11.2.tar.gz", hash = "sha256:fdcd7bd148139fb8eef72cf4a41ac7273872cad9e6ada14b11ff5dfdeee60ed3"},
+ {file = "types_requests-2.28.11.2-py3-none-any.whl", hash = "sha256:14941f8023a80b16441b3b46caffcbfce5265fd14555844d6029697824b5a2ef"},
]
types-setuptools = [
- {file = "types-setuptools-65.5.0.1.tar.gz", hash = "sha256:5b297081c8f1fbd992cd8b305a97ed96ee6ffc765e9115124029597dd10b8a71"},
- {file = "types_setuptools-65.5.0.1-py3-none-any.whl", hash = "sha256:601d45b5e9979d2b931de5403aa11153626a1eadd1ce9727b21f24673ced5ceb"},
+ {file = "types-setuptools-65.5.0.3.tar.gz", hash = "sha256:17769171f5f2a2dc69b25c0d3106552a5cda767bbf6b36cb6212b26dae5aa9fc"},
+ {file = "types_setuptools-65.5.0.3-py3-none-any.whl", hash = "sha256:9254c32b0cc91c486548e7d7561243b5bd185402a383e93c6691e1b9bc8d86e2"},
]
types-urllib3 = [
{file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"},
@@ -2921,65 +2958,78 @@ unpaddedbase64 = [
{file = "unpaddedbase64-2.1.0.tar.gz", hash = "sha256:7273c60c089de39d90f5d6d4a7883a79e319dc9d9b1c8924a7fab96178a5f005"},
]
urllib3 = [
- {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"},
- {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"},
+ {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"},
+ {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"},
]
webencodings = [
{file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"},
{file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
]
wrapt = [
- {file = "wrapt-1.13.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca"},
- {file = "wrapt-1.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44"},
- {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056"},
- {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785"},
- {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096"},
- {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33"},
- {file = "wrapt-1.13.3-cp310-cp310-win32.whl", hash = "sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f"},
- {file = "wrapt-1.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755"},
- {file = "wrapt-1.13.3-cp35-cp35m-win32.whl", hash = "sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851"},
- {file = "wrapt-1.13.3-cp35-cp35m-win_amd64.whl", hash = "sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13"},
- {file = "wrapt-1.13.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918"},
- {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade"},
- {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc"},
- {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf"},
- {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125"},
- {file = "wrapt-1.13.3-cp36-cp36m-win32.whl", hash = "sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36"},
- {file = "wrapt-1.13.3-cp36-cp36m-win_amd64.whl", hash = "sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10"},
- {file = "wrapt-1.13.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068"},
- {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709"},
- {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df"},
- {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2"},
- {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b"},
- {file = "wrapt-1.13.3-cp37-cp37m-win32.whl", hash = "sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829"},
- {file = "wrapt-1.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea"},
- {file = "wrapt-1.13.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9"},
- {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554"},
- {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c"},
- {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b"},
- {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce"},
- {file = "wrapt-1.13.3-cp38-cp38-win32.whl", hash = "sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79"},
- {file = "wrapt-1.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb"},
- {file = "wrapt-1.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb"},
- {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32"},
- {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7"},
- {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e"},
- {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640"},
- {file = "wrapt-1.13.3-cp39-cp39-win32.whl", hash = "sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374"},
- {file = "wrapt-1.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb"},
- {file = "wrapt-1.13.3.tar.gz", hash = "sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185"},
+ {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"},
+ {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"},
+ {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"},
+ {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"},
+ {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"},
+ {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"},
+ {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"},
+ {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"},
+ {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"},
+ {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"},
]
xmlschema = [
{file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"},
diff --git a/pyproject.toml b/pyproject.toml
index 8772b42965..690e3c07de 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
-version = "1.70.0rc1"
+version = "1.72.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "Apache-2.0"
@@ -192,7 +192,7 @@ psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'Py
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
-authlib = { version = ">=0.14.0", optional = true }
+authlib = { version = ">=0.15.1", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
@@ -318,7 +318,7 @@ build-backend = "poetry.core.masonry.api"
[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
-skip = "cp36* *-musllinux_i686"
+skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
@@ -330,3 +330,12 @@ environment= { PATH = "$PATH:$HOME/.cargo/bin" }
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
+
+
+[tool.cibuildwheel.linux]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"
+
+[tool.cibuildwheel.macos]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"
diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs
index 63240cacfc..49802fa4eb 100644
--- a/rust/src/push/base_rules.rs
+++ b/rust/src/push/base_rules.rs
@@ -25,6 +25,7 @@ use crate::push::Action;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
+use crate::push::RelatedEventMatchCondition;
use crate::push::SetTweak;
use crate::push::TweakValue;
@@ -114,6 +115,22 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch(
+ RelatedEventMatchCondition {
+ key: Some(Cow::Borrowed("sender")),
+ pattern: None,
+ pattern_type: Some(Cow::Borrowed("user_id")),
+ rel_type: Cow::Borrowed("m.in_reply_to"),
+ include_fallbacks: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
priority_class: 5,
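Over the client-server API, the new base rule should render roughly as follows. This is a sketch: the condition fields come from the struct above, but the exact `highlight`/`sound` action payloads are assumptions based on how `HIGHLIGHT_ACTION` and `SOUND_ACTION` are defined elsewhere in `base_rules.rs`.

```python
# Approximate client-facing form of the new MSC3664 base rule (illustrative).
MSC3664_REPLY_RULE = {
    "rule_id": "global/override/.im.nheko.msc3664.reply",
    "default": True,
    "enabled": True,
    "conditions": [
        {
            "kind": "im.nheko.msc3664.related_event_match",
            "key": "sender",
            "pattern_type": "user_id",
            "rel_type": "m.in_reply_to",
        }
    ],
    # Assumed rendering of Action::Notify, HIGHLIGHT_ACTION and SOUND_ACTION.
    "actions": [
        "notify",
        {"set_tweak": "highlight"},
        {"set_tweak": "sound", "value": "default"},
    ],
}
```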
diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs
index 0365dd01dc..cedd42c54d 100644
--- a/rust/src/push/evaluator.rs
+++ b/rust/src/push/evaluator.rs
@@ -23,6 +23,7 @@ use regex::Regex;
use super::{
utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition,
+ RelatedEventMatchCondition,
};
lazy_static! {
@@ -49,6 +50,13 @@ pub struct PushRuleEvaluator {
/// The power level of the sender of the event, or None if event is an
/// outlier.
sender_power_level: Option<i64>,
+
+ /// The related events, indexed by relation type. Flattened in the same manner as
+ /// `flattened_keys`.
+ related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
+
+ /// Whether MSC3664 (push rules for related events) is enabled.
+ related_event_match_enabled: bool,
}
#[pymethods]
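The flattening mentioned in the comment above turns a nested event object into dot-separated keys before it reaches the evaluator. A rough Python illustration of the idea (this helper is illustrative; Synapse has its own flattening code on the Python side):

```python
from typing import Any, Dict

def flatten(event: Dict[str, Any], prefix: str = "") -> Dict[str, str]:
    """Flatten nested string fields into dot-separated keys."""
    out: Dict[str, str] = {}
    for key, value in event.items():
        path = f"{prefix}{key}"
        if isinstance(value, dict):
            out.update(flatten(value, f"{path}."))
        elif isinstance(value, str):
            out[path] = value
    return out

# flatten({"type": "m.room.message", "content": {"body": "hello"}})
# -> {"type": "m.room.message", "content.body": "hello"}
```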
@@ -60,6 +68,8 @@ impl PushRuleEvaluator {
room_member_count: u64,
sender_power_level: Option<i64>,
notification_power_levels: BTreeMap<String, i64>,
+ related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
+ related_event_match_enabled: bool,
) -> Result<Self, Error> {
let body = flattened_keys
.get("content.body")
@@ -72,6 +82,8 @@ impl PushRuleEvaluator {
room_member_count,
notification_power_levels,
sender_power_level,
+ related_events_flattened,
+ related_event_match_enabled,
})
}
@@ -156,6 +168,9 @@ impl PushRuleEvaluator {
KnownCondition::EventMatch(event_match) => {
self.match_event_match(event_match, user_id)?
}
+ KnownCondition::RelatedEventMatch(event_match) => {
+ self.match_related_event_match(event_match, user_id)?
+ }
KnownCondition::ContainsDisplayName => {
if let Some(dn) = display_name {
if !dn.is_empty() {
@@ -239,6 +254,79 @@ impl PushRuleEvaluator {
compiled_pattern.is_match(haystack)
}
+ /// Evaluates a `related_event_match` condition. (MSC3664)
+ fn match_related_event_match(
+ &self,
+ event_match: &RelatedEventMatchCondition,
+ user_id: Option<&str>,
+ ) -> Result<bool, Error> {
+ // First check if related event matching is enabled...
+ if !self.related_event_match_enabled {
+ return Ok(false);
+ }
+
+ // Get the related event; if there is none, the condition cannot match.
+ let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) {
+ event
+ } else {
+ return Ok(false);
+ };
+
+ // If we are not matching fallbacks, don't match if the event contains the
+ // special key marking it as a fallback relation.
+ if !event_match.include_fallbacks.unwrap_or(false)
+ && event.contains_key("im.vector.is_falling_back")
+ {
+ return Ok(false);
+ }
+
+ // If no key is given, the mere existence of the related event is enough
+ // for the condition to match.
+ let key = if let Some(key) = &event_match.key {
+ key
+ } else {
+ return Ok(true);
+ };
+
+ let pattern = if let Some(pattern) = &event_match.pattern {
+ pattern
+ } else if let Some(pattern_type) = &event_match.pattern_type {
+ // The `pattern_type` can either be "user_id" or "user_localpart",
+ // either way if we don't have a `user_id` then the condition can't
+ // match.
+ let user_id = if let Some(user_id) = user_id {
+ user_id
+ } else {
+ return Ok(false);
+ };
+
+ match &**pattern_type {
+ "user_id" => user_id,
+ "user_localpart" => get_localpart_from_id(user_id)?,
+ _ => return Ok(false),
+ }
+ } else {
+ return Ok(false);
+ };
+
+ let haystack = if let Some(haystack) = event.get(&**key) {
+ haystack
+ } else {
+ return Ok(false);
+ };
+
+ // For the content.body we match against "words", but for everything
+ // else we match against the entire value.
+ let match_type = if key == "content.body" {
+ GlobMatchType::Word
+ } else {
+ GlobMatchType::Whole
+ };
+
+ let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
+ compiled_pattern.is_match(haystack)
+ }
+
/// Match the member count against an 'is' condition
/// The `is` condition can be things like '>2', '==3' or even just '4'.
fn match_member_count(&self, is: &str) -> Result<bool, Error> {
@@ -267,8 +355,15 @@ impl PushRuleEvaluator {
fn push_rule_evaluator() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
- let evaluator =
- PushRuleEvaluator::py_new(flattened_keys, 10, Some(0), BTreeMap::new()).unwrap();
+ let evaluator = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ Some(0),
+ BTreeMap::new(),
+ BTreeMap::new(),
+ true,
+ )
+ .unwrap();
let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
assert_eq!(result.len(), 3);
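To make the decision sequence in `match_related_event_match` concrete, here is a rough Python rendition of the common reply case. It is a sketch of the logic above, not the Rust implementation: `pattern_type` resolution is omitted and glob matching is simplified to string equality.

```python
from typing import Dict, Optional

def related_event_matches(
    related_events: Dict[str, Dict[str, str]],
    rel_type: str,
    key: Optional[str],
    pattern: Optional[str],
    include_fallbacks: bool = False,
) -> bool:
    event = related_events.get(rel_type)
    if event is None:
        return False  # no related event of the requested type
    if not include_fallbacks and "im.vector.is_falling_back" in event:
        return False  # ignore fallback relations unless opted in
    if key is None:
        return True   # bare existence of the relation is enough
    if pattern is None:
        return False  # nothing to match against
    return event.get(key) == pattern  # globs simplified to equality

# A reply sent by @bob:example.org matches a condition on its sender:
related = {"m.in_reply_to": {"sender": "@bob:example.org", "content.body": "hi"}}
assert related_event_matches(related, "m.in_reply_to", "sender", "@bob:example.org")
```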
diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs
index 0dabfab8b8..d57800aa4a 100644
--- a/rust/src/push/mod.rs
+++ b/rust/src/push/mod.rs
@@ -267,6 +267,8 @@ pub enum Condition {
#[serde(tag = "kind")]
pub enum KnownCondition {
EventMatch(EventMatchCondition),
+ #[serde(rename = "im.nheko.msc3664.related_event_match")]
+ RelatedEventMatch(RelatedEventMatchCondition),
ContainsDisplayName,
RoomMemberCount {
#[serde(skip_serializing_if = "Option::is_none")]
@@ -299,6 +301,20 @@ pub struct EventMatchCondition {
pub pattern_type: Option<Cow<'static, str>>,
}
+/// The body of a [`Condition::RelatedEventMatch`]
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct RelatedEventMatchCondition {
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub key: Option<Cow<'static, str>>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern: Option<Cow<'static, str>>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern_type: Option<Cow<'static, str>>,
+ pub rel_type: Cow<'static, str>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub include_fallbacks: Option<bool>,
+}
+
/// The collection of push rules for a user.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
@@ -391,15 +407,21 @@ impl PushRules {
pub struct FilteredPushRules {
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
+ msc3664_enabled: bool,
}
#[pymethods]
impl FilteredPushRules {
#[new]
- pub fn py_new(push_rules: PushRules, enabled_map: BTreeMap<String, bool>) -> Self {
+ pub fn py_new(
+ push_rules: PushRules,
+ enabled_map: BTreeMap<String, bool>,
+ msc3664_enabled: bool,
+ ) -> Self {
Self {
push_rules,
enabled_map,
+ msc3664_enabled,
}
}
@@ -414,13 +436,25 @@ impl FilteredPushRules {
/// Iterates over all the rules and their enabled state, including base
/// rules, in the order they should be executed in.
fn iter(&self) -> impl Iterator<Item = (&PushRule, bool)> {
- self.push_rules.iter().map(|r| {
- let enabled = *self
- .enabled_map
- .get(&*r.rule_id)
- .unwrap_or(&r.default_enabled);
- (r, enabled)
- })
+ self.push_rules
+ .iter()
+ .filter(|rule| {
+ // Ignore disabled experimental push rules
+ if !self.msc3664_enabled
+ && rule.rule_id == "global/override/.im.nheko.msc3664.reply"
+ {
+ return false;
+ }
+
+ true
+ })
+ .map(|r| {
+ let enabled = *self
+ .enabled_map
+ .get(&*r.rule_id)
+ .unwrap_or(&r.default_enabled);
+ (r, enabled)
+ })
}
}
@@ -446,6 +480,17 @@ fn test_deserialize_condition() {
let _: Condition = serde_json::from_str(json).unwrap();
}
+#[test]
+fn test_deserialize_unstable_msc3664_condition() {
+ let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern":"coffee","rel_type":"m.in_reply_to"}"#;
+
+ let condition: Condition = serde_json::from_str(json).unwrap();
+ assert!(matches!(
+ condition,
+ Condition::Known(KnownCondition::RelatedEventMatch(_))
+ ));
+}
+
#[test]
fn test_deserialize_custom_condition() {
let json = r#"{"kind":"custom_tag"}"#;
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index cd2e64b75f..7442300196 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -27,6 +27,7 @@ DISTS = (
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
+ "ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
)
DESC = """\
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index a7b1e1e3a8..803c6ce92d 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -126,7 +126,7 @@ export COMPLEMENT_BASE_IMAGE=complement-synapse
extra_test_args=()
-test_tags="synapse_blacklist,msc3787"
+test_tags="synapse_blacklist,msc3787,msc3874"
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -139,6 +139,9 @@ if [[ -n "$WORKERS" ]]; then
# Use workers.
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
+ # Pass through the workers defined. If none, it will be an empty string
+ export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"
+
# Workers can only use Postgres as a database.
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index c82c58c54b..bf47b6c713 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -219,9 +219,7 @@ def _prepare() -> None:
update_branch(repo)
# Create the new release branch
- # Type ignore will no longer be needed after GitPython 3.1.28.
- # See https://github.com/gitpython-developers/GitPython/pull/1419
- repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type]
+ repo.create_head(release_branch_name, commit=base_branch)
# Special-case SyTest: we don't actually prepare any files so we may
# as well push it now (and only when we create a release branch;
diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi
index f2a61df660..ceade65ef9 100644
--- a/stubs/synapse/synapse_rust/push.pyi
+++ b/stubs/synapse/synapse_rust/push.pyi
@@ -1,4 +1,4 @@
-from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
+from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union
from synapse.types import JsonDict
@@ -25,7 +25,9 @@ class PushRules:
def rules(self) -> Collection[PushRule]: ...
class FilteredPushRules:
- def __init__(self, push_rules: PushRules, enabled_map: Dict[str, bool]): ...
+ def __init__(
+ self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
+ ): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
def get_base_rule_ids() -> Collection[str]: ...
@@ -37,10 +39,12 @@ class PushRuleEvaluator:
room_member_count: int,
sender_power_level: Optional[int],
notification_power_levels: Mapping[str, int],
+ related_events_flattened: Mapping[str, Mapping[str, str]],
+ related_event_match_enabled: bool,
): ...
def run(
self,
push_rules: FilteredPushRules,
user_id: Optional[str],
display_name: Optional[str],
- ) -> Collection[dict]: ...
+ ) -> Collection[Union[Mapping, str]]: ...
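
To make the widened signatures concrete, here is a minimal, hedged sketch of constructing the evaluator with the two new arguments; the event contents and the `filtered_rules` object are invented placeholders, and the argument order mirrors the stub above and the updated Rust test earlier in this diff:

```python
from synapse.synapse_rust.push import PushRuleEvaluator

flattened_event = {"content.body": "foo bar bob hello"}
related_events = {
    # relation type -> flattened fields of the related event
    "m.in_reply_to": {"content.body": "coffee", "sender": "@alice:example.com"},
}

evaluator = PushRuleEvaluator(
    flattened_event,
    10,              # room_member_count
    0,               # sender_power_level
    {},              # notification_power_levels
    related_events,  # new: related_events_flattened
    True,            # new: related_event_match_enabled
)
actions = evaluator.run(filtered_rules, "@bob:example.com", "bob")
```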
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 35b8beefae..8aa646890d 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -125,6 +125,8 @@ class EventTypes:
MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
MSC2716_MARKER: Final = "org.matrix.msc2716.marker"
+ Reaction: Final = "m.reaction"
+
class ToDeviceEventTypes:
RoomKeyRequest: Final = "m.room_key_request"
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 400dd12aba..e2cfcea0f2 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -713,7 +713,7 @@ class HttpResponseException(CodeMessageException):
set to the reason code from the HTTP response.
Returns:
- SynapseError:
+ The error converted to a SynapseError.
"""
# try to parse the body as json, to get better errcode/msg, but
# default to M_UNKNOWN with the HTTP status as the error text
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 26be377d03..a9888381b4 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -43,7 +43,7 @@ if TYPE_CHECKING:
from synapse.server import HomeServer
FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
@@ -63,7 +63,7 @@ FILTER_SCHEMA = {
}
ROOM_FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"not_rooms": {"$ref": "#/definitions/room_id_array"},
@@ -77,7 +77,7 @@ ROOM_FILTER_SCHEMA = {
}
ROOM_EVENT_FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
@@ -143,7 +143,7 @@ USER_FILTER_SCHEMA = {
},
},
},
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
}
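
The practical effect of flipping `additionalProperties` is that unknown filter fields are now ignored rather than rejected. A small sketch with `jsonschema` (the extra field name is invented):

```python
from jsonschema import ValidationError, validate

schema = {
    "additionalProperties": True,  # as changed above
    "type": "object",
    "properties": {"limit": {"type": "number"}},
}
# An as-yet-unknown (e.g. unstable MSC) field no longer fails validation:
validate({"limit": 10, "org.example.future_field": 1}, schema)

try:
    validate(
        {"limit": 10, "org.example.future_field": 1},
        dict(schema, additionalProperties=False),
    )
except ValidationError:
    pass  # the old behaviour: the filter was rejected outright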
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 044c7d4926..511790c7c5 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -343,6 +343,7 @@ class RequestRatelimiter:
requester: Requester,
update: bool = True,
is_admin_redaction: bool = False,
+ n_actions: int = 1,
) -> None:
"""Ratelimits requests.
@@ -355,6 +356,8 @@ class RequestRatelimiter:
is_admin_redaction: Whether this is a room admin/moderator
redacting an event. If so then we may apply different
ratelimits depending on config.
+ n_actions: Multiplier for the number of actions to apply to the
+ rate limiter at once.
Raises:
LimitExceededError if the request should be ratelimited
@@ -383,7 +386,9 @@ class RequestRatelimiter:
if is_admin_redaction and self.admin_redaction_ratelimiter:
# If we have separate config for admin redactions, use a separate
# ratelimiter as to not have user_ids clash
- await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
+ await self.admin_redaction_ratelimiter.ratelimit(
+ requester, update=update, n_actions=n_actions
+ )
else:
# Override rate and burst count per-user
await self.request_ratelimiter.ratelimit(
@@ -391,4 +396,5 @@ class RequestRatelimiter:
rate_hz=messages_per_second,
burst_count=burst_count,
update=update,
+ n_actions=n_actions,
)
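
A hedged sketch of the intended call pattern for the new multiplier; the handler context (`requester`, `event_ids`) is assumed:

```python
# Charge a batched operation against the limiter in one call, e.g. a
# relation-based redaction that removes many events at once.
await self.request_ratelimiter.ratelimit(
    requester,
    n_actions=len(event_ids),
)
```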
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 62626c2fe7..ebac950bf2 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -47,6 +47,7 @@ from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool
+from twisted.web.resource import Resource
import synapse.util.caches
from synapse.api.constants import MAX_PDU_SIZE
@@ -55,12 +56,13 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
-from synapse.config.server import ManholeConfig
+from synapse.config.server import ListenerConfig, ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
+from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.tracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
@@ -357,6 +359,55 @@ def listen_tcp(
return r # type: ignore[return-value]
+def listen_http(
+ listener_config: ListenerConfig,
+ root_resource: Resource,
+ version_string: str,
+ max_request_body_size: int,
+ context_factory: Optional[IOpenSSLContextFactory],
+ reactor: ISynapseReactor = reactor,
+) -> List[Port]:
+ port = listener_config.port
+ bind_addresses = listener_config.bind_addresses
+ tls = listener_config.tls
+
+ assert listener_config.http_options is not None
+
+ site_tag = listener_config.http_options.tag
+ if site_tag is None:
+ site_tag = str(port)
+
+ site = SynapseSite(
+ "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
+ site_tag,
+ listener_config,
+ root_resource,
+ version_string,
+ max_request_body_size=max_request_body_size,
+ reactor=reactor,
+ )
+ if tls:
+ # refresh_certificate should have been called before this.
+ assert context_factory is not None
+ ports = listen_ssl(
+ bind_addresses,
+ port,
+ site,
+ context_factory,
+ reactor=reactor,
+ )
+ logger.info("Synapse now listening on TCP port %d (TLS)", port)
+ else:
+ ports = listen_tcp(
+ bind_addresses,
+ port,
+ site,
+ reactor=reactor,
+ )
+ logger.info("Synapse now listening on TCP port %d", port)
+ return ports
+
+
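
Both HTTP listener call sites later in this diff collapse to roughly the following; shown here only to illustrate the new helper's shape:

```python
ports = listen_http(
    listener_config,
    root_resource,
    self.version_string,
    max_request_body_size(self.config),
    self.tls_server_context_factory,  # may be None; asserted when tls is set
    reactor=self.get_reactor(),
)
```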
def listen_ssl(
bind_addresses: Collection[str],
port: int,
@@ -558,7 +609,7 @@ def reload_cache_config(config: HomeServerConfig) -> None:
logger.warning(f)
else:
logger.debug(
- "New cache config. Was:\n %s\nNow:\n",
+ "New cache config. Was:\n %s\nNow:\n %s",
previous_cache_config.__dict__,
config.caches.__dict__,
)
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 3c8c00ea5b..165d1c5db0 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -28,10 +28,6 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.events import EventBase
from synapse.handlers.admin import ExfiltrationWriter
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
@@ -40,10 +36,24 @@ from synapse.storage.databases.main.appservice import (
ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
+from synapse.storage.databases.main.devices import DeviceWorkerStore
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
+from synapse.storage.databases.main.event_push_actions import (
+ EventPushActionsWorkerStore,
+)
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.filtering import FilteringWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
+from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext
@@ -52,17 +62,25 @@ logger = logging.getLogger("synapse.app.admin_cmd")
class AdminCmdSlavedStore(
- SlavedFilteringStore,
- SlavedPushRuleStore,
- SlavedEventStore,
- SlavedDeviceStore,
+ FilteringWorkerStore,
+ DeviceWorkerStore,
TagsWorkerStore,
DeviceInboxWorkerStore,
AccountDataWorkerStore,
+ PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
- RegistrationWorkerStore,
+ RoomMemberWorkerStore,
+ RelationsWorkerStore,
+ EventFederationWorkerStore,
+ EventPushActionsWorkerStore,
+ StateGroupWorkerStore,
+ SignatureWorkerStore,
+ UserErasureWorkerStore,
ReceiptsWorkerStore,
+ StreamWorkerStore,
+ EventsWorkerStore,
+ RegistrationWorkerStore,
RoomWorkerStore,
):
def __init__(
diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py
index b22f315453..8c0f4a57e7 100644
--- a/synapse/app/complement_fork_starter.py
+++ b/synapse/app/complement_fork_starter.py
@@ -55,13 +55,13 @@ import os
import signal
import sys
from types import FrameType
-from typing import Any, Callable, List, Optional
+from typing import Any, Callable, Dict, List, Optional
from twisted.internet.main import installReactor
# a list of the original signal handlers, before we installed our custom ones.
# We restore these in our child processes.
-_original_signal_handlers: dict[int, Any] = {}
+_original_signal_handlers: Dict[int, Any] = {}
class ProxiedReactor:
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 2a9f039367..74909b7d4a 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -14,14 +14,12 @@
# limitations under the License.
import logging
import sys
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List
-from twisted.internet import address
from twisted.web.resource import Resource
import synapse
import synapse.events
-from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import (
CLIENT_API_PREFIX,
FEDERATION_PREFIX,
@@ -43,17 +41,9 @@ from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.http.site import SynapseRequest, SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.keys import SlavedKeyStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client import (
account_data,
@@ -76,12 +66,12 @@ from synapse.rest.client import (
versions,
voip,
)
-from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import (
KeyChangesServlet,
KeyQueryServlet,
+ KeyUploadServlet,
OneTimeKeyServlet,
)
from synapse.rest.client.register import (
@@ -101,8 +91,16 @@ from synapse.storage.databases.main.appservice import (
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
+from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.directory import DirectoryWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
+from synapse.storage.databases.main.event_push_actions import (
+ EventPushActionsWorkerStore,
+)
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.filtering import FilteringWorkerStore
+from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.metrics import ServerMetricsStore
@@ -111,118 +109,31 @@ from synapse.storage.databases.main.monthly_active_users import (
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
+from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.room_batch import RoomBatchStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stats import StatsStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
-from synapse.types import JsonDict
+from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.util import SYNAPSE_VERSION
from synapse.util.httpresourcetree import create_resource_tree
logger = logging.getLogger("synapse.app.generic_worker")
-class KeyUploadServlet(RestServlet):
- """An implementation of the `KeyUploadServlet` that responds to read only
- requests, but otherwise proxies through to the master instance.
- """
-
- PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
-
- def __init__(self, hs: HomeServer):
- """
- Args:
- hs: server
- """
- super().__init__()
- self.auth = hs.get_auth()
- self.store = hs.get_datastores().main
- self.http_client = hs.get_simple_http_client()
- self.main_uri = hs.config.worker.worker_main_http_uri
-
- async def on_POST(
- self, request: SynapseRequest, device_id: Optional[str]
- ) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=True)
- user_id = requester.user.to_string()
- body = parse_json_object_from_request(request)
-
- if device_id is not None:
- # passing the device_id here is deprecated; however, we allow it
- # for now for compatibility with older clients.
- if requester.device_id is not None and device_id != requester.device_id:
- logger.warning(
- "Client uploading keys for a different device "
- "(logged in as %s, uploading for %s)",
- requester.device_id,
- device_id,
- )
- else:
- device_id = requester.device_id
-
- if device_id is None:
- raise SynapseError(
- 400, "To upload keys, you must pass device_id when authenticating"
- )
-
- if body:
- # They're actually trying to upload something, proxy to main synapse.
-
- # Proxy headers from the original request, such as the auth headers
- # (in case the access token is there) and the original IP /
- # User-Agent of the request.
- headers = {
- header: request.requestHeaders.getRawHeaders(header, [])
- for header in (b"Authorization", b"User-Agent")
- }
- # Add the previous hop to the X-Forwarded-For header.
- x_forwarded_for = request.requestHeaders.getRawHeaders(
- b"X-Forwarded-For", []
- )
- # we use request.client here, since we want the previous hop, not the
- # original client (as returned by request.getClientAddress()).
- if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
- previous_host = request.client.host.encode("ascii")
- # If the header exists, add to the comma-separated list of the first
- # instance of the header. Otherwise, generate a new header.
- if x_forwarded_for:
- x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host]
- x_forwarded_for.extend(x_forwarded_for[1:])
- else:
- x_forwarded_for = [previous_host]
- headers[b"X-Forwarded-For"] = x_forwarded_for
-
- # Replicate the original X-Forwarded-Proto header. Note that
- # XForwardedForRequest overrides isSecure() to give us the original protocol
- # used by the client, as opposed to the protocol used by our upstream proxy
- # - which is what we want here.
- headers[b"X-Forwarded-Proto"] = [
- b"https" if request.isSecure() else b"http"
- ]
-
- try:
- result = await self.http_client.post_json_get_json(
- self.main_uri + request.uri.decode("ascii"), body, headers=headers
- )
- except HttpResponseException as e:
- raise e.to_synapse_error() from e
- except RequestSendFailed as e:
- raise SynapseError(502, "Failed to talk to master") from e
-
- return 200, result
- else:
- # Just interested in counts.
- result = await self.store.count_e2e_one_time_keys(user_id, device_id)
- return 200, {"one_time_key_counts": result}
-
-
class GenericWorkerSlavedStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker.
@@ -232,26 +143,36 @@ class GenericWorkerSlavedStore(
EndToEndRoomKeyStore,
PresenceStore,
DeviceInboxWorkerStore,
- SlavedDeviceStore,
- SlavedPushRuleStore,
+ DeviceWorkerStore,
TagsWorkerStore,
AccountDataWorkerStore,
- SlavedPusherStore,
CensorEventsStore,
ClientIpWorkerStore,
- SlavedEventStore,
- SlavedKeyStore,
+ # KeyStore isn't really safe to use from a worker, but for now we do so and hope that
+ # the races it creates aren't too bad.
+ KeyStore,
RoomWorkerStore,
RoomBatchStore,
DirectoryWorkerStore,
+ PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
ProfileWorkerStore,
- SlavedFilteringStore,
+ FilteringWorkerStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
ServerMetricsStore,
+ PusherWorkerStore,
+ RoomMemberWorkerStore,
+ RelationsWorkerStore,
+ EventFederationWorkerStore,
+ EventPushActionsWorkerStore,
+ StateGroupWorkerStore,
+ SignatureWorkerStore,
+ UserErasureWorkerStore,
ReceiptsWorkerStore,
+ StreamWorkerStore,
+ EventsWorkerStore,
RegistrationWorkerStore,
SearchStore,
TransactionWorkerStore,
@@ -268,15 +189,9 @@ class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore
def _listen_http(self, listener_config: ListenerConfig) -> None:
- port = listener_config.port
- bind_addresses = listener_config.bind_addresses
assert listener_config.http_options is not None
- site_tag = listener_config.http_options.tag
- if site_tag is None:
- site_tag = str(port)
-
# We always include a health resource.
resources: Dict[str, Resource] = {"/health": HealthResource()}
@@ -375,23 +290,15 @@ class GenericWorkerServer(HomeServer):
root_resource = create_resource_tree(resources, OptionsResource())
- _base.listen_tcp(
- bind_addresses,
- port,
- SynapseSite(
- "synapse.access.http.%s" % (site_tag,),
- site_tag,
- listener_config,
- root_resource,
- self.version_string,
- max_request_body_size=max_request_body_size(self.config),
- reactor=self.get_reactor(),
- ),
+ _base.listen_http(
+ listener_config,
+ root_resource,
+ self.version_string,
+ max_request_body_size(self.config),
+ self.tls_server_context_factory,
reactor=self.get_reactor(),
)
- logger.info("Synapse worker now listening on port %d", port)
-
def start_listening(self) -> None:
for listener in self.config.worker.worker_listeners:
if listener.type == "http":
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index de3f08876f..4f4fee4782 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -37,8 +37,7 @@ from synapse.api.urls import (
from synapse.app import _base
from synapse.app._base import (
handle_startup_exception,
- listen_ssl,
- listen_tcp,
+ listen_http,
max_request_body_size,
redirect_stdio_to_logs,
register_start,
@@ -53,7 +52,6 @@ from synapse.http.server import (
RootOptionsRedirectResource,
StaticResource,
)
-from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
@@ -83,8 +81,6 @@ class SynapseHomeServer(HomeServer):
self, config: HomeServerConfig, listener_config: ListenerConfig
) -> Iterable[Port]:
port = listener_config.port
- bind_addresses = listener_config.bind_addresses
- tls = listener_config.tls
# Must exist since this is an HTTP listener.
assert listener_config.http_options is not None
site_tag = listener_config.http_options.tag
@@ -140,37 +136,15 @@ class SynapseHomeServer(HomeServer):
else:
root_resource = OptionsResource()
- site = SynapseSite(
- "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
- site_tag,
+ ports = listen_http(
listener_config,
create_resource_tree(resources, root_resource),
self.version_string,
- max_request_body_size=max_request_body_size(self.config),
+ max_request_body_size(self.config),
+ self.tls_server_context_factory,
reactor=self.get_reactor(),
)
- if tls:
- # refresh_certificate should have been called before this.
- assert self.tls_server_context_factory is not None
- ports = listen_ssl(
- bind_addresses,
- port,
- site,
- self.tls_server_context_factory,
- reactor=self.get_reactor(),
- )
- logger.info("Synapse now listening on TCP port %d (TLS)", port)
-
- else:
- ports = listen_tcp(
- bind_addresses,
- port,
- site,
- reactor=self.get_reactor(),
- )
- logger.info("Synapse now listening on TCP port %d", port)
-
return ports
def _configure_named_resource(
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 0dfa00df44..500bdde3a9 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -172,12 +172,24 @@ class ApplicationService:
Returns:
True if this service would like to know about this room.
"""
- member_list = await store.get_users_in_room(
+ # We can use `get_local_users_in_room(...)` here because an application service
+ # can only be interested in local users of the server it's on (ignore any remote
+ # users that might match the user namespace regex).
+ #
+ # In the future, we could consider re-using
+ # `store.get_app_service_users_in_room`, which is very similar to this
+ # function but performs slightly worse, because it lacks our early
+ # escape-hatch of returning as soon as we find a single user that the
+ # appservice is interested in. The juice would be worth the squeeze if
+ # `store.get_app_service_users_in_room` were used in more places than
+ # an experimental MSC, but for now we avoid the extra work.
+ local_user_ids = await store.get_local_users_in_room(
room_id, on_invalidate=cache_context.invalidate
)
# check joined member events
- for user_id in member_list:
+ for user_id in local_user_ids:
if self.is_interested_in_user(user_id):
return True
return False
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index fbac4375b0..60774b240d 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -123,7 +123,7 @@ class ApplicationServiceApi(SimpleHttpClient):
response = await self.get_json(
uri,
{"access_token": service.hs_token},
- headers={"Authorization": f"Bearer {service.hs_token}"},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
return True
@@ -147,7 +147,7 @@ class ApplicationServiceApi(SimpleHttpClient):
response = await self.get_json(
uri,
{"access_token": service.hs_token},
- headers={"Authorization": f"Bearer {service.hs_token}"},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
return True
@@ -190,7 +190,9 @@ class ApplicationServiceApi(SimpleHttpClient):
b"access_token": service.hs_token,
}
response = await self.get_json(
- uri, args=args, headers={"Authorization": f"Bearer {service.hs_token}"}
+ uri,
+ args=args,
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if not isinstance(response, list):
logger.warning(
@@ -230,7 +232,7 @@ class ApplicationServiceApi(SimpleHttpClient):
info = await self.get_json(
uri,
{"access_token": service.hs_token},
- headers={"Authorization": f"Bearer {service.hs_token}"},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if not _is_valid_3pe_metadata(info):
@@ -327,7 +329,7 @@ class ApplicationServiceApi(SimpleHttpClient):
uri=uri,
json_body=body,
args={"access_token": service.hs_token},
- headers={"Authorization": f"Bearer {service.hs_token}"},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
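
The list-wrapping above matters because Twisted's `Headers` maps each header name to a *list* of values, so a bare string risks being handled as a sequence rather than a single value. A minimal sketch:

```python
from twisted.web.http_headers import Headers

headers = Headers({b"Authorization": [b"Bearer some_hs_token"]})
assert headers.getRawHeaders(b"Authorization") == [b"Bearer some_hs_token"]
```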
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 4009add01d..d4b71d1673 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -98,6 +98,9 @@ class ExperimentalConfig(Config):
# MSC3773: Thread notifications
self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
+ # MSC3664: Pushrules to match on related events
+ self.msc3664_enabled: bool = experimental.get("msc3664_enabled", False)
+
# MSC3848: Introduce errcodes for specific event sending failures
self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)
@@ -125,3 +128,6 @@ class ExperimentalConfig(Config):
self.msc3886_endpoint: Optional[str] = experimental.get(
"msc3886_endpoint", None
)
+
+ # MSC3912: Relation-based redactions.
+ self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index b62b3b9205..5468b963a2 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -53,7 +53,7 @@ DEFAULT_LOG_CONFIG = Template(
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
@@ -317,10 +317,9 @@ def setup_logging(
Set up the logging subsystem.
Args:
- config (LoggingConfig | synapse.config.worker.WorkerConfig):
- configuration data
+ config: configuration data
- use_worker_options (bool): True to use the 'worker_log_config' option
+ use_worker_options: True to use the 'worker_log_config' option
instead of 'log_config'.
logBeginner: The Twisted logBeginner to use.
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index bb065f9f2f..6034a0346e 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -43,7 +43,7 @@ class MetricsConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False)
- self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)
+ self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)
self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get(
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index 5418a332da..0bd83f4010 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -123,6 +123,8 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
"userinfo_endpoint": {"type": "string"},
"jwks_uri": {"type": "string"},
"skip_verification": {"type": "boolean"},
+ "backchannel_logout_enabled": {"type": "boolean"},
+ "backchannel_logout_ignore_sub": {"type": "boolean"},
"user_profile_method": {
"type": "string",
"enum": ["auto", "userinfo_endpoint"],
@@ -292,6 +294,10 @@ def _parse_oidc_config_dict(
token_endpoint=oidc_config.get("token_endpoint"),
userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
jwks_uri=oidc_config.get("jwks_uri"),
+ backchannel_logout_enabled=oidc_config.get("backchannel_logout_enabled", False),
+ backchannel_logout_ignore_sub=oidc_config.get(
+ "backchannel_logout_ignore_sub", False
+ ),
skip_verification=oidc_config.get("skip_verification", False),
user_profile_method=oidc_config.get("user_profile_method", "auto"),
allow_existing_users=oidc_config.get("allow_existing_users", False),
@@ -368,6 +374,12 @@ class OidcProviderConfig:
# "openid" scope is used.
jwks_uri: Optional[str]
+ # Whether Synapse should react to backchannel logouts
+ backchannel_logout_enabled: bool
+
+ # Whether Synapse should ignore the `sub` claim in backchannel logouts or not.
+ backchannel_logout_ignore_sub: bool
+
# Whether to skip metadata verification
skip_verification: bool
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 1ed001e105..5c13fe428a 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -150,8 +150,5 @@ class RatelimitConfig(Config):
self.rc_third_party_invite = RatelimitSettings(
config.get("rc_third_party_invite", {}),
- defaults={
- "per_second": self.rc_message.per_second,
- "burst_count": self.rc_message.burst_count,
- },
+ defaults={"per_second": 0.0025, "burst_count": 5},
)
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 0fb725dd8f..913b83e174 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -67,6 +67,7 @@ class InstanceLocationConfig:
host: str
port: int
+ tls: bool = False
@attr.s
@@ -149,13 +150,25 @@ class WorkerConfig(Config):
# The port on the main synapse for HTTP replication endpoint
self.worker_replication_http_port = config.get("worker_replication_http_port")
+ # The tls mode on the main synapse for HTTP replication endpoint.
+ # For backward compatibility this defaults to False.
+ self.worker_replication_http_tls = config.get(
+ "worker_replication_http_tls", False
+ )
+
# The shared secret used for authentication when connecting to the main synapse.
self.worker_replication_secret = config.get("worker_replication_secret", None)
self.worker_name = config.get("worker_name", self.worker_app)
self.instance_name = self.worker_name or "master"
+ # FIXME: Remove this check after a suitable amount of time.
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
+ if self.worker_main_http_uri is not None:
+ logger.warning(
+ "The config option worker_main_http_uri is unused since Synapse 1.73. "
+ "It can be safely removed from your configuration."
+ )
# This option is really only here to support `--manhole` command line
# argument.
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index c88afb2986..ed15f88350 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -213,7 +213,7 @@ class Keyring:
def verify_json_objects_for_server(
self, server_and_json: Iterable[Tuple[str, dict, int]]
- ) -> List[defer.Deferred]:
+ ) -> List["defer.Deferred[None]"]:
"""Bulk verifies signatures of json objects, bulk fetching keys as
necessary.
@@ -226,10 +226,9 @@ class Keyring:
valid.
Returns:
- List: for each input triplet, a deferred indicating success
- or failure to verify each json object's signature for the given
- server_name. The deferreds run their callbacks in the sentinel
- logcontext.
+ For each input triplet, a deferred indicating success or failure to
+ verify each json object's signature for the given server_name. The
+ deferreds run their callbacks in the sentinel logcontext.
"""
return [
run_in_background(
@@ -858,7 +857,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
response = await self.client.get_json(
destination=server_name,
path="/_matrix/key/v2/server/"
- + urllib.parse.quote(requested_key_id),
+ + urllib.parse.quote(requested_key_id, safe=""),
ignore_backoff=True,
# we only give the remote server 10s to respond. It should be an
# easy request to handle, so if it doesn't reply within 10s, it's
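
The `safe=""` is the substantive change here: by default `urllib.parse.quote` leaves `/` unescaped, so an attacker-controlled key ID could otherwise alter the request path:

```python
import urllib.parse

key_id = "ed25519:abc/../evil"
print(urllib.parse.quote(key_id))           # ed25519%3Aabc/../evil  (default safe="/")
print(urllib.parse.quote(key_id, safe=""))  # ed25519%3Aabc%2F..%2Fevil
```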
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 030c3ca408..8aca9a3ab9 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -597,8 +597,7 @@ def _event_type_from_format_version(
format_version: The event format version
Returns:
- type: A type that can be initialized as per the initializer of
- `FrozenEvent`
+ A type that can be initialized as per the initializer of `FrozenEvent`
"""
if format_version == EventFormatVersions.ROOM_V1_V2:
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index e2ee10dd3d..d62906043f 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -128,6 +128,7 @@ class EventBuilder:
state_filter=StateFilter.from_types(
auth_types_for_event(self.room_version, self)
),
+ await_full_state=False,
)
auth_event_ids = self._event_auth_handler.compute_auth_events(
self, state_ids
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 094249cb35..0615ce0870 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -80,6 +80,18 @@ PDU_RETRY_TIME_MS = 1 * 60 * 1000
T = TypeVar("T")
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class PulledPduInfo:
+ """
+ A result object that stores the PDU and info about it like which homeserver we
+ pulled it from (`pull_origin`)
+ """
+
+ pdu: EventBase
+ # Which homeserver we pulled the PDU from
+ pull_origin: str
+
+
class InvalidResponseError(RuntimeError):
"""Helper for _try_destination_list: indicates that the server returned a response
we couldn't parse
@@ -114,7 +126,9 @@ class FederationClient(FederationBase):
self.hostname = hs.hostname
self.signing_key = hs.signing_key
- self._get_pdu_cache: ExpiringCache[str, EventBase] = ExpiringCache(
+ # Cache mapping `event_id` to a tuple of the event itself and the `pull_origin`
+ # (which server we pulled the event from)
+ self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache(
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
@@ -352,11 +366,11 @@ class FederationClient(FederationBase):
@tag_args
async def get_pdu(
self,
- destinations: Iterable[str],
+ destinations: Collection[str],
event_id: str,
room_version: RoomVersion,
timeout: Optional[int] = None,
- ) -> Optional[EventBase]:
+ ) -> Optional[PulledPduInfo]:
"""Requests the PDU with given origin and ID from the remote home
servers.
@@ -371,11 +385,11 @@ class FederationClient(FederationBase):
moving to the next destination. None indicates no timeout.
Returns:
- The requested PDU, or None if we were unable to find it.
+ The requested PDU wrapped in `PulledPduInfo`, or None if we were unable to find it.
"""
logger.debug(
- "get_pdu: event_id=%s from destinations=%s", event_id, destinations
+ "get_pdu(event_id=%s): from destinations=%s", event_id, destinations
)
# TODO: Rate limit the number of times we try and get the same event.
@@ -384,19 +398,25 @@ class FederationClient(FederationBase):
# it gets persisted to the database), so we cache the results of the lookup.
# Note that this is separate to the regular get_event cache which caches
# events once they have been persisted.
- event = self._get_pdu_cache.get(event_id)
+ get_pdu_cache_entry = self._get_pdu_cache.get(event_id)
+ event = None
+ pull_origin = None
+ if get_pdu_cache_entry:
+ event, pull_origin = get_pdu_cache_entry
# If we don't see the event in the cache, go try to fetch it from the
# provided remote federated destinations
- if not event:
+ else:
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
+ # TODO: We can probably refactor this to use `_try_destination_list`
for destination in destinations:
now = self._clock.time_msec()
last_attempt = pdu_attempts.get(destination, 0)
if last_attempt + PDU_RETRY_TIME_MS > now:
logger.debug(
- "get_pdu: skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+ "get_pdu(event_id=%s): skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+ event_id,
destination,
last_attempt,
PDU_RETRY_TIME_MS,
@@ -411,43 +431,48 @@ class FederationClient(FederationBase):
room_version=room_version,
timeout=timeout,
)
+ pull_origin = destination
pdu_attempts[destination] = now
if event:
# Prime the cache
- self._get_pdu_cache[event.event_id] = event
+ self._get_pdu_cache[event.event_id] = (event, pull_origin)
# Now that we have an event, we can break out of this
# loop and stop asking other destinations.
break
+ except NotRetryingDestination as e:
+ logger.info("get_pdu(event_id=%s): %s", event_id, e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "get_pdu(event_id=%s): Not attempting to fetch PDU from %s because the homeserver is not on our federation whitelist",
+ event_id,
+ destination,
+ )
+ continue
except SynapseError as e:
logger.info(
- "Failed to get PDU %s from %s because %s",
+ "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
event_id,
destination,
e,
)
continue
- except NotRetryingDestination as e:
- logger.info(str(e))
- continue
- except FederationDeniedError as e:
- logger.info(str(e))
- continue
except Exception as e:
pdu_attempts[destination] = now
logger.info(
- "Failed to get PDU %s from %s because %s",
+ "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
event_id,
destination,
e,
)
continue
- if not event:
+ if not event or not pull_origin:
return None
# `event` now refers to an object stored in `get_pdu_cache`. Our
@@ -459,7 +484,7 @@ class FederationClient(FederationBase):
event.room_version,
)
- return event_copy
+ return PulledPduInfo(event_copy, pull_origin)
@trace
@tag_args
@@ -699,12 +724,14 @@ class FederationClient(FederationBase):
pdu_origin = get_domain_from_id(pdu.sender)
if not res and pdu_origin != origin:
try:
- res = await self.get_pdu(
+ pulled_pdu_info = await self.get_pdu(
destinations=[pdu_origin],
event_id=pdu.event_id,
room_version=room_version,
timeout=10000,
)
+ if pulled_pdu_info is not None:
+ res = pulled_pdu_info.pdu
except SynapseError:
pass
@@ -806,6 +833,7 @@ class FederationClient(FederationBase):
)
for destination in destinations:
+ # We don't want to ask our own server for information we don't have
if destination == self.server_name:
continue
@@ -814,9 +842,21 @@ class FederationClient(FederationBase):
except (
RequestSendFailed,
InvalidResponseError,
- NotRetryingDestination,
) as e:
logger.warning("Failed to %s via %s: %s", description, destination, e)
+ # Skip to the next homeserver in the list to try.
+ continue
+ except NotRetryingDestination as e:
+ logger.info("%s: %s", description, e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "%s: Not attempting to %s from %s because the homeserver is not on our federation whitelist",
+ description,
+ description,
+ destination,
+ )
+ continue
except UnsupportedRoomVersionError:
raise
except HttpResponseException as e:
@@ -1609,6 +1649,54 @@ class FederationClient(FederationBase):
return result
async def timestamp_to_event(
+ self, *, destinations: List[str], room_id: str, timestamp: int, direction: str
+ ) -> Optional["TimestampToEventResponse"]:
+ """
+ Calls each remote federating server from `destinations` asking for their closest
+ event to the given timestamp in the given direction until we get a response.
+ Also validates the response to always return the expected keys or raises an
+ error.
+
+ Args:
+ destinations: The domains of homeservers to try fetching from
+ room_id: Room to fetch the event from
+ timestamp: The point in time (inclusive) we should navigate from in
+ the given direction to find the closest event.
+ direction: ["f"|"b"] to indicate whether we should navigate forward
+ or backward from the given timestamp to find the closest event.
+
+ Returns:
+ A parsed TimestampToEventResponse including the closest event_id
+ and origin_server_ts or None if no destination has a response.
+ """
+
+ async def _timestamp_to_event_from_destination(
+ destination: str,
+ ) -> TimestampToEventResponse:
+ return await self._timestamp_to_event_from_destination(
+ destination, room_id, timestamp, direction
+ )
+
+ try:
+ # Loop through each homeserver candidate until we get a successful response
+ timestamp_to_event_response = await self._try_destination_list(
+ "timestamp_to_event",
+ destinations,
+ # TODO: The requested timestamp may lie in a part of the
+ # event graph that the remote server *also* didn't have,
+ # in which case they will have returned another event
+ # which may be nowhere near the requested timestamp. In
+ # the future, we may need to reconcile that gap and ask
+ # other homeservers, and/or extend `/timestamp_to_event`
+ # to return events on *both* sides of the timestamp to
+ # help reconcile the gap faster.
+ _timestamp_to_event_from_destination,
+ )
+ return timestamp_to_event_response
+ except SynapseError:
+ return None
+
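
A hedged usage sketch for the new public method; the destinations and identifiers are invented:

```python
response = await federation_client.timestamp_to_event(
    destinations=["one.example.com", "two.example.com"],
    room_id="!room:example.com",
    timestamp=1668000000000,  # ms since the epoch
    direction="f",            # "f" forwards, "b" backwards
)
if response is not None:
    print(response.event_id, response.origin_server_ts)
```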
+ async def _timestamp_to_event_from_destination(
self, destination: str, room_id: str, timestamp: int, direction: str
) -> "TimestampToEventResponse":
"""
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 493b5d0830..4f4a88b953 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -69,6 +69,8 @@ from synapse.replication.http.federation import (
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.lock import Lock
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
+from synapse.storage.roommember import MemberSummary
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
@@ -686,8 +688,9 @@ class FederationServer(FederationBase):
state_event_ids: Collection[str]
servers_in_room: Optional[Collection[str]]
if caller_supports_partial_state:
+ summary = await self.store.get_room_summary(room_id)
state_event_ids = _get_event_ids_for_partial_state_join(
- event, prev_state_ids
+ event, prev_state_ids, summary
)
servers_in_room = await self.state.get_hosts_in_room_at_events(
room_id, event_ids=event.prev_event_ids()
@@ -1490,6 +1493,7 @@ class FederationHandlerRegistry:
def _get_event_ids_for_partial_state_join(
join_event: EventBase,
prev_state_ids: StateMap[str],
+ summary: Dict[str, MemberSummary],
) -> Collection[str]:
"""Calculate state to be retuned in a partial_state send_join
@@ -1516,8 +1520,19 @@ def _get_event_ids_for_partial_state_join(
if current_membership_event_id is not None:
state_event_ids.add(current_membership_event_id)
- # TODO: return a few more members:
- # - those with invites
- # - those that are kicked? / banned
+ name_id = prev_state_ids.get((EventTypes.Name, ""))
+ canonical_alias_id = prev_state_ids.get((EventTypes.CanonicalAlias, ""))
+ if not name_id and not canonical_alias_id:
+ # Also include the hero members of the room (for DM rooms without a title).
+ # To do this properly, we should select the correct subset of membership events
+ # from `prev_state_ids`. Instead, we are lazier and use the (cached)
+ # `get_room_summary` function, which is based on the current state of the room.
+ # This introduces races; we choose to ignore them because a) they should be rare
+ # and b) even if it's wrong, joining servers will get the full state eventually.
+ heroes = extract_heroes_from_room_summary(summary, join_event.state_key)
+ for hero in heroes:
+ membership_event_id = prev_state_ids.get((EventTypes.Member, hero))
+ if membership_event_id:
+ state_event_ids.add(membership_event_id)
return state_event_ids
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 774ecd81b6..3ad483efe0 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -536,8 +536,7 @@ class FederationSender(AbstractFederationSender):
if event_entries:
now = self.clock.time_msec()
- last_id = next(reversed(event_ids))
- ts = event_to_received_ts[last_id]
+ ts = max(t for t in event_to_received_ts.values() if t)
assert ts is not None
synapse.metrics.event_processing_lag.labels(
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index cd39d4d111..a3cfc701cd 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -280,12 +280,11 @@ class TransportLayerClient:
Note that this does not append any events to any graphs.
Args:
- destination (str): address of remote homeserver
- room_id (str): room to join/leave
- user_id (str): user to be joined/left
- membership (str): one of join/leave
- params (dict[str, str|Iterable[str]]): Query parameters to include in the
- request.
+ destination: address of remote homeserver
+ room_id: room to join/leave
+ user_id: user to be joined/left
+ membership: one of join/leave
+ params: Query parameters to include in the request.
Returns:
Succeeds when we get a 2xx HTTP response. The result
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index cba690e795..fd927e0af7 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -226,10 +226,10 @@ class BaseFederationServlet:
With arguments:
- origin (unicode|None): The authenticated server_name of the calling server,
+ origin (str|None): The authenticated server_name of the calling server,
unless REQUIRE_AUTH is set to False and authentication failed.
- content (unicode|None): decoded json body of the request. None if the
+ content (str|None): decoded json body of the request. None if the
request was a GET.
query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index f2989cc4a2..5bf8e86387 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -100,6 +100,7 @@ class AdminHandler:
user_info_dict["avatar_url"] = profile.avatar_url
user_info_dict["threepids"] = threepids
user_info_dict["external_ids"] = external_ids
+ user_info_dict["erased"] = await self.store.is_user_erased(user.to_string())
return user_info_dict
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index f5f0e0e7a7..8b9ef25d29 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -38,6 +38,7 @@ from typing import (
import attr
import bcrypt
import unpaddedbase64
+from prometheus_client import Counter
from twisted.internet.defer import CancelledError
from twisted.web.server import Request
@@ -48,6 +49,7 @@ from synapse.api.errors import (
Codes,
InteractiveAuthIncompleteError,
LoginError,
+ NotFoundError,
StoreError,
SynapseError,
UserDeactivatedError,
@@ -63,10 +65,14 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.databases.main.registration import (
+ LoginTokenExpired,
+ LoginTokenLookupResult,
+ LoginTokenReused,
+)
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
-from synapse.util.macaroons import LoginTokenAttributes
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
@@ -80,6 +86,12 @@ logger = logging.getLogger(__name__)
INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"
+invalid_login_token_counter = Counter(
+ "synapse_user_login_invalid_login_tokens",
+ "Counts the number of rejected m.login.token on /login",
+ ["reason"],
+)
+
def convert_client_dict_legacy_fields_to_identifier(
submission: JsonDict,
@@ -883,6 +895,25 @@ class AuthHandler:
return True
+ async def create_login_token_for_user_id(
+ self,
+ user_id: str,
+ duration_ms: int = (2 * 60 * 1000),
+ auth_provider_id: Optional[str] = None,
+ auth_provider_session_id: Optional[str] = None,
+ ) -> str:
+ login_token = self.generate_login_token()
+ now = self._clock.time_msec()
+ expiry_ts = now + duration_ms
+ await self.store.add_login_token_to_user(
+ user_id=user_id,
+ token=login_token,
+ expiry_ts=expiry_ts,
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+ return login_token
+
async def create_refresh_token_for_user_id(
self,
user_id: str,
@@ -1401,6 +1432,18 @@ class AuthHandler:
return None
return user_id
+ def generate_login_token(self) -> str:
+ """Generates an opaque string, for use as an short-term login token"""
+
+ # we use the following format for login tokens:
+ # syl_<random string>_<base62 crc check>
+
+ random_string = stringutils.random_string(20)
+ base = f"syl_{random_string}"
+
+ crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
+ return f"{base}_{crc}"
+
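
For illustration, the resulting tokens have the shape `syl_<20 random chars>_<6 char base62 crc>`; a sketch built from the same helpers (assuming `zlib.crc32`, as in the surrounding module):

```python
from zlib import crc32

from synapse.util.stringutils import base62_encode, random_string

base = f"syl_{random_string(20)}"
token = f"{base}_{base62_encode(crc32(base.encode('ascii')), minwidth=6)}"
# The crc is a cheap integrity check (catches truncation or typos when a
# token is copied around); the randomness, not the crc, is the security.
```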
def generate_access_token(self, for_user: UserID) -> str:
"""Generates an opaque string, for use as an access token"""
@@ -1427,16 +1470,17 @@ class AuthHandler:
crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
return f"{base}_{crc}"
- async def validate_short_term_login_token(
- self, login_token: str
- ) -> LoginTokenAttributes:
+ async def consume_login_token(self, login_token: str) -> LoginTokenLookupResult:
try:
- res = self.macaroon_gen.verify_short_term_login_token(login_token)
- except Exception:
- raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
+ return await self.store.consume_login_token(login_token)
+ except LoginTokenExpired:
+ invalid_login_token_counter.labels("expired").inc()
+ except LoginTokenReused:
+ invalid_login_token_counter.labels("reused").inc()
+ except NotFoundError:
+ invalid_login_token_counter.labels("not found").inc()
- await self.auth_blocking.check_auth_blocking(res.user_id)
- return res
+ raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
async def delete_access_token(self, access_token: str) -> None:
"""Invalidate a single access token
@@ -1711,7 +1755,7 @@ class AuthHandler:
)
# Create a login token
- login_token = self.macaroon_gen.generate_short_term_login_token(
+ login_token = await self.create_login_token_for_user_id(
registered_user_id,
auth_provider_id=auth_provider_id,
auth_provider_session_id=auth_provider_session_id,
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index d52ebada6b..2ea52257cb 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -85,7 +85,7 @@ class DirectoryHandler:
# TODO(erikj): Add transactions.
# TODO(erikj): Check if there is a current association.
if not servers:
- servers = await self._storage_controllers.state.get_current_hosts_in_room(
+ servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
@@ -290,7 +290,7 @@ class DirectoryHandler:
Codes.NOT_FOUND,
)
- extra_servers = await self._storage_controllers.state.get_current_hosts_in_room(
+ extra_servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
servers_set = set(extra_servers) | set(servers)
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index cf788a4a86..5f84f1769b 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -49,6 +49,7 @@ logger = logging.getLogger(__name__)
class E2eKeysHandler:
def __init__(self, hs: "HomeServer"):
+ self.config = hs.config
self.store = hs.get_datastores().main
self.federation = hs.get_federation_client()
self.device_handler = hs.get_device_handler()
@@ -431,13 +432,17 @@ class E2eKeysHandler:
@trace
@cancellable
async def query_local_devices(
- self, query: Mapping[str, Optional[List[str]]]
+ self,
+ query: Mapping[str, Optional[List[str]]],
+ include_displaynames: bool = True,
) -> Dict[str, Dict[str, dict]]:
"""Get E2E device keys for local users
Args:
query: map from user_id to a list
of devices to query (None for all devices)
+ include_displaynames: Whether to include device displaynames in the returned
+ device details.
Returns:
A map from user_id -> device_id -> device details
@@ -469,7 +474,9 @@ class E2eKeysHandler:
# make sure that each queried user appears in the result dict
result_dict[user_id] = {}
- results = await self.store.get_e2e_device_keys_for_cs_api(local_query)
+ results = await self.store.get_e2e_device_keys_for_cs_api(
+ local_query, include_displaynames
+ )
# Build the result structure
for user_id, device_keys in results.items():
@@ -482,11 +489,33 @@ class E2eKeysHandler:
async def on_federation_query_client_keys(
self, query_body: Dict[str, Dict[str, Optional[List[str]]]]
) -> JsonDict:
- """Handle a device key query from a federated server"""
+ """Handle a device key query from a federated server:
+
+ Handles the path: POST /_matrix/federation/v1/user/keys/query
+
+ Args:
+ query_body: The body of the query request. Should contain a key
+ "device_keys" that map to a dictionary of user ID's -> list of
+ device IDs. If the list of device IDs is empty, all devices of
+ that user will be queried.
+
+ Returns:
+ A json dictionary containing the following:
+ - device_keys: A dictionary containing the requested device information.
+ - master_keys: An optional dictionary of user ID -> master cross-signing
+ key info.
+ - self_signing_keys: An optional dictionary of user ID -> self-signing
+ key info.
+ """
device_keys_query: Dict[str, Optional[List[str]]] = query_body.get(
"device_keys", {}
)
- res = await self.query_local_devices(device_keys_query)
+ res = await self.query_local_devices(
+ device_keys_query,
+ include_displaynames=(
+ self.config.federation.allow_device_name_lookup_over_federation
+ ),
+ )
ret = {"device_keys": res}
# add in the cross-signing keys
@@ -841,7 +870,7 @@ class E2eKeysHandler:
- signatures of the user's master key by the user's devices.
Args:
- user_id (string): the user uploading the keys
+ user_id: the user uploading the keys
signatures (dict[string, dict]): map of devices to signed keys
Returns:
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 8786534e54..098288f058 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -377,8 +377,9 @@ class E2eRoomKeysHandler:
"""Deletes a given version of the user's e2e_room_keys backup
Args:
- user_id(str): the user whose current backup version we're deleting
- version(str): the version id of the backup being deleted
+ user_id: the user whose current backup version we're deleting
+ version: Optional. The version ID of the backup version we're deleting.
+ If missing, we delete the current backup version info.
Raises:
NotFoundError: if this backup version doesn't exist
"""
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 57856b8ec5..f9000d326a 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -442,6 +442,15 @@ class FederationHandler:
# appropriate stuff.
# TODO: We can probably do something more intelligent here.
return True
+ except NotRetryingDestination as e:
+ logger.info("_maybe_backfill_inner: %s", e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "_maybe_backfill_inner: Not attempting to backfill from %s because the homeserver is not on our federation whitelist",
+ dom,
+ )
+ continue
except (SynapseError, InvalidResponseError) as e:
logger.info("Failed to backfill from %s because %s", dom, e)
continue
@@ -477,15 +486,9 @@ class FederationHandler:
logger.info("Failed to backfill from %s because %s", dom, e)
continue
- except NotRetryingDestination as e:
- logger.info(str(e))
- continue
except RequestSendFailed as e:
logger.info("Failed to get backfill from %s because %s", dom, e)
continue
- except FederationDeniedError as e:
- logger.info(e)
- continue
except Exception as e:
logger.exception("Failed to backfill from %s because %s", dom, e)
continue
@@ -1017,7 +1020,9 @@ class FederationHandler:
context = EventContext.for_outlier(self._storage_controllers)
- await self._bulk_push_rule_evaluator.action_for_event_by_user(event, context)
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
+ )
try:
await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
@@ -1591,8 +1596,8 @@ class FederationHandler:
Fetch the complexity of a remote room over federation.
Args:
- remote_room_hosts (list[str]): The remote servers to ask.
- room_id (str): The room ID to ask about.
+ remote_room_hosts: The remote servers to ask.
+ room_id: The room ID to ask about.
Returns:
Dict contains the complexity
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 4e1d6dea2c..8357f6f84e 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -58,7 +58,7 @@ from synapse.event_auth import (
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
-from synapse.federation.federation_client import InvalidResponseError
+from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
from synapse.logging.context import nested_logging_context
from synapse.logging.tracing import (
SynapseTags,
@@ -1065,10 +1065,9 @@ class FederationEventHandler:
state_res_store=StateResolutionStore(self._store),
)
- except Exception:
+ except Exception as e:
logger.warning(
- "Error attempting to resolve state at missing prev_events",
- exc_info=True,
+ "Error attempting to resolve state at missing prev_events: %s", e
)
raise FederationError(
"ERROR",
@@ -1517,8 +1516,8 @@ class FederationEventHandler:
)
async def backfill_event_id(
- self, destination: str, room_id: str, event_id: str
- ) -> EventBase:
+ self, destinations: List[str], room_id: str, event_id: str
+ ) -> PulledPduInfo:
"""Backfill a single event and persist it as a non-outlier which means
we also pull in all of the state and auth events necessary for it.
@@ -1530,24 +1529,21 @@ class FederationEventHandler:
Raises:
FederationError if we are unable to find the event from the destination
"""
- logger.info(
- "backfill_event_id: event_id=%s from destination=%s", event_id, destination
- )
+ logger.info("backfill_event_id: event_id=%s", event_id)
room_version = await self._store.get_room_version(room_id)
- event_from_response = await self._federation_client.get_pdu(
- [destination],
+ pulled_pdu_info = await self._federation_client.get_pdu(
+ destinations,
event_id,
room_version,
)
- if not event_from_response:
+ if not pulled_pdu_info:
raise FederationError(
"ERROR",
404,
- "Unable to find event_id=%s from destination=%s to backfill."
- % (event_id, destination),
+ f"Unable to find event_id={event_id} from remote servers to backfill.",
affected=event_id,
)
@@ -1555,13 +1551,13 @@ class FederationEventHandler:
# and auth events to de-outlier it. This also sets up the necessary
# `state_groups` for the event.
await self._process_pulled_events(
- destination,
- [event_from_response],
+ pulled_pdu_info.pull_origin,
+ [pulled_pdu_info.pdu],
# Prevent notifications going to clients
backfilled=True,
)
- return event_from_response
+ return pulled_pdu_info
@trace
@tag_args
@@ -1584,19 +1580,19 @@ class FederationEventHandler:
async def get_event(event_id: str) -> None:
with nested_logging_context(event_id):
try:
- event = await self._federation_client.get_pdu(
+ pulled_pdu_info = await self._federation_client.get_pdu(
[destination],
event_id,
room_version,
)
- if event is None:
+ if pulled_pdu_info is None:
logger.warning(
"Server %s didn't return event %s",
destination,
event_id,
)
return
- events.append(event)
+ events.append(pulled_pdu_info.pdu)
except Exception as e:
logger.warning(
@@ -2171,8 +2167,8 @@ class FederationEventHandler:
min_depth,
)
else:
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
)
try:
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 93d09e9939..848e46eb9b 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -711,7 +711,7 @@ class IdentityHandler:
inviter_display_name: The current display name of the
inviter.
inviter_avatar_url: The URL of the inviter's avatar.
- id_access_token (str): The access token to authenticate to the identity
+ id_access_token: The access token to authenticate to the identity
server with
Returns:
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index e8b0a758a4..f2a0101733 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -877,6 +877,36 @@ class EventCreationHandler:
return prev_event
return None
+ async def get_event_from_transaction(
+ self,
+ requester: Requester,
+ txn_id: str,
+ room_id: str,
+ ) -> Optional[EventBase]:
+ """For the given transaction ID and room ID, check if there is a matching event.
+ If so, fetch it and return it.
+
+ Args:
+ requester: The requester making the request in the context of which we want
+ to fetch the event.
+ txn_id: The transaction ID.
+ room_id: The room ID.
+
+ Returns:
+ An event if one could be found, None otherwise.
+ """
+ if requester.access_token_id:
+ existing_event_id = await self.store.get_event_id_from_transaction_id(
+ room_id,
+ requester.user.to_string(),
+ requester.access_token_id,
+ txn_id,
+ )
+ if existing_event_id:
+ return await self.store.get_event(existing_event_id)
+
+ return None
+
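# A hypothetical caller illustrating the deduplication contract of the helper
# above: retrying with the same transaction ID on the same access token yields
# the previously persisted event rather than a duplicate.
async def send_once(handler, requester, txn_id: str, room_id: str, send):
    existing = await handler.get_event_from_transaction(requester, txn_id, room_id)
    if existing is not None:
        return existing  # already persisted on an earlier attempt
    return await send()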
async def create_and_send_nonmember_event(
self,
requester: Requester,
@@ -956,18 +986,17 @@ class EventCreationHandler:
# extremities to pile up, which in turn leads to state resolution
# taking longer.
async with self.limiter.queue(event_dict["room_id"]):
- if txn_id and requester.access_token_id:
- existing_event_id = await self.store.get_event_id_from_transaction_id(
- event_dict["room_id"],
- requester.user.to_string(),
- requester.access_token_id,
- txn_id,
+ if txn_id:
+ event = await self.get_event_from_transaction(
+ requester, txn_id, event_dict["room_id"]
)
- if existing_event_id:
- event = await self.store.get_event(existing_event_id)
+ if event:
# we know it was persisted, so must have a stream ordering
assert event.internal_metadata.stream_ordering
- return event, event.internal_metadata.stream_ordering
+ return (
+ event,
+ event.internal_metadata.stream_ordering,
+ )
event, context = await self.create_event(
requester,
@@ -1433,17 +1462,10 @@ class EventCreationHandler:
a room that has been un-partial stated.
"""
- for event, context in events_and_context:
- # Skip push notification actions for historical messages
- # because we don't want to notify people about old history back in time.
- # The historical messages also do not have the proper `context.current_state_ids`
- # and `state_groups` because they have `prev_events` that aren't persisted yet
- # (historical messages persisted in reverse-chronological order).
- if not event.internal_metadata.is_historical():
- with tracing.start_active_span("calculate_push_actions"):
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
- )
+ with tracing.start_active_span("calculate_push_actions"):
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ events_and_context
+ )
try:
# If we're a worker we need to hit out to the master.
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index d7a8226900..41c675f408 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -12,14 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import binascii
import inspect
+import json
import logging
-from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Generic,
+ List,
+ Optional,
+ Type,
+ TypeVar,
+ Union,
+)
from urllib.parse import urlencode, urlparse
import attr
+import unpaddedbase64
from authlib.common.security import generate_token
-from authlib.jose import JsonWebToken, jwt
+from authlib.jose import JsonWebToken, JWTClaims
+from authlib.jose.errors import InvalidClaimError, JoseError, MissingClaimError
from authlib.oauth2.auth import ClientAuth
from authlib.oauth2.rfc6749.parameters import prepare_grant_uri
from authlib.oidc.core import CodeIDToken, UserInfo
@@ -35,9 +49,12 @@ from typing_extensions import TypedDict
from twisted.web.client import readBody
from twisted.web.http_headers import Headers
+from synapse.api.errors import SynapseError
from synapse.config import ConfigError
from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig
from synapse.handlers.sso import MappingException, UserAttributes
+from synapse.http.server import finish_request
+from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
@@ -88,6 +105,8 @@ class Token(TypedDict):
#: there is no real point of doing this in our case.
JWK = Dict[str, str]
+C = TypeVar("C")
+
#: A JWK Set, as per RFC7517 sec 5.
class JWKS(TypedDict):
@@ -247,6 +266,80 @@ class OidcHandler:
await oidc_provider.handle_oidc_callback(request, session_data, code)
+ async def handle_backchannel_logout(self, request: SynapseRequest) -> None:
+ """Handle an incoming request to /_synapse/client/oidc/backchannel_logout
+
+ This extracts the logout_token from the request and tries to figure out
+ which OpenID Provider it is coming from. This works by matching the iss claim
+ with the issuer and the aud claim with the client_id.
+
+ Since at this point we don't know who signed the JWT, we can't just
+ decode it using authlib, since it always verifies the signature. We
+ have to decode it manually without validating the signature. The actual JWT
+ verification is done in the `OidcProvider.handle_backchannel_logout` method,
+ once we have figured out which provider sent the request.
+
+ Args:
+ request: the incoming request from the browser.
+ """
+ logout_token = parse_string(request, "logout_token")
+ if logout_token is None:
+ raise SynapseError(400, "Missing logout_token in request")
+
+ # A JWT looks like this:
+ # header.payload.signature
+ # where all parts are encoded with urlsafe base64.
+ # The aud and iss claims we care about are in the payload part, which
+ # is a JSON object.
+ try:
+ # By destructuring the list after splitting, we ensure that we have
+ # exactly 3 segments
+ _, payload, _ = logout_token.split(".")
+ except ValueError:
+ raise SynapseError(400, "Invalid logout_token in request")
+
+ try:
+ payload_bytes = unpaddedbase64.decode_base64(payload)
+ claims = json_decoder.decode(payload_bytes.decode("utf-8"))
+ except (json.JSONDecodeError, binascii.Error, UnicodeError):
+ raise SynapseError(400, "Invalid logout_token payload in request")
+
+ try:
+ # Let's extract the iss and aud claims
+ iss = claims["iss"]
+ aud = claims["aud"]
+ # The aud claim can be either a string or a list of strings. Here we
+ # normalize it as a list of strings.
+ if isinstance(aud, str):
+ aud = [aud]
+
+ # Check that we have the right types for the aud and the iss claims
+ if not isinstance(iss, str) or not isinstance(aud, list):
+ raise TypeError()
+ for a in aud:
+ if not isinstance(a, str):
+ raise TypeError()
+
+ # At this point we have properly checked the types of both claims
+ issuer: str = iss
+ audience: List[str] = aud
+ except (TypeError, KeyError):
+ raise SynapseError(400, "Invalid issuer/audience in logout_token")
+
+ # Now that we know the audience and the issuer, we can figure out from
+ # what provider it is coming from
+ oidc_provider: Optional[OidcProvider] = None
+ for provider in self._providers.values():
+ if provider.issuer == issuer and provider.client_id in audience:
+ oidc_provider = provider
+ break
+
+ if oidc_provider is None:
+ raise SynapseError(400, "Could not find the OP that issued this event")
+
+ # Ask the provider to handle the logout request.
+ await oidc_provider.handle_backchannel_logout(request, logout_token)
+
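# For reference, the payload segment of a back-channel logout token decodes to
# JSON of roughly this shape (illustrative values, following the example in
# OIDC Back-Channel Logout 1.0 sec. 2.4):
EXAMPLE_LOGOUT_TOKEN_PAYLOAD = {
    "iss": "https://server.example.com",
    "aud": "s6BhdRkqt3",
    "iat": 1471566154,
    "jti": "bWJq",
    "sid": "08a5019c-17e1-4977-8f42-65a12843ea02",
    "events": {"http://schemas.openid.net/event/backchannel-logout": {}},
}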
class OidcError(Exception):
"""Used to catch errors when calling the token_endpoint"""
@@ -275,6 +368,7 @@ class OidcProvider:
provider: OidcProviderConfig,
):
self._store = hs.get_datastores().main
+ self._clock = hs.get_clock()
self._macaroon_generaton = macaroon_generator
@@ -341,6 +435,7 @@ class OidcProvider:
self.idp_brand = provider.idp_brand
self._sso_handler = hs.get_sso_handler()
+ self._device_handler = hs.get_device_handler()
self._sso_handler.register_identity_provider(self)
@@ -399,6 +494,41 @@ class OidcProvider:
# If we're not using userinfo, we need a valid jwks to validate the ID token
m.validate_jwks_uri()
+ if self._config.backchannel_logout_enabled:
+ if not m.get("backchannel_logout_supported", False):
+ logger.warning(
+ "OIDC Back-Channel Logout is enabled for issuer %r"
+ "but it does not advertise support for it",
+ self.issuer,
+ )
+
+ elif not m.get("backchannel_logout_session_supported", False):
+ logger.warning(
+ "OIDC Back-Channel Logout is enabled and supported "
+ "by issuer %r but it might not send a session ID with "
+ "logout tokens, which is required for the logouts to work",
+ self.issuer,
+ )
+
+ if not self._config.backchannel_logout_ignore_sub:
+ # If OIDC backchannel logouts are enabled, the user mapping provider
+ # should use the `sub` claim. We verify that by mapping a dummy user
+ # and seeing if we get back the sub claim
+ user = UserInfo({"sub": "thisisasubject"})
+ try:
+ subject = self._user_mapping_provider.get_remote_user_id(user)
+ if subject != user["sub"]:
+ raise ValueError("Unexpected subject")
+ except Exception:
+ logger.warning(
+ f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} "
+ "but it looks like the configured `user_mapping_provider` "
+ "does not use the `sub` claim as subject. If it is the case, "
+ "and you want Synapse to ignore the `sub` claim in OIDC "
+ "Back-Channel Logouts, set `backchannel_logout_ignore_sub` "
+ "to `true` in the issuer config."
+ )
+
@property
def _uses_userinfo(self) -> bool:
"""Returns True if the ``userinfo_endpoint`` should be used.
@@ -414,6 +544,16 @@ class OidcProvider:
or self._user_profile_method == "userinfo_endpoint"
)
+ @property
+ def issuer(self) -> str:
+ """The issuer identifying this provider."""
+ return self._config.issuer
+
+ @property
+ def client_id(self) -> str:
+ """The client_id used when interacting with this provider."""
+ return self._config.client_id
+
async def load_metadata(self, force: bool = False) -> OpenIDProviderMetadata:
"""Return the provider metadata.
@@ -647,7 +787,7 @@ class OidcProvider:
Must include an ``access_token`` field.
Returns:
- UserInfo: an object representing the user.
+ an object representing the user.
"""
logger.debug("Using the OAuth2 access_token to request userinfo")
metadata = await self.load_metadata()
@@ -661,6 +801,59 @@ class OidcProvider:
return UserInfo(resp)
+ async def _verify_jwt(
+ self,
+ alg_values: List[str],
+ token: str,
+ claims_cls: Type[C],
+ claims_options: Optional[dict] = None,
+ claims_params: Optional[dict] = None,
+ ) -> C:
+ """Decode and validate a JWT, re-fetching the JWKS as needed.
+
+ Args:
+ alg_values: list of `alg` values allowed when verifying the JWT.
+ token: the JWT.
+ claims_cls: the JWTClaims class to use to validate the claims.
+ claims_options: dict of options passed to the `claims_cls` constructor.
+ claims_params: dict of params passed to the `claims_cls` constructor.
+
+ Returns:
+ The decoded claims in the JWT.
+ """
+ jwt = JsonWebToken(alg_values)
+
+ logger.debug("Attempting to decode JWT (%s) %r", claims_cls.__name__, token)
+
+ # Try to decode the keys in cache first, then retry by forcing the keys
+ # to be reloaded
+ jwk_set = await self.load_jwks()
+ try:
+ claims = jwt.decode(
+ token,
+ key=jwk_set,
+ claims_cls=claims_cls,
+ claims_options=claims_options,
+ claims_params=claims_params,
+ )
+ except ValueError:
+ logger.info("Reloading JWKS after decode error")
+ jwk_set = await self.load_jwks(force=True) # try reloading the jwks
+ claims = jwt.decode(
+ token,
+ key=jwk_set,
+ claims_cls=claims_cls,
+ claims_options=claims_options,
+ claims_params=claims_params,
+ )
+
+ logger.debug("Decoded JWT (%s) %r; validating", claims_cls.__name__, claims)
+
+ claims.validate(
+ now=self._clock.time(), leeway=120
+ ) # allows 2 min of clock skew
+ return claims
+
async def _parse_id_token(self, token: Token, nonce: str) -> CodeIDToken:
"""Return an instance of UserInfo from token's ``id_token``.
@@ -673,7 +866,14 @@ class OidcProvider:
Returns:
The decoded claims in the ID token.
"""
+ id_token = token.get("id_token")
+
+ # This has theoretically been checked by the caller already; even though
+ # assertions are not enabled in production, it is mainly here to appease mypy
+ assert id_token is not None
+
metadata = await self.load_metadata()
+
claims_params = {
"nonce": nonce,
"client_id": self._client_auth.client_id,
@@ -683,39 +883,17 @@ class OidcProvider:
# in the `id_token` that we can check against.
claims_params["access_token"] = token["access_token"]
+ claims_options = {"iss": {"values": [metadata["issuer"]]}}
+
alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
- jwt = JsonWebToken(alg_values)
- claim_options = {"iss": {"values": [metadata["issuer"]]}}
-
- id_token = token["id_token"]
- logger.debug("Attempting to decode JWT id_token %r", id_token)
-
- # Try to decode the keys in cache first, then retry by forcing the keys
- # to be reloaded
- jwk_set = await self.load_jwks()
- try:
- claims = jwt.decode(
- id_token,
- key=jwk_set,
- claims_cls=CodeIDToken,
- claims_options=claim_options,
- claims_params=claims_params,
- )
- except ValueError:
- logger.info("Reloading JWKS after decode error")
- jwk_set = await self.load_jwks(force=True) # try reloading the jwks
- claims = jwt.decode(
- id_token,
- key=jwk_set,
- claims_cls=CodeIDToken,
- claims_options=claim_options,
- claims_params=claims_params,
- )
-
- logger.debug("Decoded id_token JWT %r; validating", claims)
-
- claims.validate(leeway=120) # allows 2 min of clock skew
+ claims = await self._verify_jwt(
+ alg_values=alg_values,
+ token=id_token,
+ claims_cls=CodeIDToken,
+ claims_options=claims_options,
+ claims_params=claims_params,
+ )
return claims
@@ -1036,6 +1214,146 @@ class OidcProvider:
# to be strings.
return str(remote_user_id)
+ async def handle_backchannel_logout(
+ self, request: SynapseRequest, logout_token: str
+ ) -> None:
+ """Handle an incoming request to /_synapse/client/oidc/backchannel_logout
+
+ The OIDC Provider posts a logout token to this endpoint when a user
+ session ends. That token is a JWT signed with the same keys as
+ ID tokens. The OpenID Connect Back-Channel Logout draft explains how to
+ validate the JWT and figure out what session to end.
+
+ Args:
+ request: The request to respond to
+ logout_token: The logout token (a JWT) extracted from the request body
+ """
+ # Back-Channel Logout can be disabled in the config, hence this check.
+ # This is not that important for now since Synapse is registered
+ # manually with the OP, so not specifying the backchannel-logout URI is
+ # as effective as disabling it here. It might make more sense if we
+ # support dynamic registration in Synapse at some point.
+ if not self._config.backchannel_logout_enabled:
+ logger.warning(
+ f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config"
+ )
+
+ # TODO: this responds with a 400 status code, which is what the OIDC
+ # Back-Channel Logout spec expects, but spec also suggests answering with
+ # a JSON object, with the `error` and `error_description` fields set, which
+ # we are not doing here.
+ # See https://openid.net/specs/openid-connect-backchannel-1_0.html#BCResponse
+ raise SynapseError(
+ 400, "OpenID Connect Back-Channel Logout is disabled for this provider"
+ )
+
+ metadata = await self.load_metadata()
+
+ # As per OIDC Back-Channel Logout 1.0 sec. 2.4:
+ # A Logout Token MUST be signed and MAY also be encrypted. The same
+ # keys are used to sign and encrypt Logout Tokens as are used for ID
+ # Tokens. If the Logout Token is encrypted, it SHOULD replicate the
+ # iss (issuer) claim in the JWT Header Parameters, as specified in
+ # Section 5.3 of [JWT].
+ alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
+
+ # As per sec. 2.6:
+ # 3. Validate the iss, aud, and iat Claims in the same way they are
+ # validated in ID Tokens.
+ # Which means the audience should contain Synapse's client_id and the
+ # issuer should be the IdP issuer
+ claims_options = {
+ "iss": {"values": [metadata["issuer"]]},
+ "aud": {"values": [self.client_id]},
+ }
+
+ try:
+ claims = await self._verify_jwt(
+ alg_values=alg_values,
+ token=logout_token,
+ claims_cls=LogoutToken,
+ claims_options=claims_options,
+ )
+ except JoseError:
+ logger.exception("Invalid logout_token")
+ raise SynapseError(400, "Invalid logout_token")
+
+ # As per sec. 2.6:
+ # 4. Verify that the Logout Token contains a sub Claim, a sid Claim,
+ # or both.
+ # 5. Verify that the Logout Token contains an events Claim whose
+ # value is JSON object containing the member name
+ # http://schemas.openid.net/event/backchannel-logout.
+ # 6. Verify that the Logout Token does not contain a nonce Claim.
+ # This is all verified by the LogoutToken claims class, so at this
+ # point the `sid` claim exists and is a string.
+ sid: str = claims.get("sid")
+
+ # If the `sub` claim was included in the logout token, we check that it
+ # matches the right user. There can be cases where the `sub` claim is not
+ # the ID saved in the database, so we let admins disable this check in config.
+ sub: Optional[str] = claims.get("sub")
+ expected_user_id: Optional[str] = None
+ if sub is not None and not self._config.backchannel_logout_ignore_sub:
+ expected_user_id = await self._store.get_user_by_external_id(
+ self.idp_id, sub
+ )
+
+ # Invalidate any running user-mapping sessions, in-flight login tokens and
+ # active devices
+ await self._sso_handler.revoke_sessions_for_provider_session_id(
+ auth_provider_id=self.idp_id,
+ auth_provider_session_id=sid,
+ expected_user_id=expected_user_id,
+ )
+
+ request.setResponseCode(200)
+ request.setHeader(b"Cache-Control", b"no-cache, no-store")
+ request.setHeader(b"Pragma", b"no-cache")
+ finish_request(request)
+
+
+class LogoutToken(JWTClaims):
+ """
+ Holds and verifies the claims of a logout token, as per
+ https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken
+ """
+
+ REGISTERED_CLAIMS = ["iss", "sub", "aud", "iat", "jti", "events", "sid"]
+
+ def validate(self, now: Optional[int] = None, leeway: int = 0) -> None:
+ """Validate everything in claims payload."""
+ super().validate(now, leeway)
+ self.validate_sid()
+ self.validate_events()
+ self.validate_nonce()
+
+ def validate_sid(self) -> None:
+ """Ensure the sid claim is present"""
+ sid = self.get("sid")
+ if not sid:
+ raise MissingClaimError("sid")
+
+ if not isinstance(sid, str):
+ raise InvalidClaimError("sid")
+
+ def validate_nonce(self) -> None:
+ """Ensure the nonce claim is absent"""
+ if "nonce" in self:
+ raise InvalidClaimError("nonce")
+
+ def validate_events(self) -> None:
+ """Ensure the events claim is present and with the right value"""
+ events = self.get("events")
+ if not events:
+ raise MissingClaimError("events")
+
+ if not isinstance(events, dict):
+ raise InvalidClaimError("events")
+
+ if "http://schemas.openid.net/event/backchannel-logout" not in events:
+ raise InvalidClaimError("events")
+
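# A rough sketch of exercising these validation rules in isolation, assuming
# authlib's JWTClaims(payload, header) constructor; dropping "events" or
# adding a "nonce" to the payload would raise a claim error instead.
claims = LogoutToken(
    {
        "iss": "https://server.example.com",
        "aud": "s6BhdRkqt3",
        "iat": 1471566154,
        "sid": "08a5019c-17e1-4977-8f42-65a12843ea02",
        "events": {"http://schemas.openid.net/event/backchannel-logout": {}},
    },
    {"alg": "RS256"},
)
claims.validate(now=1471566154, leeway=120)  # sid + events present, no nonce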
# number of seconds a newly-generated client secret should be valid for
CLIENT_SECRET_VALIDITY_SECONDS = 3600
@@ -1105,6 +1423,7 @@ class JwtClientSecret:
logger.info(
"Generating new JWT for %s: %s %s", self._oauth_issuer, header, payload
)
+ jwt = JsonWebToken(header["alg"])
self._cached_secret = jwt.encode(header, payload, self._key.key)
self._cached_secret_replacement_time = (
expires_at - CLIENT_SECRET_MIN_VALIDITY_SECONDS
@@ -1119,9 +1438,6 @@ class UserAttributeDict(TypedDict):
emails: List[str]
-C = TypeVar("C")
-
-
class OidcMappingProvider(Generic[C]):
"""A mapping provider maps a UserInfo object to user attributes.
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 2670e561d7..cf08737d11 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -201,7 +201,7 @@ class BasePresenceHandler(abc.ABC):
"""Get the current presence state for multiple users.
Returns:
- dict: `user_id` -> `UserPresenceState`
+ A mapping of `user_id` -> `UserPresenceState`
"""
states = {}
missing = []
@@ -256,7 +256,7 @@ class BasePresenceHandler(abc.ABC):
with the app.
"""
- async def update_external_syncs_row(
+ async def update_external_syncs_row( # noqa: B027 (no-op by design)
self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int
) -> None:
"""Update the syncing users for an external process as a delta.
@@ -272,7 +272,9 @@ class BasePresenceHandler(abc.ABC):
sync_time_msec: Time in ms when the user was last syncing
"""
- async def update_external_syncs_clear(self, process_id: str) -> None:
+ async def update_external_syncs_clear( # noqa: B027 (no-op by design)
+ self, process_id: str
+ ) -> None:
"""Marks all users that had been marked as syncing by a given process
as offline.
@@ -476,7 +478,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
return _NullContextManager()
prev_state = await self.current_state_for_user(user_id)
- if prev_state != PresenceState.BUSY:
+ if prev_state.state != PresenceState.BUSY:
# We set state here but pass ignore_status_msg = True as we don't want to
# cause the status message to be cleared.
# Note that this causes last_active_ts to be incremented which is not
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index d8ff5289b5..4bf9a047a3 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -307,7 +307,11 @@ class ProfileHandler:
if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
return True
- server_name, _, media_id = parse_and_validate_mxc_uri(mxc)
+ host, port, media_id = parse_and_validate_mxc_uri(mxc)
+ if port is not None:
+ server_name = host + ":" + str(port)
+ else:
+ server_name = host
if server_name == self.server_name:
media_info = await self.store.get_local_media(media_id)
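# A quick illustration of why the port must be re-attached: for a URI such as
# "mxc://example.com:8448/SomeMediaId" the parser returns host and port
# separately (values below are hypothetical).
host, port, media_id = "example.com", 8448, "SomeMediaId"
server_name = host if port is None else f"{host}:{port}"
assert server_name == "example.com:8448"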
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index 8fdb9df012..2c990aab0a 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tup
import attr
-from synapse.api.constants import RelationTypes
+from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
from synapse.logging.tracing import trace
@@ -75,6 +75,7 @@ class RelationsHandler:
self._clock = hs.get_clock()
self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
+ self._event_creation_handler = hs.get_event_creation_handler()
async def get_relations(
self,
@@ -205,6 +206,59 @@ class RelationsHandler:
return related_events, next_token
+ async def redact_events_related_to(
+ self,
+ requester: Requester,
+ event_id: str,
+ initial_redaction_event: EventBase,
+ relation_types: List[str],
+ ) -> None:
+ """Redacts all events related to the given event ID with one of the given
+ relation types.
+
+ This method is expected to be called when redacting the event referred to by
+ the given event ID.
+
+ If an event cannot be redacted (e.g. because of insufficient permissions), log
+ the error and try to redact the next one.
+
+ Args:
+ requester: The requester to redact events on behalf of.
+ event_id: The event ID to look for and redact relations of.
+ initial_redaction_event: The redaction for the event referred to by
+ event_id.
+ relation_types: The types of relations to look for.
+
+ Raises:
+ ShadowBanError if the requester is shadow-banned
+ """
+ related_event_ids = (
+ await self._main_store.get_all_relations_for_event_with_types(
+ event_id, relation_types
+ )
+ )
+
+ for related_event_id in related_event_ids:
+ try:
+ await self._event_creation_handler.create_and_send_nonmember_event(
+ requester,
+ {
+ "type": EventTypes.Redaction,
+ "content": initial_redaction_event.content,
+ "room_id": initial_redaction_event.room_id,
+ "sender": requester.user.to_string(),
+ "redacts": related_event_id,
+ },
+ ratelimit=False,
+ )
+ except SynapseError as e:
+ logger.warning(
+ "Failed to redact event %s (related to event %s): %s",
+ related_event_id,
+ event_id,
+ e.msg,
+ )
+
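# A hypothetical caller of the helper above: after sending a redaction, also
# redact the target's edits and thread replies (the relation types chosen
# here are illustrative).
async def redact_with_relations(relations_handler, requester, event_id, redaction_event):
    await relations_handler.redact_events_related_to(
        requester,
        event_id,
        initial_redaction_event=redaction_event,
        relation_types=[RelationTypes.REPLACE, RelationTypes.THREAD],
    )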
async def get_annotations_for_event(
self,
event_id: str,
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 638f54051a..6dcfd86fdf 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -49,7 +49,6 @@ from synapse.api.constants import (
from synapse.api.errors import (
AuthError,
Codes,
- HttpResponseException,
LimitExceededError,
NotFoundError,
StoreError,
@@ -60,7 +59,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_and_fixup_power_levels_contents
-from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin
@@ -559,7 +557,6 @@ class RoomCreationHandler:
invite_list=[],
initial_state=initial_state,
creation_content=creation_content,
- ratelimit=False,
)
# Transfer membership events
@@ -753,6 +750,10 @@ class RoomCreationHandler:
)
if ratelimit:
+ # Rate limit once in advance, but don't rate limit the individual
+ # events in the room — room creation isn't atomic and it's very
+ # janky if half the events in the initial state don't make it because
+ # of rate limiting.
await self.request_ratelimiter.ratelimit(requester)
room_version_id = config.get(
@@ -913,7 +914,6 @@ class RoomCreationHandler:
room_alias=room_alias,
power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
- ratelimit=ratelimit,
)
if "name" in config:
@@ -1037,7 +1037,6 @@ class RoomCreationHandler:
room_alias: Optional[RoomAlias] = None,
power_level_content_override: Optional[JsonDict] = None,
creator_join_profile: Optional[JsonDict] = None,
- ratelimit: bool = True,
) -> Tuple[int, str, int]:
"""Sends the initial events into a new room. Sends the room creation, membership,
and power level events into the room sequentially, then creates and batches up the
@@ -1046,6 +1045,8 @@ class RoomCreationHandler:
`power_level_content_override` doesn't apply when initial state has
power level state event content.
+ Rate limiting should already have been applied by this point.
+
Returns:
A tuple containing the stream ID, event ID and depth of the last
event sent to the room.
@@ -1055,9 +1056,6 @@ class RoomCreationHandler:
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
depth = 1
- # the last event sent/persisted to the db
- last_sent_event_id: Optional[str] = None
-
# the most recently created event
prev_event: List[str] = []
# a map of event types, state keys -> event_ids. We collect these mappings this as events are
@@ -1082,6 +1080,19 @@ class RoomCreationHandler:
for_batch: bool,
**kwargs: Any,
) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
+ """
+ Creates an event and associated event context.
+ Args:
+ etype: the type of event to be created
+ content: content of the event
+ for_batch: whether the event is being created for batch persisting. If
+ true, the event will be created using the given prev_event_ids, and an
+ event context will be built from the state_map and current_state_group
+ parameters, which must therefore be provided in that case. The resulting
+ event and context are suitable for being batched up and bulk persisted
+ to the database with other similarly created events.
+ """
nonlocal depth
nonlocal prev_event
@@ -1102,26 +1113,6 @@ class RoomCreationHandler:
return new_event, new_context
- async def send(
- event: EventBase,
- context: synapse.events.snapshot.EventContext,
- creator: Requester,
- ) -> int:
- nonlocal last_sent_event_id
-
- ev = await self.event_creation_handler.handle_new_client_event(
- requester=creator,
- events_and_context=[(event, context)],
- ratelimit=False,
- ignore_shadow_ban=True,
- )
-
- last_sent_event_id = ev.event_id
-
- # we know it was persisted, so must have a stream ordering
- assert ev.internal_metadata.stream_ordering
- return ev.internal_metadata.stream_ordering
-
try:
config = self._presets_dict[preset_config]
except KeyError:
@@ -1135,16 +1126,20 @@ class RoomCreationHandler:
)
logger.debug("Sending %s in new room", EventTypes.Member)
- await send(creation_event, creation_context, creator)
+ ev = await self.event_creation_handler.handle_new_client_event(
+ requester=creator,
+ events_and_context=[(creation_event, creation_context)],
+ ratelimit=False,
+ ignore_shadow_ban=True,
+ )
+ last_sent_event_id = ev.event_id
- # Room create event must exist at this point
- assert last_sent_event_id is not None
member_event_id, _ = await self.room_member_handler.update_membership(
creator,
creator.user,
room_id,
"join",
- ratelimit=ratelimit,
+ ratelimit=False,
content=creator_join_profile,
new_room=True,
prev_event_ids=[last_sent_event_id],
@@ -1157,15 +1152,24 @@ class RoomCreationHandler:
depth += 1
state_map[(EventTypes.Member, creator.user.to_string())] = member_event_id
+ # we need the state group of the membership event as it is the current state group
+ event_to_state = (
+ await self._storage_controllers.state.get_state_group_for_events(
+ [member_event_id]
+ )
+ )
+ current_state_group = event_to_state[member_event_id]
+
+ events_to_send = []
# We treat the power levels override specially as this needs to be one
# of the first events that get sent into a room.
pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None)
if pl_content is not None:
power_event, power_context = await create_event(
- EventTypes.PowerLevels, pl_content, False
+ EventTypes.PowerLevels, pl_content, True
)
current_state_group = power_context._state_group
- await send(power_event, power_context, creator)
+ events_to_send.append((power_event, power_context))
else:
power_level_content: JsonDict = {
"users": {creator_id: 100},
@@ -1211,12 +1215,11 @@ class RoomCreationHandler:
pl_event, pl_context = await create_event(
EventTypes.PowerLevels,
power_level_content,
- False,
+ True,
)
current_state_group = pl_context._state_group
- await send(pl_event, pl_context, creator)
+ events_to_send.append((pl_event, pl_context))
- events_to_send = []
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
room_alias_event, room_alias_context = await create_event(
EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True
@@ -1269,7 +1272,10 @@ class RoomCreationHandler:
events_to_send.append((encryption_event, encryption_context))
last_event = await self.event_creation_handler.handle_new_client_event(
- creator, events_to_send, ignore_shadow_ban=True
+ creator,
+ events_to_send,
+ ignore_shadow_ban=True,
+ ratelimit=False,
)
assert last_event.internal_metadata.stream_ordering is not None
return last_event.internal_metadata.stream_ordering, last_event.event_id, depth
@@ -1445,7 +1451,7 @@ class RoomContextHandler:
events_before=events_before,
event=event,
events_after=events_after,
- state=await filter_evts(state_events),
+ state=state_events,
aggregations=aggregations,
start=await token.copy_and_replace(
StreamKeyType.ROOM, results.start
@@ -1491,7 +1497,12 @@ class TimestampLookupHandler:
Raises:
SynapseError if unable to find any event locally in the given direction
"""
-
+ logger.debug(
+ "get_event_for_timestamp(room_id=%s, timestamp=%s, direction=%s) Finding closest event...",
+ room_id,
+ timestamp,
+ direction,
+ )
local_event_id = await self.store.get_event_id_for_timestamp(
room_id, timestamp, direction
)
@@ -1543,85 +1554,54 @@ class TimestampLookupHandler:
)
)
- # Loop through each homeserver candidate until we get a succesful response
- for domain in likely_domains:
- # We don't want to ask our own server for information we don't have
- if domain == self.server_name:
- continue
+ remote_response = await self.federation_client.timestamp_to_event(
+ destinations=likely_domains,
+ room_id=room_id,
+ timestamp=timestamp,
+ direction=direction,
+ )
+ if remote_response is not None:
+ logger.debug(
+ "get_event_for_timestamp: remote_response=%s",
+ remote_response,
+ )
- try:
- remote_response = await self.federation_client.timestamp_to_event(
- domain, room_id, timestamp, direction
- )
- logger.debug(
- "get_event_for_timestamp: response from domain(%s)=%s",
- domain,
- remote_response,
+ remote_event_id = remote_response.event_id
+ remote_origin_server_ts = remote_response.origin_server_ts
+
+ # Backfill this event so we can get a pagination token for
+ # it with `/context` and paginate `/messages` from this
+ # point.
+ pulled_pdu_info = await self.federation_event_handler.backfill_event_id(
+ likely_domains, room_id, remote_event_id
+ )
+ remote_event = pulled_pdu_info.pdu
+
+ # XXX: When we see that the remote server is not trustworthy,
+ # maybe we should not ask them first in the future.
+ if remote_origin_server_ts != remote_event.origin_server_ts:
+ logger.info(
+ "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
+ pulled_pdu_info.pull_origin,
+ remote_event_id,
+ remote_origin_server_ts,
+ remote_event.origin_server_ts,
)
- remote_event_id = remote_response.event_id
- remote_origin_server_ts = remote_response.origin_server_ts
-
- # Backfill this event so we can get a pagination token for
- # it with `/context` and paginate `/messages` from this
- # point.
- #
- # TODO: The requested timestamp may lie in a part of the
- # event graph that the remote server *also* didn't have,
- # in which case they will have returned another event
- # which may be nowhere near the requested timestamp. In
- # the future, we may need to reconcile that gap and ask
- # other homeservers, and/or extend `/timestamp_to_event`
- # to return events on *both* sides of the timestamp to
- # help reconcile the gap faster.
- remote_event = (
- await self.federation_event_handler.backfill_event_id(
- domain, room_id, remote_event_id
- )
- )
-
- # XXX: When we see that the remote server is not trustworthy,
- # maybe we should not ask them first in the future.
- if remote_origin_server_ts != remote_event.origin_server_ts:
- logger.info(
- "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
- domain,
- remote_event_id,
- remote_origin_server_ts,
- remote_event.origin_server_ts,
- )
-
- # Only return the remote event if it's closer than the local event
- if not local_event or (
- abs(remote_event.origin_server_ts - timestamp)
- < abs(local_event.origin_server_ts - timestamp)
- ):
- logger.info(
- "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
- remote_event_id,
- remote_event.origin_server_ts,
- timestamp,
- local_event.event_id if local_event else None,
- local_event.origin_server_ts if local_event else None,
- )
- return remote_event_id, remote_origin_server_ts
- except (HttpResponseException, InvalidResponseError) as ex:
- # Let's not put a high priority on some other homeserver
- # failing to respond or giving a random response
- logger.debug(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
- domain,
- type(ex).__name__,
- ex,
- ex.args,
- )
- except Exception:
- # But we do want to see some exceptions in our code
- logger.warning(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception",
- domain,
- exc_info=True,
+ # Only return the remote event if it's closer than the local event
+ if not local_event or (
+ abs(remote_event.origin_server_ts - timestamp)
+ < abs(local_event.origin_server_ts - timestamp)
+ ):
+ logger.info(
+ "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
+ remote_event_id,
+ remote_event.origin_server_ts,
+ timestamp,
+ local_event.event_id if local_event else None,
+ local_event.origin_server_ts if local_event else None,
)
+ return remote_event_id, remote_origin_server_ts
# To appease mypy, we have to add both of these conditions to check for
# `None`. We only expect `local_event` to be `None` when
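# The local-vs-remote tie-break above reduces to an absolute-distance
# comparison; a sketch of the rule (hypothetical helper):
from typing import Optional

def remote_wins(timestamp: int, remote_ts: int, local_ts: Optional[int]) -> bool:
    # Prefer the remote event only when there is no local candidate, or when
    # the remote one is strictly closer to the requested timestamp.
    if local_ts is None:
        return True
    return abs(remote_ts - timestamp) < abs(local_ts - timestamp)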
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py
index 9602f0d0bb..874860d461 100644
--- a/synapse/handlers/saml.py
+++ b/synapse/handlers/saml.py
@@ -441,7 +441,7 @@ class DefaultSamlMappingProvider:
client_redirect_url: where the client wants to redirect to
Returns:
- dict: A dict containing new user attributes. Possible keys:
+ A dict containing new user attributes. Possible keys:
* mxid_localpart (str): Required. The localpart of the user's mxid
* displayname (str): The displayname of the user
* emails (list[str]): Any emails for the user
@@ -483,7 +483,7 @@ class DefaultSamlMappingProvider:
Args:
config: A dictionary containing configuration options for this provider
Returns:
- SamlConfig: A custom config object for this module
+ A custom config object for this module
"""
# Parse config options and use defaults where necessary
mxid_source_attribute = config.get("mxid_source_attribute", "uid")
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 5943f08e91..749d7e93b0 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -191,6 +191,7 @@ class SsoHandler:
self._server_name = hs.hostname
self._registration_handler = hs.get_registration_handler()
self._auth_handler = hs.get_auth_handler()
+ self._device_handler = hs.get_device_handler()
self._error_template = hs.config.sso.sso_error_template
self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
self._profile_handler = hs.get_profile_handler()
@@ -1026,6 +1027,76 @@ class SsoHandler:
return True
+ async def revoke_sessions_for_provider_session_id(
+ self,
+ auth_provider_id: str,
+ auth_provider_session_id: str,
+ expected_user_id: Optional[str] = None,
+ ) -> None:
+ """Revoke any devices and in-flight logins tied to a provider session.
+
+ Args:
+ auth_provider_id: A unique identifier for this SSO provider, e.g.
+ "oidc" or "saml".
+ auth_provider_session_id: The session ID from the provider to log out
+ expected_user_id: The user we're expecting to log out. If set, sessions
+ belonging to other users are ignored and an error is logged.
+ """
+ # Invalidate any running user-mapping sessions
+ to_delete = []
+ for session_id, session in self._username_mapping_sessions.items():
+ if (
+ session.auth_provider_id == auth_provider_id
+ and session.auth_provider_session_id == auth_provider_session_id
+ ):
+ to_delete.append(session_id)
+
+ for session_id in to_delete:
+ logger.info("Revoking mapping session %s", session_id)
+ del self._username_mapping_sessions[session_id]
+
+ # Invalidate any in-flight login tokens
+ await self._store.invalidate_login_tokens_by_session_id(
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+
+ # Fetch any device(s) in the store associated with the session ID.
+ devices = await self._store.get_devices_by_auth_provider_session_id(
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+
+ # We have no guarantee that all the devices of that session are for the same
+ # `user_id`. Hence, we have to iterate over the list of devices and log them out
+ # one by one.
+ for device in devices:
+ user_id = device["user_id"]
+ device_id = device["device_id"]
+
+ # If the user_id associated with that device/session is not the one we got
+ # out of the `sub` claim, skip that device and log an error.
+ if expected_user_id is not None and user_id != expected_user_id:
+ logger.error(
+ "Received a logout notification from SSO provider "
+ f"{auth_provider_id!r} for the user {expected_user_id!r}, but with "
+ f"a session ID ({auth_provider_session_id!r}) which belongs to "
+ f"{user_id!r}. This may happen when the SSO provider user mapper "
+ "uses something else than the standard attribute as mapping ID. "
+ "For OIDC providers, set `backchannel_logout_ignore_sub` to `true` "
+ "in the provider config if that is the case."
+ )
+ continue
+
+ logger.info(
+ "Logging out %r (device %r) via SSO (%r) logout notification (session %r).",
+ user_id,
+ device_id,
+ auth_provider_id,
+ auth_provider_session_id,
+ )
+ await self._device_handler.delete_devices(user_id, [device_id])
+
def get_username_mapping_session_cookie_from_request(request: IRequest) -> str:
"""Extract the session ID from the cookie
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 05eb041ec7..c00f30518a 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -46,6 +46,7 @@ from synapse.logging.tracing import (
)
from synapse.push.clientformat import format_push_rules_for_user
from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
from synapse.storage.state import StateFilter
from synapse.types import (
@@ -810,18 +811,6 @@ class SyncHandler:
if canonical_alias and canonical_alias.content.get("alias"):
return summary
- me = sync_config.user.to_string()
-
- joined_user_ids = [
- r[0] for r in details.get(Membership.JOIN, empty_ms).members if r[0] != me
- ]
- invited_user_ids = [
- r[0] for r in details.get(Membership.INVITE, empty_ms).members if r[0] != me
- ]
- gone_user_ids = [
- r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
- ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]
-
# FIXME: only build up a member_ids list for our heroes
member_ids = {}
for membership in (
@@ -833,11 +822,8 @@ class SyncHandler:
for user_id, event_id in details.get(membership, empty_ms).members:
member_ids[user_id] = event_id
- # FIXME: order by stream ordering rather than as returned by SQL
- if joined_user_ids or invited_user_ids:
- summary["m.heroes"] = sorted(joined_user_ids + invited_user_ids)[0:5]
- else:
- summary["m.heroes"] = sorted(gone_user_ids)[0:5]
+ me = sync_config.user.to_string()
+ summary["m.heroes"] = extract_heroes_from_room_summary(details, me)
if not sync_config.filter_collection.lazy_load_members():
return summary
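# The hero-selection rule the new helper centralizes, as previously
# open-coded here (simplified sketch; the real implementation is
# synapse.storage.databases.main.roommember.extract_heroes_from_room_summary):
from typing import List

def pick_heroes(joined: List[str], invited: List[str], gone: List[str], me: str) -> List[str]:
    # Prefer joined/invited members (excluding ourselves); fall back to users
    # who left or were banned only if nobody else is around.
    candidates = [u for u in joined + invited if u != me]
    if not candidates:
        candidates = [u for u in gone if u != me]
    return sorted(candidates)[:5]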
diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py
index 6a9f6635d2..8729630581 100644
--- a/synapse/http/additional_resource.py
+++ b/synapse/http/additional_resource.py
@@ -45,8 +45,7 @@ class AdditionalResource(DirectServeJsonResource):
Args:
hs: homeserver
- handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred):
- function to be called to handle the request.
+ handler: function to be called to handle the request.
"""
super().__init__()
self._handler = handler
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 89bd403312..a7f93a2989 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -25,7 +25,6 @@ from typing import (
List,
Mapping,
Optional,
- Sequence,
Tuple,
Union,
)
@@ -96,14 +95,29 @@ incoming_responses_counter = Counter(
"synapse_http_client_responses", "", ["method", "code"]
)
-# the type of the headers list, to be passed to the t.w.h.Headers.
-# Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so
-# we simplify.
+# the type of the headers map, to be passed to the t.w.h.Headers.
+#
+# The actual type accepted by Twisted is
+# Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
+# allowing us to mix and match str and bytes freely. However: any str is also a
+# Sequence[str]; a header value passed as a standalone str is therefore
+# interpreted as a sequence of 1-codepoint strings. This is a disastrous footgun.
+# We use a narrower value type (RawHeaderValue) to avoid this footgun.
+#
+# We also simplify the keys to be either all str or all bytes. This helps because
+# Dict[K, V] is invariant in K (and indeed V).
RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]]
# the value actually has to be a List, but List is invariant so we can't specify that
# the entries can either be Lists or bytes.
-RawHeaderValue = Sequence[Union[str, bytes]]
+RawHeaderValue = Union[
+ List[str],
+ List[bytes],
+ List[Union[str, bytes]],
+ Tuple[str, ...],
+ Tuple[bytes, ...],
+ Tuple[Union[str, bytes], ...],
+]
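# The footgun the narrower RawHeaderValue type guards against: a bare str is
# itself a Sequence[str], so it would be read as many one-character values.
# A minimal illustration with Twisted's Headers:
from twisted.web.http_headers import Headers

headers = Headers({"User-Agent": ["synapse-sketch/0.1"]})  # correct: list-wrapped
# Headers({"User-Agent": "synapse-sketch/0.1"}) would instead be interpreted
# as the values "s", "y", "n", ..., hence the List/Tuple-only union above.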
def check_against_blacklist(
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 2f0177f1e2..0359231e7d 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -155,11 +155,10 @@ class MatrixFederationAgent:
a file for a file upload). Or None if the request is to have
no body.
Returns:
- Deferred[twisted.web.iweb.IResponse]:
- fires when the header of the response has been received (regardless of the
- response status code). Fails if there is any problem which prevents that
- response from being received (including problems that prevent the request
- from being sent).
+ A deferred which fires when the header of the response has been received
+ (regardless of the response status code). Fails if there is any problem
+ which prevents that response from being received (including problems that
+ prevent the request from being sent).
"""
# We use urlparse as that will set `port` to None if there is no
# explicit port.
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 00704a6a7c..c35a5a12d5 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -957,8 +957,7 @@ class MatrixFederationHttpClient:
args: query params
Returns:
- dict|list: Succeeds when we get a 2xx HTTP response. The
- result will be the decoded JSON body.
+ Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index 1f8227896f..18899bc6d1 100644
--- a/synapse/http/proxyagent.py
+++ b/synapse/http/proxyagent.py
@@ -34,7 +34,7 @@ from twisted.web.client import (
)
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
+from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse
from synapse.http import redact_uri
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
@@ -134,7 +134,7 @@ class ProxyAgent(_AgentBase):
uri: bytes,
headers: Optional[Headers] = None,
bodyProducer: Optional[IBodyProducer] = None,
- ) -> defer.Deferred:
+ ) -> "defer.Deferred[IResponse]":
"""
Issue a request to the server indicated by the given uri.
@@ -157,17 +157,17 @@ class ProxyAgent(_AgentBase):
a file upload). Or, None if the request is to have no body.
Returns:
- Deferred[IResponse]: completes when the header of the response has
- been received (regardless of the response status code).
+ A deferred which completes when the header of the response has
+ been received (regardless of the response status code).
- Can fail with:
- SchemeNotSupported: if the uri is not http or https
+ Can fail with:
+ SchemeNotSupported: if the uri is not http or https
- twisted.internet.error.TimeoutError if the server we are connecting
- to (proxy or destination) does not accept a connection before
- connectTimeout.
+ twisted.internet.error.TimeoutError if the server we are connecting
+ to (proxy or destination) does not accept a connection before
+ connectTimeout.
- ... other things too.
+ ... other things too.
"""
uri = uri.strip()
if not _VALID_URI.match(uri):
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 19ac0b9dc5..69e7147a2d 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -267,7 +267,7 @@ class HttpServer(Protocol):
request. The first argument will be the request object and
subsequent arguments will be any matched groups from the regex.
This should return either a tuple of (code, response), or None.
- servlet_classname (str): The name of the handler to be used in prometheus
+ servlet_classname: The name of the handler to be used in prometheus
and tracing logs.
"""
diff --git a/synapse/http/site.py b/synapse/http/site.py
index ab34436146..2de618e46a 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -408,7 +408,7 @@ class SynapseRequest(Request):
be sure to call finished_processing.
Args:
- servlet_name (str): the name of the servlet which will be
+ servlet_name: the name of the servlet which will be
processing this request. This is used in the metrics.
It is possible to update this afterwards by updating
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 9bbe07f21d..56243aa5e7 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -116,8 +116,7 @@ class ContextResourceUsage:
"""Create a new ContextResourceUsage
Args:
- copy_from (ContextResourceUsage|None): if not None, an object to
- copy stats from
+ copy_from: if not None, an object to copy stats from
"""
if copy_from is None:
self.reset()
@@ -161,7 +160,7 @@ class ContextResourceUsage:
"""Add another ContextResourceUsage's stats to this one's.
Args:
- other (ContextResourceUsage): the other resource usage object
+ other: the other resource usage object
"""
self.ru_utime += other.ru_utime
self.ru_stime += other.ru_stime
@@ -335,7 +334,7 @@ class LoggingContext:
called directly.
Returns:
- LoggingContext: the current logging context
+ The current logging context
"""
warnings.warn(
"synapse.logging.context.LoggingContext.current_context() is deprecated "
@@ -355,7 +354,8 @@ class LoggingContext:
called directly.
Args:
- context(LoggingContext): The context to activate.
+ context: The context to activate.
+
Returns:
The context that was previously active
"""
@@ -467,8 +467,7 @@ class LoggingContext:
"""Get resources used by this logcontext so far.
Returns:
- ContextResourceUsage: a *copy* of the object tracking resource
- usage so far
+ A *copy* of the object tracking resource usage so far
"""
# we always return a copy, for consistency
res = self._resource_usage.copy()
@@ -656,7 +655,8 @@ def current_context() -> LoggingContextOrSentinel:
def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
"""Set the current logging context in thread local storage
Args:
- context(LoggingContext): The context to activate.
+ context: The context to activate.
+
Returns:
The context that was previously active
"""
@@ -693,7 +693,7 @@ def nested_logging_context(suffix: str) -> LoggingContext:
suffix: suffix to add to the parent context's 'name'.
Returns:
- LoggingContext: new logging context.
+ A new logging context.
"""
curr_context = current_context()
if not curr_context:
@@ -891,20 +891,19 @@ def defer_to_thread(
on it.
Args:
- reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
- the Deferred will be invoked, and whose threadpool we should use for the
- function.
+ reactor: The reactor in whose main thread the Deferred will be invoked,
+ and whose threadpool we should use for the function.
Normally this will be hs.get_reactor().
- f (callable): The function to call.
+ f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
- Deferred: A Deferred which fires a callback with the result of `f`, or an
+ A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
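A typical call site, as a sketch (`blocking_io` and its arguments are stand-in names, not from this patch):

    # Runs blocking_io in the reactor's default threadpool; the returned
    # Deferred fires back on the reactor thread and can be awaited.
    result = await defer_to_thread(hs.get_reactor(), blocking_io, path, mode="rb")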
@@ -932,20 +931,20 @@ def defer_to_threadpool(
on it.
Args:
- reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
- the Deferred will be invoked. Normally this will be hs.get_reactor().
+ reactor: The reactor in whose main thread the Deferred will be invoked.
+ Normally this will be hs.get_reactor().
- threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
- running `f`. Normally this will be hs.get_reactor().getThreadPool().
+ threadpool: The threadpool to use for running `f`. Normally this will be
+ hs.get_reactor().getThreadPool().
- f (callable): The function to call.
+ f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
- Deferred: A Deferred which fires a callback with the result of `f`, or an
+ A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
curr_context = current_context()
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index ad639fe223..1de06c3850 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -174,8 +174,10 @@ class _BackgroundProcess:
diff = new_stats - self._reported_stats
self._reported_stats = new_stats
- _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
- _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
+ # For unknown reasons, the difference in times can be negative. See comment in
+ # synapse.http.request_metrics.RequestMetrics.update_metrics.
+ _background_process_ru_utime.labels(self.desc).inc(max(diff.ru_utime, 0))
+ _background_process_ru_stime.labels(self.desc).inc(max(diff.ru_stime, 0))
_background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)
_background_process_db_txn_duration.labels(self.desc).inc(
diff.db_txn_duration_sec
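The clamping above is needed because Prometheus counters are monotonic, and the Python client enforces this at call time. A minimal illustration (assuming prometheus_client is installed):

    from prometheus_client import Counter

    c = Counter("example_total", "demo counter")
    c.inc(0.5)   # fine
    c.inc(-0.5)  # raises ValueError: counters can only be incremented
                 # by non-negative amounts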
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 6a6ae208d1..1adc1fd64f 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -771,50 +771,11 @@ class ModuleApi:
auth_provider_session_id: The session ID obtained during login from the SSO IdP,
if any.
"""
- # The deprecated `generate_short_term_login_token` method defaulted to an empty
- # string for the `auth_provider_id` because of how the underlying macaroon was
- # generated. This will change to a proper NULL-able field when the tokens get
- # moved to the database.
- return self._hs.get_macaroon_generator().generate_short_term_login_token(
+ return await self._hs.get_auth_handler().create_login_token_for_user_id(
user_id,
- auth_provider_id or "",
- auth_provider_session_id,
duration_in_ms,
- )
-
- def generate_short_term_login_token(
- self,
- user_id: str,
- duration_in_ms: int = (2 * 60 * 1000),
- auth_provider_id: str = "",
- auth_provider_session_id: Optional[str] = None,
- ) -> str:
- """Generate a login token suitable for m.login.token authentication
-
- Added in Synapse v1.9.0.
-
- This was deprecated in Synapse v1.69.0 in favor of create_login_token, and will
- be removed in Synapse 1.71.0.
-
- Args:
- user_id: gives the ID of the user that the token is for
-
- duration_in_ms: the time that the token will be valid for
-
- auth_provider_id: the ID of the SSO IdP that the user used to authenticate
- to get this token, if any. This is encoded in the token so that
- /login can report stats on number of successful logins by IdP.
- """
- logger.warn(
- "A module configured on this server uses ModuleApi.generate_short_term_login_token(), "
- "which is deprecated in favor of ModuleApi.create_login_token(), and will be removed in "
- "Synapse 1.71.0",
- )
- return self._hs.get_macaroon_generator().generate_short_term_login_token(
- user_id,
auth_provider_id,
auth_provider_session_id,
- duration_in_ms,
)
@defer.inlineCallbacks
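From a module's perspective the public API is unchanged by this swap; a hypothetical caller looks something like the following (the IdP ID is a made-up example, and the signature is as documented above):

    # Inside a module holding a ModuleApi instance `api`:
    token = await api.create_login_token(
        "@alice:example.org",
        duration_in_ms=2 * 60 * 1000,
        auth_provider_id="oidc-example",
    )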
@@ -826,7 +787,7 @@ class ModuleApi:
Added in Synapse v0.25.0.
Args:
- access_token(str): access token
+ access_token: access token
Returns:
twisted.internet.defer.Deferred - resolves once the access token
@@ -871,7 +832,7 @@ class ModuleApi:
**kwargs: named args to be passed to func
Returns:
- Deferred[object]: result of func
+ Result of func
"""
# type-ignore: See https://github.com/python/mypy/issues/8862
return defer.ensureDeferred(
@@ -963,8 +924,7 @@ class ModuleApi:
to represent 'any') of the room state to acquire.
Returns:
- twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
- The filtered state events in the room.
+ The filtered state events in the room.
"""
state_ids = yield defer.ensureDeferred(
self._storage_controllers.state.get_current_state_ids(
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index a75386f6a0..75b7e126ca 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -45,7 +45,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-
push_rules_invalidation_counter = Counter(
"synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", ""
)
@@ -107,6 +106,8 @@ class BulkPushRuleEvaluator:
self.clock = hs.get_clock()
self._event_auth_handler = hs.get_event_auth_handler()
+ self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled
+
self.room_push_rule_cache_metrics = register_cache(
"cache",
"room_push_rule_cache",
@@ -165,8 +166,21 @@ class BulkPushRuleEvaluator:
return rules_by_user
async def _get_power_levels_and_sender_level(
- self, event: EventBase, context: EventContext
+ self,
+ event: EventBase,
+ context: EventContext,
+ event_id_to_event: Mapping[str, EventBase],
) -> Tuple[dict, Optional[int]]:
+ """
+ Given an event and an event context, get the power level event relevant to the event
+ and the power level of the sender of the event.
+ Args:
+ event: event to check
+ context: context of event to check
+ event_id_to_event: a mapping of event_id to event for a set of events being
+ batch persisted. This is needed as the sought-after power level event may
+ be in this batch rather than the DB
+ """
# There are no power levels and sender levels possible to get from outlier
if event.internal_metadata.is_outlier():
return {}, None
@@ -177,15 +191,26 @@ class BulkPushRuleEvaluator:
)
pl_event_id = prev_state_ids.get(POWER_KEY)
+ # fastpath: if there's a power level event, that's all we need, and
+ # not having a power level event is an extreme edge case
if pl_event_id:
- # fastpath: if there's a power level event, that's all we need, and
- # not having a power level event is an extreme edge case
- auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
+ # Get the power level event from the batch, or fall back to the database.
+ pl_event = event_id_to_event.get(pl_event_id)
+ if pl_event:
+ auth_events = {POWER_KEY: pl_event}
+ else:
+ auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
else:
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=False
)
auth_events_dict = await self.store.get_events(auth_events_ids)
+ # Some needed auth events might be in the batch, combine them with those
+ # fetched from the database.
+ for auth_event_id in auth_events_ids:
+ auth_event = event_id_to_event.get(auth_event_id)
+ if auth_event:
+ auth_events_dict[auth_event_id] = auth_event
auth_events = {(e.type, e.state_key): e for e in auth_events_dict.values()}
sender_level = get_user_power_level(event.sender, auth_events)
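The lookup strategy in this hunk is "batch first, then database", since a batch of events may reference sibling events that have no database row yet. Schematically (a sketch, not verbatim from the patch):

    # Build a lookup map once per batch, then prefer it over the DB.
    event_id_to_event = {e.event_id: e for e, _ctx in events_and_context}
    pl_event = event_id_to_event.get(pl_event_id)
    if pl_event is None:
        pl_event = await store.get_event(pl_event_id)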
@@ -194,16 +219,80 @@ class BulkPushRuleEvaluator:
return pl_event.content if pl_event else {}, sender_level
- @measure_func("action_for_event_by_user")
- async def action_for_event_by_user(
- self, event: EventBase, context: EventContext
- ) -> None:
- """Given an event and context, evaluate the push rules, check if the message
- should increment the unread count, and insert the results into the
- event_push_actions_staging table.
+ async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]:
+ """Fetches the related events for 'event'. Sets the im.vector.is_falling_back key if the event is from a fallback relation
+
+ Returns:
+ Mapping of relation type to flattened events.
"""
- if not event.internal_metadata.is_notifiable():
- # Push rules for events that aren't notifiable can't be processed by this
+ related_events: Dict[str, Dict[str, str]] = {}
+ if self._related_event_match_enabled:
+ related_event_id = event.content.get("m.relates_to", {}).get("event_id")
+ relation_type = event.content.get("m.relates_to", {}).get("rel_type")
+ if related_event_id is not None and relation_type is not None:
+ related_event = await self.store.get_event(
+ related_event_id, allow_none=True
+ )
+ if related_event is not None:
+ related_events[relation_type] = _flatten_dict(related_event)
+
+ reply_event_id = (
+ event.content.get("m.relates_to", {})
+ .get("m.in_reply_to", {})
+ .get("event_id")
+ )
+
+ # convert replies to pseudo relations
+ if reply_event_id is not None:
+ related_event = await self.store.get_event(
+ reply_event_id, allow_none=True
+ )
+
+ if related_event is not None:
+ related_events["m.in_reply_to"] = _flatten_dict(related_event)
+
+ # indicate that this is from a fallback relation.
+ if relation_type == "m.thread" and event.content.get(
+ "m.relates_to", {}
+ ).get("is_falling_back", False):
+ related_events["m.in_reply_to"][
+ "im.vector.is_falling_back"
+ ] = ""
+
+ return related_events
+
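For orientation, the returned mapping for a threaded message sent with the reply fallback might look like this (values are illustrative; the flattened keys come from _flatten_dict):

    related_events = {
        "m.thread": {"type": "m.room.message", "content.body": "original text"},
        "m.in_reply_to": {
            "type": "m.room.message",
            "content.body": "original text",
            "im.vector.is_falling_back": "",
        },
    }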
+ async def action_for_events_by_user(
+ self, events_and_context: List[Tuple[EventBase, EventContext]]
+ ) -> None:
+ """Given a list of events and their associated contexts, evaluate the push rules
+ for each event, check if the message should increment the unread count, and
+ insert the results into the event_push_actions_staging table.
+ """
+ # For batched events the power level events may not have been persisted yet,
+ # so we pass the batch through: if an event cannot be found in the database,
+ # it can then be looked up in the batch.
+ event_id_to_event = {e.event_id: e for e, _ in events_and_context}
+ for event, context in events_and_context:
+ await self._action_for_event_by_user(event, context, event_id_to_event)
+
+ @measure_func("action_for_event_by_user")
+ async def _action_for_event_by_user(
+ self,
+ event: EventBase,
+ context: EventContext,
+ event_id_to_event: Mapping[str, EventBase],
+ ) -> None:
+
+ if (
+ not event.internal_metadata.is_notifiable()
+ or event.internal_metadata.is_historical()
+ ):
+ # Push rules for events that aren't notifiable can't be processed here, and
+ # we want to skip push notification actions for historical messages
+ # because we don't want to notify people about old history back in time.
+ # Historical messages also lack the proper `context.current_state_ids`
+ # and `state_groups` because they have `prev_events` that aren't persisted yet
+ # (historical messages are persisted in reverse-chronological order).
return
# Disable counting as unread unless the experimental configuration is
@@ -223,7 +312,9 @@ class BulkPushRuleEvaluator:
(
power_levels,
sender_power_level,
- ) = await self._get_power_levels_and_sender_level(event, context)
+ ) = await self._get_power_levels_and_sender_level(
+ event, context, event_id_to_event
+ )
# Find the event's thread ID.
relation = relation_from_event(event)
@@ -238,6 +329,8 @@ class BulkPushRuleEvaluator:
# the parent is part of a thread.
thread_id = await self.store.get_thread_id(relation.parent_id)
+ related_events = await self._related_events(event)
+
# It's possible that old room versions have non-integer power levels (floats or
# strings). Workaround this by explicitly converting to int.
notification_levels = power_levels.get("notifications", {})
@@ -250,6 +343,8 @@ class BulkPushRuleEvaluator:
room_member_count,
sender_power_level,
notification_levels,
+ related_events,
+ self._related_event_match_enabled,
)
users = rules_by_user.keys()
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index 7095ae83f9..622a1e35c5 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -44,6 +44,12 @@ def format_push_rules_for_user(
rulearray.append(template_rule)
+ pattern_type = template_rule.pop("pattern_type", None)
+ if pattern_type == "user_id":
+ template_rule["pattern"] = user.to_string()
+ elif pattern_type == "user_localpart":
+ template_rule["pattern"] = user.localpart
+
template_rule["enabled"] = enabled
if "conditions" not in template_rule:
@@ -93,10 +99,14 @@ def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]:
if len(rule.conditions) != 1:
return None
thecond = rule.conditions[0]
- if "pattern" not in thecond:
- return None
+
templaterule = {"actions": rule.actions}
- templaterule["pattern"] = thecond["pattern"]
+ if "pattern" in thecond:
+ templaterule["pattern"] = thecond["pattern"]
+ elif "pattern_type" in thecond:
+ templaterule["pattern_type"] = thecond["pattern_type"]
+ else:
+ return None
else:
# This should not be reached unless this function is not kept in sync
# with PRIORITY_CLASS_INVERSE_MAP.
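The net effect of the pattern_type handling is that one stored rule renders differently per user. A worked example with a hypothetical rule:

    # Stored, user-agnostic form of the condition:
    stored = {"kind": "event_match", "key": "content.body",
              "pattern_type": "user_localpart"}
    # Rendered for @alice:example.org it becomes:
    rendered = {"kind": "event_match", "key": "content.body",
                "pattern": "alice"}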
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index aff7976165..3f012f1b5e 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -153,7 +153,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
argument list.
Returns:
- dict: If POST/PUT request then dictionary must be JSON serialisable,
+ If this is a POST/PUT request, the dictionary must be JSON serialisable;
otherwise must be appropriate for adding as query args.
"""
return {}
@@ -184,8 +184,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
client = hs.get_simple_http_client()
local_instance_name = hs.get_instance_name()
+ # The values of these options should match the replication listener settings.
master_host = hs.config.worker.worker_replication_host
master_port = hs.config.worker.worker_replication_http_port
+ master_tls = hs.config.worker.worker_replication_http_tls
instance_map = hs.config.worker.instance_map
@@ -205,9 +207,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
if instance_name == "master":
host = master_host
port = master_port
+ tls = master_tls
elif instance_name in instance_map:
host = instance_map[instance_name].host
port = instance_map[instance_name].port
+ tls = instance_map[instance_name].tls
else:
raise Exception(
"Instance %r not in 'instance_map' config" % (instance_name,)
@@ -238,7 +242,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
"Unknown METHOD on %s replication endpoint" % (cls.NAME,)
)
- uri = "http://%s:%s/_synapse/replication/%s/%s" % (
+ # The scheme is hard-coded to http by default, or https when the replication
+ # port is configured with TLS enabled.
+ scheme = "https" if tls else "http"
+ uri = "%s://%s:%s/_synapse/replication/%s/%s" % (
+ scheme,
host,
port,
cls.NAME,
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index 3d63645726..c21629def8 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request
from synapse.http.server import HttpServer
+from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict
@@ -78,5 +79,71 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
return 200, user_devices
+class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
+ """Ask master to upload keys for the user and send them out over federation to
+ update other servers.
+
+ For now, only the master is permitted to handle key upload requests;
+ any worker can handle key query requests (since they're read-only).
+
+ Calls e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on
+ the main process to accomplish this.
+
+ Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
+ Request format (borrowed and expanded from KeyUploadServlet):
+
+ POST /_synapse/replication/upload_keys_for_user
+
+ {
+ "user_id": "",
+ "device_id": "",
+ "keys": {
+ ....this part can be found in KeyUploadServlet in rest/client/keys.py....
+ }
+ }
+
+ Response is equivalent to `/_matrix/client/v3/keys/upload` found in KeyUploadServlet.
+
+ """
+
+ NAME = "upload_keys_for_user"
+ PATH_ARGS = ()
+ CACHE = False
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
+ self.e2e_keys_handler = hs.get_e2e_keys_handler()
+ self.store = hs.get_datastores().main
+ self.clock = hs.get_clock()
+
+ @staticmethod
+ async def _serialize_payload( # type: ignore[override]
+ user_id: str, device_id: str, keys: JsonDict
+ ) -> JsonDict:
+
+ return {
+ "user_id": user_id,
+ "device_id": device_id,
+ "keys": keys,
+ }
+
+ async def _handle_request( # type: ignore[override]
+ self, request: Request
+ ) -> Tuple[int, JsonDict]:
+ content = parse_json_object_from_request(request)
+
+ user_id = content["user_id"]
+ device_id = content["device_id"]
+ keys = content["keys"]
+
+ results = await self.e2e_keys_handler.upload_keys_for_user(
+ user_id, device_id, keys
+ )
+
+ return 200, results
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
+ ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
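A worker would reach the new endpoint through the standard make_client helper on ReplicationEndpoint, along these lines (a sketch; variable names are assumptions):

    # Build the client once...
    upload_keys = ReplicationUploadKeysForUserRestServlet.make_client(hs)
    # ...then invoke it with the kwargs that _serialize_payload expects.
    result = await upload_keys(user_id=user_id, device_id=device_id, keys=keys)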
diff --git a/synapse/replication/slave/__init__.py b/synapse/replication/slave/__init__.py
deleted file mode 100644
index f43a360a80..0000000000
--- a/synapse/replication/slave/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/replication/slave/storage/__init__.py b/synapse/replication/slave/storage/__init__.py
deleted file mode 100644
index f43a360a80..0000000000
--- a/synapse/replication/slave/storage/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py
deleted file mode 100644
index 8f3f953ed4..0000000000
--- a/synapse/replication/slave/storage/_slaved_id_tracker.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import List, Optional, Tuple
-
-from synapse.storage.database import LoggingDatabaseConnection
-from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id
-
-
-class SlavedIdTracker(AbstractStreamIdTracker):
- """Tracks the "current" stream ID of a stream with a single writer.
-
- See `AbstractStreamIdTracker` for more details.
-
- Note that this class does not work correctly when there are multiple
- writers.
- """
-
- def __init__(
- self,
- db_conn: LoggingDatabaseConnection,
- table: str,
- column: str,
- extra_tables: Optional[List[Tuple[str, str]]] = None,
- step: int = 1,
- ):
- self.step = step
- self._current = _load_current_id(db_conn, table, column, step)
- if extra_tables:
- for table, column in extra_tables:
- self.advance(None, _load_current_id(db_conn, table, column))
-
- def advance(self, instance_name: Optional[str], new_id: int) -> None:
- self._current = (max if self.step > 0 else min)(self._current, new_id)
-
- def get_current_token(self) -> int:
- return self._current
-
- def get_current_token_for_writer(self, instance_name: str) -> int:
- return self.get_current_token()
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
deleted file mode 100644
index 6fcade510a..0000000000
--- a/synapse/replication/slave/storage/devices.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import TYPE_CHECKING, Any, Iterable
-
-from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
-from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.devices import DeviceWorkerStore
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-
-class SlavedDeviceStore(DeviceWorkerStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- self.hs = hs
-
- self._device_list_id_gen = SlavedIdTracker(
- db_conn,
- "device_lists_stream",
- "stream_id",
- extra_tables=[
- ("user_signature_stream", "stream_id"),
- ("device_lists_outbound_pokes", "stream_id"),
- ("device_lists_changes_in_room", "stream_id"),
- ],
- )
-
- super().__init__(database, db_conn, hs)
-
- def get_device_stream_token(self) -> int:
- return self._device_list_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == DeviceListsStream.NAME:
- self._device_list_id_gen.advance(instance_name, token)
- self._invalidate_caches_for_devices(token, rows)
- elif stream_name == UserSignatureStream.NAME:
- self._device_list_id_gen.advance(instance_name, token)
- for row in rows:
- self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
-
- def _invalidate_caches_for_devices(
- self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow]
- ) -> None:
- for row in rows:
- # The entities are either user IDs (starting with '@') whose devices
- # have changed, or remote servers that we need to tell about
- # changes.
- if row.entity.startswith("@"):
- self._device_list_stream_cache.entity_has_changed(row.entity, token)
- self.get_cached_devices_for_user.invalidate((row.entity,))
- self._get_cached_user_device.invalidate((row.entity,))
- self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
-
- else:
- self._device_list_federation_stream_cache.entity_has_changed(
- row.entity, token
- )
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
deleted file mode 100644
index fe47778cb1..0000000000
--- a/synapse/replication/slave/storage/events.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-from typing import TYPE_CHECKING
-
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
-from synapse.storage.databases.main.event_push_actions import (
- EventPushActionsWorkerStore,
-)
-from synapse.storage.databases.main.events_worker import EventsWorkerStore
-from synapse.storage.databases.main.relations import RelationsWorkerStore
-from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
-from synapse.storage.databases.main.signatures import SignatureWorkerStore
-from synapse.storage.databases.main.state import StateGroupWorkerStore
-from synapse.storage.databases.main.stream import StreamWorkerStore
-from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
-from synapse.util.caches.stream_change_cache import StreamChangeCache
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-# So, um, we want to borrow a load of functions intended for reading from
-# a DataStore, but we don't want to take functions that either write to the
-# DataStore or are cached and don't have cache invalidation logic.
-#
-# Rather than write duplicate versions of those functions, or lift them to
-# a common base class, we going to grab the underlying __func__ object from
-# the method descriptor on the DataStore and chuck them into our class.
-
-
-class SlavedEventStore(
- EventFederationWorkerStore,
- RoomMemberWorkerStore,
- EventPushActionsWorkerStore,
- StreamWorkerStore,
- StateGroupWorkerStore,
- SignatureWorkerStore,
- EventsWorkerStore,
- UserErasureWorkerStore,
- RelationsWorkerStore,
-):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
-
- events_max = self._stream_id_gen.get_current_token()
- curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
- db_conn,
- "current_state_delta_stream",
- entity_column="room_id",
- stream_column="stream_id",
- max_value=events_max, # As we share the stream id with events token
- limit=1000,
- )
- self._curr_state_delta_stream_cache = StreamChangeCache(
- "_curr_state_delta_stream_cache",
- min_curr_state_delta_id,
- prefilled_cache=curr_state_delta_prefill,
- )
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
deleted file mode 100644
index a00b38c512..0000000000
--- a/synapse/replication/slave/storage/keys.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from synapse.storage.databases.main.keys import KeyStore
-
-# KeyStore isn't really safe to use from a worker, but for now we do so and hope that
-# the races it creates aren't too bad.
-
-SlavedKeyStore = KeyStore
diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py
deleted file mode 100644
index 5e65eaf1e0..0000000000
--- a/synapse/replication/slave/storage/push_rule.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Iterable
-
-from synapse.replication.tcp.streams import PushRulesStream
-from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
-
-from .events import SlavedEventStore
-
-
-class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore):
- def get_max_push_rules_stream_id(self) -> int:
- return self._push_rules_stream_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == PushRulesStream.NAME:
- self._push_rules_stream_id_gen.advance(instance_name, token)
- for row in rows:
- self.get_push_rules_for_user.invalidate((row.user_id,))
- self.push_rules_stream_cache.entity_has_changed(row.user_id, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py
deleted file mode 100644
index 44ed20e424..0000000000
--- a/synapse/replication/slave/storage/pushers.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import TYPE_CHECKING, Any, Iterable
-
-from synapse.replication.tcp.streams import PushersStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.pusher import PusherWorkerStore
-
-from ._slaved_id_tracker import SlavedIdTracker
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-
-class SlavedPusherStore(PusherWorkerStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
- self._pushers_id_gen = SlavedIdTracker( # type: ignore
- db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
- )
-
- def get_pushers_stream_token(self) -> int:
- return self._pushers_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == PushersStream.NAME:
- self._pushers_id_gen.advance(instance_name, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 7763ffb2d0..56a5c21910 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -245,7 +245,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
self._parse_and_dispatch_line(line)
def _parse_and_dispatch_line(self, line: bytes) -> None:
- if line.strip() == "":
+ if line.strip() == b"":
# Ignore blank lines
return
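The one-byte fix above matters because bytes and str never compare equal in Python 3, so the old guard was always False and blank lines fell through to command parsing:

    >>> b"" == ""
    False
    >>> b" \r\n".strip() == b""
    True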
diff --git a/synapse/res/templates/_base.html b/synapse/res/templates/_base.html
new file mode 100644
index 0000000000..46439fce6a
--- /dev/null
+++ b/synapse/res/templates/_base.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
+    <title>{% block title %}{% endblock %}</title>
+
+    {% block header %}{% endblock %}
+</head>
+
+<body>
+    <header class="mx_Header">
+    {% if app_name == "Riot" %}
+        <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+    {% elif app_name == "Vector" %}
+        <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+    {% elif app_name == "Element" %}
+        <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
+    {% else %}
+        <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+    {% endif %}
+    </header>
+
+{% block body %}{% endblock %}
+
+</body>
+</html>
diff --git a/synapse/res/templates/account_previously_renewed.html b/synapse/res/templates/account_previously_renewed.html
index bd4f7cea97..91582a8af0 100644
--- a/synapse/res/templates/account_previously_renewed.html
+++ b/synapse/res/templates/account_previously_renewed.html
@@ -1,12 +1,6 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <title>Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</title>
-</head>
-<body>
-    Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
-</body>
-</html>
\ No newline at end of file
+{% extends "_base.html" %}
+{% block title %}Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %}
+
+{% block body %}
+    <p>Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</p>
+{% endblock %}
diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html
index 57b319f375..18a57833f1 100644
--- a/synapse/res/templates/account_renewed.html
+++ b/synapse/res/templates/account_renewed.html
@@ -1,12 +1,6 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <title>Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</title>
-</head>
-<body>
-    Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
-</body>
-</html>
\ No newline at end of file
+{% extends "_base.html" %}
+{% block title %}Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %}
+
+{% block body %}
+    <p>Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</p>
+{% endblock %}
diff --git a/synapse/res/templates/add_threepid.html b/synapse/res/templates/add_threepid.html
index 71f2215b7a..33c883936a 100644
--- a/synapse/res/templates/add_threepid.html
+++ b/synapse/res/templates/add_threepid.html
@@ -1,14 +1,8 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <title>Request to add an email address to your Matrix account</title>
-</head>
-<body>
-
A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:
diff --git a/synapse/res/templates/sso_account_deactivated.html b/synapse/res/templates/sso_account_deactivated.html
--- a/synapse/res/templates/sso_account_deactivated.html
+++ b/synapse/res/templates/sso_account_deactivated.html
-            Your account might have been deactivated by the server administrator.
-            You can either try to create a new account or contact the server’s
-            administrator.
-        </p>
-    </header>
-    {% include "sso_footer.html" without context %}
-</body>
-</html>
+{% extends "_base.html" %}
+{% block title %}SSO account deactivated{% endblock %}
+
+{% block header %}
+<style type="text/css">
+    {% include "sso.css" without context %}
+</style>
+{% endblock %}
+
+{% block body %}
+<div class="error_page">
+    <header>
+        <h1>Your account has been deactivated</h1>
+        <p>
+            <strong>No account found</strong>
+        </p>
+        <p>
+            Your account might have been deactivated by the server administrator.
+            You can either try to create a new account or contact the server’s
+            administrator.
+        </p>
+    </header>
+</div>
+{% include "sso_footer.html" without context %}
+{% endblock %}
diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html
index 2d1db386e1..11636d7f5d 100644
--- a/synapse/res/templates/sso_auth_account_details.html
+++ b/synapse/res/templates/sso_auth_account_details.html
@@ -1,189 +1,186 @@
-
-
-
- Create your account
-
-
-
-
-
-
-
-
-
Create your account
-
This is required. Continue to create your account on {{ server_name }}. You can't change this later.