Merge branch 'develop' into madlittlemods/11850-migrate-to-opentelemetry

Conflicts:
    docs/usage/configuration/config_documentation.md
    poetry.lock
    synapse/handlers/message.py
    synapse/http/server.py
    synapse/logging/opentracing.py
    synapse/rest/client/keys.py
    synapse/rest/client/room.py
Executable (+132)
@@ -0,0 +1,132 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3
# compatible wheel; if so, rename the wheel before repairing it.

import argparse
import os
import subprocess
from typing import Optional
from zipfile import ZipFile

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version


def check_is_abi3_compatible(wheel_file: str) -> None:
    """Check the contents of the built wheel for any `.so` files that are *not*
    abi3 compatible.
    """

    with ZipFile(wheel_file, "r") as wheel:
        for file in wheel.namelist():
            if not file.endswith(".so"):
                continue

            if not file.endswith(".abi3.so"):
                raise Exception(f"Found non-abi3 lib: {file}")


def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
    """Replaces the cpython wheel file with an abi3-compatible wheel."""

    if tag.abi == "abi3":
        # Nothing to do.
        return wheel_file

    check_is_abi3_compatible(wheel_file)

    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)

    dirname = os.path.dirname(wheel_file)
    new_wheel_file = os.path.join(
        dirname,
        f"{name}-{version}-{abi3_tag}.whl",
    )

    os.rename(wheel_file, new_wheel_file)

    print("Renamed wheel to", new_wheel_file)

    return new_wheel_file


def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
    """Entry point"""

    # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
    # normalizes the package name (i.e. it converts matrix_synapse ->
    # matrix-synapse), which is not what we want.
    _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
    name = os.path.basename(wheel_file).split("-")[0]

    if len(tags) != 1:
        # We expect a wheel file with only a single tag.
        raise Exception(f"Unexpectedly found multiple tags: {tags}")

    tag = next(iter(tags))

    if build:
        # We don't use build tags in Synapse.
        raise Exception(f"Unexpected build tag: {build}")

    # If the wheel is for cpython then convert it into an abi3 wheel.
    if tag.interpreter.startswith("cp"):
        wheel_file = cpython(wheel_file, name, version, tag)

    # Finally, repair the wheel.
    if archs is not None:
        # If we are given archs then we are on macOS and need to use
        # `delocate` rather than `auditwheel`.
        subprocess.run(["delocate-listdeps", wheel_file], check=True)
        subprocess.run(
            ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
            check=True,
        )
    else:
        subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")

    parser.add_argument(
        "--wheel-dir",
        "-w",
        metavar="WHEEL_DIR",
        help="Directory to store delocated wheels",
        required=True,
    )

    parser.add_argument(
        "--require-archs",
        metavar="archs",
        default=None,
    )

    parser.add_argument(
        "wheel_file",
        metavar="WHEEL_FILE",
    )

    args = parser.parse_args()

    wheel_file = args.wheel_file
    wheel_dir = args.wheel_dir
    archs = args.require_archs

    main(wheel_file, wheel_dir, archs)
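
Aside (not part of the diff): a minimal demonstration of the `packaging` behaviour the comments in `main` rely on — `parse_wheel_filename` normalizes the distribution name, and a `Tag` renders as `interpreter-abi-platform`, which is what makes the rename in `cpython` work. The wheel filename below is hypothetical, for illustration only:

# Hypothetical demonstration of why the script re-derives `name` from the
# filename instead of using the value returned by `parse_wheel_filename`.
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

fname = "matrix_synapse-1.72.0-cp37-cp37m-manylinux_2_17_x86_64.whl"
name, version, _build, tags = parse_wheel_filename(fname)
print(name)                 # "matrix-synapse" -- normalized, underscore replaced
print(fname.split("-")[0])  # "matrix_synapse" -- the original name, which we want

tag = next(iter(tags))
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
print(abi3_tag)             # "cp37-abi3-manylinux_2_17_x86_64"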
@@ -46,7 +46,7 @@ if not IS_PR:
            "database": "sqlite",
            "extras": "all",
        }
-       for version in ("3.8", "3.9", "3.10")
+       for version in ("3.8", "3.9", "3.10", "3.11")
    )

@@ -54,7 +54,7 @@ trial_postgres_tests = [
    {
        "python-version": "3.7",
        "database": "postgres",
-       "postgres-version": "10",
+       "postgres-version": "11",
        "extras": "all",
    }
]

@@ -62,9 +62,9 @@ trial_postgres_tests = [
if not IS_PR:
    trial_postgres_tests.append(
        {
-           "python-version": "3.10",
+           "python-version": "3.11",
            "database": "postgres",
-           "postgres-version": "14",
+           "postgres-version": "15",
            "extras": "all",
        }
    )
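
Aside (not part of the diff): after these changes, the matrix entries emitted by `calculate_jobs.py` would take roughly this shape — an assumed sketch based only on the hunks above, not the script verbatim:

# Assumed shape of the test matrix after the Python 3.11 / PostgreSQL bumps.
trial_sqlite_tests = [
    {"python-version": version, "database": "sqlite", "extras": "all"}
    for version in ("3.8", "3.9", "3.10", "3.11")
]
trial_postgres_tests = [
    {"python-version": "3.7", "database": "postgres",
     "postgres-version": "11", "extras": "all"},
    # Only appended when not running on a PR (the `if not IS_PR` branch above).
    {"python-version": "3.11", "database": "postgres",
     "postgres-version": "15", "extras": "all"},
]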
@@ -18,5 +18,6 @@ updates:

  - package-ecosystem: "cargo"
    directory: "/"
+   versioning-strategy: "lockfile-only"
    schedule:
      interval: "weekly"
@@ -0,0 +1,34 @@
name: Deploy documentation PR preview

on:
  workflow_run:
    workflows: [ "Prepare documentation PR preview" ]
    types:
      - completed

jobs:
  netlify:
    if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
    runs-on: ubuntu-latest
    steps:
      # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
      # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
      - name: 📥 Download artifact
        uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
        with:
          workflow: docs-pr.yaml
          run_id: ${{ github.event.workflow_run.id }}
          name: book
          path: book

      - name: 📤 Deploy to Netlify
        uses: matrix-org/netlify-pr-preview@v1
        with:
          path: book
          owner: ${{ github.event.workflow_run.head_repository.owner.login }}
          branch: ${{ github.event.workflow_run.head_branch }}
          revision: ${{ github.event.workflow_run.head_sha }}
          token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
          site_id: ${{ secrets.NETLIFY_SITE_ID }}
          desc: Documentation preview
          deployment_env: PR Documentation Preview
@@ -0,0 +1,34 @@
name: Prepare documentation PR preview

on:
  pull_request:
    paths:
      - docs/**

jobs:
  pages:
    name: GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
        with:
          mdbook-version: '0.4.17'

      - name: Build the documentation
        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
        # However, we're using docs/README.md for other purposes and need to pick a new page
        # as the default. Let's opt for the welcome page instead.
        run: |
          mdbook build
          cp book/welcome_and_overview.html book/index.html

      - name: Upload Artifact
        uses: actions/upload-artifact@v3
        with:
          name: book
          path: book
          # We'll only use this in a workflow_run, then we're done with it
          retention-days: 1
@@ -20,7 +20,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Setup mdbook
-       uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
+       uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
        with:
          mdbook-version: '0.4.17'

@@ -58,7 +58,7 @@ jobs:

      # Deploy to the target directory.
      - name: Deploy to gh pages
-       uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
+       uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./book
@@ -27,10 +27,9 @@ jobs:
    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: stable
-         override: true
      - uses: Swatinem/rust-cache@v2

      # The dev dependencies aren't exposed in the wheel metadata (at least with current

@@ -62,10 +61,9 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: stable
-         override: true
      - uses: Swatinem/rust-cache@v2

      - run: sudo apt-get -qq install xmlsec1

@@ -136,10 +134,9 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: stable
-         override: true
      - uses: Swatinem/rust-cache@v2

      - name: Ensure sytest runs `pip install`
@@ -99,7 +99,7 @@ jobs:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
-       os: [ubuntu-20.04, macos-10.15]
+       os: [ubuntu-20.04, macos-11]
        arch: [x86_64, aarch64]
        # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
        # It is not read by the rest of the workflow.

@@ -109,9 +109,9 @@ jobs:
        exclude:
          # Don't build macos wheels on PR CI.
          - is_pr: true
-           os: "macos-10.15"
+           os: "macos-11"
          # Don't build aarch64 wheels on mac.
-         - os: "macos-10.15"
+         - os: "macos-11"
            arch: aarch64
          # Don't build aarch64 wheels on PR CI.
          - is_pr: true
(+56 -17)
@@ -33,6 +33,8 @@ jobs:
    steps:
      - uses: actions/checkout@v3
+     - uses: actions/setup-python@v4
+       with:
+         python-version: "3.x"
      - uses: matrix-org/setup-python-poetry@v1
        with:
          extras: "all"

@@ -44,6 +46,8 @@ jobs:
    steps:
      - uses: actions/checkout@v3
+     - uses: actions/setup-python@v4
+       with:
+         python-version: "3.x"
      - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
      - run: scripts-dev/check_schema_delta.py --force-colors

@@ -68,6 +72,8 @@ jobs:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
+     - uses: actions/setup-python@v4
+       with:
+         python-version: "3.x"
      - run: "pip install 'towncrier>=18.6.0rc1'"
      - run: scripts-dev/check-newsfragment.sh
        env:

@@ -93,10 +99,12 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       # There don't seem to be versioned releases of this action per se: for each rust
+       # version there is a branch which gets constantly rebased on top of master.
+       # We pin to a specific commit for paranoia's sake.
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: 1.58.1
-         override: true
          components: clippy
      - uses: Swatinem/rust-cache@v2

@@ -111,11 +119,13 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       # There don't seem to be versioned releases of this action per se: for each rust
+       # version there is a branch which gets constantly rebased on top of master.
+       # We pin to a specific commit for paranoia's sake.
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: 1.58.1
-         override: true
          components: rustfmt
      - uses: Swatinem/rust-cache@v2

      - run: cargo fmt --check

@@ -143,6 +153,8 @@ jobs:
    steps:
      - uses: actions/checkout@v3
+     - uses: actions/setup-python@v4
+       with:
+         python-version: "3.x"
      - id: get-matrix
        run: .ci/scripts/calculate_jobs.py
    outputs:

@@ -167,6 +179,16 @@ jobs:
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.job.postgres-version }}

+     - name: Install Rust
+       # There don't seem to be versioned releases of this action per se: for each rust
+       # version there is a branch which gets constantly rebased on top of master.
+       # We pin to a specific commit for paranoia's sake.
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+       with:
+         toolchain: 1.58.1
+     - uses: Swatinem/rust-cache@v2
+
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.job.python-version }}

@@ -203,10 +225,12 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       # There don't seem to be versioned releases of this action per se: for each rust
+       # version there is a branch which gets constantly rebased on top of master.
+       # We pin to a specific commit for paranoia's sake.
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: 1.58.1
-         override: true
      - uses: Swatinem/rust-cache@v2

      # There aren't wheels for some of the older deps, so we need to install

@@ -319,10 +343,12 @@ jobs:
        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       # There don't seem to be versioned releases of this action per se: for each rust
+       # version there is a branch which gets constantly rebased on top of master.
+       # We pin to a specific commit for paranoia's sake.
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: 1.58.1
-         override: true
      - uses: Swatinem/rust-cache@v2

      - name: Run SyTest

@@ -383,10 +409,10 @@ jobs:
      matrix:
        include:
          - python-version: "3.7"
-           postgres-version: "10"
+           postgres-version: "11"

-         - python-version: "3.10"
-           postgres-version: "14"
+         - python-version: "3.11"
+           postgres-version: "15"

    services:
      postgres:

@@ -404,6 +430,15 @@ jobs:

    steps:
      - uses: actions/checkout@v3
+     - name: Add PostgreSQL apt repository
+       # We need a version of pg_dump that can handle the version of
+       # PostgreSQL being tested against. The Ubuntu package repository lags
+       # behind new releases, so we have to use the PostgreSQL apt repository.
+       # Steps taken from https://www.postgresql.org/download/linux/ubuntu/
+       run: |
+         sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
+         wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
+         sudo apt-get update
      - run: sudo apt-get -qq install xmlsec1 postgresql-client
      - uses: matrix-org/setup-python-poetry@v1
        with:

@@ -451,10 +486,12 @@ jobs:
          path: synapse

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       # There don't seem to be versioned releases of this action per se: for each rust
+       # version there is a branch which gets constantly rebased on top of master.
+       # We pin to a specific commit for paranoia's sake.
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: 1.58.1
-         override: true
      - uses: Swatinem/rust-cache@v2

      - name: Prepare Complement's Prerequisites

@@ -477,10 +514,12 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       # There don't seem to be versioned releases of this action per se: for each rust
+       # version there is a branch which gets constantly rebased on top of master.
+       # We pin to a specific commit for paranoia's sake.
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: 1.58.1
-         override: true
      - uses: Swatinem/rust-cache@v2

      - run: cargo test
@@ -11,34 +11,34 @@ jobs:
    if: >
      contains(github.event.issue.labels.*.name, 'X-Needs-Info')
    steps:
-     - uses: octokit/graphql-action@v2.x
-       id: add_to_project
+     - uses: actions/add-to-project@main
+       id: add_project
        with:
-         headers: '{"GraphQL-Features": "projects_next_graphql"}'
-         query: |
-           mutation {
-             updateProjectV2ItemFieldValue(
-               input: {
-                 projectId: $projectid
-                 itemId: $contentid
-                 fieldId: $fieldid
-                 value: {
-                   singleSelectOptionId: "Todo"
-                 }
-               }
-             ) {
-               projectV2Item {
-                 id
-               }
-             }
-           }
-
-         projectid: ${{ env.PROJECT_ID }}
-         contentid: ${{ github.event.issue.node_id }}
-         fieldid: ${{ env.FIELD_ID }}
-         optionid: ${{ env.OPTION_ID }}
-       env:
-         PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
-         GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
-         FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
-         OPTION_ID: "ba22e43c"
+         project-url: "https://github.com/orgs/matrix-org/projects/67"
+         github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+     - name: Set status
+       env:
+         GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+       run: |
+         gh api graphql -f query='
+           mutation(
+             $project: ID!
+             $item: ID!
+             $fieldid: ID!
+             $columnid: String!
+           ) {
+             updateProjectV2ItemFieldValue(
+               input: {
+                 projectId: $project
+                 itemId: $item
+                 fieldId: $fieldid
+                 value: {
+                   singleSelectOptionId: $columnid
+                 }
+               }
+             ) {
+               projectV2Item {
+                 id
+               }
+             }
+           }' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent
@@ -18,10 +18,9 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: stable
-         override: true
      - uses: Swatinem/rust-cache@v2

      - uses: matrix-org/setup-python-poetry@v1

@@ -44,10 +43,9 @@ jobs:
      - run: sudo apt-get -qq install xmlsec1

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: stable
-         override: true
      - uses: Swatinem/rust-cache@v2

      - uses: matrix-org/setup-python-poetry@v1

@@ -84,10 +82,9 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: actions-rs/toolchain@v1
+       uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
        with:
          toolchain: stable
-         override: true
      - uses: Swatinem/rust-cache@v2

      - name: Patch dependencies

@@ -151,12 +148,11 @@ jobs:
        run: |
          set -x
          DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
-         pipx install poetry==1.1.14
+         pipx install poetry==1.2.0

          poetry remove -n twisted
          poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry lock --no-update
-         # NOT IN 1.1.14 poetry lock --check
        working-directory: synapse

      - run: |
(+232 -2)
@@ -1,3 +1,233 @@
Synapse 1.72.0rc1 (2022-11-16)
==============================

Please note that we now only support PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, c.f. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).

Features
--------

- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))


Bugfixes
--------

- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
- Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))


Updates to the Docker image
---------------------------

- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))


Improved Documentation
----------------------

- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
- Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
- Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))


Deprecations and Removals
-------------------------

- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))


Internal Changes
----------------

- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
- Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
- Use a maintained GitHub action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
- Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
- Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
- Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))


Synapse 1.71.0 (2022-11-08)
===========================

Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
They will be removed altogether in Synapse 1.73.0.
If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.

**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.

No significant changes since 1.71.0rc2.


Synapse 1.71.0rc2 (2022-11-04)
==============================

Improved Documentation
----------------------

- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))


Deprecations and Removals
-------------------------

- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))


Internal Changes
----------------

- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))


Synapse 1.71.0rc1 (2022-11-01)
==============================

Features
--------

- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
- Allow use of Postgres and SQLite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), Pushrules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))


Bugfixes
--------

- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))


Improved Documentation
----------------------

- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110))
- Update docstrings of `SynapseError` and `FederationError` to better describe what they are used for and the effects of using them. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))


Internal Changes
----------------

- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
- Build wheels on macOS 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))

<details>
<summary>Dependency updates</summary>

Runtime:

- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278))
- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271))
- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274))
- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331))
- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270))
- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330))
- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277))
- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279))

Tooling and CI:

- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328))
- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042))
- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276))
- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275))
- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273))
- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332))
- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133))
- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272))
</details>

Synapse 1.70.1 (2022-10-28)
===========================

This release fixes some regressions that were discovered in 1.70.0.

[#14300](https://github.com/matrix-org/synapse/issues/14300)
was previously reported to be a regression in 1.70.0 as well. However, we have
since concluded that it was limited to the reporter and thus have not needed
to include any fix for it in 1.70.1.


Bugfixes
--------

- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301))
- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314))


Synapse 1.70.0 (2022-10-26)
===========================

No significant changes since 1.70.0rc2.


Synapse 1.70.0rc2 (2022-10-25)
==============================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248))
- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280))
- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283))


Internal Changes
----------------

- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253))
- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259))


Synapse 1.70.0rc1 (2022-10-19)
==============================

@@ -13,7 +243,7 @@ Features
- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehydration](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
-- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874). ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
+- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))

@@ -21,7 +251,7 @@ Features
Bugfixes
--------

-- Prevent device names from appearing in device list updates when `allow_device_name_lookup_over_federation` is `false`. ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
Generated (+22 -22)
@@ -13,9 +13,9 @@ dependencies = [

[[package]]
name = "anyhow"
-version = "1.0.65"
+version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
+checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"

[[package]]
name = "arc-swap"

@@ -37,9 +37,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "blake2"
-version = "0.10.4"
+version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
+checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e"
dependencies = [
 "digest",
]

@@ -194,9 +194,9 @@ dependencies = [

[[package]]
name = "pyo3"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "201b6887e5576bf2f945fe65172c1fcbf3fcf285b23e4d71eb171d9736e38d32"
+checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
dependencies = [
 "anyhow",
 "cfg-if",

@@ -212,9 +212,9 @@ dependencies = [

[[package]]
name = "pyo3-build-config"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf0708c9ed01692635cbf056e286008e5a2927ab1a5e48cdd3aeb1ba5a6fef47"
+checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
dependencies = [
 "once_cell",
 "target-lexicon",

@@ -222,9 +222,9 @@ dependencies = [

[[package]]
name = "pyo3-ffi"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90352dea4f486932b72ddf776264d293f85b79a1d214de1d023927b41461132d"
+checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
dependencies = [
 "libc",
 "pyo3-build-config",

@@ -243,9 +243,9 @@ dependencies = [

[[package]]
name = "pyo3-macros"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7eb24b804a2d9e88bfcc480a5a6dd76f006c1e3edaf064e8250423336e2cd79d"
+checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
dependencies = [
 "proc-macro2",
 "pyo3-macros-backend",

@@ -255,9 +255,9 @@ dependencies = [

[[package]]
name = "pyo3-macros-backend"
-version = "0.17.2"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f22bb49f6a7348c253d7ac67a6875f2dc65f36c2ae64a82c381d528972bea6d6"
+checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
 "proc-macro2",
 "quote",

@@ -294,9 +294,9 @@ dependencies = [

[[package]]
name = "regex"
-version = "1.6.0"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
+checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
dependencies = [
 "aho-corasick",
 "memchr",

@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "serde"
-version = "1.0.145"
+version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
+checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
-version = "1.0.145"
+version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
+checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
dependencies = [
 "proc-macro2",
 "quote",

@@ -343,9 +343,9 @@ dependencies = [

[[package]]
name = "serde_json"
-version = "1.0.86"
+version = "1.0.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41feea4228a6f1cd09ec7a3593a682276702cd67b5273544757dae23c096f074"
+checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45"
dependencies = [
 "itoa",
 "ryu",
@@ -1 +0,0 @@
-Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper.
@@ -1 +0,0 @@
-Bump flake8-bugbear from 21.3.2 to 22.9.23.
@@ -1 +0,0 @@
-Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid).
@@ -1 +0,0 @@
-Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type).
@@ -1 +0,0 @@
-Bump types-opentracing from 2.4.7 to 2.4.10.
@@ -1 +0,0 @@
-Update docstrings of `SynapseError` and `FederationError` to better describe what they are used for and the effects of using them.
@@ -1 +0,0 @@
-Refactor `/key/` endpoints to use `RestServlet` classes.
@@ -1 +0,0 @@
-Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI.
@@ -0,0 +1 @@
+Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
@@ -0,0 +1 @@
+Fix a bug introduced in 1.58.0 where a user with presence state 'org.matrix.msc3026.busy' would mistakenly be set to 'online' when calling `/sync` or `/events` on a worker process.
@@ -0,0 +1 @@
+Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.
@@ -0,0 +1 @@
+Faster joins: do not wait for full state when creating events to send.
@@ -0,0 +1 @@
+Remove duplicated type information from type hints.
@@ -0,0 +1 @@
+Fix type logic in TCP replication code that prevented correctly ignoring blank commands.
@@ -0,0 +1 @@
+Enable mypy's [`strict_equality` check](https://mypy.readthedocs.io/en/stable/command_line.html#cmdoption-mypy-strict-equality) by default.
@@ -0,0 +1 @@
+Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
@@ -0,0 +1 @@
+Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.
@@ -0,0 +1 @@
+Reduce default third party invite rate limit to 216 invites per day.
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 0.9 where it would fail to fetch server keys whose IDs contain a forward slash.
Vendored (+42)
@@ -1,3 +1,45 @@
matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium

  * New Synapse release 1.72.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 16 Nov 2022 15:10:59 +0000

matrix-synapse-py3 (1.71.0) stable; urgency=medium

  * New Synapse release 1.71.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Nov 2022 10:38:10 +0000

matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium

  * New Synapse release 1.71.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 04 Nov 2022 12:00:33 +0000

matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium

  * New Synapse release 1.71.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Nov 2022 12:10:17 +0000

matrix-synapse-py3 (1.70.1) stable; urgency=medium

  * New Synapse release 1.70.1.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 28 Oct 2022 12:10:21 +0100

matrix-synapse-py3 (1.70.0) stable; urgency=medium

  * New Synapse release 1.70.0.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Oct 2022 11:11:50 +0100

matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium

  * New Synapse release 1.70.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Oct 2022 10:59:47 +0100

matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium

  * New Synapse release 1.70.0rc1.
@@ -45,7 +45,12 @@ esac

if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
-  # Specify the workers to test with
-  export SYNAPSE_WORKER_TYPES="\
+  # Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
+  # utilizing WORKERS=1 for backwards compatibility.
+  # -n True if the length of string is non-zero.
+  # -z True if the length of string is zero.
+  if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
+    export SYNAPSE_WORKER_TYPES="\
      event_persister, \
      event_persister, \
      background_worker, \

@@ -61,6 +66,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
      appservice, \
      pusher"

+  fi
+  log "Workers requested: $SYNAPSE_WORKER_TYPES"
  # Improve startup times by using a launcher based on fork()
  export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
@@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##

experimental_features:
-  # Enable spaces support
-  spaces_enabled: true
  # Enable history backfilling support
  msc2716_enabled: true
  # server-side support for partial state in /send_join responses

@@ -104,6 +102,8 @@ experimental_features:
  {% endif %}
  # Enable jump to date endpoint
  msc3030_enabled: true
+  # Filtering /messages by relation type.
+  msc3874_enabled: true

server_notices:
  system_mxid_localpart: _server
@@ -20,7 +20,7 @@
|
||||
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
|
||||
# * SYNAPSE_REPORT_STATS: Whether to report stats.
|
||||
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
|
||||
# below. Leave empty for no workers, or set to '*' for all possible workers.
|
||||
# below. Leave empty for no workers.
|
||||
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
|
||||
# will be treated as Application Service registration files.
|
||||
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
|
||||
@@ -50,13 +50,18 @@ from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
|
||||
|
||||
|
||||
# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
|
||||
# Watching /_matrix/client needs a "client" listener
|
||||
# Watching /_matrix/federation needs a "federation" listener
|
||||
# Watching /_matrix/media and related needs a "media" listener
|
||||
# Stream Writers require "client" and "replication" listeners because they
|
||||
# have to attach by instance_map to the master process and have client endpoints.
|
||||
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"pusher": {
|
||||
"app": "synapse.app.pusher",
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {"start_pushers": False},
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"user_dir": {
|
||||
@@ -79,7 +84,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
            "^/_synapse/admin/v1/media/.*$",
            "^/_synapse/admin/v1/quarantine_media/.*$",
        ],
        "shared_extra_conf": {"enable_media_repo": False},
        # The first configured media worker will run the media background jobs
        "shared_extra_conf": {
            "enable_media_repo": False,
            "media_instance_running_background_jobs": "media_repository1",
        },
        "worker_extra_conf": "enable_media_repo: true",
    },
    "appservice": {
@@ -90,10 +99,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
        "worker_extra_conf": "",
    },
    "federation_sender": {
        "app": "synapse.app.federation_sender",
        "app": "synapse.app.generic_worker",
        "listener_resources": [],
        "endpoint_patterns": [],
        "shared_extra_conf": {"send_federation": False},
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "synchrotron": {
@@ -200,14 +209,54 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
        "worker_extra_conf": "",
    },
    "frontend_proxy": {
        "app": "synapse.app.frontend_proxy",
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
        "shared_extra_conf": {},
        "worker_extra_conf": (
            "worker_main_http_uri: http://127.0.0.1:%d"
            % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
        ),
        "worker_extra_conf": "",
    },
    "account_data": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": [
            "^/_matrix/client/(r0|v3|unstable)/.*/tags",
            "^/_matrix/client/(r0|v3|unstable)/.*/account_data",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "presence": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "receipts": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": [
            "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
            "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "to_device": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "typing": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": [
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
}

@@ -271,14 +320,14 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
        outfile.write(rendered)


def add_sharding_to_shared_config(
def add_worker_roles_to_shared_config(
    shared_config: dict,
    worker_type: str,
    worker_name: str,
    worker_port: int,
) -> None:
    """Given a dictionary representing a config file shared across all workers,
    append sharded worker information to it for the current worker_type instance.
    append appropriate worker information to it for the current worker_type instance.

    Args:
        shared_config: The config dict that all worker instances share (after being converted to YAML)
@@ -309,9 +358,19 @@ def add_sharding_to_shared_config(
            "port": worker_port,
        }

    elif worker_type == "media_repository":
        # The first configured media worker will run the media background jobs
        shared_config.setdefault("media_instance_running_background_jobs", worker_name)
    elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
        # Update the list of stream writers. It's convenient that the name of the
        # worker type is the same as the name of the stream to write to.
        shared_config.setdefault("stream_writers", {}).setdefault(
            worker_type, []
        ).append(worker_name)

        # Map of stream writer instance names to host/port combos.
        # For now, all stream writers need HTTP replication ports.
        instance_map[worker_name] = {
            "host": "localhost",
            "port": worker_port,
        }
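        # Illustrative sketch (not part of the upstream change): with two
        # hypothetical "typing" workers named typing1/typing2 on ports 8034
        # and 8035, the branch above leaves the shared config containing
        # roughly:
        #
        #   stream_writers:
        #     typing: ["typing1", "typing2"]
        #   instance_map:
        #     typing1: {host: "localhost", port: 8034}
        #     typing2: {host: "localhost", port: 8035}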


def generate_base_homeserver_config() -> None:
@@ -421,8 +480,7 @@ def generate_worker_files(
        if worker_config:
            worker_config = worker_config.copy()
        else:
            log(worker_type + " is an unknown worker type! It will be ignored")
            continue
            error(worker_type + " is an unknown worker type! Please fix!")

        new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
        worker_type_counter[worker_type] = new_worker_count
@@ -441,11 +499,11 @@ def generate_worker_files(

        # Check if more than one instance of this worker type has been specified
        worker_type_total_count = worker_types.count(worker_type)
        if worker_type_total_count > 1:
            # Update the shared config with sharding-related options if necessary
            add_sharding_to_shared_config(
                shared_config, worker_type, worker_name, worker_port
            )

        # Update the shared config with sharding-related options if necessary
        add_worker_roles_to_shared_config(
            shared_config, worker_type, worker_name, worker_port
        )

        # Enable the worker in supervisord
        worker_descriptors.append(worker_config)

@@ -9,6 +9,8 @@
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
  - [coturn TURN server](setup/turn/coturn.md)
  - [eturnal TURN server](setup/turn/eturnal.md)
- [Delegation](delegate.md)

# Upgrading

@@ -37,6 +37,7 @@ It returns a JSON body like the following:
    "is_guest": 0,
    "admin": 0,
    "deactivated": 0,
    "erased": false,
    "shadow_banned": 0,
    "creation_ts": 1560432506,
    "appservice_id": null,
@@ -167,6 +168,7 @@ A response body like the following is returned:
            "admin": 0,
            "user_type": null,
            "deactivated": 0,
            "erased": false,
            "shadow_banned": 0,
            "displayname": "<User One>",
            "avatar_url": null,
@@ -177,6 +179,7 @@ A response body like the following is returned:
            "admin": 1,
            "user_type": null,
            "deactivated": 0,
            "erased": false,
            "shadow_banned": 0,
            "displayname": "<User Two>",
            "avatar_url": "<avatar_url>",
@@ -247,6 +250,7 @@ The following fields are returned in the JSON response body:
- `user_type` - string - Type of the user. Normal users are type `None`.
  This allows user type specific behaviour. There are also types `support` and `bot`.
- `deactivated` - bool - Status if that user has been marked as deactivated.
- `erased` - bool - Status if that user has been marked as erased.
- `shadow_banned` - bool - Status if that user has been marked as shadow banned.
- `displayname` - string - The user's display name if they have set one.
- `avatar_url` - string - The user's avatar URL if they have set one.
@@ -1193,3 +1197,42 @@ Returns a `404` HTTP status code if no user was found, with a response body like
```

_Added in Synapse 1.68.0._


### Find a user based on their Third Party ID (ThreePID or 3PID)

The API is:

```
GET /_synapse/admin/v1/threepid/$medium/users/$address
```

When a user matches the given address for the given medium, an HTTP code `200` with a response body like the following is returned:

```json
{
    "user_id": "@hello:example.org"
}
```

**Parameters**

The following parameters should be set in the URL:

- `medium` - Kind of third-party ID, either `email` or `msisdn`.
- `address` - Value of the third-party ID.

The `address` may have characters that are not URL-safe, so it is advised to URL-encode those parameters.
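
For example, a lookup for an email address could look like the following sketch. The server name and the admin access token in `$TOKEN` are placeholders, not values from this page:

```sh
# Hypothetical example: URL-encode the address, then query the endpoint above.
ADDRESS=$(python3 -c 'import urllib.parse; print(urllib.parse.quote("alice@example.org"))')
curl --header "Authorization: Bearer $TOKEN" \
    "https://matrix.example.org/_synapse/admin/v1/threepid/email/users/$ADDRESS"
```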

**Errors**

Returns a `404` HTTP status code if no user was found, with a response body like this:

```json
{
    "errcode": "M_NOT_FOUND",
    "error": "User not found"
}
```

_Added in Synapse 1.72.0._

@@ -324,6 +324,12 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data

- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
  - If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
    types you wish to test: a simple comma-delimited string containing the worker types
    defined in the `WORKERS_CONFIG` template
    [here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
    A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`
    (see the sketch of a full invocation below).
    See the [worker documentation](../workers.md) for additional information on workers.
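
A minimal sketch of a workerised test run combining these variables; the script path assumes a checkout of the Synapse repository, where the Complement wrapper lives at `scripts-dev/complement.sh`:

```sh
# Hypothetical invocation: Postgres is implied by WORKERS=1.
WORKERS=1 \
WORKER_TYPES="federation_inbound, federation_sender, synchrotron" \
./scripts-dev/complement.sh
```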

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g.:
```sh

@@ -209,6 +209,9 @@ altogether in Synapse v1.73.0.**
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
| synapse_admin_mau_current | synapse_admin_mau:current |
| synapse_admin_mau_max | synapse_admin_mau:max |
| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |

Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------

@@ -49,6 +49,13 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.

## OIDC Back-Channel Logout

Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.

This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user's session.
This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and by setting the following URL as the destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`
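
For instance, a provider entry with back-channel logout enabled might look like the sketch below. The `idp_id`, issuer, and client credentials are placeholders for a hypothetical provider, not a real configuration:

```yaml
oidc_providers:
  - idp_id: my_idp                    # hypothetical provider id
    idp_name: "My IdP"
    issuer: "https://idp.example.com/"
    client_id: "synapse"
    client_secret: "copy-from-your-idp"
    backchannel_logout_enabled: true  # process logout notifications from this provider
```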

## Sample configs

Here are a few configs for providers that should work with Synapse.
@@ -123,6 +130,9 @@ oidc_providers:

[Keycloak][keycloak-idp] is an open-source IdP maintained by Red Hat.

Keycloak supports OIDC Back-Channel Logout, which sends logout notifications to Synapse, so that Synapse users get logged out when they log out from Keycloak.
This can optionally be enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.

Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.

1. Click `Clients` in the sidebar and click `Create`
@@ -144,6 +154,8 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
| Backchannel Logout Session Required (optional) | `On` |

5. Click `Save`
6. On the Credentials tab, update the fields:
@@ -167,7 +179,9 @@ oidc_providers:
    config:
      localpart_template: "{{ user.preferred_username }}"
      display_name_template: "{{ user.name }}"
      backchannel_logout_enabled: true # Optional
```

### Auth0

[Auth0][auth0] is a hosted SaaS IdP solution.

@@ -79,6 +79,9 @@ server {
        # Nginx by default only allows file uploads up to 1M in size
        # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
        client_max_body_size 50M;

        # Synapse responses may be chunked, which is an HTTP/1.1 feature.
        proxy_http_version 1.1;
    }
}
```

@@ -6,7 +6,7 @@
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html

version: 1

@@ -0,0 +1,188 @@
# coturn TURN server

The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API).

## `coturn` setup

### Initial installation

The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.

#### Debian and Ubuntu based distributions

Just install the Debian package:

```sh
sudo apt install coturn
```

This will install and start a systemd service called `coturn`.

#### Source installation

1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.

1. Configure it:

   ```sh
   ./configure
   ```

   You may need to install `libevent2`: if so, you should do so in
   the way recommended by your operating system. You can ignore
   warnings about lack of database support: a database is unnecessary
   for this purpose.

1. Build and install it:

   ```sh
   make
   sudo make install
   ```

### Configuration

1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
   lines, with example values, are:

   ```
   use-auth-secret
   static-auth-secret=[your secret key here]
   realm=turn.myserver.org
   ```

   See `turnserver.conf` for explanations of the options. One way to generate
   the `static-auth-secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

   A `realm` must be specified, but its value is somewhat arbitrary. (It is
   sent to clients as part of the authentication flow.) It is conventional to
   set it to be your server name.

1. You will most likely want to configure `coturn` to write logs somewhere. The
   easiest way is normally to send them to the syslog:

   ```sh
   syslog
   ```

   (in which case, the logs will be available via `journalctl -u coturn` on a
   systemd system). Alternatively, `coturn` can be configured to write to a
   logfile - check the example config file supplied with `coturn`.

1. Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point:

   ```
   # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
   no-tcp-relay

   # don't let the relay ever try to connect to private IP address ranges within your network (if any)
   # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
   denied-peer-ip=10.0.0.0-10.255.255.255
   denied-peer-ip=192.168.0.0-192.168.255.255
   denied-peer-ip=172.16.0.0-172.31.255.255

   # recommended additional local peers to block, to mitigate external access to internal services.
   # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
   no-multicast-peers
   denied-peer-ip=0.0.0.0-0.255.255.255
   denied-peer-ip=100.64.0.0-100.127.255.255
   denied-peer-ip=127.0.0.0-127.255.255.255
   denied-peer-ip=169.254.0.0-169.254.255.255
   denied-peer-ip=192.0.0.0-192.0.0.255
   denied-peer-ip=192.0.2.0-192.0.2.255
   denied-peer-ip=192.88.99.0-192.88.99.255
   denied-peer-ip=198.18.0.0-198.19.255.255
   denied-peer-ip=198.51.100.0-198.51.100.255
   denied-peer-ip=203.0.113.0-203.0.113.255
   denied-peer-ip=240.0.0.0-255.255.255.255

   # special case the turn server itself so that client->TURN->TURN->client flows work
   # this should be one of the turn server's listening IPs
   allowed-peer-ip=10.0.0.1

   # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
   user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
   total-quota=1200
   ```

1. Also consider supporting TLS/DTLS. To do this, add the following settings
   to `turnserver.conf`:

   ```
   # TLS certificates, including intermediate certs.
   # For Let's Encrypt certificates, use `fullchain.pem` here.
   cert=/path/to/fullchain.pem

   # TLS private key file
   pkey=/path/to/privkey.pem

   # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
   #no-tls
   #no-dtls
   ```

   In this case, replace the `turn:` schemes in the `turn_uris` settings below
   with `turns:`.

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (by default 3478 and 5349 for TURN
   traffic, both TCP and UDP, plus ports 49152-65535 for the UDP relay).

1. If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. You must configure `coturn` to advertise that
   address to connecting clients:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   ```

   You may optionally limit the TURN server to listen only on the local
   address that is mapped by NAT to the external address:

   ```
   listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure `coturn` to advertise each available address:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   external-ip=EXTERNAL_NAT_IPv6_ADDRESS
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. (Re)start the turn server:

   * If you used the Debian package (or have set up a systemd unit yourself):

     ```sh
     sudo systemctl restart coturn
     ```

   * If you built from source:

     ```sh
     /usr/local/bin/turnserver -o
     ```
@@ -0,0 +1,170 @@
# eturnal TURN server

The following sections describe how to install [eturnal](<https://github.com/processone/eturnal>)
(which implements the TURN REST API).

## `eturnal` setup

### Initial installation

The `eturnal` TURN server implementation is available from a variety of sources
such as native package managers, binary packages, installation from source or
[container image](https://eturnal.net/documentation/code/docker.html). They are
all described [here](https://github.com/processone/eturnal#installation).

Quick-Test instructions in a [Linux Shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md)
or with [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md)
are available as well.

### Configuration

After installation, `eturnal` usually ships a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml)
here: `/etc/eturnal.yml` (and, if not found there, there is a backup file here:
`/opt/eturnal/etc/eturnal.yml`). It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML)
format. The file contains further explanations.

Here are some hints on how to configure eturnal on your [host machine](https://github.com/processone/eturnal#configuration)
or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html).
You may also dive deeper into the [reference documentation](https://eturnal.net/documentation/).

`eturnal` runs out of the box with the default configuration. To enable TURN and
to integrate it with your homeserver, some aspects of `eturnal`'s default configuration file
must be edited:

1. Homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
   and eturnal's shared `secret` for authentication

   Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
   configuration file:

   ```yaml
   secret: "long-and-cryptic"     # Shared secret, CHANGE THIS.
   ```

   One way to generate a `secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

1. Public IP address

   If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. `eturnal` tries to autodetect the public IP address;
   however, it may also be configured by uncommenting and adjusting this line, so
   `eturnal` advertises that address to connecting clients:

   ```yaml
   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure `eturnal` to advertise each available address:

   ```yaml
   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
   relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. Logging

   If `eturnal` was started by systemd, log files are written into the
   `/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
   instead, the `log_dir` option can be set to `stdout` in the configuration file.

1. Security considerations

   Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point, [see also the official documentation](https://eturnal.net/documentation/#blacklist):

   ```yaml
   ## Reject TURN relaying from/to the following addresses/networks:
   blacklist:                 # This is the default blacklist.
       - "127.0.0.0/8"        # IPv4 loopback.
       - "::1"                # IPv6 loopback.
       - recommended          # Expands to a number of networks recommended to be
                              # blocked, but includes private networks. Those
                              # would have to be 'whitelist'ed if eturnal serves
                              # local clients/peers within such networks.
   ```

   To whitelist IP addresses or specific (private) networks, you need to **add** a
   whitelist part into the configuration file, e.g.:

   ```yaml
   whitelist:
       - "192.168.0.0/16"
       - "203.0.113.113"
       - "2001:db8::/64"
   ```

   The more specific, the better.

1. TURNS (TURN via TLS/DTLS)

   Also consider supporting TLS/DTLS. To do this, adjust the following settings
   in the `eturnal.yml` configuration file (the TLS parts should no longer be commented out):

   ```yaml
   listen:
       - ip: "::"
         port: 3478
         transport: udp
       - ip: "::"
         port: 3478
         transport: tcp
       - ip: "::"
         port: 5349
         transport: tls

   ## TLS certificate/key files (must be readable by 'eturnal' user!):
   tls_crt_file: /etc/eturnal/tls/crt.pem
   tls_key_file: /etc/eturnal/tls/key.pem
   ```

   In this case, replace the `turn:` schemes in the homeserver's `turn_uris` settings
   with `turns:`. More is described [here](../../usage/configuration/config_documentation.md#turn_uris).
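
   As a sketch, assuming the TLS listener above and a placeholder hostname, the homeserver setting would then look something like:

   ```yaml
   turn_uris:
     - "turns:turn.example.org:5349?transport=udp"
     - "turns:turn.example.org:5349?transport=tcp"
   ```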

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Firewall

   Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (by default 3478 and 5349 for TURN
   traffic, both TCP and UDP, plus ports 49152-65535 for the UDP relay).

1. Reloading/restarting `eturnal`

   Changes in the configuration file require `eturnal` to reload or restart,
   which can be done with:

   ```sh
   eturnalctl reload
   ```

   `eturnal` performs a configuration check before actually reloading/restarting
   and provides hints if something is not configured correctly.

### eturnalctl operations script

`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
which can be called e.g. to check whether the service is up, to restart the service,
to query how many active sessions exist, to change logging behaviour, and so on.

Hint: If `eturnalctl` is not part of your `$PATH`, consider either sym-linking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it from the default `eturnal` directory directly: e.g. `/opt/eturnal/bin/eturnalctl info`
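
For example, the `eturnalctl` commands already mentioned on this page combine into a quick health check:

```sh
eturnalctl info             # check whether the service is up
eturnalctl credentials      # generate TURN credentials for testing
eturnalctl loglevel debug   # temporarily raise the log level
eturnalctl reload           # re-read and validate the configuration
```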
+29
-211
@@ -9,222 +9,28 @@ allows the homeserver to generate credentials that are valid for use on the
TURN server through the use of a secret shared between the homeserver and the
TURN server.

The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API) and integrate it with synapse.
This documentation provides two TURN server configuration examples:

* [coturn](setup/turn/coturn.md)
* [eturnal](setup/turn/eturnal.md)

## Requirements

For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.
For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.

Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP.
However, even with appropriate configuration, NAT is known to cause issues and often does not work.

## `coturn` setup

### Initial installation

The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.

#### Debian installation

Just install the Debian package:

```sh
apt install coturn
```

This will install and start a systemd service called `coturn`.

#### Source installation

1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.

1. Configure it:

   ```sh
   ./configure
   ```

   You may need to install `libevent2`: if so, you should do so in
   the way recommended by your operating system. You can ignore
   warnings about lack of database support: a database is unnecessary
   for this purpose.

1. Build and install it:

   ```sh
   make
   make install
   ```

### Configuration

1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
   lines, with example values, are:

   ```
   use-auth-secret
   static-auth-secret=[your secret key here]
   realm=turn.myserver.org
   ```

   See `turnserver.conf` for explanations of the options. One way to generate
   the `static-auth-secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

   A `realm` must be specified, but its value is somewhat arbitrary. (It is
   sent to clients as part of the authentication flow.) It is conventional to
   set it to be your server name.

1. You will most likely want to configure coturn to write logs somewhere. The
   easiest way is normally to send them to the syslog:

   ```sh
   syslog
   ```

   (in which case, the logs will be available via `journalctl -u coturn` on a
   systemd system). Alternatively, coturn can be configured to write to a
   logfile - check the example config file supplied with coturn.

1. Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point:

   ```
   # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
   no-tcp-relay

   # don't let the relay ever try to connect to private IP address ranges within your network (if any)
   # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
   denied-peer-ip=10.0.0.0-10.255.255.255
   denied-peer-ip=192.168.0.0-192.168.255.255
   denied-peer-ip=172.16.0.0-172.31.255.255

   # recommended additional local peers to block, to mitigate external access to internal services.
   # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
   no-multicast-peers
   denied-peer-ip=0.0.0.0-0.255.255.255
   denied-peer-ip=100.64.0.0-100.127.255.255
   denied-peer-ip=127.0.0.0-127.255.255.255
   denied-peer-ip=169.254.0.0-169.254.255.255
   denied-peer-ip=192.0.0.0-192.0.0.255
   denied-peer-ip=192.0.2.0-192.0.2.255
   denied-peer-ip=192.88.99.0-192.88.99.255
   denied-peer-ip=198.18.0.0-198.19.255.255
   denied-peer-ip=198.51.100.0-198.51.100.255
   denied-peer-ip=203.0.113.0-203.0.113.255
   denied-peer-ip=240.0.0.0-255.255.255.255

   # special case the turn server itself so that client->TURN->TURN->client flows work
   # this should be one of the turn server's listening IPs
   allowed-peer-ip=10.0.0.1

   # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
   user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
   total-quota=1200
   ```

1. Also consider supporting TLS/DTLS. To do this, add the following settings
   to `turnserver.conf`:

   ```
   # TLS certificates, including intermediate certs.
   # For Let's Encrypt certificates, use `fullchain.pem` here.
   cert=/path/to/fullchain.pem

   # TLS private key file
   pkey=/path/to/privkey.pem

   # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
   #no-tls
   #no-dtls
   ```

   In this case, replace the `turn:` schemes in the `turn_uris` settings below
   with `turns:`.

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (by default 3478 and 5349 for TURN
   traffic, both TCP and UDP, plus ports 49152-65535 for the UDP relay).

1. If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. You must configure coturn to advertise that
   address to connecting clients:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   ```

   You may optionally limit the TURN server to listen only on the local
   address that is mapped by NAT to the external address:

   ```
   listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure coturn to advertise each available address:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   external-ip=EXTERNAL_NAT_IPv6_ADDRESS
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. (Re)start the turn server:

   * If you used the Debian package (or have set up a systemd unit yourself):

     ```sh
     systemctl restart coturn
     ```

   * If you installed from source:

     ```sh
     bin/turnserver -o
     ```
Afterwards, the homeserver needs some further configuration.

## Synapse setup

Your homeserver configuration file needs the following extra keys:

1. "`turn_uris`": This needs to be a yaml list of public-facing URIs
   for your TURN server to be given out to your clients. Add separate
   entries for each transport your TURN server supports.
2. "`turn_shared_secret`": This is the secret shared between your
   homeserver and your TURN server, so you should set it to the same
   string you used in turnserver.conf.
3. "`turn_user_lifetime`": This is the amount of time credentials
   generated by your homeserver are valid for (in milliseconds).
   Shorter times offer less potential for abuse at the expense of
   increased traffic between web clients and your homeserver to
   refresh credentials. The TURN REST API specification recommends
   one day (86400000).
4. "`turn_allow_guests`": Whether to allow guest users to use the
   TURN server. This is enabled by default, as otherwise VoIP will
   not work reliably for guests. However, it does introduce a
   security risk as it lets guests connect to arbitrary endpoints
   without having gone through a CAPTCHA or similar to register a
   real account.
1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)

As an example, here is the relevant section of the config file for `matrix.org`. The
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
@@ -263,7 +69,7 @@ Here are a few things to try:

* Check that you have opened your firewall to allow UDP traffic to the UDP
  relay ports (49152-65535 by default).

* Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted)
* Try disabling TLS/DTLS listeners and enable only its (unencrypted)
  TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
  voice & video WebRTC traffic is always encrypted.)

@@ -288,12 +94,19 @@ Here are a few things to try:

* ensure that your TURN server uses the NAT gateway as its default route.

* Enable more verbose logging in coturn via the `verbose` setting:
* Enable more verbose logging, in `coturn` via the `verbose` setting:

  ```
  verbose
  ```

  or with `eturnal` with the shell command `eturnalctl loglevel debug` or in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for it to become effective):

  ```yaml
  ## Logging configuration:
  log_level: debug
  ```

  ... and then see if there are any clues in its logs.

* If you are using a browser-based client under Chrome, check
@@ -317,7 +130,7 @@ Here are a few things to try:
  matrix client to your homeserver in your browser's network inspector. In
  the response you should see `username` and `password`. Or:

  * Use the following shell commands:
  * Use the following shell commands for `coturn`:

    ```sh
    secret=staticAuthSecretHere
@@ -327,11 +140,16 @@ Here are a few things to try:
    echo -e "username: $u\npassword: $p"
    ```

    Or:
    or for `eturnal`

  * Temporarily configure coturn to accept a static username/password. To do
    this, comment out `use-auth-secret` and `static-auth-secret` and add the
    following:
    ```sh
    eturnalctl credentials
    ```


  * Or (**coturn only**): Temporarily configure `coturn` to accept a static
    username/password. To do this, comment out `use-auth-secret` and
    `static-auth-secret` and add the following:

    ```
    lt-cred-mech

@@ -88,6 +88,60 @@ process, for example:
    dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.72.0

## Dropping support for PostgreSQL 10

In line with our [deprecation policy](deprecation_policy.md), we've dropped
support for PostgreSQL 10, as it is no longer supported upstream.

This release of Synapse requires PostgreSQL 11+.


# Upgrading to v1.71.0

## Removal of the `generate_short_term_login_token` module API method

As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed.

Modules relying on it can instead use the `create_login_token` method.
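
For module authors the change is mechanical; a rough sketch follows. The method name comes from this notice, but the exact signature is an assumption here -- check the module API documentation for your Synapse version:

```python
# Hypothetical module code: `module_api` is the ModuleApi object handed to the
# module at startup. The argument list of create_login_token is assumed, not
# taken from this notice.
async def short_term_token_for(module_api, user_id: str) -> str:
    # Previously: module_api.generate_short_term_login_token(user_id)
    return await module_api.create_login_token(user_id)
```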


## Changes to the events received by application services (interest)

To align with spec (changed in
[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now
only considers local users to be interesting. In other words, the `users` namespace
regex is only applied against local users of the homeserver.

Please note, this probably doesn't affect the expected behavior of your application
service, since an interesting local user in a room still means all messages in the room
(from local or remote users) will still be considered interesting. And matching a room
with the `rooms` or `aliases` namespace regex will still consider all events sent in the
room to be interesting to the application service.

If one of your application service's `users` regexes was intended to match a remote user,
this will no longer match as you expect. The behavioral mismatch between matching all
local users and some remote users is why the spec was changed/clarified and this
caveat is no longer supported.
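
As an illustration, consider a registration file like the sketch below (the namespace values are hypothetical); after this change, the `users` regex only ever matches users local to your homeserver:

```yaml
# Hypothetical appservice registration snippet.
namespaces:
  users:
    - exclusive: true
      regex: "@_bridge_.*:example.org"  # now matched against local users only
  rooms: []
  aliases: []
```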


## Legacy Prometheus metric names are now disabled by default

Synapse v1.71.0 disables legacy Prometheus metric names by default.
For administrators that still rely on them and have not yet had a chance to update their
uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
the configuration to re-enable them temporarily.
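
That is, as a stopgap (this is the exact option named above, shown in `homeserver.yaml` form):

```yaml
# Temporary opt-out; removed entirely in Synapse v1.73.0.
enable_legacy_metrics: true
```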

Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
it will no longer be possible to re-enable them.

If you do not use metrics or you have already updated your Grafana dashboard(s),
Prometheus console(s) and alerting rule(s), there is no action needed.

See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).


# Upgrading to v1.69.0

## Changes to the receipts replication streams

@@ -73,12 +73,12 @@ When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMI

Synapse records several different prometheus metrics for MAU.

`synapse_admin_mau:current` records the current MAU figure for native (non-application-service) users.
`synapse_admin_mau_current` records the current MAU figure for native (non-application-service) users.

`synapse_admin_mau:max` records the maximum MAU as dictated by the `max_mau_value` config value.
`synapse_admin_mau_max` records the maximum MAU as dictated by the `max_mau_value` config value.

`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native`.

`synapse_admin_mau:registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
registered accounts on the homeserver.

@@ -100,7 +100,7 @@ modules:
    config: {}
```
---
## Server ##
## Server

Define your homeserver name and other base options.

@@ -160,7 +160,7 @@ including _matrix/...). This is the same URL a user might enter into the
'Custom Homeserver URL' field on their client. If you use Synapse with a
reverse proxy, this should be the URL to reach Synapse via the proxy.
Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
'listeners' below).
['listeners'](#listeners) below).

Defaults to `https://<server_name>/`.

@@ -571,7 +571,7 @@ Example configuration:
delete_stale_devices_after: 1y
```

## Homeserver blocking ##
## Homeserver blocking
Useful options for Synapse admins.

---
@@ -923,7 +923,7 @@ retention:
    interval: 1d
```
---
## TLS ##
## TLS

Options related to TLS.

@@ -1013,7 +1013,7 @@ federation_custom_ca_list:
  - myCA3.pem
```
---
## Federation ##
## Federation

Options related to federation.

@@ -1072,7 +1072,7 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
## Caching ##
## Caching

Options related to caching.

@@ -1186,7 +1186,7 @@ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using
`systemctl reload matrix-synapse`.

---
## Database ##
## Database
Config options related to database settings.

---
@@ -1333,20 +1333,21 @@ databases:
      cp_max: 10
```
---
## Logging ##
## Logging
Config options related to logging.

---
### `log_config`

This option specifies a yaml python logging config file as described [here](https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema).
This option specifies a yaml python logging config file as described
[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).

Example configuration:
```yaml
log_config: "CONFDIR/SERVERNAME.log.config"
```
---
## Ratelimiting ##
## Ratelimiting
Options related to ratelimiting in Synapse.

Each ratelimiting configuration is made of two parameters:
@@ -1577,7 +1578,7 @@ Example configuration:
federation_rr_transactions_per_room_per_second: 40
```
---
## Media Store ##
## Media Store
Config options related to Synapse's media store.

---
@@ -1767,7 +1768,7 @@ url_preview_ip_range_blacklist:
  - 'ff00::/8'
  - 'fec0::/10'
```
----
---
### `url_preview_ip_range_whitelist`

This option sets a list of IP address CIDR ranges that the URL preview spider is allowed
@@ -1861,7 +1862,7 @@ Example configuration:
  - 'fr;q=0.8'
  - '*;q=0.7'
```
----
---
### `oembed`

oEmbed allows for easier embedding content from a website. It can be
@@ -1878,7 +1879,7 @@ oembed:
  - oembed/my_providers.json
```
---
## Captcha ##
## Captcha

See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.

@@ -1927,7 +1928,7 @@ Example configuration:
recaptcha_siteverify_api: "https://my.recaptcha.site"
```
---
## TURN ##
## TURN
Options related to adding a TURN server to Synapse.

---
@@ -1948,7 +1949,7 @@ Example configuration:
```yaml
turn_shared_secret: "YOUR_SHARED_SECRET"
```
----
---
### `turn_username` and `turn_password`

The username and password if the TURN server needs them and does not use a token.
@@ -2367,7 +2368,7 @@ Example configuration:
```yaml
session_lifetime: 24h
```
----
---
### `refresh_access_token_lifetime`

Time that an access token remains valid for, if the session is using refresh tokens.
@@ -2423,7 +2424,7 @@ nonrefreshable_access_token_lifetime: 24h
```

---
## Metrics ###
## Metrics
Config options related to metrics.

---
@@ -2441,8 +2442,8 @@ enable_metrics: true

Set to `true` to publish both legacy and non-legacy Prometheus metric names,
or to `false` to only publish non-legacy Prometheus metric names.
Defaults to `true`. Has no effect if `enable_metrics` is `false`.
**In Synapse v1.71.0, this will default to `false` before being removed in Synapse v1.73.0.**
Defaults to `false`. Has no effect if `enable_metrics` is `false`.
**In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**

Legacy metric names include:
- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
@@ -2520,7 +2521,7 @@ Example configuration:
report_stats_endpoint: https://example.com/report-usage-stats/push
```
---
## API Configuration ##
## API Configuration
Config settings related to the client/server API

---
@@ -2620,7 +2621,7 @@ Example configuration:
form_secret: <PRIVATE STRING>
```
---
## Signing Keys ##
## Signing Keys
Config options relating to signing keys

---
@@ -2681,6 +2682,12 @@ is still supported for backwards-compatibility, but it is deprecated.
warning on start-up. To suppress this warning, set
`suppress_key_server_warning` to true.

If the use of a trusted key server has to be deactivated, e.g. in a private
federation or for privacy reasons, this can be realised by setting
an empty array (`trusted_key_servers: []`). Then Synapse will request the keys
directly from the server that owns the keys. If Synapse does not get the keys
directly from that server, the events of that server will be rejected.
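
In config form, that deactivation is simply the empty-array setting named above:

```yaml
# Do not use any trusted key server; fetch keys directly from origin servers.
trusted_key_servers: []
```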

Options for each entry in the list include:
* `server_name`: the name of the server. Required.
* `verify_keys`: an optional map from key id to base64-encoded public key.
@@ -2729,7 +2736,7 @@ Example configuration:
key_server_signing_keys_path: "key_server_signing_keys.key"
```
---
## Single sign-on integration ##
## Single sign-on integration

The following settings can be used to make Synapse use a single sign-on
provider for authentication, instead of its internal password database.
@@ -3015,6 +3022,15 @@ Options for each entry include:
  which is set to the claims returned by the UserInfo Endpoint and/or
  in the ID Token.

* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
  Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
  Defaults to `false`.

* `backchannel_logout_ignore_sub`: by default, the OIDC Back-Channel Logout feature checks that the
  `sub` claim matches the subject claim received during login. This check can be disabled by setting
  this to `true`. Defaults to `false`.

  You might want to disable this if the `subject_claim` returned by the mapping provider is not `sub`.

It is possible to configure Synapse to only allow logins if certain attributes
match particular values in the OIDC userinfo. The requirements can be listed under
@@ -3349,7 +3365,7 @@ email:
    email_validation: "[%(server_name)s] Validate your email"
```
---
## Push ##
## Push
Configuration settings related to push notifications

---
@@ -3382,7 +3398,7 @@ push:
  group_unread_count_by_room: false
```
---
## Rooms ##
## Rooms
Config options relating to rooms.

---
@@ -3628,7 +3644,7 @@ default_power_level_content_override:
```

---
## Tracing ##
## Tracing
Configuration options related to tracing support.

---
@@ -3674,14 +3690,71 @@ tracing:
  #collector_endpoint: "http://localhost:14268/api/traces?format=jaeger.thrift"
```
---
## Workers ##
Configuration options related to workers.
## Coordinating workers
Configuration options related to workers which belong in the main config file
(usually called `homeserver.yaml`).
A Synapse deployment can scale horizontally by running multiple Synapse processes
called _workers_. Incoming requests are distributed between workers to handle higher
loads. Some workers are privileged and can accept requests from other workers.

As a result, the worker configuration is divided into two parts.

1. The first part (in this section of the manual) defines which shardable tasks
   are delegated to privileged workers. This allows unprivileged workers to
   request that a privileged worker act on their behalf.
1. [The second part](#individual-worker-configuration)
   controls the behaviour of individual workers in isolation.

For guidance on setting up workers, see the [worker documentation](../../workers.md).

---
### `worker_replication_secret`

A shared secret used by the replication APIs on the main process to authenticate
HTTP requests from workers.

By default this value is omitted (equivalently `null`), which means that
traffic between the workers and the main process is not authenticated.

Example configuration:
```yaml
worker_replication_secret: "secret_secret"
```
|
||||
---
|
||||
### `start_pushers`

Controls sending of push notifications on the main process. Set to `false`
if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.

Example configuration:
```yaml
start_pushers: false
```
---
### `pusher_instances`

It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher),
in which case the work is balanced across them. Use this setting to list the pushers by
[`worker_name`](#worker_name). Ensure the main process and all pusher workers are
restarted after changing this option.

If no pusher workers (or only one) are configured, this setting is not necessary.
The main process will send out push notifications by default if you do not disable
it by setting [`start_pushers: false`](#start_pushers).
Example configuration:
```yaml
start_pushers: false
pusher_instances:
  - pusher_worker1
  - pusher_worker2
```
---
### `send_federation`

Controls sending of outbound federation transactions on the main process.
Set to false if using a federation sender worker. Defaults to true.
Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
Defaults to `true`.

Example configuration:
```yaml
@@ -3690,8 +3763,9 @@ send_federation: false
---
### `federation_sender_instances`

It is possible to run multiple federation sender workers, in which case the
work is balanced across them. Use this setting to list the senders.
It is possible to run multiple
[federation sender workers](../../workers.md#synapseappfederation_sender), in which
case the work is balanced across them. Use this setting to list the senders.

This configuration setting must be shared between all federation sender workers, and if
changed all federation sender workers must be stopped at the same time and then
@@ -3700,14 +3774,19 @@ events may be dropped).

Example configuration:
```yaml
send_federation: false
federation_sender_instances:
  - federation_sender1
```
---
### `instance_map`

When using workers this should be a map from worker name to the
When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
an HTTP replication listener, and that listener should be included in the `instance_map`.
(The main process also needs an HTTP replication listener, but it should not be
listed in the `instance_map`.)

Example configuration:
```yaml
@@ -3720,8 +3799,11 @@ instance_map:
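The example above is cut off by the diff view. As a minimal sketch of what an `instance_map` entry looks like (the worker name, host and port are illustrative, not taken from the diff):

```yaml
instance_map:
  worker1:
    host: localhost
    port: 8034
```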
### `stream_writers`

Experimental: When using workers you can define which workers should
handle event persistence and typing notifications. Any worker
specified here must also be in the `instance_map`.
handle writing to streams such as event persistence and typing notifications.
Any worker specified here must also be in the [`instance_map`](#instance_map).

See the list of available streams in the
[worker documentation](../../workers.md#stream-writers).

Example configuration:
```yaml
@@ -3732,29 +3814,18 @@ stream_writers:
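The fenced example above is likewise truncated by the diff view. A minimal sketch, with illustrative worker names, of what a `stream_writers` block can look like:

```yaml
stream_writers:
  events: persister1
  typing: worker1
```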
---
### `run_background_tasks_on`

The worker that is used to run background tasks (e.g. cleaning up expired
data). If not provided this defaults to the main process.
The [worker](../../workers.md#background-tasks) that is used to run
background tasks (e.g. cleaning up expired data). If not provided this
defaults to the main process.

Example configuration:
```yaml
run_background_tasks_on: worker1
```
---
### `worker_replication_secret`

A shared secret used by the replication APIs to authenticate HTTP requests
from workers.

By default this is unused and traffic is not authenticated.

Example configuration:
```yaml
worker_replication_secret: "secret_secret"
```
---
### `redis`

Configuration for Redis when using workers. This *must* be enabled when
using workers (unless using old style direct TCP configuration).
Configuration for Redis when using workers. This *must* be enabled when using workers.
This setting has the following sub-options:
* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
@@ -3769,7 +3840,143 @@ redis:
  port: 6379
  password: <secret_password>
```
## Background Updates ##
---
## Individual worker configuration
These options configure an individual worker, in its worker configuration file.
They should not be provided when configuring the main process.

Note also the configuration above for
[coordinating a cluster of workers](#coordinating-workers).

For guidance on setting up workers, see the [worker documentation](../../workers.md).
---
### `worker_app`

The type of worker. The currently available worker applications are listed
in [worker documentation](../../workers.md#available-worker-applications).

The most common worker is the
[`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker).

Example configuration:
```yaml
worker_app: synapse.app.generic_worker
```
---
### `worker_name`

A unique name for the worker. The worker needs a name to be addressed in
further parameters, and for identification in log files. We strongly recommend
giving each worker a unique `worker_name`.

Example configuration:
```yaml
worker_name: generic_worker1
```
---
### `worker_replication_host`

The HTTP replication endpoint that the worker should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in its
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_host: 127.0.0.1
```
---
### `worker_replication_http_port`

The HTTP replication port that the worker should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in its
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_http_port: 9093
```
---
### `worker_replication_http_tls`

Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
has the `replication` resource enabled.

**Please note:** by default, it is not safe to expose replication ports to the
public Internet, even with TLS enabled.
See [`worker_replication_secret`](#worker_replication_secret).

Defaults to `false`.

*Added in Synapse 1.72.0.*

Example configuration:
```yaml
worker_replication_http_tls: true
```
---
### `worker_listeners`

A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.

Workers declared in [`stream_writers`](#stream_writers) will need to include a
`replication` listener here, in order to accept internal HTTP requests from
other workers.

Example configuration:
```yaml
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
```
---
### `worker_daemonize`

Specifies whether the worker should be started as a daemon process.
If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
must be omitted or set to `false`.

Defaults to `false`.

Example configuration:
```yaml
worker_daemonize: true
```
---
### `worker_pid_file`

When running a worker as a daemon, we need a place to store the
[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
This option defines the location of that "pid file".

This option is required if `worker_daemonize` is `true` and ignored
otherwise. It has no default.

See also the [`pid_file`](#pid_file) option for the main Synapse process.

Example configuration:
```yaml
worker_pid_file: DATADIR/generic_worker1.pid
```
---
### `worker_log_config`

This option specifies a YAML Python logging config file as described
[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
See also the [`log_config`](#log_config) option for the main Synapse process.

Example configuration:
```yaml
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```
---
## Background Updates
Configuration settings related to background updates.

---
+75
-38
@@ -88,10 +88,12 @@ shared configuration file.
### Shared configuration

Normally, only a couple of changes are needed to make an existing configuration
file suitable for use with workers. First, you need to enable an "HTTP replication
listener" for the main process; and secondly, you need to enable redis-based
replication. Optionally, a shared secret can be used to authenticate HTTP
traffic between workers. For example:
file suitable for use with workers. First, you need to enable an
["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
for the main process; and secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis).
Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
can be used to authenticate HTTP traffic between workers. For example:
```yaml
# extend the existing `listeners` section. This defines the ports that the
@@ -111,27 +113,30 @@ redis:
  enabled: true
```
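Since the hunk above elides most of the example, here is a minimal sketch of the complete shared configuration it describes (the port and secret are illustrative):

```yaml
# extend the existing `listeners` section to add an HTTP replication listener
listeners:
  - port: 9093
    bind_address: '127.0.0.1'
    type: http
    resources:
      - names: [replication]

worker_replication_secret: "secret_secret"

redis:
  enabled: true
```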
See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option.
See the [configuration manual](usage/configuration/config_documentation.md)
for the full documentation of each option.

Under **no circumstances** should the replication listener be exposed to the
public internet; replication traffic is:

* always unencrypted
* unauthenticated, unless `worker_replication_secret` is configured
* unauthenticated, unless [`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret)
  is configured

### Worker configuration

In the config file for each worker, you must specify:
* The type of worker (`worker_app`). The currently available worker applications are listed below.
* A unique name for the worker (`worker_name`).
* The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
  The currently available worker applications are listed [below](#available-worker-applications).
* A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
  (`worker_replication_host` and `worker_replication_http_port`)
* If handling HTTP requests, a `worker_listeners` option with an `http`
  listener, in the same way as the [`listeners`](usage/configuration/config_documentation.md#listeners)
  option in the shared config.
* If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`).
  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
  with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
For example:
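The example worker config file itself is elided by the diff view; a minimal sketch of one, with illustrative values, based on the options listed above:

```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

# The replication listener on the main Synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```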
@@ -146,7 +151,6 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.
Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).


### Running Synapse with workers

Finally, you need to start your worker processes. This can be done with either
@@ -217,7 +221,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/search$

# Encryption requests
# Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
^/_matrix/client/(r0|v3|unstable)/keys/query$
^/_matrix/client/(r0|v3|unstable)/keys/changes$
^/_matrix/client/(r0|v3|unstable)/keys/claim$
@@ -288,7 +291,8 @@ For multiple workers not handling the SSO endpoints properly, see
[#9427](https://github.com/matrix-org/synapse/issues/9427).

Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners)
with `client` and `federation` `resources` must be configured in the `worker_listeners`
with `client` and `federation` `resources` must be configured in the
[`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners)
option in the worker config.

#### Load balancing
@@ -300,9 +304,11 @@ may wish to run multiple groups of workers handling different endpoints so that
load balancing can be done in different ways.

For `/sync` and `/initialSync` requests it will be more efficient if all
requests from a particular user are routed to a single instance. Extracting a
user ID from the access token or `Authorization` header is currently left as an
exercise for the reader. Admins may additionally wish to separate out `/sync`
requests from a particular user are routed to a single instance. This can
be done e.g. in nginx via IP `hash $http_x_forwarded_for;` or via
`hash $http_authorization consistent;`, which contains the user's access token.

Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
`/initialSync`), as requests that don't are known as an "initial sync", which happens
when a user logs in on a new device and can be *very* resource intensive, so
@@ -331,9 +337,10 @@ of the main process to a particular worker.

To enable this, the worker must have a
[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
can handle multiple streams, but unless otherwise documented, each stream can only
have a single writer.
have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
config. The same worker can handle multiple streams, but unless otherwise documented,
each stream can only have a single writer.

For example, to move event persistence off to a dedicated worker, the shared
configuration would include:
@@ -360,9 +367,26 @@ streams and the endpoints associated with them:

##### The `events` stream

The `events` stream experimentally supports having multiple writers, where work
is sharded between them by room ID. Note that you *must* restart all worker
instances when adding or removing event persisters. An example `stream_writers`
The `events` stream experimentally supports having multiple writer workers, where load
is sharded between them by room ID. Each writer is called an _event persister_. They are
responsible for
- receiving new events,
- linking them to those already in the room [DAG](development/room-dag-concepts.md),
- persisting them to the DB, and finally
- updating the events stream.

Because load is sharded in this way, you *must* restart all worker instances when
adding or removing event persisters.

An `event_persister` should not be mistaken for an `event_creator`.
An `event_creator` listens for requests from clients to create new events and does
so. It will then pass those events over HTTP replication to any configured event
persisters (or the main process if none are configured).

Note that `event_creator`s and `event_persister`s are implemented using the same
[`synapse.app.generic_worker`](#synapse.app.generic_worker).

An example [`stream_writers`](usage/configuration/config_documentation.md#stream_writers)
configuration with multiple writers:

```yaml
@@ -416,16 +440,18 @@ worker. Background tasks are run periodically or started via replication. Exactl
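The multi-writer example is cut off above; a minimal sketch with two illustrative event persisters:

```yaml
stream_writers:
  events:
    - event_persister1
    - event_persister2
```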
which tasks are configured to run depends on your Synapse configuration (e.g. if
stats is enabled). This worker doesn't handle any REST endpoints itself.

To enable this, the worker must have a `worker_name` and can be configured to run
background tasks. For example, to move background tasks to a dedicated worker,
the shared configuration would include:
To enable this, the worker must have a unique
[`worker_name`](usage/configuration/config_documentation.md#worker_name)
and can be configured to run background tasks. For example, to move background tasks
to a dedicated worker, the shared configuration would include:

```yaml
run_background_tasks_on: background_worker
```

You might also wish to investigate the `update_user_directory_from_worker` and
`media_instance_running_background_jobs` settings.
You might also wish to investigate the
[`update_user_directory_from_worker`](#updating-the-user-directory) and
[`media_instance_running_background_jobs`](#synapseappmedia_repository) settings.

An example for a dedicated background worker instance:

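That instance's own config file is elided by the diff view; a minimal sketch (values illustrative):

```yaml
worker_app: synapse.app.generic_worker
worker_name: background_worker

# The replication listener on the main Synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
```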
@@ -478,13 +504,17 @@ worker application type.
### `synapse.app.pusher`

Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set `start_pushers: False` in the
REST endpoints itself, but you should set
[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
shared configuration file to stop the main synapse sending push notifications.

To run multiple instances at once the `pusher_instances` option should list all
pusher instances by their worker name, e.g.:
To run multiple instances at once the
[`pusher_instances`](usage/configuration/config_documentation.md#pusher_instances)
option should list all pusher instances by their
[`worker_name`](usage/configuration/config_documentation.md#worker_name), e.g.:

```yaml
start_pushers: false
pusher_instances:
  - pusher_worker1
  - pusher_worker2
@@ -512,15 +542,20 @@ Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.federation_sender`

Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set `send_federation: False` in the
shared configuration file to stop the main synapse sending this traffic.
REST endpoints itself, but you should set
[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
in the shared configuration file to stop the main synapse sending this traffic.

If running multiple federation senders then you must list each
instance in the `federation_sender_instances` option by their `worker_name`.
instance in the
[`federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances)
option by their
[`worker_name`](usage/configuration/config_documentation.md#worker_name).
All instances must be stopped and started when adding or removing instances.
For example:

```yaml
send_federation: false
federation_sender_instances:
  - federation_sender1
  - federation_sender2
@@ -547,7 +582,9 @@ Handles the media repository. It can handle all endpoints starting with:
^/_synapse/admin/v1/quarantine_media/.*$
^/_synapse/admin/v1/users/.*/media$

You should also set `enable_media_repo: False` in the shared configuration
You should also set
[`enable_media_repo: False`](usage/configuration/config_documentation.md#enable_media_repo)
in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository. Note that doing so will prevent the main process from being
able to handle the above endpoints.
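A minimal sketch of the two shared-config settings this section refers to (the worker name is illustrative):

```yaml
enable_media_repo: false
media_instance_running_background_jobs: media_worker
```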
@@ -11,6 +11,7 @@ warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
strict_equality = True

files =
  docker/,
@@ -56,7 +57,6 @@ exclude = (?x)
  |tests/rest/media/v1/test_media_storage.py
  |tests/server.py
  |tests/server_notices/test_resource_limits_server_notices.py
  |tests/test_metrics.py
  |tests/test_state.py
  |tests/test_terms_auth.py
  |tests/util/caches/test_cached_call.py
@@ -106,6 +106,9 @@ disallow_untyped_defs = False
[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True

[mypy-tests.metrics.test_background_process_metrics]
disallow_untyped_defs = True

[mypy-tests.push.test_bulk_push_rule_evaluator]
disallow_untyped_defs = True

@@ -115,6 +118,9 @@ disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True

[mypy-tests.storage.test_id_generators]
disallow_untyped_defs = True

[mypy-tests.storage.test_profile]
disallow_untyped_defs = True

Generated
+334
-284
File diff suppressed because it is too large
+12
-3
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.70.0rc1"
version = "1.72.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -192,7 +192,7 @@ psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'Py
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
authlib = { version = ">=0.14.0", optional = true }
authlib = { version = ">=0.15.1", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
@@ -318,7 +318,7 @@ build-backend = "poetry.core.masonry.api"

[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
skip = "cp36* *-musllinux_i686"
skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"

# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
@@ -330,3 +330,12 @@ environment= { PATH = "$PATH:$HOME/.cargo/bin" }
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"


[tool.cibuildwheel.linux]
# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"

[tool.cibuildwheel.macos]
# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"
@@ -25,6 +25,7 @@ use crate::push::Action;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
use crate::push::RelatedEventMatchCondition;
use crate::push::SetTweak;
use crate::push::TweakValue;

@@ -114,6 +115,22 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch(
            RelatedEventMatchCondition {
                key: Some(Cow::Borrowed("sender")),
                pattern: None,
                pattern_type: Some(Cow::Borrowed("user_id")),
                rel_type: Cow::Borrowed("m.in_reply_to"),
                include_fallbacks: None,
            },
        ))]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
        priority_class: 5,
@@ -23,6 +23,7 @@ use regex::Regex;
use super::{
    utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
    Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition,
    RelatedEventMatchCondition,
};

lazy_static! {
@@ -49,6 +50,13 @@ pub struct PushRuleEvaluator {
    /// The power level of the sender of the event, or None if event is an
    /// outlier.
    sender_power_level: Option<i64>,

    /// The related events, indexed by relation type. Flattened in the same manner as
    /// `flattened_keys`.
    related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,

    /// If msc3664, push rules for related events, is enabled.
    related_event_match_enabled: bool,
}

#[pymethods]
@@ -60,6 +68,8 @@ impl PushRuleEvaluator {
        room_member_count: u64,
        sender_power_level: Option<i64>,
        notification_power_levels: BTreeMap<String, i64>,
        related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
        related_event_match_enabled: bool,
    ) -> Result<Self, Error> {
        let body = flattened_keys
            .get("content.body")
@@ -72,6 +82,8 @@ impl PushRuleEvaluator {
            room_member_count,
            notification_power_levels,
            sender_power_level,
            related_events_flattened,
            related_event_match_enabled,
        })
    }

@@ -156,6 +168,9 @@ impl PushRuleEvaluator {
            KnownCondition::EventMatch(event_match) => {
                self.match_event_match(event_match, user_id)?
            }
            KnownCondition::RelatedEventMatch(event_match) => {
                self.match_related_event_match(event_match, user_id)?
            }
            KnownCondition::ContainsDisplayName => {
                if let Some(dn) = display_name {
                    if !dn.is_empty() {
@@ -239,6 +254,79 @@ impl PushRuleEvaluator {
        compiled_pattern.is_match(haystack)
    }

    /// Evaluates a `related_event_match` condition. (MSC3664)
    fn match_related_event_match(
        &self,
        event_match: &RelatedEventMatchCondition,
        user_id: Option<&str>,
    ) -> Result<bool, Error> {
        // First check if related event matching is enabled...
        if !self.related_event_match_enabled {
            return Ok(false);
        }

        // Get the related event; fail if there is none.
        let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) {
            event
        } else {
            return Ok(false);
        };

        // If we are not matching fallbacks, don't match if our special key indicating this is a
        // fallback relation is not present.
        if !event_match.include_fallbacks.unwrap_or(false)
            && event.contains_key("im.vector.is_falling_back")
        {
            return Ok(false);
        }

        // If we have no key, accept the event as matching (it existed), without matching any
        // fields.
        let key = if let Some(key) = &event_match.key {
            key
        } else {
            return Ok(true);
        };

        let pattern = if let Some(pattern) = &event_match.pattern {
            pattern
        } else if let Some(pattern_type) = &event_match.pattern_type {
            // The `pattern_type` can either be "user_id" or "user_localpart";
            // either way, if we don't have a `user_id` then the condition can't
            // match.
            let user_id = if let Some(user_id) = user_id {
                user_id
            } else {
                return Ok(false);
            };

            match &**pattern_type {
                "user_id" => user_id,
                "user_localpart" => get_localpart_from_id(user_id)?,
                _ => return Ok(false),
            }
        } else {
            return Ok(false);
        };

        let haystack = if let Some(haystack) = event.get(&**key) {
            haystack
        } else {
            return Ok(false);
        };

        // For content.body we match against "words", but for everything
        // else we match against the entire value.
        let match_type = if key == "content.body" {
            GlobMatchType::Word
        } else {
            GlobMatchType::Whole
        };

        let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
        compiled_pattern.is_match(haystack)
    }

    /// Match the member count against an 'is' condition.
    /// The `is` condition can be things like '>2', '==3' or even just '4'.
    fn match_member_count(&self, is: &str) -> Result<bool, Error> {
@@ -267,8 +355,15 @@ impl PushRuleEvaluator {
fn push_rule_evaluator() {
    let mut flattened_keys = BTreeMap::new();
    flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
    let evaluator =
        PushRuleEvaluator::py_new(flattened_keys, 10, Some(0), BTreeMap::new()).unwrap();
    let evaluator = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        Some(0),
        BTreeMap::new(),
        BTreeMap::new(),
        true,
    )
    .unwrap();

    let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
    assert_eq!(result.len(), 3);

+53
-8
@@ -267,6 +267,8 @@ pub enum Condition {
#[serde(tag = "kind")]
pub enum KnownCondition {
    EventMatch(EventMatchCondition),
    #[serde(rename = "im.nheko.msc3664.related_event_match")]
    RelatedEventMatch(RelatedEventMatchCondition),
    ContainsDisplayName,
    RoomMemberCount {
        #[serde(skip_serializing_if = "Option::is_none")]
@@ -299,6 +301,20 @@ pub struct EventMatchCondition {
    pub pattern_type: Option<Cow<'static, str>>,
}

/// The body of a [`Condition::RelatedEventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RelatedEventMatchCondition {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub key: Option<Cow<'static, str>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pattern: Option<Cow<'static, str>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pattern_type: Option<Cow<'static, str>>,
    pub rel_type: Cow<'static, str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_fallbacks: Option<bool>,
}

/// The collection of push rules for a user.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
@@ -391,15 +407,21 @@ impl PushRules {
pub struct FilteredPushRules {
    push_rules: PushRules,
    enabled_map: BTreeMap<String, bool>,
    msc3664_enabled: bool,
}

#[pymethods]
impl FilteredPushRules {
    #[new]
    pub fn py_new(push_rules: PushRules, enabled_map: BTreeMap<String, bool>) -> Self {
    pub fn py_new(
        push_rules: PushRules,
        enabled_map: BTreeMap<String, bool>,
        msc3664_enabled: bool,
    ) -> Self {
        Self {
            push_rules,
            enabled_map,
            msc3664_enabled,
        }
    }

@@ -414,13 +436,25 @@ impl FilteredPushRules {
    /// Iterates over all the rules and their enabled state, including base
    /// rules, in the order they should be executed in.
    fn iter(&self) -> impl Iterator<Item = (&PushRule, bool)> {
        self.push_rules.iter().map(|r| {
            let enabled = *self
                .enabled_map
                .get(&*r.rule_id)
                .unwrap_or(&r.default_enabled);
            (r, enabled)
        })
        self.push_rules
            .iter()
            .filter(|rule| {
                // Ignore disabled experimental push rules
                if !self.msc3664_enabled
                    && rule.rule_id == "global/override/.im.nheko.msc3664.reply"
                {
                    return false;
                }

                true
            })
            .map(|r| {
                let enabled = *self
                    .enabled_map
                    .get(&*r.rule_id)
                    .unwrap_or(&r.default_enabled);
                (r, enabled)
            })
    }
}

@@ -446,6 +480,17 @@ fn test_deserialize_condition() {
    let _: Condition = serde_json::from_str(json).unwrap();
}

#[test]
fn test_deserialize_unstable_msc3664_condition() {
    let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern":"coffee","rel_type":"m.in_reply_to"}"#;

    let condition: Condition = serde_json::from_str(json).unwrap();
    assert!(matches!(
        condition,
        Condition::Known(KnownCondition::RelatedEventMatch(_))
    ));
}

#[test]
fn test_deserialize_custom_condition() {
    let json = r#"{"kind":"custom_tag"}"#;

@@ -27,6 +27,7 @@ DISTS = (
    "debian:sid",
    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04)
    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20)
)

DESC = """\

@@ -126,7 +126,7 @@ export COMPLEMENT_BASE_IMAGE=complement-synapse

extra_test_args=()

test_tags="synapse_blacklist,msc3787"
test_tags="synapse_blacklist,msc3787,msc3874"

# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -139,6 +139,9 @@ if [[ -n "$WORKERS" ]]; then
  # Use workers.
  export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true

  # Pass through the workers defined. If none, it will be an empty string.
  export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"

  # Workers can only use Postgres as a database.
  export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres

@@ -219,9 +219,7 @@ def _prepare() -> None:
    update_branch(repo)

    # Create the new release branch
    # Type ignore will no longer be needed after GitPython 3.1.28.
    # See https://github.com/gitpython-developers/GitPython/pull/1419
    repo.create_head(release_branch_name, commit=base_branch)  # type: ignore[arg-type]
    repo.create_head(release_branch_name, commit=base_branch)

    # Special-case SyTest: we don't actually prepare any files so we may
    # as well push it now (and only when we create a release branch;
@@ -1,4 +1,4 @@
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union

from synapse.types import JsonDict

@@ -25,7 +25,9 @@ class PushRules:
    def rules(self) -> Collection[PushRule]: ...

class FilteredPushRules:
    def __init__(self, push_rules: PushRules, enabled_map: Dict[str, bool]): ...
    def __init__(
        self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
    ): ...
    def rules(self) -> Collection[Tuple[PushRule, bool]]: ...

def get_base_rule_ids() -> Collection[str]: ...
@@ -37,10 +39,12 @@ class PushRuleEvaluator:
        room_member_count: int,
        sender_power_level: Optional[int],
        notification_power_levels: Mapping[str, int],
        related_events_flattened: Mapping[str, Mapping[str, str]],
        related_event_match_enabled: bool,
    ): ...
    def run(
        self,
        push_rules: FilteredPushRules,
        user_id: Optional[str],
        display_name: Optional[str],
    ) -> Collection[dict]: ...
    ) -> Collection[Union[Mapping, str]]: ...

@@ -125,6 +125,8 @@ class EventTypes:
    MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
    MSC2716_MARKER: Final = "org.matrix.msc2716.marker"

    Reaction: Final = "m.reaction"


class ToDeviceEventTypes:
    RoomKeyRequest: Final = "m.room_key_request"

@@ -713,7 +713,7 @@ class HttpResponseException(CodeMessageException):
        set to the reason code from the HTTP response.

        Returns:
            SynapseError:
            The error converted to a SynapseError.
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text

@@ -43,7 +43,7 @@ if TYPE_CHECKING:
    from synapse.server import HomeServer

FILTER_SCHEMA = {
    "additionalProperties": False,
    "additionalProperties": True,  # Allow new fields for forward compatibility
    "type": "object",
    "properties": {
        "limit": {"type": "number"},
@@ -63,7 +63,7 @@ FILTER_SCHEMA = {
}

ROOM_FILTER_SCHEMA = {
    "additionalProperties": False,
    "additionalProperties": True,  # Allow new fields for forward compatibility
    "type": "object",
    "properties": {
        "not_rooms": {"$ref": "#/definitions/room_id_array"},
@@ -77,7 +77,7 @@ ROOM_FILTER_SCHEMA = {
}

ROOM_EVENT_FILTER_SCHEMA = {
    "additionalProperties": False,
    "additionalProperties": True,  # Allow new fields for forward compatibility
    "type": "object",
    "properties": {
        "limit": {"type": "number"},
@@ -143,7 +143,7 @@ USER_FILTER_SCHEMA = {
            },
        },
    },
    "additionalProperties": False,
    "additionalProperties": True,  # Allow new fields for forward compatibility
}

@@ -343,6 +343,7 @@ class RequestRatelimiter:
        requester: Requester,
        update: bool = True,
        is_admin_redaction: bool = False,
        n_actions: int = 1,
    ) -> None:
        """Ratelimits requests.

@@ -355,6 +356,8 @@ class RequestRatelimiter:
            is_admin_redaction: Whether this is a room admin/moderator
                redacting an event. If so then we may apply different
                ratelimits depending on config.
            n_actions: Multiplier for the number of actions to apply to the
                rate limiter at once.

        Raises:
            LimitExceededError if the request should be ratelimited
@@ -383,7 +386,9 @@ class RequestRatelimiter:
        if is_admin_redaction and self.admin_redaction_ratelimiter:
            # If we have separate config for admin redactions, use a separate
            # ratelimiter so as not to have user_ids clash
            await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
            await self.admin_redaction_ratelimiter.ratelimit(
                requester, update=update, n_actions=n_actions
            )
        else:
            # Override rate and burst count per-user
            await self.request_ratelimiter.ratelimit(
@@ -391,4 +396,5 @@ class RequestRatelimiter:
                rate_hz=messages_per_second,
                burst_count=burst_count,
                update=update,
                n_actions=n_actions,
            )

+53
-2
@@ -47,6 +47,7 @@ from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool
from twisted.web.resource import Resource

import synapse.util.caches
from synapse.api.constants import MAX_PDU_SIZE
@@ -55,12 +56,13 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ManholeConfig
from synapse.config.server import ListenerConfig, ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.tracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
@@ -357,6 +359,55 @@ def listen_tcp(
    return r  # type: ignore[return-value]

def listen_http(
    listener_config: ListenerConfig,
    root_resource: Resource,
    version_string: str,
    max_request_body_size: int,
    context_factory: Optional[IOpenSSLContextFactory],
    reactor: ISynapseReactor = reactor,
) -> List[Port]:
    port = listener_config.port
    bind_addresses = listener_config.bind_addresses
    tls = listener_config.tls

    assert listener_config.http_options is not None

    site_tag = listener_config.http_options.tag
    if site_tag is None:
        site_tag = str(port)

    site = SynapseSite(
        "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
        site_tag,
        listener_config,
        root_resource,
        version_string,
        max_request_body_size=max_request_body_size,
        reactor=reactor,
    )
    if tls:
        # refresh_certificate should have been called before this.
        assert context_factory is not None
        ports = listen_ssl(
            bind_addresses,
            port,
            site,
            context_factory,
            reactor=reactor,
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)
    else:
        ports = listen_tcp(
            bind_addresses,
            port,
            site,
            reactor=reactor,
        )
        logger.info("Synapse now listening on TCP port %d", port)
    return ports

def listen_ssl(
    bind_addresses: Collection[str],
    port: int,
@@ -558,7 +609,7 @@ def reload_cache_config(config: HomeServerConfig) -> None:
            logger.warning(f)
    else:
        logger.debug(
            "New cache config. Was:\n %s\nNow:\n",
            "New cache config. Was:\n %s\nNow:\n %s",
            previous_cache_config.__dict__,
            config.caches.__dict__,
        )

@@ -28,10 +28,6 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.events import EventBase
from synapse.handlers.admin import ExfiltrationWriter
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
@@ -40,10 +36,24 @@ from synapse.storage.databases.main.appservice import (
    ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import (
    EventPushActionsWorkerStore,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext
@@ -52,17 +62,25 @@ logger = logging.getLogger("synapse.app.admin_cmd")

class AdminCmdSlavedStore(
    SlavedFilteringStore,
    SlavedPushRuleStore,
    SlavedEventStore,
    SlavedDeviceStore,
    FilteringWorkerStore,
    DeviceWorkerStore,
    TagsWorkerStore,
    DeviceInboxWorkerStore,
    AccountDataWorkerStore,
    PushRulesWorkerStore,
    ApplicationServiceTransactionWorkerStore,
    ApplicationServiceWorkerStore,
    RegistrationWorkerStore,
    RoomMemberWorkerStore,
    RelationsWorkerStore,
    EventFederationWorkerStore,
    EventPushActionsWorkerStore,
    StateGroupWorkerStore,
    SignatureWorkerStore,
    UserErasureWorkerStore,
    ReceiptsWorkerStore,
    StreamWorkerStore,
    EventsWorkerStore,
    RegistrationWorkerStore,
    RoomWorkerStore,
):
    def __init__(

@@ -55,13 +55,13 @@ import os
import signal
import sys
from types import FrameType
from typing import Any, Callable, List, Optional
from typing import Any, Callable, Dict, List, Optional

from twisted.internet.main import installReactor

# a list of the original signal handlers, before we installed our custom ones.
# We restore these in our child processes.
_original_signal_handlers: dict[int, Any] = {}
_original_signal_handlers: Dict[int, Any] = {}


class ProxiedReactor:

+40
-133
@@ -14,14 +14,12 @@
# limitations under the License.
import logging
import sys
from typing import Dict, List, Optional, Tuple
from typing import Dict, List

from twisted.internet import address
from twisted.web.resource import Resource

import synapse
import synapse.events
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import (
    CLIENT_API_PREFIX,
    FEDERATION_PREFIX,
@@ -43,17 +41,9 @@ from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest, SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client import (
    account_data,
@@ -76,12 +66,12 @@ from synapse.rest.client import (
    versions,
    voip,
)
from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import (
    KeyChangesServlet,
    KeyQueryServlet,
    KeyUploadServlet,
    OneTimeKeyServlet,
)
from synapse.rest.client.register import (
@@ -101,8 +91,16 @@ from synapse.storage.databases.main.appservice import (
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.directory import DirectoryWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import (
    EventPushActionsWorkerStore,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.metrics import ServerMetricsStore
@@ -111,118 +109,31 @@ from synapse.storage.databases.main.monthly_active_users import (
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.room_batch import RoomBatchStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.types import JsonDict
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.util import SYNAPSE_VERSION
from synapse.util.httpresourcetree import create_resource_tree

logger = logging.getLogger("synapse.app.generic_worker")

class KeyUploadServlet(RestServlet):
    """An implementation of the `KeyUploadServlet` that responds to read-only
    requests, but otherwise proxies through to the master instance.
    """

    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs: HomeServer):
        """
        Args:
            hs: server
        """
        super().__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastores().main
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker.worker_main_http_uri

    async def on_POST(
        self, request: SynapseRequest, device_id: Optional[str]
    ) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # Passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if requester.device_id is not None and device_id != requester.device_id:
                logger.warning(
                    "Client uploading keys for a different device "
                    "(logged in as %s, uploading for %s)",
                    requester.device_id,
                    device_id,
                )
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400, "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something; proxy to the main synapse.

            # Proxy headers from the original request, such as the auth headers
            # (in case the access token is there) and the original IP /
            # User-Agent of the request.
            headers = {
                header: request.requestHeaders.getRawHeaders(header, [])
                for header in (b"Authorization", b"User-Agent")
            }
            # Add the previous hop to the X-Forwarded-For header.
            x_forwarded_for = request.requestHeaders.getRawHeaders(
                b"X-Forwarded-For", []
            )
            # We use request.client here, since we want the previous hop, not the
            # original client (as returned by request.getClientAddress()).
            if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
                previous_host = request.client.host.encode("ascii")
                # If the header exists, add to the comma-separated list of the first
                # instance of the header. Otherwise, generate a new header.
                if x_forwarded_for:
                    x_forwarded_for = [
                        x_forwarded_for[0] + b", " + previous_host
                    ] + x_forwarded_for[1:]
                else:
                    x_forwarded_for = [previous_host]
                headers[b"X-Forwarded-For"] = x_forwarded_for

            # Replicate the original X-Forwarded-Proto header. Note that
            # XForwardedForRequest overrides isSecure() to give us the original
            # protocol used by the client, as opposed to the protocol used by our
            # upstream proxy - which is what we want here.
            headers[b"X-Forwarded-Proto"] = [
                b"https" if request.isSecure() else b"http"
            ]

            try:
                result = await self.http_client.post_json_get_json(
                    self.main_uri + request.uri.decode("ascii"), body, headers=headers
                )
            except HttpResponseException as e:
                raise e.to_synapse_error() from e
            except RequestSendFailed as e:
                raise SynapseError(502, "Failed to talk to master") from e

            return 200, result
        else:
            # Just interested in counts.
            result = await self.store.count_e2e_one_time_keys(user_id, device_id)
            return 200, {"one_time_key_counts": result}

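A minimal, self-contained sketch of the X-Forwarded-For chaining performed above; the `append_forwarded_for` helper name is ours, not Synapse's:

# A minimal sketch of the X-Forwarded-For chaining performed above; the
# `append_forwarded_for` helper is hypothetical, not part of Synapse.
from typing import List


def append_forwarded_for(existing: List[bytes], previous_hop: bytes) -> List[bytes]:
    """Append the previous hop to the first X-Forwarded-For header value."""
    if not existing:
        return [previous_hop]
    # Append to the comma-separated list in the first header instance and
    # keep any further instances untouched.
    return [existing[0] + b", " + previous_hop] + existing[1:]


assert append_forwarded_for([], b"10.0.0.1") == [b"10.0.0.1"]
assert append_forwarded_for([b"1.2.3.4"], b"10.0.0.1") == [b"1.2.3.4, 10.0.0.1"]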
class GenericWorkerSlavedStore(
    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
    # rather than going via the correct worker.
@@ -232,26 +143,36 @@ class GenericWorkerSlavedStore(
    EndToEndRoomKeyStore,
    PresenceStore,
    DeviceInboxWorkerStore,
    SlavedDeviceStore,
    SlavedPushRuleStore,
    DeviceWorkerStore,
    TagsWorkerStore,
    AccountDataWorkerStore,
    SlavedPusherStore,
    CensorEventsStore,
    ClientIpWorkerStore,
    SlavedEventStore,
    SlavedKeyStore,
    # KeyStore isn't really safe to use from a worker, but for now we do so and hope that
    # the races it creates aren't too bad.
    KeyStore,
    RoomWorkerStore,
    RoomBatchStore,
    DirectoryWorkerStore,
    PushRulesWorkerStore,
    ApplicationServiceTransactionWorkerStore,
    ApplicationServiceWorkerStore,
    ProfileWorkerStore,
    SlavedFilteringStore,
    FilteringWorkerStore,
    MonthlyActiveUsersWorkerStore,
    MediaRepositoryStore,
    ServerMetricsStore,
    PusherWorkerStore,
    RoomMemberWorkerStore,
    RelationsWorkerStore,
    EventFederationWorkerStore,
    EventPushActionsWorkerStore,
    StateGroupWorkerStore,
    SignatureWorkerStore,
    UserErasureWorkerStore,
    ReceiptsWorkerStore,
    StreamWorkerStore,
    EventsWorkerStore,
    RegistrationWorkerStore,
    SearchStore,
    TransactionWorkerStore,
@@ -268,15 +189,9 @@ class GenericWorkerServer(HomeServer):
    DATASTORE_CLASS = GenericWorkerSlavedStore  # type: ignore

    def _listen_http(self, listener_config: ListenerConfig) -> None:
        port = listener_config.port
        bind_addresses = listener_config.bind_addresses

        assert listener_config.http_options is not None

        site_tag = listener_config.http_options.tag
        if site_tag is None:
            site_tag = str(port)

        # We always include a health resource.
        resources: Dict[str, Resource] = {"/health": HealthResource()}

@@ -375,23 +290,15 @@ class GenericWorkerServer(HomeServer):

        root_resource = create_resource_tree(resources, OptionsResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
                max_request_body_size=max_request_body_size(self.config),
                reactor=self.get_reactor(),
            ),
        _base.listen_http(
            listener_config,
            root_resource,
            self.version_string,
            max_request_body_size(self.config),
            self.tls_server_context_factory,
            reactor=self.get_reactor(),
        )

        logger.info("Synapse worker now listening on port %d", port)

    def start_listening(self) -> None:
        for listener in self.config.worker.worker_listeners:
            if listener.type == "http":

@@ -37,8 +37,7 @@ from synapse.api.urls import (
from synapse.app import _base
from synapse.app._base import (
    handle_startup_exception,
    listen_ssl,
    listen_tcp,
    listen_http,
    max_request_body_size,
    redirect_stdio_to_logs,
    register_start,
@@ -53,7 +52,6 @@ from synapse.http.server import (
    RootOptionsRedirectResource,
    StaticResource,
)
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
@@ -83,8 +81,6 @@ class SynapseHomeServer(HomeServer):
        self, config: HomeServerConfig, listener_config: ListenerConfig
    ) -> Iterable[Port]:
        port = listener_config.port
        bind_addresses = listener_config.bind_addresses
        tls = listener_config.tls
        # Must exist since this is an HTTP listener.
        assert listener_config.http_options is not None
        site_tag = listener_config.http_options.tag
@@ -140,37 +136,15 @@ class SynapseHomeServer(HomeServer):
        else:
            root_resource = OptionsResource()

        site = SynapseSite(
            "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
            site_tag,
        ports = listen_http(
            listener_config,
            create_resource_tree(resources, root_resource),
            self.version_string,
            max_request_body_size=max_request_body_size(self.config),
            max_request_body_size(self.config),
            self.tls_server_context_factory,
            reactor=self.get_reactor(),
        )

        if tls:
            # refresh_certificate should have been called before this.
            assert self.tls_server_context_factory is not None
            ports = listen_ssl(
                bind_addresses,
                port,
                site,
                self.tls_server_context_factory,
                reactor=self.get_reactor(),
            )
            logger.info("Synapse now listening on TCP port %d (TLS)", port)

        else:
            ports = listen_tcp(
                bind_addresses,
                port,
                site,
                reactor=self.get_reactor(),
            )
            logger.info("Synapse now listening on TCP port %d", port)

        return ports

    def _configure_named_resource(

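Both call sites above now funnel through a single `listen_http` helper. A hedged, self-contained sketch of the consolidation pattern visible in the call sites; the body below is our illustration, not Synapse's actual implementation:

# A self-contained sketch of the consolidation pattern: one helper decides
# between TLS and plain TCP based on the listener config. Names here
# (`ListenerConfig`, `listen_http`) mirror the diff, but the body is ours.
from dataclasses import dataclass
from typing import Callable, List


@dataclass
class ListenerConfig:
    port: int
    bind_addresses: List[str]
    tls: bool = False


def listen_http(
    config: ListenerConfig,
    listen_tcp: Callable[[List[str], int], object],
    listen_ssl: Callable[[List[str], int], object],
) -> object:
    # A single entry point replaces the per-caller tls/plain branching that
    # the old code duplicated in homeserver.py and generic_worker.py.
    if config.tls:
        return listen_ssl(config.bind_addresses, config.port)
    return listen_tcp(config.bind_addresses, config.port)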
@@ -172,12 +172,24 @@ class ApplicationService:
        Returns:
            True if this service would like to know about this room.
        """
        member_list = await store.get_users_in_room(
        # We can use `get_local_users_in_room(...)` here because an application service
        # can only be interested in local users of the server it's on (ignore any remote
        # users that might match the user namespace regex).
        #
        # In the future, we can consider re-using
        # `store.get_app_service_users_in_room`, which is very similar to this
        # function but performs slightly worse, because here we have an early
        # escape-hatch if we find a single user that the appservice is
        # interested in. The juice would be worth the squeeze if
        # `store.get_app_service_users_in_room` was used in more places besides
        # an experimental MSC. But for now we can avoid doing more work and
        # barely using it later.
        local_user_ids = await store.get_local_users_in_room(
            room_id, on_invalidate=cache_context.invalidate
        )

        # check joined member events
        for user_id in member_list:
        for user_id in local_user_ids:
            if self.is_interested_in_user(user_id):
                return True
        return False

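An illustrative sketch of the early-exit interest check described above; the namespace regex and user IDs below are made up for the example:

# Illustrative sketch of the early-exit interest check; the namespace regex
# and user IDs are hypothetical.
import re

user_namespace = re.compile(r"@irc_.*:example\.org")


def is_interested_in_room(local_user_ids: list) -> bool:
    # Stop at the first matching user instead of collecting all matches.
    return any(user_namespace.fullmatch(user_id) for user_id in local_user_ids)


assert is_interested_in_room(["@alice:example.org", "@irc_bob:example.org"])
assert not is_interested_in_room(["@alice:example.org"])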
@@ -123,7 +123,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            response = await self.get_json(
                uri,
                {"access_token": service.hs_token},
                headers={"Authorization": f"Bearer {service.hs_token}"},
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )
            if response is not None:  # just an empty json object
                return True
@@ -147,7 +147,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            response = await self.get_json(
                uri,
                {"access_token": service.hs_token},
                headers={"Authorization": f"Bearer {service.hs_token}"},
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )
            if response is not None:  # just an empty json object
                return True
@@ -190,7 +190,9 @@ class ApplicationServiceApi(SimpleHttpClient):
                b"access_token": service.hs_token,
            }
            response = await self.get_json(
                uri, args=args, headers={"Authorization": f"Bearer {service.hs_token}"}
                uri,
                args=args,
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )
            if not isinstance(response, list):
                logger.warning(
@@ -230,7 +232,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            info = await self.get_json(
                uri,
                {"access_token": service.hs_token},
                headers={"Authorization": f"Bearer {service.hs_token}"},
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )

            if not _is_valid_3pe_metadata(info):
@@ -327,7 +329,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                uri=uri,
                json_body=body,
                args={"access_token": service.hs_token},
                headers={"Authorization": f"Bearer {service.hs_token}"},
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(

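Each hunk above wraps the Bearer value in a list. That matches Twisted's header model, where a header name maps to a list of raw values; a minimal standalone check (the token value is a placeholder):

# Why the Bearer value is wrapped in a list: Twisted's Headers maps each
# header name to a *list* of raw values. The token here is a placeholder.
from twisted.web.http_headers import Headers

hs_token = b"hs_token_placeholder"
headers = Headers({b"Authorization": [b"Bearer " + hs_token]})
assert headers.getRawHeaders(b"Authorization") == [b"Bearer " + hs_token]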
@@ -98,6 +98,9 @@ class ExperimentalConfig(Config):
        # MSC3773: Thread notifications
        self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)

        # MSC3664: Pushrules to match on related events
        self.msc3664_enabled: bool = experimental.get("msc3664_enabled", False)

        # MSC3848: Introduce errcodes for specific event sending failures
        self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)

@@ -125,3 +128,6 @@ class ExperimentalConfig(Config):
        self.msc3886_endpoint: Optional[str] = experimental.get(
            "msc3886_endpoint", None
        )

        # MSC3912: Relation-based redactions.
        self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)

@@ -53,7 +53,7 @@ DEFAULT_LOG_CONFIG = Template(
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html

version: 1
@@ -317,10 +317,9 @@ def setup_logging(
    Set up the logging subsystem.

    Args:
        config (LoggingConfig | synapse.config.worker.WorkerConfig):
            configuration data
        config: configuration data

        use_worker_options (bool): True to use the 'worker_log_config' option
        use_worker_options: True to use the 'worker_log_config' option
            instead of 'log_config'.

        logBeginner: The Twisted logBeginner to use.

@@ -43,7 +43,7 @@ class MetricsConfig(Config):
    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        self.enable_metrics = config.get("enable_metrics", False)

        self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)
        self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)

        self.report_stats = config.get("report_stats", None)
        self.report_stats_endpoint = config.get(

@@ -123,6 +123,8 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
        "userinfo_endpoint": {"type": "string"},
        "jwks_uri": {"type": "string"},
        "skip_verification": {"type": "boolean"},
        "backchannel_logout_enabled": {"type": "boolean"},
        "backchannel_logout_ignore_sub": {"type": "boolean"},
        "user_profile_method": {
            "type": "string",
            "enum": ["auto", "userinfo_endpoint"],
@@ -292,6 +294,10 @@ def _parse_oidc_config_dict(
        token_endpoint=oidc_config.get("token_endpoint"),
        userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
        jwks_uri=oidc_config.get("jwks_uri"),
        backchannel_logout_enabled=oidc_config.get("backchannel_logout_enabled", False),
        backchannel_logout_ignore_sub=oidc_config.get(
            "backchannel_logout_ignore_sub", False
        ),
        skip_verification=oidc_config.get("skip_verification", False),
        user_profile_method=oidc_config.get("user_profile_method", "auto"),
        allow_existing_users=oidc_config.get("allow_existing_users", False),
@@ -368,6 +374,12 @@ class OidcProviderConfig:
    # "openid" scope is used.
    jwks_uri: Optional[str]

    # Whether Synapse should react to backchannel logouts
    backchannel_logout_enabled: bool

    # Whether Synapse should ignore the `sub` claim in backchannel logouts or not.
    backchannel_logout_ignore_sub: bool

    # Whether to skip metadata verification
    skip_verification: bool

@@ -150,8 +150,5 @@ class RatelimitConfig(Config):

        self.rc_third_party_invite = RatelimitSettings(
            config.get("rc_third_party_invite", {}),
            defaults={
                "per_second": self.rc_message.per_second,
                "burst_count": self.rc_message.burst_count,
            },
            defaults={"per_second": 0.0025, "burst_count": 5},
        )

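For intuition about the new default: 0.0025 invites per second refills one token roughly every 400 seconds (about 6.7 minutes), with bursts of up to 5. A quick sanity check:

# Quick sanity check of the new default rate: one token roughly every
# 400 seconds, with a burst capacity of 5.
per_second = 0.0025
burst_count = 5
assert round(1 / per_second) == 400  # ~6.7 minutes between refills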
@@ -67,6 +67,7 @@ class InstanceLocationConfig:

    host: str
    port: int
    tls: bool = False


@attr.s
@@ -149,13 +150,25 @@ class WorkerConfig(Config):
        # The port on the main synapse for HTTP replication endpoint
        self.worker_replication_http_port = config.get("worker_replication_http_port")

        # The TLS mode on the main synapse for the HTTP replication endpoint.
        # For backward compatibility this defaults to False.
        self.worker_replication_http_tls = config.get(
            "worker_replication_http_tls", False
        )

        # The shared secret used for authentication when connecting to the main synapse.
        self.worker_replication_secret = config.get("worker_replication_secret", None)

        self.worker_name = config.get("worker_name", self.worker_app)
        self.instance_name = self.worker_name or "master"

        # FIXME: Remove this check after a suitable amount of time.
        self.worker_main_http_uri = config.get("worker_main_http_uri", None)
        if self.worker_main_http_uri is not None:
            logger.warning(
                "The config option worker_main_http_uri is unused since Synapse 1.73. "
                "It can be safely removed from your configuration."
            )

        # This option is really only here to support the `--manhole` command-line
        # argument.

@@ -213,7 +213,7 @@ class Keyring:

    def verify_json_objects_for_server(
        self, server_and_json: Iterable[Tuple[str, dict, int]]
    ) -> List[defer.Deferred]:
    ) -> List["defer.Deferred[None]"]:
        """Bulk verifies signatures of json objects, bulk fetching keys as
        necessary.

@@ -226,10 +226,9 @@ class Keyring:
            valid.

        Returns:
            List<Deferred[None]>: for each input triplet, a deferred indicating success
                or failure to verify each json object's signature for the given
                server_name. The deferreds run their callbacks in the sentinel
                logcontext.
            For each input triplet, a deferred indicating success or failure to
            verify each json object's signature for the given server_name. The
            deferreds run their callbacks in the sentinel logcontext.
        """
        return [
            run_in_background(
@@ -858,7 +857,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
            response = await self.client.get_json(
                destination=server_name,
                path="/_matrix/key/v2/server/"
                + urllib.parse.quote(requested_key_id),
                + urllib.parse.quote(requested_key_id, safe=""),
                ignore_backoff=True,
                # we only give the remote server 10s to respond. It should be an
                # easy request to handle, so if it doesn't reply within 10s, it's

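The `safe=""` change matters because `urllib.parse.quote` leaves `/` unescaped by default, so a key ID containing a slash would otherwise split the URL path:

# Why safe="" matters: quote() leaves "/" unescaped by default, so a key ID
# containing a slash would otherwise break the URL path. The key ID below is
# a made-up example.
from urllib.parse import quote

key_id = "ed25519/abc"
assert quote(key_id) == "ed25519/abc"             # "/" kept: two path segments
assert quote(key_id, safe="") == "ed25519%2Fabc"  # "/" escaped: one segment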
@@ -597,8 +597,7 @@ def _event_type_from_format_version(
        format_version: The event format version

    Returns:
        type: A type that can be initialized as per the initializer of
            `FrozenEvent`
        A type that can be initialized as per the initializer of `FrozenEvent`
    """

    if format_version == EventFormatVersions.ROOM_V1_V2:

@@ -128,6 +128,7 @@ class EventBuilder:
            state_filter=StateFilter.from_types(
                auth_types_for_event(self.room_version, self)
            ),
            await_full_state=False,
        )
        auth_event_ids = self._event_auth_handler.compute_auth_events(
            self, state_ids

@@ -80,6 +80,18 @@ PDU_RETRY_TIME_MS = 1 * 60 * 1000
T = TypeVar("T")


@attr.s(frozen=True, slots=True, auto_attribs=True)
class PulledPduInfo:
    """
    A result object that stores the PDU and info about it, like which homeserver
    we pulled it from (`pull_origin`).
    """

    pdu: EventBase
    # Which homeserver we pulled the PDU from
    pull_origin: str


class InvalidResponseError(RuntimeError):
    """Helper for _try_destination_list: indicates that the server returned a response
    we couldn't parse
    """
@@ -114,7 +126,9 @@ class FederationClient(FederationBase):
        self.hostname = hs.hostname
        self.signing_key = hs.signing_key

        self._get_pdu_cache: ExpiringCache[str, EventBase] = ExpiringCache(
        # Cache mapping `event_id` to a tuple of the event itself and the `pull_origin`
        # (which server we pulled the event from)
        self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache(
            cache_name="get_pdu_cache",
            clock=self._clock,
            max_len=1000,
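A tiny usage sketch of the new result type; the event stand-in is made up, since constructing a real `EventBase` needs a full event dict:

# Tiny usage sketch of the PulledPduInfo result type defined above; a plain
# object stands in for the EventBase.
import attr


@attr.s(frozen=True, slots=True, auto_attribs=True)
class PulledPduInfo:
    pdu: object
    pull_origin: str


info = PulledPduInfo(pdu=object(), pull_origin="matrix.example.org")
assert info.pull_origin == "matrix.example.org"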
@@ -352,11 +366,11 @@ class FederationClient(FederationBase):
    @tag_args
    async def get_pdu(
        self,
        destinations: Iterable[str],
        destinations: Collection[str],
        event_id: str,
        room_version: RoomVersion,
        timeout: Optional[int] = None,
    ) -> Optional[EventBase]:
    ) -> Optional[PulledPduInfo]:
        """Requests the PDU with given origin and ID from the remote home
        servers.

@@ -371,11 +385,11 @@ class FederationClient(FederationBase):
            moving to the next destination. None indicates no timeout.

        Returns:
            The requested PDU, or None if we were unable to find it.
            The requested PDU wrapped in `PulledPduInfo`, or None if we were unable to find it.
        """

        logger.debug(
            "get_pdu: event_id=%s from destinations=%s", event_id, destinations
            "get_pdu(event_id=%s): from destinations=%s", event_id, destinations
        )

        # TODO: Rate limit the number of times we try and get the same event.
@@ -384,19 +398,25 @@ class FederationClient(FederationBase):
        # it gets persisted to the database), so we cache the results of the lookup.
        # Note that this is separate to the regular get_event cache which caches
        # events once they have been persisted.
        event = self._get_pdu_cache.get(event_id)
        get_pdu_cache_entry = self._get_pdu_cache.get(event_id)

        event = None
        pull_origin = None
        if get_pdu_cache_entry:
            event, pull_origin = get_pdu_cache_entry
        # If we don't see the event in the cache, go try to fetch it from the
        # provided remote federated destinations
        if not event:
        else:
            pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})

            # TODO: We can probably refactor this to use `_try_destination_list`
            for destination in destinations:
                now = self._clock.time_msec()
                last_attempt = pdu_attempts.get(destination, 0)
                if last_attempt + PDU_RETRY_TIME_MS > now:
                    logger.debug(
                        "get_pdu: skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
                        "get_pdu(event_id=%s): skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
                        event_id,
                        destination,
                        last_attempt,
                        PDU_RETRY_TIME_MS,
@@ -411,43 +431,48 @@ class FederationClient(FederationBase):
                        room_version=room_version,
                        timeout=timeout,
                    )
                    pull_origin = destination

                    pdu_attempts[destination] = now

                    if event:
                        # Prime the cache
                        self._get_pdu_cache[event.event_id] = event
                        self._get_pdu_cache[event.event_id] = (event, pull_origin)

                        # Now that we have an event, we can break out of this
                        # loop and stop asking other destinations.
                        break

                except NotRetryingDestination as e:
                    logger.info("get_pdu(event_id=%s): %s", event_id, e)
                    continue
                except FederationDeniedError:
                    logger.info(
                        "get_pdu(event_id=%s): Not attempting to fetch PDU from %s because the homeserver is not on our federation whitelist",
                        event_id,
                        destination,
                    )
                    continue
                except SynapseError as e:
                    logger.info(
                        "Failed to get PDU %s from %s because %s",
                        "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
                        event_id,
                        destination,
                        e,
                    )
                    continue
                except NotRetryingDestination as e:
                    logger.info(str(e))
                    continue
                except FederationDeniedError as e:
                    logger.info(str(e))
                    continue
                except Exception as e:
                    pdu_attempts[destination] = now

                    logger.info(
                        "Failed to get PDU %s from %s because %s",
                        "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
                        event_id,
                        destination,
                        e,
                    )
                    continue

        if not event:
        if not event or not pull_origin:
            return None

        # `event` now refers to an object stored in `get_pdu_cache`. Our
@@ -459,7 +484,7 @@ class FederationClient(FederationBase):
            event.room_version,
        )

        return event_copy
        return PulledPduInfo(event_copy, pull_origin)

    @trace
    @tag_args
@@ -699,12 +724,14 @@ class FederationClient(FederationBase):
                pdu_origin = get_domain_from_id(pdu.sender)
                if not res and pdu_origin != origin:
                    try:
                        res = await self.get_pdu(
                        pulled_pdu_info = await self.get_pdu(
                            destinations=[pdu_origin],
                            event_id=pdu.event_id,
                            room_version=room_version,
                            timeout=10000,
                        )
                        if pulled_pdu_info is not None:
                            res = pulled_pdu_info.pdu
                    except SynapseError:
                        pass

@@ -806,6 +833,7 @@ class FederationClient(FederationBase):
        )

        for destination in destinations:
            # We don't want to ask our own server for information we don't have
            if destination == self.server_name:
                continue

@@ -814,9 +842,21 @@ class FederationClient(FederationBase):
            except (
                RequestSendFailed,
                InvalidResponseError,
                NotRetryingDestination,
            ) as e:
                logger.warning("Failed to %s via %s: %s", description, destination, e)
                # Skip to the next homeserver in the list to try.
                continue
            except NotRetryingDestination as e:
                logger.info("%s: %s", description, e)
                continue
            except FederationDeniedError:
                logger.info(
                    "%s: Not attempting to %s from %s because the homeserver is not on our federation whitelist",
                    description,
                    description,
                    destination,
                )
                continue
            except UnsupportedRoomVersionError:
                raise
            except HttpResponseException as e:

@@ -1609,6 +1649,54 @@ class FederationClient(FederationBase):
        return result

    async def timestamp_to_event(
        self, *, destinations: List[str], room_id: str, timestamp: int, direction: str
    ) -> Optional["TimestampToEventResponse"]:
        """
        Calls each remote federating server from `destinations` asking for their closest
        event to the given timestamp in the given direction until we get a response.
        Also validates the response to always return the expected keys or raises an
        error.

        Args:
            destinations: The domains of homeservers to try fetching from
            room_id: Room to fetch the event from
            timestamp: The point in time (inclusive) we should navigate from in
                the given direction to find the closest event.
            direction: ["f"|"b"] to indicate whether we should navigate forward
                or backward from the given timestamp to find the closest event.

        Returns:
            A parsed TimestampToEventResponse including the closest event_id
            and origin_server_ts, or None if no destination has a response.
        """

        async def _timestamp_to_event_from_destination(
            destination: str,
        ) -> TimestampToEventResponse:
            return await self._timestamp_to_event_from_destination(
                destination, room_id, timestamp, direction
            )

        try:
            # Loop through each homeserver candidate until we get a successful response
            timestamp_to_event_response = await self._try_destination_list(
                "timestamp_to_event",
                destinations,
                # TODO: The requested timestamp may lie in a part of the
                # event graph that the remote server *also* didn't have,
                # in which case they will have returned another event
                # which may be nowhere near the requested timestamp. In
                # the future, we may need to reconcile that gap and ask
                # other homeservers, and/or extend `/timestamp_to_event`
                # to return events on *both* sides of the timestamp to
                # help reconcile the gap faster.
                _timestamp_to_event_from_destination,
            )
            return timestamp_to_event_response
        except SynapseError:
            return None

    async def _timestamp_to_event_from_destination(
        self, destination: str, room_id: str, timestamp: int, direction: str
    ) -> "TimestampToEventResponse":
        """

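The destination-iteration pattern used by `_try_destination_list` above can be sketched generically; this is a simplified stand-in, not Synapse's implementation, and the error handling is narrowed to a bare `Exception`:

# Simplified, generic stand-in for the try-each-destination pattern used by
# `_try_destination_list` above.
from typing import Awaitable, Callable, List, Optional, TypeVar

T = TypeVar("T")


async def try_destination_list(
    destinations: List[str],
    callback: Callable[[str], Awaitable[T]],
) -> Optional[T]:
    # Ask each homeserver in turn, returning the first successful response.
    for destination in destinations:
        try:
            return await callback(destination)
        except Exception:
            continue  # skip to the next homeserver candidate
    return None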
@@ -69,6 +69,8 @@ from synapse.replication.http.federation import (
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.lock import Lock
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
@@ -686,8 +688,9 @@ class FederationServer(FederationBase):
        state_event_ids: Collection[str]
        servers_in_room: Optional[Collection[str]]
        if caller_supports_partial_state:
            summary = await self.store.get_room_summary(room_id)
            state_event_ids = _get_event_ids_for_partial_state_join(
                event, prev_state_ids
                event, prev_state_ids, summary
            )
            servers_in_room = await self.state.get_hosts_in_room_at_events(
                room_id, event_ids=event.prev_event_ids()
@@ -1490,6 +1493,7 @@ class FederationHandlerRegistry:
def _get_event_ids_for_partial_state_join(
    join_event: EventBase,
    prev_state_ids: StateMap[str],
    summary: Dict[str, MemberSummary],
) -> Collection[str]:
    """Calculate state to be returned in a partial_state send_join

@@ -1516,8 +1520,19 @@ def _get_event_ids_for_partial_state_join(
    if current_membership_event_id is not None:
        state_event_ids.add(current_membership_event_id)

    # TODO: return a few more members:
    #   - those with invites
    #   - those that are kicked? / banned
    name_id = prev_state_ids.get((EventTypes.Name, ""))
    canonical_alias_id = prev_state_ids.get((EventTypes.CanonicalAlias, ""))
    if not name_id and not canonical_alias_id:
        # Also include the hero members of the room (for DM rooms without a title).
        # To do this properly, we should select the correct subset of membership events
        # from `prev_state_ids`. Instead, we are lazier and use the (cached)
        # `get_room_summary` function, which is based on the current state of the room.
        # This introduces races; we choose to ignore them because a) they should be rare
        # and b) even if it's wrong, joining servers will get the full state eventually.
        heroes = extract_heroes_from_room_summary(summary, join_event.state_key)
        for hero in heroes:
            membership_event_id = prev_state_ids.get((EventTypes.Member, hero))
            if membership_event_id:
                state_event_ids.add(membership_event_id)

    return state_event_ids

@@ -536,8 +536,7 @@ class FederationSender(AbstractFederationSender):

            if event_entries:
                now = self.clock.time_msec()
                last_id = next(reversed(event_ids))
                ts = event_to_received_ts[last_id]
                ts = max(t for t in event_to_received_ts.values() if t)
                assert ts is not None

                synapse.metrics.event_processing_lag.labels(

@@ -280,12 +280,11 @@ class TransportLayerClient:
        Note that this does not append any events to any graphs.

        Args:
            destination (str): address of remote homeserver
            room_id (str): room to join/leave
            user_id (str): user to be joined/left
            membership (str): one of join/leave
            params (dict[str, str|Iterable[str]]): Query parameters to include in the
                request.
            destination: address of remote homeserver
            room_id: room to join/leave
            user_id: user to be joined/left
            membership: one of join/leave
            params: Query parameters to include in the request.

        Returns:
            Succeeds when we get a 2xx HTTP response. The result

@@ -226,10 +226,10 @@ class BaseFederationServlet:

        With arguments:

            origin (unicode|None): The authenticated server_name of the calling server,
            origin (str|None): The authenticated server_name of the calling server,
                unless REQUIRE_AUTH is set to False and authentication failed.

            content (unicode|None): decoded json body of the request. None if the
            content (str|None): decoded json body of the request. None if the
                request was a GET.

            query (dict[bytes, list[bytes]]): Query params from the request. url-decoded

@@ -100,6 +100,7 @@ class AdminHandler:
        user_info_dict["avatar_url"] = profile.avatar_url
        user_info_dict["threepids"] = threepids
        user_info_dict["external_ids"] = external_ids
        user_info_dict["erased"] = await self.store.is_user_erased(user.to_string())

        return user_info_dict

+54 -10
@@ -38,6 +38,7 @@ from typing import (
import attr
import bcrypt
import unpaddedbase64
from prometheus_client import Counter

from twisted.internet.defer import CancelledError
from twisted.web.server import Request
@@ -48,6 +49,7 @@ from synapse.api.errors import (
    Codes,
    InteractiveAuthIncompleteError,
    LoginError,
    NotFoundError,
    StoreError,
    SynapseError,
    UserDeactivatedError,
@@ -63,10 +65,14 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main.registration import (
    LoginTokenExpired,
    LoginTokenLookupResult,
    LoginTokenReused,
)
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.macaroons import LoginTokenAttributes
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
@@ -80,6 +86,12 @@ logger = logging.getLogger(__name__)

INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"

invalid_login_token_counter = Counter(
    "synapse_user_login_invalid_login_tokens",
    "Counts the number of rejected m.login.token on /login",
    ["reason"],
)


def convert_client_dict_legacy_fields_to_identifier(
    submission: JsonDict,
@@ -883,6 +895,25 @@ class AuthHandler:

        return True

    async def create_login_token_for_user_id(
        self,
        user_id: str,
        duration_ms: int = (2 * 60 * 1000),
        auth_provider_id: Optional[str] = None,
        auth_provider_session_id: Optional[str] = None,
    ) -> str:
        login_token = self.generate_login_token()
        now = self._clock.time_msec()
        expiry_ts = now + duration_ms
        await self.store.add_login_token_to_user(
            user_id=user_id,
            token=login_token,
            expiry_ts=expiry_ts,
            auth_provider_id=auth_provider_id,
            auth_provider_session_id=auth_provider_session_id,
        )
        return login_token

    async def create_refresh_token_for_user_id(
        self,
        user_id: str,
@@ -1401,6 +1432,18 @@ class AuthHandler:
            return None
        return user_id

    def generate_login_token(self) -> str:
        """Generates an opaque string, for use as a short-term login token"""

        # We use the following format for login tokens:
        #    syl_<random string>_<base62 crc check>

        random_string = stringutils.random_string(20)
        base = f"syl_{random_string}"

        crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
        return f"{base}_{crc}"

    def generate_access_token(self, for_user: UserID) -> str:
        """Generates an opaque string, for use as an access token"""

@@ -1427,16 +1470,17 @@ class AuthHandler:
        crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
        return f"{base}_{crc}"

    async def validate_short_term_login_token(
        self, login_token: str
    ) -> LoginTokenAttributes:
    async def consume_login_token(self, login_token: str) -> LoginTokenLookupResult:
        try:
            res = self.macaroon_gen.verify_short_term_login_token(login_token)
        except Exception:
            raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
            return await self.store.consume_login_token(login_token)
        except LoginTokenExpired:
            invalid_login_token_counter.labels("expired").inc()
        except LoginTokenReused:
            invalid_login_token_counter.labels("reused").inc()
        except NotFoundError:
            invalid_login_token_counter.labels("not found").inc()

        await self.auth_blocking.check_auth_blocking(res.user_id)
        return res
        raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)

    async def delete_access_token(self, access_token: str) -> None:
        """Invalidate a single access token
@@ -1711,7 +1755,7 @@ class AuthHandler:
        )

        # Create a login token
        login_token = self.macaroon_gen.generate_short_term_login_token(
        login_token = await self.create_login_token_for_user_id(
            registered_user_id,
            auth_provider_id=auth_provider_id,
            auth_provider_session_id=auth_provider_session_id,

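The `syl_..._<crc>` format makes mistyped tokens detectable offline. A self-contained sketch of generating and checking such a token; the base62 helper below is our own stand-in for `synapse.util.stringutils.base62_encode`, and its alphabet may differ from Synapse's:

# Self-contained sketch of the syl_<random>_<crc> token format above.
import secrets
import string
from zlib import crc32

ALPHABET = string.digits + string.ascii_uppercase + string.ascii_lowercase


def base62_encode(num: int, minwidth: int = 1) -> str:
    digits = ""
    while num:
        num, rem = divmod(num, 62)
        digits = ALPHABET[rem] + digits
    return digits.rjust(minwidth, "0")


def generate_login_token() -> str:
    base = "syl_" + "".join(secrets.choice(ALPHABET) for _ in range(20))
    return f"{base}_{base62_encode(crc32(base.encode('ascii')), minwidth=6)}"


token = generate_login_token()
base, crc = token.rsplit("_", 1)
assert base62_encode(crc32(base.encode("ascii")), minwidth=6) == crc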
@@ -85,7 +85,7 @@ class DirectoryHandler:
        # TODO(erikj): Add transactions.
        # TODO(erikj): Check if there is a current association.
        if not servers:
            servers = await self._storage_controllers.state.get_current_hosts_in_room(
            servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
                room_id
            )

@@ -290,7 +290,7 @@ class DirectoryHandler:
                Codes.NOT_FOUND,
            )

        extra_servers = await self._storage_controllers.state.get_current_hosts_in_room(
        extra_servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
            room_id
        )
        servers_set = set(extra_servers) | set(servers)

@@ -49,6 +49,7 @@ logger = logging.getLogger(__name__)

class E2eKeysHandler:
    def __init__(self, hs: "HomeServer"):
        self.config = hs.config
        self.store = hs.get_datastores().main
        self.federation = hs.get_federation_client()
        self.device_handler = hs.get_device_handler()
@@ -431,13 +432,17 @@ class E2eKeysHandler:
    @trace
    @cancellable
    async def query_local_devices(
        self, query: Mapping[str, Optional[List[str]]]
        self,
        query: Mapping[str, Optional[List[str]]],
        include_displaynames: bool = True,
    ) -> Dict[str, Dict[str, dict]]:
        """Get E2E device keys for local users

        Args:
            query: map from user_id to a list
                of devices to query (None for all devices)
            include_displaynames: Whether to include device displaynames in the returned
                device details.

        Returns:
            A map from user_id -> device_id -> device details
@@ -469,7 +474,9 @@ class E2eKeysHandler:
            # make sure that each queried user appears in the result dict
            result_dict[user_id] = {}

        results = await self.store.get_e2e_device_keys_for_cs_api(local_query)
        results = await self.store.get_e2e_device_keys_for_cs_api(
            local_query, include_displaynames
        )

        # Build the result structure
        for user_id, device_keys in results.items():
@@ -482,11 +489,33 @@ class E2eKeysHandler:
    async def on_federation_query_client_keys(
        self, query_body: Dict[str, Dict[str, Optional[List[str]]]]
    ) -> JsonDict:
        """Handle a device key query from a federated server"""
        """Handle a device key query from a federated server:

        Handles the path: GET /_matrix/federation/v1/users/keys/query

        Args:
            query_body: The body of the query request. Should contain a key
                "device_keys" that maps to a dictionary of user IDs -> list of
                device IDs. If the list of device IDs is empty, all devices of
                that user will be queried.

        Returns:
            A json dictionary containing the following:
            - device_keys: A dictionary containing the requested device information.
            - master_keys: An optional dictionary of user ID -> master cross-signing
                key info.
            - self_signing_key: An optional dictionary of user ID -> self-signing
                key info.
        """
        device_keys_query: Dict[str, Optional[List[str]]] = query_body.get(
            "device_keys", {}
        )
        res = await self.query_local_devices(device_keys_query)
        res = await self.query_local_devices(
            device_keys_query,
            include_displaynames=(
                self.config.federation.allow_device_name_lookup_over_federation
            ),
        )
        ret = {"device_keys": res}

        # add in the cross-signing keys
@@ -841,7 +870,7 @@ class E2eKeysHandler:
        - signatures of the user's master key by the user's devices.

        Args:
            user_id (string): the user uploading the keys
            user_id: the user uploading the keys
            signatures (dict[string, dict]): map of devices to signed keys

        Returns:

@@ -377,8 +377,9 @@ class E2eRoomKeysHandler:
        """Deletes a given version of the user's e2e_room_keys backup

        Args:
            user_id(str): the user whose current backup version we're deleting
            version(str): the version id of the backup being deleted
            user_id: the user whose current backup version we're deleting
            version: Optional. The version ID of the backup we're deleting.
                If missing, we delete the current backup version info.
        Raises:
            NotFoundError: if this backup version doesn't exist
        """

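Per the docstring above, the federation device-key query body looks like this; the user and device IDs are illustrative:

# Illustrative shape of the federation device-key query described above.
query_body = {
    "device_keys": {
        "@alice:example.org": [],             # empty list: query all devices
        "@bob:example.org": ["DEVICE_ID_1"],  # or name specific devices
    }
}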
@@ -442,6 +442,15 @@ class FederationHandler:
                # appropriate stuff.
                # TODO: We can probably do something more intelligent here.
                return True
            except NotRetryingDestination as e:
                logger.info("_maybe_backfill_inner: %s", e)
                continue
            except FederationDeniedError:
                logger.info(
                    "_maybe_backfill_inner: Not attempting to backfill from %s because the homeserver is not on our federation whitelist",
                    dom,
                )
                continue
            except (SynapseError, InvalidResponseError) as e:
                logger.info("Failed to backfill from %s because %s", dom, e)
                continue
@@ -477,15 +486,9 @@ class FederationHandler:

                logger.info("Failed to backfill from %s because %s", dom, e)
                continue
            except NotRetryingDestination as e:
                logger.info(str(e))
                continue
            except RequestSendFailed as e:
                logger.info("Failed to get backfill from %s because %s", dom, e)
                continue
            except FederationDeniedError as e:
                logger.info(e)
                continue
            except Exception as e:
                logger.exception("Failed to backfill from %s because %s", dom, e)
                continue
@@ -1017,7 +1020,9 @@ class FederationHandler:

        context = EventContext.for_outlier(self._storage_controllers)

        await self._bulk_push_rule_evaluator.action_for_event_by_user(event, context)
        await self._bulk_push_rule_evaluator.action_for_events_by_user(
            [(event, context)]
        )
        try:
            await self._federation_event_handler.persist_events_and_notify(
                event.room_id, [(event, context)]
@@ -1591,8 +1596,8 @@ class FederationHandler:
        Fetch the complexity of a remote room over federation.

        Args:
            remote_room_hosts (list[str]): The remote servers to ask.
            room_id (str): The room ID to ask about.
            remote_room_hosts: The remote servers to ask.
            room_id: The room ID to ask about.

        Returns:
            Dict contains the complexity
@@ -58,7 +58,7 @@ from synapse.event_auth import (
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_client import InvalidResponseError
from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
from synapse.logging.context import nested_logging_context
from synapse.logging.tracing import (
    SynapseTags,
@@ -1065,10 +1065,9 @@ class FederationEventHandler:
                state_res_store=StateResolutionStore(self._store),
            )

        except Exception:
        except Exception as e:
            logger.warning(
                "Error attempting to resolve state at missing prev_events",
                exc_info=True,
                "Error attempting to resolve state at missing prev_events: %s", e
            )
            raise FederationError(
                "ERROR",
@@ -1517,8 +1516,8 @@ class FederationEventHandler:
        )

    async def backfill_event_id(
        self, destination: str, room_id: str, event_id: str
    ) -> EventBase:
        self, destinations: List[str], room_id: str, event_id: str
    ) -> PulledPduInfo:
        """Backfill a single event and persist it as a non-outlier, which means
        we also pull in all of the state and auth events necessary for it.

@@ -1530,24 +1529,21 @@ class FederationEventHandler:
        Raises:
            FederationError if we are unable to find the event from the destination
        """
        logger.info(
            "backfill_event_id: event_id=%s from destination=%s", event_id, destination
        )
        logger.info("backfill_event_id: event_id=%s", event_id)

        room_version = await self._store.get_room_version(room_id)

        event_from_response = await self._federation_client.get_pdu(
            [destination],
        pulled_pdu_info = await self._federation_client.get_pdu(
            destinations,
            event_id,
            room_version,
        )

        if not event_from_response:
        if not pulled_pdu_info:
            raise FederationError(
                "ERROR",
                404,
                "Unable to find event_id=%s from destination=%s to backfill."
                % (event_id, destination),
                f"Unable to find event_id={event_id} from remote servers to backfill.",
                affected=event_id,
            )

@@ -1555,13 +1551,13 @@ class FederationEventHandler:
        # and auth events to de-outlier it. This also sets up the necessary
        # `state_groups` for the event.
        await self._process_pulled_events(
            destination,
            [event_from_response],
            pulled_pdu_info.pull_origin,
            [pulled_pdu_info.pdu],
            # Prevent notifications going to clients
            backfilled=True,
        )

        return event_from_response
        return pulled_pdu_info

    @trace
    @tag_args
@@ -1584,19 +1580,19 @@ class FederationEventHandler:
        async def get_event(event_id: str) -> None:
            with nested_logging_context(event_id):
                try:
                    event = await self._federation_client.get_pdu(
                    pulled_pdu_info = await self._federation_client.get_pdu(
                        [destination],
                        event_id,
                        room_version,
                    )
                    if event is None:
                    if pulled_pdu_info is None:
                        logger.warning(
                            "Server %s didn't return event %s",
                            destination,
                            event_id,
                        )
                        return
                    events.append(event)
                    events.append(pulled_pdu_info.pdu)

                except Exception as e:
                    logger.warning(
@@ -2171,8 +2167,8 @@ class FederationEventHandler:
                min_depth,
            )
        else:
            await self._bulk_push_rule_evaluator.action_for_event_by_user(
                event, context
            await self._bulk_push_rule_evaluator.action_for_events_by_user(
                [(event, context)]
            )

        try:

@@ -711,7 +711,7 @@ class IdentityHandler:
            inviter_display_name: The current display name of the
                inviter.
            inviter_avatar_url: The URL of the inviter's avatar.
            id_access_token (str): The access token to authenticate to the identity
            id_access_token: The access token to authenticate to the identity
                server with

        Returns:

+42 -20
@@ -877,6 +877,36 @@ class EventCreationHandler:
                return prev_event
        return None

    async def get_event_from_transaction(
        self,
        requester: Requester,
        txn_id: str,
        room_id: str,
    ) -> Optional[EventBase]:
        """For the given transaction ID and room ID, check if there is a matching event.
        If so, fetch it and return it.

        Args:
            requester: The requester making the request in the context of which we want
                to fetch the event.
            txn_id: The transaction ID.
            room_id: The room ID.

        Returns:
            An event if one could be found, None otherwise.
        """
        if requester.access_token_id:
            existing_event_id = await self.store.get_event_id_from_transaction_id(
                room_id,
                requester.user.to_string(),
                requester.access_token_id,
                txn_id,
            )
            if existing_event_id:
                return await self.store.get_event(existing_event_id)

        return None

    async def create_and_send_nonmember_event(
        self,
        requester: Requester,
@@ -956,18 +986,17 @@ class EventCreationHandler:
            # extremities to pile up, which in turn leads to state resolution
            # taking longer.
            async with self.limiter.queue(event_dict["room_id"]):
                if txn_id and requester.access_token_id:
                    existing_event_id = await self.store.get_event_id_from_transaction_id(
                        event_dict["room_id"],
                        requester.user.to_string(),
                        requester.access_token_id,
                        txn_id,
                if txn_id:
                    event = await self.get_event_from_transaction(
                        requester, txn_id, event_dict["room_id"]
                    )
                    if existing_event_id:
                        event = await self.store.get_event(existing_event_id)
                    if event:
                        # we know it was persisted, so must have a stream ordering
                        assert event.internal_metadata.stream_ordering
                        return event, event.internal_metadata.stream_ordering
                        return (
                            event,
                            event.internal_metadata.stream_ordering,
                        )

                event, context = await self.create_event(
                    requester,
@@ -1433,17 +1462,10 @@ class EventCreationHandler:
            a room that has been un-partial stated.
        """

        for event, context in events_and_context:
            # Skip push notification actions for historical messages
            # because we don't want to notify people about old history back in time.
            # The historical messages also do not have the proper `context.current_state_ids`
            # and `state_groups` because they have `prev_events` that aren't persisted yet
            # (historical messages persisted in reverse-chronological order).
            if not event.internal_metadata.is_historical():
                with tracing.start_active_span("calculate_push_actions"):
                    await self._bulk_push_rule_evaluator.action_for_event_by_user(
                        event, context
                    )
        with tracing.start_active_span("calculate_push_actions"):
            await self._bulk_push_rule_evaluator.action_for_events_by_user(
                events_and_context
            )

        try:
            # If we're a worker we need to hit out to the master.

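Transaction IDs make client sends idempotent: retrying the same `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` returns the already-persisted event instead of creating a duplicate. A hedged, in-memory sketch of that dedup lookup; real Synapse keys it on (room_id, user_id, access_token_id, txn_id) and stores it in the database:

# In-memory sketch of the transaction-ID dedup implemented above; the IDs
# below are made up.
from typing import Dict, Optional, Tuple

TxnKey = Tuple[str, str, int, str]  # (room_id, user_id, access_token_id, txn_id)
_txn_to_event: Dict[TxnKey, str] = {}


def get_event_from_transaction(key: TxnKey) -> Optional[str]:
    # Return the previously persisted event ID for a retried transaction.
    return _txn_to_event.get(key)


def record_transaction(key: TxnKey, event_id: str) -> None:
    _txn_to_event[key] = event_id


record_transaction(("!room:hs", "@alice:hs", 1, "txn-abc"), "$event1")
assert get_event_from_transaction(("!room:hs", "@alice:hs", 1, "txn-abc")) == "$event1"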
+353 -37
@@ -12,14 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import inspect
import json
import logging
from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Generic,
    List,
    Optional,
    Type,
    TypeVar,
    Union,
)
from urllib.parse import urlencode, urlparse

import attr
import unpaddedbase64
from authlib.common.security import generate_token
from authlib.jose import JsonWebToken, jwt
from authlib.jose import JsonWebToken, JWTClaims
from authlib.jose.errors import InvalidClaimError, JoseError, MissingClaimError
from authlib.oauth2.auth import ClientAuth
from authlib.oauth2.rfc6749.parameters import prepare_grant_uri
from authlib.oidc.core import CodeIDToken, UserInfo
@@ -35,9 +49,12 @@ from typing_extensions import TypedDict
from twisted.web.client import readBody
from twisted.web.http_headers import Headers

from synapse.api.errors import SynapseError
from synapse.config import ConfigError
from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig
from synapse.handlers.sso import MappingException, UserAttributes
from synapse.http.server import finish_request
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
@@ -88,6 +105,8 @@ class Token(TypedDict):
#: there is no real point of doing this in our case.
JWK = Dict[str, str]

C = TypeVar("C")


#: A JWK Set, as per RFC7517 sec 5.
class JWKS(TypedDict):
@@ -247,6 +266,80 @@ class OidcHandler:

        await oidc_provider.handle_oidc_callback(request, session_data, code)

    async def handle_backchannel_logout(self, request: SynapseRequest) -> None:
        """Handle an incoming request to /_synapse/client/oidc/backchannel_logout

        This extracts the logout_token from the request and tries to figure out
        which OpenID Provider it is coming from. This works by matching the iss
        claim with the issuer and the aud claim with the client_id.

        Since at this point we don't know who signed the JWT, we can't just
        decode it using authlib, since that always verifies the signature. We
        have to decode it manually without validating the signature. The actual
        JWT verification is done in the `OidcProvider.handle_backchannel_logout`
        method, once we have figured out which provider sent the request.

        Args:
            request: the incoming request from the browser.
        """
        logout_token = parse_string(request, "logout_token")
        if logout_token is None:
            raise SynapseError(400, "Missing logout_token in request")

        # A JWT looks like this:
        #    header.payload.signature
        # where all parts are encoded with urlsafe base64.
        # The aud and iss claims we care about are in the payload part, which
        # is a JSON object.
        try:
            # By destructuring the list after splitting, we ensure that we have
            # exactly 3 segments
            _, payload, _ = logout_token.split(".")
        except ValueError:
            raise SynapseError(400, "Invalid logout_token in request")

        try:
            payload_bytes = unpaddedbase64.decode_base64(payload)
            claims = json_decoder.decode(payload_bytes.decode("utf-8"))
        except (json.JSONDecodeError, binascii.Error, UnicodeError):
            raise SynapseError(400, "Invalid logout_token payload in request")

        try:
            # Let's extract the iss and aud claims
            iss = claims["iss"]
            aud = claims["aud"]
            # The aud claim can be either a string or a list of strings. Here we
            # normalize it as a list of strings.
            if isinstance(aud, str):
                aud = [aud]

            # Check that we have the right types for the aud and the iss claims
            if not isinstance(iss, str) or not isinstance(aud, list):
                raise TypeError()
            for a in aud:
                if not isinstance(a, str):
                    raise TypeError()

            # At this point we have properly checked both claim types
            issuer: str = iss
            audience: List[str] = aud
        except (TypeError, KeyError):
            raise SynapseError(400, "Invalid issuer/audience in logout_token")

        # Now that we know the audience and the issuer, we can figure out
        # which provider it is coming from
        oidc_provider: Optional[OidcProvider] = None
        for provider in self._providers.values():
            if provider.issuer == issuer and provider.client_id in audience:
                oidc_provider = provider
                break

        if oidc_provider is None:
            raise SynapseError(400, "Could not find the OP that issued this event")

        # Ask the provider to handle the logout request.
        await oidc_provider.handle_backchannel_logout(request, logout_token)

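A standalone sketch of the unverified-payload peek performed above; the token below is made up, and signature verification is deliberately skipped, exactly as in the handler:

# Standalone sketch of peeking at a JWT's iss/aud without verifying the
# signature; the token is fabricated for the example.
import json

from unpaddedbase64 import decode_base64, encode_base64

payload = {"iss": "https://op.example.com", "aud": ["my_client_id"]}
fake_token = ".".join(
    ["e30", encode_base64(json.dumps(payload).encode("utf-8")), "sig"]
)

_, body, _ = fake_token.split(".")
claims = json.loads(decode_base64(body).decode("utf-8"))
aud = claims["aud"] if isinstance(claims["aud"], list) else [claims["aud"]]
assert claims["iss"] == "https://op.example.com" and "my_client_id" in aud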
class OidcError(Exception):
|
||||
"""Used to catch errors when calling the token_endpoint"""
|
||||
@@ -275,6 +368,7 @@ class OidcProvider:
        provider: OidcProviderConfig,
    ):
        self._store = hs.get_datastores().main
        self._clock = hs.get_clock()

        self._macaroon_generaton = macaroon_generator

@@ -341,6 +435,7 @@ class OidcProvider:
        self.idp_brand = provider.idp_brand

        self._sso_handler = hs.get_sso_handler()
        self._device_handler = hs.get_device_handler()

        self._sso_handler.register_identity_provider(self)

@@ -399,6 +494,41 @@ class OidcProvider:
        # If we're not using userinfo, we need a valid jwks to validate the ID token
        m.validate_jwks_uri()

        if self._config.backchannel_logout_enabled:
            if not m.get("backchannel_logout_supported", False):
                logger.warning(
                    "OIDC Back-Channel Logout is enabled for issuer %r "
                    "but it does not advertise support for it",
                    self.issuer,
                )

            elif not m.get("backchannel_logout_session_supported", False):
                logger.warning(
                    "OIDC Back-Channel Logout is enabled and supported "
                    "by issuer %r but it might not send a session ID with "
                    "logout tokens, which is required for the logouts to work",
                    self.issuer,
                )

            if not self._config.backchannel_logout_ignore_sub:
                # If OIDC back-channel logouts are enabled, the user mapping
                # provider should use the `sub` claim. We verify that by
                # mapping a dummy user and seeing if we get back the sub claim.
                user = UserInfo({"sub": "thisisasubject"})
                try:
                    subject = self._user_mapping_provider.get_remote_user_id(user)
                    if subject != user["sub"]:
                        raise ValueError("Unexpected subject")
                except Exception:
                    logger.warning(
                        f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} "
                        "but it looks like the configured `user_mapping_provider` "
                        "does not use the `sub` claim as subject. If that is the case, "
                        "and you want Synapse to ignore the `sub` claim in OIDC "
                        "Back-Channel Logouts, set `backchannel_logout_ignore_sub` "
                        "to `true` in the issuer config."
                    )
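For reference, a mapping provider that passes the check above simply echoes the `sub` claim back as the remote user ID. A hypothetical minimal provider (illustrative only, not one shipped with Synapse):

class SubjectMappingProvider:
    # Hypothetical example: using `sub` as the stable remote ID lets
    # back-channel logout match the OP's session to a local user.
    def get_remote_user_id(self, userinfo) -> str:
        return userinfo["sub"]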
    @property
    def _uses_userinfo(self) -> bool:
        """Returns True if the ``userinfo_endpoint`` should be used.

@@ -414,6 +544,16 @@ class OidcProvider:
            or self._user_profile_method == "userinfo_endpoint"
        )

    @property
    def issuer(self) -> str:
        """The issuer identifying this provider."""
        return self._config.issuer

    @property
    def client_id(self) -> str:
        """The client_id used when interacting with this provider."""
        return self._config.client_id

    async def load_metadata(self, force: bool = False) -> OpenIDProviderMetadata:
        """Return the provider metadata.

@@ -647,7 +787,7 @@ class OidcProvider:
                Must include an ``access_token`` field.

        Returns:
            UserInfo: an object representing the user.
            an object representing the user.
        """
        logger.debug("Using the OAuth2 access_token to request userinfo")
        metadata = await self.load_metadata()

@@ -661,6 +801,59 @@ class OidcProvider:

        return UserInfo(resp)
    async def _verify_jwt(
        self,
        alg_values: List[str],
        token: str,
        claims_cls: Type[C],
        claims_options: Optional[dict] = None,
        claims_params: Optional[dict] = None,
    ) -> C:
        """Decode and validate a JWT, re-fetching the JWKS as needed.

        Args:
            alg_values: list of `alg` values allowed when verifying the JWT.
            token: the JWT.
            claims_cls: the JWTClaims class to use to validate the claims.
            claims_options: dict of options passed to the `claims_cls` constructor.
            claims_params: dict of params passed to the `claims_cls` constructor.

        Returns:
            The decoded claims in the JWT.
        """
        jwt = JsonWebToken(alg_values)

        logger.debug("Attempting to decode JWT (%s) %r", claims_cls.__name__, token)

        # Try to decode the keys in cache first, then retry by forcing the keys
        # to be reloaded
        jwk_set = await self.load_jwks()
        try:
            claims = jwt.decode(
                token,
                key=jwk_set,
                claims_cls=claims_cls,
                claims_options=claims_options,
                claims_params=claims_params,
            )
        except ValueError:
            logger.info("Reloading JWKS after decode error")
            jwk_set = await self.load_jwks(force=True)  # try reloading the jwks
            claims = jwt.decode(
                token,
                key=jwk_set,
                claims_cls=claims_cls,
                claims_options=claims_options,
                claims_params=claims_params,
            )

        logger.debug("Decoded JWT (%s) %r; validating", claims_cls.__name__, claims)

        claims.validate(
            now=self._clock.time(), leeway=120
        )  # allows 2 min of clock skew

        return claims
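The ValueError fallback above exists because the OP may rotate its signing keys between metadata refreshes, in which case decoding against the cached JWKS fails. The same cache-then-force-reload pattern in isolation, as a sketch where fetch_keys and decode_with are hypothetical stand-ins for load_jwks and jwt.decode:

async def decode_with_refresh(token, fetch_keys, decode_with):
    keys = await fetch_keys(force=False)  # cached JWKS first
    try:
        return decode_with(token, keys)
    except ValueError:
        # Keys may have rotated since we cached them: refetch and retry once.
        keys = await fetch_keys(force=True)
        return decode_with(token, keys)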
    async def _parse_id_token(self, token: Token, nonce: str) -> CodeIDToken:
        """Return an instance of UserInfo from token's ``id_token``.

@@ -673,7 +866,14 @@ class OidcProvider:
        Returns:
            The decoded claims in the ID token.
        """
        id_token = token.get("id_token")

        # That has theoretically been checked by the caller, so even though
        # assertions are not enabled in production, it is mainly here to appease mypy
        assert id_token is not None

        metadata = await self.load_metadata()

        claims_params = {
            "nonce": nonce,
            "client_id": self._client_auth.client_id,
@@ -683,39 +883,17 @@ class OidcProvider:
            # in the `id_token` that we can check against.
            claims_params["access_token"] = token["access_token"]

        claims_options = {"iss": {"values": [metadata["issuer"]]}}

        alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
        jwt = JsonWebToken(alg_values)

        claim_options = {"iss": {"values": [metadata["issuer"]]}}

        id_token = token["id_token"]
        logger.debug("Attempting to decode JWT id_token %r", id_token)

        # Try to decode the keys in cache first, then retry by forcing the keys
        # to be reloaded
        jwk_set = await self.load_jwks()
        try:
            claims = jwt.decode(
                id_token,
                key=jwk_set,
                claims_cls=CodeIDToken,
                claims_options=claim_options,
                claims_params=claims_params,
            )
        except ValueError:
            logger.info("Reloading JWKS after decode error")
            jwk_set = await self.load_jwks(force=True)  # try reloading the jwks
            claims = jwt.decode(
                id_token,
                key=jwk_set,
                claims_cls=CodeIDToken,
                claims_options=claim_options,
                claims_params=claims_params,
            )

        logger.debug("Decoded id_token JWT %r; validating", claims)

        claims.validate(leeway=120)  # allows 2 min of clock skew
        claims = await self._verify_jwt(
            alg_values=alg_values,
            token=id_token,
            claims_cls=CodeIDToken,
            claims_options=claims_options,
            claims_params=claims_params,
        )

        return claims
@@ -1036,6 +1214,146 @@ class OidcProvider:
        # to be strings.
        return str(remote_user_id)

    async def handle_backchannel_logout(
        self, request: SynapseRequest, logout_token: str
    ) -> None:
        """Handle an incoming request to /_synapse/client/oidc/backchannel_logout

        The OIDC Provider posts a logout token to this endpoint when a user
        session ends. That token is a JWT signed with the same keys as
        ID tokens. The OpenID Connect Back-Channel Logout draft explains how to
        validate the JWT and figure out what session to end.

        Args:
            request: The request to respond to
            logout_token: The logout token (a JWT) extracted from the request body
        """
        # Back-Channel Logout can be disabled in the config, hence this check.
        # This is not that important for now since Synapse is registered
        # manually to the OP, so not specifying the backchannel-logout URI is
        # as effective as disabling it here. It might make more sense if we
        # support dynamic registration in Synapse at some point.
        if not self._config.backchannel_logout_enabled:
            logger.warning(
                f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config"
            )

            # TODO: this responds with a 400 status code, which is what the OIDC
            # Back-Channel Logout spec expects, but the spec also suggests answering
            # with a JSON object, with the `error` and `error_description` fields
            # set, which we are not doing here.
            # See https://openid.net/specs/openid-connect-backchannel-1_0.html#BCResponse
            raise SynapseError(
                400, "OpenID Connect Back-Channel Logout is disabled for this provider"
            )

        metadata = await self.load_metadata()

        # As per OIDC Back-Channel Logout 1.0 sec. 2.4:
        #   A Logout Token MUST be signed and MAY also be encrypted. The same
        #   keys are used to sign and encrypt Logout Tokens as are used for ID
        #   Tokens. If the Logout Token is encrypted, it SHOULD replicate the
        #   iss (issuer) claim in the JWT Header Parameters, as specified in
        #   Section 5.3 of [JWT].
        alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])

        # As per sec. 2.6:
        #   3. Validate the iss, aud, and iat Claims in the same way they are
        #      validated in ID Tokens.
        # Which means the audience should contain Synapse's client_id and the
        # issuer should be the IdP issuer
        claims_options = {
            "iss": {"values": [metadata["issuer"]]},
            "aud": {"values": [self.client_id]},
        }

        try:
            claims = await self._verify_jwt(
                alg_values=alg_values,
                token=logout_token,
                claims_cls=LogoutToken,
                claims_options=claims_options,
            )
        except JoseError:
            logger.exception("Invalid logout_token")
            raise SynapseError(400, "Invalid logout_token")

        # As per sec. 2.6:
        #   4. Verify that the Logout Token contains a sub Claim, a sid Claim,
        #      or both.
        #   5. Verify that the Logout Token contains an events Claim whose
        #      value is a JSON object containing the member name
        #      http://schemas.openid.net/event/backchannel-logout.
        #   6. Verify that the Logout Token does not contain a nonce Claim.
        # This is all verified by the LogoutToken claims class, so at this
        # point the `sid` claim exists and is a string.
        sid: str = claims.get("sid")

        # If the `sub` claim was included in the logout token, we check that it
        # matches the right user. We can have cases where the `sub` claim is not
        # the ID saved in the database, so we let admins disable this check in
        # the config.
        sub: Optional[str] = claims.get("sub")
        expected_user_id: Optional[str] = None
        if sub is not None and not self._config.backchannel_logout_ignore_sub:
            expected_user_id = await self._store.get_user_by_external_id(
                self.idp_id, sub
            )

        # Invalidate any running user-mapping sessions, in-flight login tokens
        # and active devices
        await self._sso_handler.revoke_sessions_for_provider_session_id(
            auth_provider_id=self.idp_id,
            auth_provider_session_id=sid,
            expected_user_id=expected_user_id,
        )

        request.setResponseCode(200)
        request.setHeader(b"Cache-Control", b"no-cache, no-store")
        request.setHeader(b"Pragma", b"no-cache")
        finish_request(request)

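For context, the request this method handles is a form-encoded POST from the OP. A hypothetical example of what that call might look like (server name and token made up; assumes the `requests` package):

import requests

requests.post(
    "https://synapse.example.com/_synapse/client/oidc/backchannel_logout",
    # OIDC Back-Channel Logout 1.0 sends the body as
    # application/x-www-form-urlencoded with a single logout_token field.
    data={"logout_token": "<signed logout token JWT>"},
)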
class LogoutToken(JWTClaims):
    """
    Holds and verifies claims of a logout token, as per
    https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken
    """

    REGISTERED_CLAIMS = ["iss", "sub", "aud", "iat", "jti", "events", "sid"]

    def validate(self, now: Optional[int] = None, leeway: int = 0) -> None:
        """Validate everything in claims payload."""
        super().validate(now, leeway)
        self.validate_sid()
        self.validate_events()
        self.validate_nonce()

    def validate_sid(self) -> None:
        """Ensure the sid claim is present"""
        sid = self.get("sid")
        if not sid:
            raise MissingClaimError("sid")

        if not isinstance(sid, str):
            raise InvalidClaimError("sid")

    def validate_nonce(self) -> None:
        """Ensure the nonce claim is absent"""
        if "nonce" in self:
            raise InvalidClaimError("nonce")

    def validate_events(self) -> None:
        """Ensure the events claim is present and has the right value"""
        events = self.get("events")
        if not events:
            raise MissingClaimError("events")

        if not isinstance(events, dict):
            raise InvalidClaimError("events")

        if "http://schemas.openid.net/event/backchannel-logout" not in events:
            raise InvalidClaimError("events")

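A payload shape that would pass this class's checks, for illustration. All values are hypothetical; the construction assumes authlib's JWTClaims(payload, header) signature, which LogoutToken inherits:

payload = {
    "iss": "https://issuer.example.com",
    "aud": "synapse-client-id",
    "iat": 1667304000,
    "jti": "bWJxZHZx",
    "sid": "some-op-session-id",
    "events": {"http://schemas.openid.net/event/backchannel-logout": {}},
    # no "nonce": validate_nonce() would reject it
}
claims = LogoutToken(payload, {"alg": "RS256"})
claims.validate(now=1667304000)  # raises MissingClaimError/InvalidClaimError on bad input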
# number of seconds a newly-generated client secret should be valid for
CLIENT_SECRET_VALIDITY_SECONDS = 3600

@@ -1105,6 +1423,7 @@ class JwtClientSecret:
        logger.info(
            "Generating new JWT for %s: %s %s", self._oauth_issuer, header, payload
        )
        jwt = JsonWebToken(header["alg"])
        self._cached_secret = jwt.encode(header, payload, self._key.key)
        self._cached_secret_replacement_time = (
            expires_at - CLIENT_SECRET_MIN_VALIDITY_SECONDS

@@ -1119,9 +1438,6 @@ class UserAttributeDict(TypedDict):
    emails: List[str]


C = TypeVar("C")


class OidcMappingProvider(Generic[C]):
    """A mapping provider maps a UserInfo object to user attributes.

@@ -201,7 +201,7 @@ class BasePresenceHandler(abc.ABC):
        """Get the current presence state for multiple users.

        Returns:
            dict: `user_id` -> `UserPresenceState`
            A mapping of `user_id` -> `UserPresenceState`
        """
        states = {}
        missing = []

@@ -256,7 +256,7 @@ class BasePresenceHandler(abc.ABC):
        with the app.
        """

    async def update_external_syncs_row(
    async def update_external_syncs_row(  # noqa: B027 (no-op by design)
        self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int
    ) -> None:
        """Update the syncing users for an external process as a delta.

@@ -272,7 +272,9 @@ class BasePresenceHandler(abc.ABC):
            sync_time_msec: Time in ms when the user was last syncing
        """

    async def update_external_syncs_clear(self, process_id: str) -> None:
    async def update_external_syncs_clear(  # noqa: B027 (no-op by design)
        self, process_id: str
    ) -> None:
        """Marks all users that had been marked as syncing by a given process
        as offline.
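The `# noqa: B027` waivers are needed because flake8-bugbear's B027 check flags an empty method in an abstract base class that is not decorated with @abstractmethod; here the empty bodies are deliberate optional hooks that subclasses may override. In miniature:

import abc

class Base(abc.ABC):
    def optional_hook(self) -> None:  # noqa: B027 (deliberate no-op)
        """Subclasses may override this; the default does nothing."""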
@@ -476,7 +478,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
            return _NullContextManager()

        prev_state = await self.current_state_for_user(user_id)
        if prev_state != PresenceState.BUSY:
        if prev_state.state != PresenceState.BUSY:
            # We set state here but pass ignore_status_msg = True as we don't want to
            # cause the status message to be cleared.
            # Note that this causes last_active_ts to be incremented which is not

@@ -307,7 +307,11 @@ class ProfileHandler:
        if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
            return True

        server_name, _, media_id = parse_and_validate_mxc_uri(mxc)
        host, port, media_id = parse_and_validate_mxc_uri(mxc)
        if port is not None:
            server_name = host + ":" + str(port)
        else:
            server_name = host

        if server_name == self.server_name:
            media_info = await self.store.get_local_media(media_id)
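The new branch handles MXC URIs whose authority carries an explicit port, e.g. mxc://example.com:8448/abc123. The reassembly step in isolation, as a sketch that assumes parse_and_validate_mxc_uri now returns a (host, port, media_id) triple, as the code above suggests:

from typing import Optional

def join_server_name(host: str, port: Optional[int]) -> str:
    # ("example.com", 8448) -> "example.com:8448"; no port -> bare host
    return host if port is None else f"{host}:{port}"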
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tup

import attr

from synapse.api.constants import RelationTypes
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
from synapse.logging.tracing import trace

@@ -75,6 +75,7 @@ class RelationsHandler:
        self._clock = hs.get_clock()
        self._event_handler = hs.get_event_handler()
        self._event_serializer = hs.get_event_client_serializer()
        self._event_creation_handler = hs.get_event_creation_handler()

    async def get_relations(
        self,

@@ -205,6 +206,59 @@ class RelationsHandler:

        return related_events, next_token

    async def redact_events_related_to(
        self,
        requester: Requester,
        event_id: str,
        initial_redaction_event: EventBase,
        relation_types: List[str],
    ) -> None:
        """Redacts all events related to the given event ID with one of the given
        relation types.

        This method is expected to be called when redacting the event referred to
        by the given event ID.

        If an event cannot be redacted (e.g. because of insufficient permissions),
        log the error and try to redact the next one.

        Args:
            requester: The requester to redact events on behalf of.
            event_id: The event ID whose relations to look up and redact.
            initial_redaction_event: The redaction for the event referred to by
                event_id.
            relation_types: The types of relations to look for.

        Raises:
            ShadowBanError if the requester is shadow-banned
        """
        related_event_ids = (
            await self._main_store.get_all_relations_for_event_with_types(
                event_id, relation_types
            )
        )

        for related_event_id in related_event_ids:
            try:
                await self._event_creation_handler.create_and_send_nonmember_event(
                    requester,
                    {
                        "type": EventTypes.Redaction,
                        "content": initial_redaction_event.content,
                        "room_id": initial_redaction_event.room_id,
                        "sender": requester.user.to_string(),
                        "redacts": related_event_id,
                    },
                    ratelimit=False,
                )
            except SynapseError as e:
                logger.warning(
                    "Failed to redact event %s (related to event %s): %s",
                    related_event_id,
                    event_id,
                    e.msg,
                )
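A hypothetical call site, e.g. a redaction handler that also wants thread replies redacted; the variable names here are illustrative stand-ins, not an actual Synapse call:

await relations_handler.redact_events_related_to(
    requester=requester,
    event_id="$event_being_redacted",
    initial_redaction_event=redaction_event,
    relation_types=[RelationTypes.THREAD],
)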
    async def get_annotations_for_event(
        self,
        event_id: str,

+95 -115
@@ -49,7 +49,6 @@ from synapse.api.constants import (
from synapse.api.errors import (
    AuthError,
    Codes,
    HttpResponseException,
    LimitExceededError,
    NotFoundError,
    StoreError,
@@ -60,7 +59,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin

@@ -559,7 +557,6 @@ class RoomCreationHandler:
            invite_list=[],
            initial_state=initial_state,
            creation_content=creation_content,
            ratelimit=False,
        )

        # Transfer membership events

@@ -753,6 +750,10 @@ class RoomCreationHandler:
        )

        if ratelimit:
            # Rate limit once in advance, but don't rate limit the individual
            # events in the room — room creation isn't atomic and it's very
            # janky if half the events in the initial state don't make it because
            # of rate limiting.
            await self.request_ratelimiter.ratelimit(requester)

        room_version_id = config.get(

@@ -913,7 +914,6 @@ class RoomCreationHandler:
            room_alias=room_alias,
            power_level_content_override=power_level_content_override,
            creator_join_profile=creator_join_profile,
            ratelimit=ratelimit,
        )

        if "name" in config:

@@ -1037,7 +1037,6 @@ class RoomCreationHandler:
        room_alias: Optional[RoomAlias] = None,
        power_level_content_override: Optional[JsonDict] = None,
        creator_join_profile: Optional[JsonDict] = None,
        ratelimit: bool = True,
    ) -> Tuple[int, str, int]:
        """Sends the initial events into a new room. Sends the room creation, membership,
        and power level events into the room sequentially, then creates and batches up the

@@ -1046,6 +1045,8 @@ class RoomCreationHandler:
        `power_level_content_override` doesn't apply when initial state has
        power level state event content.

        Rate limiting should already have been applied by this point.

        Returns:
            A tuple containing the stream ID, event ID and depth of the last
            event sent to the room.

@@ -1055,9 +1056,6 @@ class RoomCreationHandler:
        event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
        depth = 1

        # the last event sent/persisted to the db
        last_sent_event_id: Optional[str] = None

        # the most recently created event
        prev_event: List[str] = []
        # a map of event types, state keys -> event_ids. We collect these mappings as events are

@@ -1082,6 +1080,19 @@ class RoomCreationHandler:
            for_batch: bool,
            **kwargs: Any,
        ) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
            """
            Creates an event and associated event context.
            Args:
                etype: the type of event to be created
                content: content of the event
                for_batch: whether the event is being created for batch persisting.
                    If for_batch is true, this will create the event using
                    prev_event_ids and will create an event context for it using
                    the parameters state_map and current_state_group, so those
                    parameters must be provided in that case. The subsequently
                    created event and context are suitable for being batched up
                    and bulk persisted to the database with other similarly
                    created events.
            """
            nonlocal depth
            nonlocal prev_event

@@ -1102,26 +1113,6 @@ class RoomCreationHandler:

            return new_event, new_context

        async def send(
            event: EventBase,
            context: synapse.events.snapshot.EventContext,
            creator: Requester,
        ) -> int:
            nonlocal last_sent_event_id

            ev = await self.event_creation_handler.handle_new_client_event(
                requester=creator,
                events_and_context=[(event, context)],
                ratelimit=False,
                ignore_shadow_ban=True,
            )

            last_sent_event_id = ev.event_id

            # we know it was persisted, so must have a stream ordering
            assert ev.internal_metadata.stream_ordering
            return ev.internal_metadata.stream_ordering

        try:
            config = self._presets_dict[preset_config]
        except KeyError:
@@ -1135,16 +1126,20 @@ class RoomCreationHandler:
            )

logger.debug("Sending %s in new room", EventTypes.Member)
|
||||
await send(creation_event, creation_context, creator)
|
||||
ev = await self.event_creation_handler.handle_new_client_event(
|
||||
requester=creator,
|
||||
events_and_context=[(creation_event, creation_context)],
|
||||
ratelimit=False,
|
||||
ignore_shadow_ban=True,
|
||||
)
|
||||
last_sent_event_id = ev.event_id
|
||||
|
||||
# Room create event must exist at this point
|
||||
assert last_sent_event_id is not None
|
||||
member_event_id, _ = await self.room_member_handler.update_membership(
|
||||
creator,
|
||||
creator.user,
|
||||
room_id,
|
||||
"join",
|
||||
ratelimit=ratelimit,
|
||||
ratelimit=False,
|
||||
content=creator_join_profile,
|
||||
new_room=True,
|
||||
prev_event_ids=[last_sent_event_id],
|
||||
@@ -1157,15 +1152,24 @@ class RoomCreationHandler:
|
||||
depth += 1
|
||||
state_map[(EventTypes.Member, creator.user.to_string())] = member_event_id
|
||||
|
||||
# we need the state group of the membership event as it is the current state group
|
||||
event_to_state = (
|
||||
await self._storage_controllers.state.get_state_group_for_events(
|
||||
[member_event_id]
|
||||
)
|
||||
)
|
||||
current_state_group = event_to_state[member_event_id]
|
||||
|
||||
events_to_send = []
|
||||
# We treat the power levels override specially as this needs to be one
|
||||
# of the first events that get sent into a room.
|
||||
pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None)
|
||||
if pl_content is not None:
|
||||
power_event, power_context = await create_event(
|
||||
EventTypes.PowerLevels, pl_content, False
|
||||
EventTypes.PowerLevels, pl_content, True
|
||||
)
|
||||
current_state_group = power_context._state_group
|
||||
await send(power_event, power_context, creator)
|
||||
events_to_send.append((power_event, power_context))
|
||||
else:
|
||||
power_level_content: JsonDict = {
|
||||
"users": {creator_id: 100},
|
||||
@@ -1211,12 +1215,11 @@ class RoomCreationHandler:
|
||||
pl_event, pl_context = await create_event(
|
||||
EventTypes.PowerLevels,
|
||||
power_level_content,
|
||||
False,
|
||||
True,
|
||||
)
|
||||
current_state_group = pl_context._state_group
|
||||
await send(pl_event, pl_context, creator)
|
||||
events_to_send.append((pl_event, pl_context))
|
||||
|
||||
events_to_send = []
|
||||
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
|
||||
room_alias_event, room_alias_context = await create_event(
|
||||
EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True
|
||||
@@ -1269,7 +1272,10 @@ class RoomCreationHandler:
|
||||
events_to_send.append((encryption_event, encryption_context))
|
||||
|
||||
last_event = await self.event_creation_handler.handle_new_client_event(
|
||||
creator, events_to_send, ignore_shadow_ban=True
|
||||
creator,
|
||||
events_to_send,
|
||||
ignore_shadow_ban=True,
|
||||
ratelimit=False,
|
||||
)
|
||||
assert last_event.internal_metadata.stream_ordering is not None
|
||||
return last_event.internal_metadata.stream_ordering, last_event.event_id, depth
|
||||
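The net effect of these hunks is that initial room events are no longer persisted one `send()` call at a time but collected into `events_to_send` and handed to `handle_new_client_event` in a single batch. The shape of that change reduced to a sketch, with hypothetical create and persist_batch callables standing in for the real handlers:

from typing import Any, Callable, Dict, List, Tuple

def persist_initial_events(
    specs: List[Tuple[str, Dict[str, Any]]],
    create: Callable[[str, Dict[str, Any]], Any],
    persist_batch: Callable[[List[Any]], None],
) -> None:
    # Create every event up front, then persist the whole ordered batch
    # in one call instead of one round-trip per event.
    batch = [create(etype, content) for etype, content in specs]
    persist_batch(batch)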
@@ -1445,7 +1451,7 @@ class RoomContextHandler:
            events_before=events_before,
            event=event,
            events_after=events_after,
            state=await filter_evts(state_events),
            state=state_events,
            aggregations=aggregations,
            start=await token.copy_and_replace(
                StreamKeyType.ROOM, results.start

@@ -1491,7 +1497,12 @@ class TimestampLookupHandler:
        Raises:
            SynapseError if unable to find any event locally in the given direction
        """

        logger.debug(
            "get_event_for_timestamp(room_id=%s, timestamp=%s, direction=%s) Finding closest event...",
            room_id,
            timestamp,
            direction,
        )
        local_event_id = await self.store.get_event_id_for_timestamp(
            room_id, timestamp, direction
        )
@@ -1543,85 +1554,54 @@ class TimestampLookupHandler:
                )
            )

            # Loop through each homeserver candidate until we get a successful response
            for domain in likely_domains:
                # We don't want to ask our own server for information we don't have
                if domain == self.server_name:
                    continue
            remote_response = await self.federation_client.timestamp_to_event(
                destinations=likely_domains,
                room_id=room_id,
                timestamp=timestamp,
                direction=direction,
            )
            if remote_response is not None:
                logger.debug(
                    "get_event_for_timestamp: remote_response=%s",
                    remote_response,
                )

                try:
                    remote_response = await self.federation_client.timestamp_to_event(
                        domain, room_id, timestamp, direction
                    )
                    logger.debug(
                        "get_event_for_timestamp: response from domain(%s)=%s",
                        domain,
                        remote_response,
                remote_event_id = remote_response.event_id
                remote_origin_server_ts = remote_response.origin_server_ts

                # Backfill this event so we can get a pagination token for
                # it with `/context` and paginate `/messages` from this
                # point.
                pulled_pdu_info = await self.federation_event_handler.backfill_event_id(
                    likely_domains, room_id, remote_event_id
                )
                remote_event = pulled_pdu_info.pdu

                # XXX: When we see that the remote server is not trustworthy,
                # maybe we should not ask them first in the future.
                if remote_origin_server_ts != remote_event.origin_server_ts:
                    logger.info(
                        "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occurred at remote_origin_server_ts=%s but that isn't true (actually occurred at %s). Their claims are dubious and we should consider not trusting them.",
                        pulled_pdu_info.pull_origin,
                        remote_event_id,
                        remote_origin_server_ts,
                        remote_event.origin_server_ts,
                    )

                    remote_event_id = remote_response.event_id
                    remote_origin_server_ts = remote_response.origin_server_ts

                    # Backfill this event so we can get a pagination token for
                    # it with `/context` and paginate `/messages` from this
                    # point.
                    #
                    # TODO: The requested timestamp may lie in a part of the
                    # event graph that the remote server *also* didn't have,
                    # in which case they will have returned another event
                    # which may be nowhere near the requested timestamp. In
                    # the future, we may need to reconcile that gap and ask
                    # other homeservers, and/or extend `/timestamp_to_event`
                    # to return events on *both* sides of the timestamp to
                    # help reconcile the gap faster.
                    remote_event = (
                        await self.federation_event_handler.backfill_event_id(
                            domain, room_id, remote_event_id
                        )
                    )

                    # XXX: When we see that the remote server is not trustworthy,
                    # maybe we should not ask them first in the future.
                    if remote_origin_server_ts != remote_event.origin_server_ts:
                        logger.info(
                            "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occurred at remote_origin_server_ts=%s but that isn't true (actually occurred at %s). Their claims are dubious and we should consider not trusting them.",
                            domain,
                            remote_event_id,
                            remote_origin_server_ts,
                            remote_event.origin_server_ts,
                        )

                    # Only return the remote event if it's closer than the local event
                    if not local_event or (
                        abs(remote_event.origin_server_ts - timestamp)
                        < abs(local_event.origin_server_ts - timestamp)
                    ):
                        logger.info(
                            "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
                            remote_event_id,
                            remote_event.origin_server_ts,
                            timestamp,
                            local_event.event_id if local_event else None,
                            local_event.origin_server_ts if local_event else None,
                        )
                        return remote_event_id, remote_origin_server_ts
                except (HttpResponseException, InvalidResponseError) as ex:
                    # Let's not put a high priority on some other homeserver
                    # failing to respond or giving a random response
                    logger.debug(
                        "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
                        domain,
                        type(ex).__name__,
                        ex,
                        ex.args,
                    )
                except Exception:
                    # But we do want to see some exceptions in our code
                    logger.warning(
                        "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception",
                        domain,
                        exc_info=True,
                # Only return the remote event if it's closer than the local event
                if not local_event or (
                    abs(remote_event.origin_server_ts - timestamp)
                    < abs(local_event.origin_server_ts - timestamp)
                ):
                    logger.info(
                        "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
                        remote_event_id,
                        remote_event.origin_server_ts,
                        timestamp,
                        local_event.event_id if local_event else None,
                        local_event.origin_server_ts if local_event else None,
                    )
                    return remote_event_id, remote_origin_server_ts

        # To appease mypy, we have to add both of these conditions to check for
        # `None`. We only expect `local_event` to be `None` when

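The remote-vs-local comparison above, in isolation (a sketch; timestamps are in milliseconds):

from typing import Optional

def remote_is_closer(remote_ts: int, local_ts: Optional[int], target_ts: int) -> bool:
    # Prefer the remote event when there is no local candidate, or when the
    # remote event's origin_server_ts is nearer the requested timestamp.
    return local_ts is None or abs(remote_ts - target_ts) < abs(local_ts - target_ts)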
@@ -441,7 +441,7 @@ class DefaultSamlMappingProvider:
            client_redirect_url: where the client wants to redirect to

        Returns:
            dict: A dict containing new user attributes. Possible keys:
            A dict containing new user attributes. Possible keys:
                * mxid_localpart (str): Required. The localpart of the user's mxid
                * displayname (str): The displayname of the user
                * emails (list[str]): Any emails for the user

@@ -483,7 +483,7 @@ class DefaultSamlMappingProvider:
        Args:
            config: A dictionary containing configuration options for this provider
        Returns:
            SamlConfig: A custom config object for this module
            A custom config object for this module
        """
        # Parse config options and use defaults where necessary
        mxid_source_attribute = config.get("mxid_source_attribute", "uid")