
Compare commits

..

1 Commit

Author SHA1 Message Date
Olivier Wilkinson (reivilibre) dffbabbecc Newsfile
Signed-off-by: Olivier Wilkinson (reivilibre) <oliverw@matrix.org>
2023-06-15 17:16:13 +01:00
218 changed files with 6992 additions and 10621 deletions
+10 -6
@@ -29,12 +29,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For PRs, we only run each type of test with the oldest Python version supported (which
# is Python 3.8 right now)
# For each type of test we only run on Py3.7 on PRs
trial_sqlite_tests = [
{
"python-version": "3.8",
"python-version": "3.7",
"database": "sqlite",
"extras": "all",
}
@@ -47,13 +46,13 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
for version in ("3.9", "3.10", "3.11")
for version in ("3.8", "3.9", "3.10", "3.11")
)
trial_postgres_tests = [
{
"python-version": "3.8",
"python-version": "3.7",
"database": "postgres",
"postgres-version": "11",
"extras": "all",
@@ -72,7 +71,7 @@ if not IS_PR:
trial_no_extra_tests = [
{
"python-version": "3.8",
"python-version": "3.7",
"database": "sqlite",
"extras": "",
}
@@ -134,6 +133,11 @@ if not IS_PR:
"sytest-tag": "testing",
"postgres": "postgres",
},
{
"sytest-tag": "buster",
"postgres": "multi-postgres",
"workers": "workers",
},
]
)
+1 -13
@@ -29,16 +29,6 @@ jobs:
- name: Inspect builder
run: docker buildx inspect
- name: Checkout repository
uses: actions/checkout@v3
- name: Extract version from pyproject.toml
# Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell
shell: bash
run: |
echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
@@ -71,9 +61,7 @@ jobs:
uses: docker/build-push-action@v4
with:
push: true
labels: |
gitsha1=${{ github.sha }}
org.opencontainers.image.version=${{ env.SYNAPSE_VERSION }}
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
+1 -1
@@ -144,7 +144,7 @@ jobs:
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
+5 -5
@@ -320,7 +320,7 @@ jobs:
- uses: actions/setup-python@v4
with:
python-version: '3.8'
python-version: '3.7'
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -362,7 +362,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.8"]
python-version: ["pypy-3.7"]
extras: ["all"]
steps:
@@ -399,8 +399,8 @@ jobs:
env:
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.job.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }}
ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') || '' }}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') && 1 }}
WORKERS: ${{ matrix.job.workers && 1 }}
BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
@@ -477,7 +477,7 @@ jobs:
strategy:
matrix:
include:
- python-version: "3.8"
- python-version: "3.7"
postgres-version: "11"
- python-version: "3.11"
+1 -5
@@ -96,11 +96,7 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
# We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
# This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
# they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
# version, assuming that any incompatibilities on newer versions would also be present on the oldest.
image: matrixdotorg/sytest-synapse:focal
image: matrixdotorg/sytest-synapse:buster
volumes:
- ${{ github.workspace }}:/src
-1
@@ -34,7 +34,6 @@ __pycache__/
/logs
/media_store/
/uploads
/homeserver-config-overrides.d
# For direnv users
/.envrc
+2764 -273
File diff suppressed because it is too large
Generated
+21 -33
@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.72"
version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
[[package]]
name = "arc-swap"
@@ -182,9 +182,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.64"
version = "1.0.52"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
dependencies = [
"unicode-ident",
]
@@ -229,9 +229,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.8.3"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605"
checksum = "c94ff6535a6bae58d7d0b85e60d4c53f7f84d0d0aa35d6a28c3f3e70bfe51444"
dependencies = [
"arc-swap",
"log",
@@ -273,9 +273,9 @@ dependencies = [
[[package]]
name = "quote"
version = "1.0.29"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
dependencies = [
"proc-macro2",
]
@@ -291,21 +291,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.9.3"
version = "1.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69"
checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
dependencies = [
"aho-corasick",
"memchr",
@@ -314,9 +302,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
version = "0.7.4"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2"
checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
[[package]]
name = "ryu"
@@ -332,29 +320,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.183"
version = "1.0.164"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c"
checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.183"
version = "1.0.164"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816"
checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.28",
"syn 2.0.10",
]
[[package]]
name = "serde_json"
version = "1.0.104"
version = "1.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c"
checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
dependencies = [
"itoa",
"ryu",
@@ -386,9 +374,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.28"
version = "2.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567"
checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
dependencies = [
"proc-macro2",
"quote",
-1
@@ -3,4 +3,3 @@
[workspace]
members = ["rust"]
resolver = "2"
+1
@@ -0,0 +1 @@
Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas`.
+1
@@ -0,0 +1 @@
Fix a long-standing bug where media files were served in an unsafe manner. Contributed by @joshqou.
+1
@@ -0,0 +1 @@
Improve `/messages` response time by avoiding backfill when we already have messages to return.
+1
@@ -0,0 +1 @@
Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983).
+1
@@ -0,0 +1 @@
Avoid invalidating a cache that was just prefilled.
+1
@@ -0,0 +1 @@
Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983).
+1
@@ -0,0 +1 @@
Document `looping_call()` functionality that will wait for the given function to finish before scheduling another.
+1
@@ -0,0 +1 @@
Fix joining rooms through aliases where the alias server isn't a real homeserver. Contributed by @tulir @ Beeper.
+1
@@ -0,0 +1 @@
Nop.
+1 -1
@@ -63,7 +63,7 @@
"uid": "${DS_PROMETHEUS}"
},
"enable": true,
"expr": "changes(process_start_time_seconds{instance=\"$instance\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"$instance\",job=\"synapse\"}",
"expr": "changes(process_start_time_seconds{instance=\"matrix.org\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"matrix.org\",job=\"synapse\"}",
"iconColor": "purple",
"name": "deploys",
"titleFormat": "Deployed {{version}}"
+1 -1
@@ -29,7 +29,7 @@
"level": "error"
},
{
"line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix-federation://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
"line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
"level": "warning"
},
{
-54
@@ -1,57 +1,3 @@
matrix-synapse-py3 (1.90.0) stable; urgency=medium
* New Synapse release 1.90.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Aug 2023 11:17:34 +0100
matrix-synapse-py3 (1.90.0~rc1) stable; urgency=medium
* New Synapse release 1.90.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Aug 2023 15:29:34 +0100
matrix-synapse-py3 (1.89.0) stable; urgency=medium
* New Synapse release 1.89.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Aug 2023 11:07:15 +0100
matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium
* New Synapse release 1.89.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Jul 2023 14:31:07 +0200
matrix-synapse-py3 (1.88.0) stable; urgency=medium
* New Synapse release 1.88.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Jul 2023 13:59:28 +0100
matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium
* New Synapse release 1.88.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 11 Jul 2023 10:20:19 +0100
matrix-synapse-py3 (1.87.0) stable; urgency=medium
* New Synapse release 1.87.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Jul 2023 16:24:00 +0100
matrix-synapse-py3 (1.87.0~rc1) stable; urgency=medium
* New synapse release 1.87.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 27 Jun 2023 15:27:04 +0000
matrix-synapse-py3 (1.86.0) stable; urgency=medium
* New Synapse release 1.86.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Jun 2023 17:22:46 +0200
matrix-synapse-py3 (1.86.0~rc2) stable; urgency=medium
* New Synapse release 1.86.0rc2.
+27 -23
@@ -28,12 +28,12 @@ FROM docker.io/library/${distro} as builder
RUN apt-get update -qq -o Acquire::Languages=none
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends \
build-essential \
ca-certificates \
devscripts \
equivs \
wget
-yqq --no-install-recommends \
build-essential \
ca-certificates \
devscripts \
equivs \
wget
# fetch and unpack the package
# We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
@@ -62,29 +62,33 @@ FROM docker.io/library/${distro}
ARG distro=""
ENV distro ${distro}
# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
# http://bugs.python.org/issue19846
ENV LANG C.UTF-8
# Install the build dependencies
#
# NB: keep this list in sync with the list of build-deps in debian/control
# TODO: it would be nice to do that automatically.
RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
curl \
debhelper \
devscripts \
libsystemd-dev \
lsb-release \
pkg-config \
python3-dev \
python3-pip \
python3-setuptools \
python3-venv \
sqlite3 \
libpq-dev \
libicu-dev \
pkg-config \
xmlsec1
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
curl \
debhelper \
devscripts \
libsystemd-dev \
lsb-release \
pkg-config \
python3-dev \
python3-pip \
python3-setuptools \
python3-venv \
sqlite3 \
libpq-dev \
libicu-dev \
pkg-config \
xmlsec1
# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
@@ -92,6 +92,8 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
# Enable history backfilling support
msc2716_enabled: true
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable support for polls
-4
@@ -35,11 +35,7 @@ server {
# Send all other traffic to the main process
location ~* ^(\\/_matrix|\\/_synapse) {
{% if using_unix_sockets %}
proxy_pass http://unix:/run/main_public.sock;
{% else %}
proxy_pass http://localhost:8080;
{% endif %}
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
-3
@@ -6,9 +6,6 @@
{% if enable_redis %}
redis:
enabled: true
{% if using_unix_sockets %}
path: /tmp/redis.sock
{% endif %}
{% endif %}
{% if appservice_registrations is not none %}
-4
@@ -19,11 +19,7 @@ username=www-data
autorestart=true
[program:redis]
{% if using_unix_sockets %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
{% else %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
{% endif %}
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
-4
@@ -8,11 +8,7 @@ worker_name: "{{ name }}"
worker_listeners:
- type: http
{% if using_unix_sockets %}
path: "/run/worker.{{ port }}"
{% else %}
port: {{ port }}
{% endif %}
{% if listener_resources %}
resources:
- names:
+1 -9
@@ -36,17 +36,12 @@ listeners:
# Allow configuring in case we want to reverse proxy 8008
# using another process in the same container
{% if SYNAPSE_USE_UNIX_SOCKET %}
# Unix sockets don't care about TLS or IP addresses or ports
- path: '/run/main_public.sock'
type: http
{% else %}
- port: {{ SYNAPSE_HTTP_PORT or 8008 }}
tls: false
bind_addresses: ['::']
type: http
x_forwarded: false
{% endif %}
resources:
- names: [client]
compress: true
@@ -62,11 +57,8 @@ database:
user: "{{ POSTGRES_USER or "synapse" }}"
password: "{{ POSTGRES_PASSWORD }}"
database: "{{ POSTGRES_DB or "synapse" }}"
{% if not SYNAPSE_USE_UNIX_SOCKET %}
{# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #}
host: "{{ POSTGRES_HOST or "db" }}"
port: "{{ POSTGRES_PORT or "5432" }}"
{% endif %}
cp_min: 5
cp_max: 10
{% else %}
+27 -78
@@ -74,9 +74,6 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
MAIN_PROCESS_INSTANCE_NAME = "main"
MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
MAIN_PROCESS_REPLICATION_PORT = 9093
# Obviously, these would only be used with the UNIX socket option
MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"
# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
# during processing with the name of the worker.
@@ -247,6 +244,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
"^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -410,15 +408,11 @@ def add_worker_roles_to_shared_config(
)
# Map of stream writer instance names to host/ports combos
if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
instance_map[worker_name] = {
"path": f"/run/worker.{worker_port}",
}
else:
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
# Update the list of stream writers. It's convenient that the name of the worker
# type is the same as the stream to write. Iterate over the whole list in case there
# is more than one.
@@ -430,15 +424,10 @@ def add_worker_roles_to_shared_config(
# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
instance_map[worker_name] = {
"path": f"/run/worker.{worker_port}",
}
else:
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
def merge_worker_template_configs(
@@ -730,29 +719,17 @@ def generate_worker_files(
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.
# Convenience helper for if using unix sockets instead of host:port
using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
# First read the original config file and extract the listeners block. Then we'll
# add another listener for replication. Later we'll write out the result to the
# shared config file.
listeners: List[Any]
if using_unix_sockets:
listeners = [
{
"path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
else:
listeners = [
{
"port": MAIN_PROCESS_REPLICATION_PORT,
"bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
listeners = [
{
"port": MAIN_PROCESS_REPLICATION_PORT,
"bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
with open(config_path) as file_stream:
original_config = yaml.safe_load(file_stream)
original_listeners = original_config.get("listeners")
@@ -793,17 +770,7 @@ def generate_worker_files(
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
# This list ends up being part of the command line to curl, (curl added support for
# Unix sockets in version 7.40).
if using_unix_sockets:
healthcheck_urls = [
f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
# The scheme and hostname from the following URL are ignored.
# The only thing that matters is the path `/health`
"http://localhost/health"
]
else:
healthcheck_urls = ["http://localhost:8080/health"]
healthcheck_urls = ["http://localhost:8080/health"]
# Get the set of all worker types that we have configured
all_worker_types_in_use = set(chain(*requested_worker_types.values()))
@@ -840,12 +807,8 @@ def generate_worker_files(
# given worker_type needs to stay assigned and not be replaced.
worker_config["shared_extra_conf"].update(shared_config)
shared_config = worker_config["shared_extra_conf"]
if using_unix_sockets:
healthcheck_urls.append(
f"--unix-socket /run/worker.{worker_port} http://localhost/health"
)
else:
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Update the shared config with sharding-related options if necessary
add_worker_roles_to_shared_config(
@@ -864,7 +827,6 @@ def generate_worker_files(
"/conf/workers/{name}.yaml".format(name=worker_name),
**worker_config,
worker_log_config_filepath=log_config_filepath,
using_unix_sockets=using_unix_sockets,
)
# Save this worker's port number to the correct nginx upstreams
@@ -885,13 +847,8 @@ def generate_worker_files(
nginx_upstream_config = ""
for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
body = ""
if using_unix_sockets:
for port in upstream_worker_ports:
body += f" server unix:/run/worker.{port};\n"
else:
for port in upstream_worker_ports:
body += f" server localhost:{port};\n"
for port in upstream_worker_ports:
body += f" server localhost:{port};\n"
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
@@ -921,15 +878,10 @@ def generate_worker_files(
# If there are workers, add the main process to the instance_map too.
if workers_in_use:
instance_map = shared_config.setdefault("instance_map", {})
if using_unix_sockets:
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
}
else:
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"host": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": MAIN_PROCESS_REPLICATION_PORT,
}
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"host": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": MAIN_PROCESS_REPLICATION_PORT,
}
# Shared homeserver config
convert(
@@ -939,7 +891,6 @@ def generate_worker_files(
appservice_registrations=appservice_registrations,
enable_redis=workers_in_use,
workers_in_use=workers_in_use,
using_unix_sockets=using_unix_sockets,
)
# Nginx config
@@ -950,7 +901,6 @@ def generate_worker_files(
upstream_directives=nginx_upstream_config,
tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
using_unix_sockets=using_unix_sockets,
)
# Supervisord config
@@ -960,7 +910,6 @@ def generate_worker_files(
"/etc/supervisor/supervisord.conf",
main_config_path=config_path,
enable_redis=workers_in_use,
using_unix_sockets=using_unix_sockets,
)
convert(
-1
@@ -97,7 +97,6 @@
- [Cancellation](development/synapse_architecture/cancellation.md)
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [Streams](development/synapse_architecture/streams.md)
- [TCP Replication](tcp_replication.md)
- [Faster remote joins](development/synapse_architecture/faster_joins.md)
- [Internal Documentation](development/internal_documentation/README.md)
+1 -1
@@ -419,7 +419,7 @@ The following query parameters are available:
* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
* `to` - The token to stop returning events at.
* `to` - The token to spot returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting
+4 -8
@@ -242,9 +242,6 @@ The following parameters should be set in the URL:
- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
Setting this value to `b` will reverse the above sort order. Defaults to `f`.
- `not_user_type` - Exclude certain user types, such as bot users, from the request.
Can be provided multiple times. Possible values are `bot`, `support` or "empty string".
"empty string" here means to exclude users without a type.
Caution. The database only has indexes on the columns `name` and `creation_ts`.
This means that if a different sort order is used (`is_guest`, `admin`,
@@ -732,8 +729,7 @@ POST /_synapse/admin/v1/users/<user_id>/login
An optional `valid_until_ms` field can be specified in the request body as an
integer timestamp that specifies when the token should expire. By default tokens
do not expire. Note that this API does not allow a user to login as themselves
(to create more tokens).
do not expire.
A response body like the following is returned:
@@ -1184,7 +1180,7 @@ The following parameters should be set in the URL:
- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
be local.
## Check username availability
### Check username availability
Checks to see if a username is available, and valid, for the server. See [the client-server
API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)
@@ -1202,7 +1198,7 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
## Find a user based on their ID in an auth provider
### Find a user based on their ID in an auth provider
The API is:
@@ -1241,7 +1237,7 @@ Returns a `404` HTTP status code if no user was found, with a response body like
_Added in Synapse 1.68.0._
## Find a user based on their Third Party ID (ThreePID or 3PID)
### Find a user based on their Third Party ID (ThreePID or 3PID)
The API is:
File diff suppressed because it is too large
+1 -1
@@ -23,7 +23,7 @@ people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).
The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/bullseye/libsqlite3-0) by
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).
Context
+1 -2
@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
@@ -370,7 +370,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres(when applicable).
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
@@ -6,7 +6,7 @@ This is a work-in-progress set of notes with two goals:
See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902).
The key idea is described by [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). This allows servers to
The key idea is described by [MSC706](https://github.com/matrix-org/matrix-spec-proposals/pull/3902). This allows servers to
request a lightweight response to the federation `/send_join` endpoint.
This is called a **faster join**, also known as a **partial join**. In these
notes we'll usually use the word "partial" as it matches the database schema.
@@ -1,157 +0,0 @@
## Streams
Synapse has a concept of "streams", which are roughly described in [`id_generators.py`](
https://github.com/matrix-org/synapse/blob/develop/synapse/storage/util/id_generators.py
).
Generally speaking, streams are a series of notifications that something in Synapse's database has changed that the application might need to respond to.
For example:
- The events stream reports new events (PDUs) that Synapse creates, or that Synapse accepts from another homeserver.
- The account data stream reports changes to users' [account data](https://spec.matrix.org/v1.7/client-server-api/#client-config).
- The to-device stream reports when a device has a new [to-device message](https://spec.matrix.org/v1.7/client-server-api/#send-to-device-messaging).
See [`synapse.replication.tcp.streams`](
https://github.com/matrix-org/synapse/blob/develop/synapse/replication/tcp/streams/__init__.py
) for the full list of streams.
It is very helpful to understand the streams mechanism when working on any part of Synapse that needs to respond to changes—especially if those changes are made by different workers.
To that end, let's describe streams formally, paraphrasing from the docstring of [`AbstractStreamIdGenerator`](
https://github.com/matrix-org/synapse/blob/a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37/synapse/storage/util/id_generators.py#L96
).
### Definition
A stream is an append-only log `T1, T2, ..., Tn, ...` of facts[^1] which grows over time.
Only "writers" can add facts to a stream, and there may be multiple writers.
Each fact has an ID, called its "stream ID".
Readers should only process facts in ascending stream ID order.
Roughly speaking, each stream is backed by a database table.
It should have a `stream_id` (or similar) bigint column holding stream IDs, plus additional columns as necessary to describe the fact.
Typically, a fact is expressed with a single row in its backing table.[^2]
Within a stream, no two facts may have the same stream_id.
> _Aside_. Some additional notes on streams' backing tables.
>
> 1. Rich would like to [ditch the backing tables](https://github.com/matrix-org/synapse/issues/13456).
> 2. The backing tables may have other uses.
> For example, the events table backs the events stream, and is read when processing new events.
> But old rows are read from the table all the time, whenever Synapse needs to look up some facts about an event.
> 3. Rich suspects that sometimes the stream is backed by multiple tables, so the stream proper is the union of those tables.
Stream writers can "reserve" a stream ID, and then later mark it as having been completed.
Stream writers need to track the completion of each stream fact.
In the happy case, completion means a fact has been written to the stream table.
But unhappy cases (e.g. transaction rollback due to an error) also count as completion.
Once completed, the rows written with that stream ID are fixed, and no new rows
will be inserted with that ID.
### Current stream ID
For any given stream reader (including writers themselves), we may define a per-writer current stream ID:
> The current stream ID _for a writer W_ is the largest stream ID such that
> all transactions added by W with equal or smaller ID have completed.
Similarly, there is a "linear" notion of current stream ID:
> The "linear" current stream ID is the largest stream ID such that
> all facts (added by any writer) with equal or smaller ID have completed.
Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs.
Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates.
**NB.** For both senses of "current", note that if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID.
For single-writer streams, the per-writer current ID and the linear current ID are the same.
Both senses of current ID are monotonic, but they may "skip" or jump over IDs because facts complete out of order.
_Example_.
Consider a single-writer stream which is initially at ID 1.
| Action | Current stream ID | Notes |
|------------|-------------------|-------------------------------------------------|
| | 1 | |
| Reserve 2 | 1 | |
| Reserve 3 | 1 | |
| Complete 3 | 1 | current ID unchanged, waiting for 2 to complete |
| Complete 2 | 3 | current ID jumps from 1 -> 3 |
| Reserve 4 | 3 | |
| Reserve 5 | 3 | |
| Reserve 6 | 3 | |
| Complete 5 | 3 | |
| Complete 4 | 5 | current ID jumps 3->5, even though 6 is pending |
| Complete 6 | 6 | |
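To make that bookkeeping concrete, here is a toy sketch of the single-writer case. It is purely illustrative (not Synapse's actual `StreamIdGenerator`): the current ID only advances once every smaller reserved ID has completed, reproducing the table above.

```python
from typing import Set


class SingleWriterStream:
    """Toy model of one writer's stream ID bookkeeping."""

    def __init__(self) -> None:
        self._next_id = 2  # the stream starts with ID 1 already complete
        self._current_id = 1
        self._pending: Set[int] = set()  # reserved but not yet completed

    def reserve(self) -> int:
        stream_id = self._next_id
        self._next_id += 1
        self._pending.add(stream_id)
        return stream_id

    def complete(self, stream_id: int) -> None:
        self._pending.discard(stream_id)
        # Advance past every completed fact, stopping at the first ID that
        # is still awaiting completion.
        while (
            self._current_id + 1 < self._next_id
            and (self._current_id + 1) not in self._pending
        ):
            self._current_id += 1

    @property
    def current_id(self) -> int:
        return self._current_id


stream = SingleWriterStream()
stream.reserve()  # 2
stream.reserve()  # 3
stream.complete(3)
assert stream.current_id == 1  # unchanged: still waiting for 2
stream.complete(2)
assert stream.current_id == 3  # jumps 1 -> 3
```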
### Multi-writer streams
There are two ways to view a multi-writer stream.
1. Treat it as a collection of distinct single-writer streams, one
for each writer.
2. Treat it as a single stream.
The single stream (option 2) is conceptually simpler, and easier to represent (a single stream id).
However, it requires each reader to know about the entire set of writers, to ensure that readers don't erroneously advance their current stream position too early and miss a fact from an unknown writer.
In contrast, multiple parallel streams (option 1) are more complex, requiring more state to represent (map from writer to stream id).
The payoff for doing so is that readers can "peek" ahead to facts that completed on one writer no matter the state of the others, reducing latency.
Note that a multi-writer stream can be viewed in both ways.
For example, the events stream is treated as multiple single-writer streams (option 1) by the sync handler, so that events are sent to clients as soon as possible.
But the background process that works through events treats them as a single linear stream.
Another useful example is the cache invalidation stream.
The facts this stream holds are instructions to "you should now invalidate these cache entries".
We only ever treat this as multiple single-writer streams, as there is no important ordering between cache invalidations.
(Invalidations are self-contained facts; and the invalidations commute/are idempotent).
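A rough illustration of the trade-off between the two views (again a simplification, not the real `MultiWriterIdGenerator`): a reader that holds per-writer positions can "peek" at each writer independently, whereas a reader that keeps a single linear position can only safely advance it to the minimum of the per-writer positions.

```python
from typing import Dict


def linear_current_id(per_writer_current: Dict[str, int]) -> int:
    # Option 2's single position: safe only up to the point every writer is
    # known to have passed, i.e. the minimum of the per-writer current IDs
    # (assuming all writers draw IDs from one shared sequence).
    return min(per_writer_current.values())


positions = {"persister1": 108, "persister2": 112}
# Option 1 lets a reader peek at persister2's facts up to 112 already;
# the single linear position may not pass 108 yet.
assert linear_current_id(positions) == 108
```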
### Writing to streams
Writers need to track:
- their current position (i.e. their own per-writer stream ID).
- their facts currently awaiting completion.
At startup,
- the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and
- there are no facts awaiting completion.
To reserve a stream ID, call [`nextval`](https://www.postgresql.org/docs/current/functions-sequence.html) on the appropriate postgres sequence.
To write a fact to the stream: insert the appropriate rows to the appropriate backing table.
To complete a fact, first remove it from your map of facts currently awaiting completion.
Then, if no earlier fact is awaiting completion, the writer can advance its current position in that stream.
Upon doing so it should emit an `RDATA` message[^3], once for every fact between the old and the new stream ID.
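Continuing the toy sketch from above: completing one fact may advance the writer's position by several IDs at once, in which case one `RDATA` is emitted for each newly covered ID. (`send_rdata` here is a stand-in for the real replication send, which goes over Redis pub/sub.)

```python
from typing import Callable


def complete_and_notify(
    stream: "SingleWriterStream",  # the toy class from the earlier sketch
    stream_id: int,
    send_rdata: Callable[[int], None],
) -> None:
    old_position = stream.current_id
    stream.complete(stream_id)
    # Emit one RDATA per fact between the old and new current position.
    for advanced_id in range(old_position + 1, stream.current_id + 1):
        send_rdata(advanced_id)
```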
### Subscribing to streams
Readers need to track the current position of every writer.
At startup, they can find this by contacting each writer with a `REPLICATE` message,
requesting that all writers reply describing their current position in their streams.
Writers reply with a `POSITION` message.
To learn about new facts, readers should listen for `RDATA` messages and process them to respond to the new fact.
The `RDATA` itself is not a self-contained representation of the fact;
readers will have to query the stream tables for the full details.
Readers must also advance their record of the writer's current position for that stream.
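The reader's side of the protocol, in the same illustrative style: seed per-writer positions from `POSITION` replies, then advance them as `RDATA` arrives. (Fetching the fact's details from the backing table is elided here.)

```python
from typing import Dict


class ToyStreamReader:
    """Illustrative only: tracks one stream's per-writer current positions."""

    def __init__(self) -> None:
        self.positions: Dict[str, int] = {}  # writer name -> current stream ID

    def on_position(self, writer: str, stream_id: int) -> None:
        # A POSITION reply to our REPLICATE request: adopt the stated position.
        self.positions[writer] = stream_id

    def on_rdata(self, writer: str, stream_id: int) -> None:
        # React to the new fact (a real reader queries the stream's backing
        # table for the full details), then advance our record of the writer.
        self.positions[writer] = max(self.positions.get(writer, 0), stream_id)
```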
## Summary
In a nutshell: we have an append-only log with a "buffer/scratchpad" at the end where we have to wait for the sequence to be linear and contiguous.
---
[^1]: we use the word _fact_ here for two reasons.
Firstly, the word "event" is already heavily overloaded (PDUs, EDUs, account data, ...) and we don't need to make that worse.
Secondly, "fact" emphasises that the things we append to a stream cannot change after the fact.
[^2]: A fact might be expressed with 0 rows, e.g. if we opened a transaction to persist an event, but failed and rolled the transaction back before marking the fact as completed.
In principle a fact might be expressed with 2 or more rows; if so, each of those rows should share the fact's stream ID.
[^3]: This communication used to happen directly with the writers [over TCP](../../tcp_replication.md);
nowadays it's done via Redis's Pubsub.
-36
@@ -348,42 +348,6 @@ callback returns `False`, Synapse falls through to the next one. The value of the first
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `check_login_for_spam`
_First introduced in Synapse v1.87.0_
```python
async def check_login_for_spam(
user_id: str,
device_id: Optional[str],
initial_display_name: Optional[str],
request_info: Collection[Tuple[Optional[str], str]],
auth_provider_id: Optional[str] = None,
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]
```
Called when a user logs in.
The arguments passed to this callback are:
* `user_id`: The user ID the user is logging in with
* `device_id`: The device ID the user is re-logging into.
* `initial_display_name`: The device display name, if any.
* `request_info`: A collection of tuples, whose first item is a user agent and whose
second item is an IP address. These user agents and IP addresses are the ones that were
used during the login process.
* `auth_provider_id`: The identifier of the SSO authentication provider, if any.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
*Note:* This will not be called when a user registers.
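For illustration, a minimal module using this callback might look like the following sketch. The blocked network prefix is invented, and the registration keyword is assumed to match the callback name, as with the other spam checker callbacks.

```python
from typing import Collection, Optional, Tuple

from synapse.module_api import NOT_SPAM, ModuleApi
from synapse.module_api.errors import Codes

BLOCKED_NETWORK = "203.0.113."  # hypothetical prefix, purely illustrative


class ExampleLoginSpamChecker:
    def __init__(self, config: dict, api: ModuleApi):
        # Register under the same name as the callback documented above.
        api.register_spam_checker_callbacks(
            check_login_for_spam=self.check_login_for_spam,
        )

    async def check_login_for_spam(
        self,
        user_id: str,
        device_id: Optional[str],
        initial_display_name: Optional[str],
        request_info: Collection[Tuple[Optional[str], str]],
        auth_provider_id: Optional[str] = None,
    ):
        # Reject logins coming from the blocked network; allow everything else.
        for _user_agent, ip in request_info:
            if ip.startswith(BLOCKED_NETWORK):
                return Codes.FORBIDDEN
        return NOT_SPAM
```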
## Example
The example below is a module that implements the spam checker callback
+1 -1
View File
@@ -95,7 +95,7 @@ matrix.example.com {
}
example.com:8448 {
reverse_proxy /_matrix/* localhost:8008
reverse_proxy localhost:8008
}
```
+3 -3
@@ -135,8 +135,8 @@ Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi
#### ArchLinux
The quickest way to get up and running with ArchLinux is probably with the package provided by ArchLinux
<https://archlinux.org/packages/extra/x86_64/matrix-synapse/>, which should pull in most of
The quickest way to get up and running with ArchLinux is probably with the community package
<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
the necessary dependencies.
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
@@ -200,7 +200,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.8 or later, up to Python 3.11.
- Python 3.7 or later, up to Python 3.11.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
If building on an uncommon architecture for which pre-built wheels are
@@ -1,4 +1,8 @@
worker_app: synapse.app.generic_worker
worker_name: background_worker
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/background-worker-log.yaml
@@ -1,5 +1,9 @@
worker_app: synapse.app.generic_worker
worker_name: event_persister1
worker_name: event_persister1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
@@ -1,4 +1,8 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
@@ -1,6 +1,10 @@
worker_app: synapse.app.media_repository
worker_name: media_worker
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8085
@@ -1,4 +1,8 @@
worker_app: synapse.app.pusher
worker_name: pusher_worker1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
-51
@@ -87,57 +87,6 @@ process, for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.90.0
## App service query parameter authorization is now a configuration option
Synapse v1.81.0 deprecated application service authorization via query parameters as this is
considered insecure - and from Synapse v1.71.0 forwards the application service token has also been sent via
[the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization), making the insecure
query parameter authorization redundant. Since removing the ability to continue to use query parameters could break
backwards compatibility it has now been put behind a configuration option, `use_appservice_legacy_authorization`.
This option defaults to false, but can be activated by adding
```yaml
use_appservice_legacy_authorization: true
```
to your configuration.
# Upgrading to v1.89.0
## Removal of unspecced `user` property for `/register`
Application services can no longer call `/register` with a `user` property to create new users.
The standard `username` property should be used instead. See the
[Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions)
for more information.
# Upgrading to v1.88.0
## Minimum supported Python version
The minimum supported Python version has been increased from v3.7 to v3.8.
You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023).
If you use current versions of the Matrix.org-distributed Debian
packages or Docker images, no action is required.
## Removal of `worker_replication_*` settings
As mentioned previously in [Upgrading to v1.84.0](#upgrading-to-v1840), the following deprecated settings
are being removed in this release of Synapse:
* [`worker_replication_host`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_host)
* [`worker_replication_http_port`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_port)
* [`worker_replication_http_tls`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_tls)
Please ensure that you have migrated to using `main` on your shared configuration's `instance_map`
(or create one if necessary). This is required if you have ***any*** workers at all;
administrators of single-process (monolith) installations don't need to do anything.
For an illustrative example, please see [Upgrading to v1.84.0](#upgrading-to-v1840) below.
# Upgrading to v1.86.0
## Minimum supported Rust version
+53 -128
@@ -462,20 +462,6 @@ See the docs [request log format](../administration/request_log.md).
* `additional_resources`: Only valid for an 'http' listener. A map of
additional endpoints which should be loaded via dynamic modules.
Unix socket support (_Added in Synapse 1.89.0_):
* `path`: A path and filename for a Unix socket. Make sure it is located in a
directory with read and write permissions, and that it already exists (the directory
will not be created). Defaults to `None`.
* **Note**: The use of both `path` and `port` options for the same `listener` is not
compatible.
* The `x_forwarded` option defaults to true when using Unix sockets and can be omitted.
* Other options that would not make sense to use with a UNIX socket, such as
`bind_addresses` and `tls` will be ignored and can be removed.
* `mode`: The file permissions to set on the UNIX socket. Defaults to `666`
* **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`).
Also make sure that `metrics` is not included in `resources` -> `names`
Valid resource names are:
* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
@@ -488,7 +474,7 @@ Valid resource names are:
* `media`: the media API (/_matrix/media).
* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets)
* `metrics`: the metrics interface. See [here](../../metrics-howto.md).
* `openid`: OpenID authentication. See [here](../../openid.md).
@@ -547,22 +533,6 @@ listeners:
bind_addresses: ['::1', '127.0.0.1']
type: manhole
```
Example configuration #3:
```yaml
listeners:
# Unix socket listener: Ideal for Synapse deployments behind a reverse proxy, offering
# lightweight interprocess communication without TCP/IP overhead, avoid port
# conflicts, and providing enhanced security through system file permissions.
#
# Note that x_forwarded will default to true, when using a UNIX socket. Please see
# https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
#
- path: /var/run/synapse/main_public.sock
type: http
resources:
- names: [client, federation]
```
---
### `manhole_settings`
@@ -1226,43 +1196,6 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
### `federation`
The federation section defines some sub-options related to federation.
The following options are related to configuring timeout and retry logic for one request,
independently of the others.
Short retry algorithm is used when something or someone will wait for the request to have an
answer, while long retry is used for requests that happen in the background,
like sending a federation transaction.
* `client_timeout`: timeout for the federation requests. Defaults to 60s.
* `max_short_retry_delay`: maximum delay to be used for the short retry algo. Defaults to 2s.
* `max_long_retry_delay`: maximum delay to be used for the long retry algo. Defaults to 60s.
* `max_short_retries`: maximum number of retries for the short retry algo. Defaults to 3 attempts.
* `max_long_retries`: maximum number of retries for the long retry algo. Defaults to 10 attempts.
The following options control the retry logic when communicating with a specific homeserver destination.
Unlike the previous configuration options, these values apply across all requests
for a given destination and the state of the backoff is stored in the database.
* `destination_min_retry_interval`: the initial backoff, after the first request fails. Defaults to 10m.
* `destination_retry_multiplier`: how much we multiply the backoff by after each subsequent fail. Defaults to 2.
* `destination_max_retry_interval`: a cap on the backoff. Defaults to a week.
Example configuration:
```yaml
federation:
client_timeout: 180s
max_short_retry_delay: 7s
max_long_retry_delay: 100s
max_short_retries: 5
max_long_retries: 20
destination_min_retry_interval: 30s
destination_retry_multiplier: 5
destination_max_retry_interval: 12h
```
---
## Caching
Options related to caching.
@@ -2848,20 +2781,6 @@ Example configuration:
```yaml
track_appservice_user_ips: true
```
---
### `use_appservice_legacy_authorization`
Whether to send the application service access tokens via the `access_token` query parameter
per older versions of the Matrix specification. Defaults to false. Set to true to enable sending
access tokens via a query parameter.
**Enabling this option is considered insecure and is not recommended.**
Example configuration:
```yaml
use_appservice_legacy_authorization: true
```
---
### `macaroon_secret_key`
@@ -3985,14 +3904,13 @@ federation_sender_instances:
---
### `instance_map`
When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP
replication listener of the worker, if configured, and to the main process. Each worker
declared under [`stream_writers`](../../workers.md#stream-writers) and
[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs a HTTP
replication listener, and that listener should be included in the `instance_map`. The
main process also needs an entry on the `instance_map`, and it should be listed under
`main` **if even one other worker exists**. Ensure the port matches with what is
declared inside the `listener` block for a `replication` listener.
When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured, and to the main process.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
a HTTP replication listener, and that listener should be included in the `instance_map`.
The main process also needs an entry on the `instance_map`, and it should be listed under
`main` **if even one other worker exists**. Ensure the port matches with what is declared
inside the `listener` block for a `replication` listener.
Example configuration:
@@ -4005,14 +3923,6 @@ instance_map:
host: localhost
port: 8034
```
Example configuration(#2, for UNIX sockets):
```yaml
instance_map:
main:
path: /var/run/synapse/main_replication.sock
worker1:
path: /var/run/synapse/worker1_replication.sock
```
---
### `stream_writers`
@@ -4030,24 +3940,6 @@ stream_writers:
typing: worker1
```
---
### `outbound_federation_restricted_to`
When using workers, you can restrict outbound federation traffic to only go through a
specific subset of workers. Any worker specified here must also be in the
[`instance_map`](#instance_map).
[`worker_replication_secret`](#worker_replication_secret) must also be configured to
authorize inter-worker communication.
```yaml
outbound_federation_restricted_to:
- federation_sender1
- federation_sender2
```
Also see the [worker
documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers)
for more info.
---
### `run_background_tasks_on`
The [worker](../../workers.md#background-tasks) that is used to run
@@ -4172,6 +4064,51 @@ Example configuration:
worker_name: generic_worker1
```
---
### `worker_replication_host`
*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
The HTTP replication endpoint that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).
Example configuration:
```yaml
worker_replication_host: 127.0.0.1
```
---
### `worker_replication_http_port`
*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
The HTTP replication port that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).
Example configuration:
```yaml
worker_replication_http_port: 9093
```
---
### `worker_replication_http_tls`
*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
has the `replication` resource enabled.
**Please note:** by default, it is not safe to expose replication ports to the
public Internet, even with TLS enabled.
See [`worker_replication_secret`](#worker_replication_secret).
Defaults to `false`.
*Added in Synapse 1.72.0.*
Example configuration:
```yaml
worker_replication_http_tls: true
```
---
### `worker_listeners`
A worker can handle HTTP requests. To do so, a `worker_listeners` option
@@ -4190,18 +4127,6 @@ worker_listeners:
resources:
- names: [client, federation]
```
Example configuration(#2, using UNIX sockets with a `replication` listener):
```yaml
worker_listeners:
- type: http
path: /var/run/synapse/worker_public.sock
resources:
- names: [client, federation]
- type: http
path: /var/run/synapse/worker_replication.sock
resources:
- names: [replication]
```
---
### `worker_manhole`
+9 -32
@@ -95,12 +95,9 @@ for the main process
* Secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis)
* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
with the `main` process defined, as well as the relevant connection information from
its HTTP `replication` listener (defined in step 1 above).
* Note that the `host` defined is the address the worker needs to look for the `main`
process at, not necessarily the same address that is bound to.
* If you are using Unix sockets for the `replication` resource, make sure to
use a `path` to the socket file instead of a `port`.
with the `main` process defined, as well as the relevant connection information from
its HTTP `replication` listener (defined in step 1 above). Note that the `host` defined
is the address the worker needs to look for the `main` process at, not necessarily the same address that is bound to.
* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
can be used to authenticate HTTP traffic between workers. For example:
@@ -148,6 +145,9 @@ In the config file for each worker, you must specify:
with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
* **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process
([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
[`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map`
For example:
@@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou
You can start the main Synapse process with Poetry by running the following command:
```console
poetry run synapse_homeserver --config-file [your homeserver.yaml]
poetry run synapse_homeserver -c [your homeserver.yaml]
```
For worker setups, you can run the following command
```console
poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
poetry run synapse_worker -c [your worker.yaml]
```
## Available worker applications
@@ -232,6 +232,7 @@ information.
^/_matrix/client/v1/rooms/.*/hierarchy$
^/_matrix/client/(v1|unstable)/rooms/.*/relations/
^/_matrix/client/v1/rooms/.*/threads$
^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
^/_matrix/client/(r0|v3|unstable)/account/whoami$
@@ -531,30 +532,6 @@ the stream writer for the `presence` stream:
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
#### Restrict outbound federation traffic to a specific set of workers
The
[`outbound_federation_restricted_to`](usage/configuration/config_documentation.md#outbound_federation_restricted_to)
configuration is useful to make sure outbound federation traffic only goes through a
specified subset of workers. This allows you to set more strict access controls (like a
firewall) for all workers and only allow the `federation_sender`'s to contact the
outside world.
```yaml
instance_map:
main:
host: localhost
port: 8030
federation_sender1:
host: localhost
port: 8034
outbound_federation_restricted_to:
- federation_sender1
worker_replication_secret: "secret_secret"
```
#### Background tasks
There is also support for moving background tasks to a separate
Generated
+49 -97
@@ -8,11 +8,11 @@
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1690534632,
"narHash": "sha256-kOXS9x5y17VKliC7wZxyszAYrWdRl1JzggbQl0gyo94=",
"lastModified": 1683102061,
"narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=",
"owner": "cachix",
"repo": "devenv",
"rev": "6568e7e485a46bbf32051e4d6347fa1fed8b2f25",
"rev": "ff1f29e41756553174d596cafe3a9fa77595100b",
"type": "github"
},
"original": {
@@ -22,6 +22,27 @@
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1682490133,
"narHash": "sha256-tR2Qx0uuk97WySpSSk4rGS/oH7xb5LykbjATcw1vw1I=",
"owner": "nix-community",
"repo": "fenix",
"rev": "4e9412753ab75ef0e038a5fe54a062fb44c27c6a",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
@@ -39,33 +60,12 @@
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1685518550,
"narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
@@ -170,27 +170,27 @@
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1685801374,
"narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
"lastModified": 1673800717,
"narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
"rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.05",
"ref": "nixos-22.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1690535733,
"narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
"lastModified": 1682519441,
"narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
"rev": "7a32a141db568abde9bc389845949dc2a454dfd3",
"type": "github"
},
"original": {
@@ -200,22 +200,6 @@
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1681358109,
"narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
@@ -231,11 +215,11 @@
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1688056373,
"narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
"lastModified": 1678376203,
"narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
"rev": "1a20b9708962096ec2481eeb2ddca29ed747770a",
"type": "github"
},
"original": {
@@ -247,27 +231,25 @@
"root": {
"inputs": {
"devenv": "devenv",
"fenix": "fenix",
"nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay",
"systems": "systems_3"
"systems": "systems"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_3"
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1690510705,
"narHash": "sha256-6mjs3Gl9/xrseFh9iNcNq1u5yJ/MIoAmjoaG7SXZDIE=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "851ae4c128905a62834d53ce7704ebc1ba481bea",
"lastModified": 1682426789,
"narHash": "sha256-UqnLmJESRZE0tTEaGbRAw05Hm19TWIPA+R3meqi5I4w=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "943d2a8a1ca15e8b28a1f51f5a5c135e3728da04",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
@@ -285,36 +267,6 @@
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
+13 -26
View File
@@ -39,27 +39,27 @@
{
inputs = {
# Use the master/unstable branch of nixpkgs. Used to fetch the latest
# available versions of packages.
# Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
# does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest.
nixpkgs.url = "github:NixOS/nixpkgs/master";
# Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
systems.url = "github:nix-systems/default";
# A development environment manager built on Nix. See https://devenv.sh.
devenv.url = "github:cachix/devenv/main";
# Rust toolchain.
rust-overlay.url = "github:oxalica/rust-overlay";
# Rust toolchains and rust-analyzer nightly.
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
outputs = { self, nixpkgs, devenv, systems, ... } @ inputs:
let
forEachSystem = nixpkgs.lib.genAttrs (import systems);
in {
devShells = forEachSystem (system:
let
overlays = [ (import rust-overlay) ];
pkgs = import nixpkgs {
inherit system overlays;
};
pkgs = nixpkgs.legacyPackages.${system};
in {
# Everything is configured via devenv - a Nix module for creating declarative
# developer environments. See https://devenv.sh/reference/options/ for a list
@@ -76,20 +76,6 @@
# Configure packages to install.
# Search for package names at https://search.nixos.org/packages?channel=unstable
packages = with pkgs; [
# The rust toolchain and related tools.
# This will install the "default" profile of rust components.
# https://rust-lang.github.io/rustup/concepts/profiles.html
#
# NOTE: We currently need to set the Rust version unnecessarily high
# in order to work around https://github.com/matrix-org/synapse/issues/15939
(rust-bin.stable."1.70.0".default.override {
# Additionally install the "rust-src" extension to allow diving into the
# Rust source code in an IDE (rust-analyzer will also make use of it).
extensions = [ "rust-src" ];
})
# The rust-analyzer language server implementation.
rust-analyzer
# Native dependencies for running Synapse.
icu
libffi
@@ -138,11 +124,12 @@
# Install dependencies for the additional programming languages
# involved with Synapse development.
#
# * Rust is used for developing and running Synapse.
# * Golang is needed to run the Complement test suite.
# * Perl is needed to run the SyTest test suite.
# * Rust is used for developing and running Synapse.
# It is installed manually with `packages` above.
languages.go.enable = true;
languages.rust.enable = true;
languages.rust.version = "stable";
languages.perl.enable = true;
# Postgres is needed to run Synapse with postgres support and
@@ -191,7 +178,7 @@
EOF
'';
# Start synapse when `devenv up` is run.
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml -c homeserver-config-overrides.d";
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml --config-directory homeserver-config-overrides.d";
# Define the perl modules we require to run SyTest.
#
Generated
+534 -586
View File
File diff suppressed because it is too large
+8 -14
View File
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.90.0"
version = "1.86.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -147,7 +147,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.8.0"
python = "^3.7.1"
# Mandatory Dependencies
# ----------------------
@@ -203,9 +203,11 @@ ijson = ">=3.1.4"
matrix-common = "^1.3.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic available on common distros.
# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858)
pydantic = "^1.7.4"
pydantic = ">=1.7.4"
# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
@@ -309,7 +311,7 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.277"
ruff = "0.0.265"
# Typechecking
lxml-stubs = ">=0.4.0"
@@ -373,15 +375,7 @@ build-backend = "poetry.core.masonry.api"
[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - CPython 3.6 and 3.7: EOLed
# - PyPy 3.7: we only support Python 3.8+
# - musllinux i686: excluded to reduce number of wheels we build.
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
# c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
+12 -15
View File
@@ -13,9 +13,6 @@
// limitations under the License.
#![feature(test)]
use std::borrow::Cow;
use synapse::push::{
evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
PushRules, SimpleJsonValue,
@@ -29,15 +26,15 @@ fn bench_match_exact(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
@@ -74,15 +71,15 @@ fn bench_match_word(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
@@ -119,15 +116,15 @@ fn bench_match_word_miss(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
@@ -164,15 +161,15 @@ fn bench_eval_message(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
+21 -20
View File
@@ -63,6 +63,22 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
}];
pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
// We don't want to notify on edits. Not only can this be confusing in real
// time (2 notifications, one message) but it's especially confusing
// if a bridge needs to edit a previously backfilled message.
PushRule {
rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("content.m\\.relates_to.rel_type"),
pattern: Cow::Borrowed("m.replace"),
},
))]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
priority_class: 5,
@@ -126,11 +142,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.is_user_mention"),
rule_id: Cow::Borrowed("global/override/.m.is_user_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(
KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
key: Cow::Borrowed(r"content.m\.mentions.user_ids"),
key: Cow::Borrowed("content.m\\.mentions.user_ids"),
value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
}),
)]),
@@ -147,12 +163,12 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.is_room_mention"),
rule_id: Cow::Borrowed("global/override/.m.is_room_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
key: Cow::Borrowed(r"content.m\.mentions.room"),
value: Cow::Owned(SimpleJsonValue::Bool(true)),
key: Cow::Borrowed("content.m\\.mentions.room"),
value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
})),
Condition::Known(KnownCondition::SenderNotificationPermission {
key: Cow::Borrowed("room"),
@@ -225,21 +241,6 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
// We don't want to notify on edits *unless* the edit directly mentions a
// user, which is handled above.
PushRule {
rule_id: Cow::Borrowed("global/override/.org.matrix.msc3958.suppress_edits"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs(
EventPropertyIsCondition {
key: Cow::Borrowed(r"content.m\.relates_to.rel_type"),
value: Cow::Owned(SimpleJsonValue::Str(Cow::Borrowed("m.replace"))),
},
))]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"),
priority_class: 5,
+6 -8
View File
@@ -117,7 +117,7 @@ impl PushRuleEvaluator {
msc3931_enabled: bool,
) -> Result<Self, Error> {
let body = match flattened_keys.get("content.body") {
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(),
_ => String::new(),
};
@@ -313,15 +313,13 @@ impl PushRuleEvaluator {
};
let pattern = match &*exact_event_match.value_type {
EventMatchPatternType::UserId => user_id.to_owned(),
EventMatchPatternType::UserLocalpart => {
get_localpart_from_id(user_id)?.to_owned()
}
EventMatchPatternType::UserId => user_id,
EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
};
self.match_event_property_contains(
exact_event_match.key.clone(),
Cow::Borrowed(&SimpleJsonValue::Str(Cow::Owned(pattern))),
Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())),
)?
}
KnownCondition::ContainsDisplayName => {
@@ -496,7 +494,7 @@ fn push_rule_evaluator() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
);
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
@@ -524,7 +522,7 @@ fn test_requires_room_version_supports_condition() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
);
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
let evaluator = PushRuleEvaluator::py_new(
+3 -3
View File
@@ -256,7 +256,7 @@ impl<'de> Deserialize<'de> for Action {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum SimpleJsonValue {
Str(Cow<'static, str>),
Str(String),
Int(i64),
Bool(bool),
Null,
@@ -265,7 +265,7 @@ pub enum SimpleJsonValue {
impl<'source> FromPyObject<'source> for SimpleJsonValue {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
if let Ok(s) = <PyString as pyo3::PyTryFrom>::try_from(ob) {
Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
Ok(SimpleJsonValue::Str(s.to_string()))
// A bool *is* an int, ensure we try bool first.
} else if let Ok(b) = <PyBool as pyo3::PyTryFrom>::try_from(ob) {
Ok(SimpleJsonValue::Bool(b.extract()?))
@@ -585,7 +585,7 @@ impl FilteredPushRules {
}
if !self.msc3958_suppress_edits_enabled
&& rule.rule_id == "global/override/.org.matrix.msc3958.suppress_edits"
&& rule.rule_id == "global/override/.com.beeper.suppress_edits"
{
return false;
}
+8 -12
View File
@@ -22,19 +22,15 @@ from typing import Collection, Optional, Sequence, Set
# These are expanded inside the dockerfile to be a fully qualified image name.
# e.g. docker.io/library/debian:bullseye
#
# If an EOL is forced by a Python version and we're dropping support for it, make sure
# to remove references to the distribution across Synapse (search for "bullseye" for
# example)
DISTS = (
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:trixie", # (EOL not specified yet)
"debian:buster", # oldstable: EOL 2022-08
"debian:bullseye",
"debian:bookworm",
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
"ubuntu:lunar", # 23.04 (EOL 2024-01)
)
DESC = """\
+5 -5
View File
@@ -214,7 +214,7 @@ fi
extra_test_args=()
test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins"
test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins"
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -246,6 +246,10 @@ else
else
export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
fi
# The tests for importing historical messages (MSC2716)
# only pass with monoliths, currently.
test_tags="$test_tags,msc2716"
fi
if [[ -n "$ASYNCIO_REACTOR" ]]; then
@@ -253,10 +257,6 @@ if [[ -n "$ASYNCIO_REACTOR" ]]; then
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi
if [[ -n "$UNIX_SOCKETS" ]]; then
# Enable full on Unix socket mode for Synapse, Redis and Postgresql
export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
+2 -2
View File
@@ -136,11 +136,11 @@ def request(
authorization_headers.append(header)
print("Authorization: %s" % header, file=sys.stderr)
dest = "matrix-federation://%s%s" % (destination, path)
dest = "matrix://%s%s" % (destination, path)
print("Requesting %s" % dest, file=sys.stderr)
s = requests.Session()
s.mount("matrix-federation://", MatrixConnectionAdapter())
s.mount("matrix://", MatrixConnectionAdapter())
headers: Dict[str, str] = {
"Authorization": authorization_headers[0],
+2 -2
View File
@@ -25,8 +25,8 @@ from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool
# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 8):
print("Synapse requires Python 3.8 or above.")
if sys.version_info < (3, 7):
print("Synapse requires Python 3.7 or above.")
sys.exit(1)
# Allow using the asyncio reactor via env var.
+4 -28
View File
@@ -61,7 +61,6 @@ from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpda
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
from synapse.storage.databases.main.events_bg_updates import (
EventsBackgroundUpdatesStore,
@@ -197,11 +196,6 @@ IGNORED_TABLES = {
"ui_auth_sessions",
"ui_auth_sessions_credentials",
"ui_auth_sessions_ips",
# Ignore the worker locks table, as a) there shouldn't be any acquired locks
# after porting, and b) the circular foreign key constraints make it hard to
# port.
"worker_read_write_locks_mode",
"worker_read_write_locks",
}
@@ -245,7 +239,6 @@ class Store(
PresenceBackgroundUpdateStore,
ReceiptsBackgroundUpdateStore,
RelationsWorkerStore,
EventFederationWorkerStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
@@ -761,7 +754,7 @@ class Porter:
# Step 2. Set up sequences
#
# We do this before porting the tables so that even if we fail half
# We do this before porting the tables so that event if we fail half
# way through the postgres DB always have sequences that are greater
# than their respective tables. If we don't then creating the
# `DataStore` object will fail due to the inconsistency.
@@ -769,10 +762,6 @@ class Porter:
await self._setup_state_group_id_seq()
await self._setup_user_id_seq()
await self._setup_events_stream_seqs()
await self._setup_sequence(
"un_partial_stated_event_stream_sequence",
("un_partial_stated_event_stream",),
)
await self._setup_sequence(
"device_inbox_sequence", ("device_inbox", "device_federation_outbox")
)
@@ -783,11 +772,6 @@ class Porter:
await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
await self._setup_auth_chain_sequence()
await self._setup_sequence(
"application_services_txn_id_seq",
("application_services_txns",),
"txn_id",
)
# Step 3. Get tables.
self.progress.set_state("Fetching tables")
@@ -819,9 +803,7 @@ class Porter:
)
# Map from table name to args passed to `handle_table`, i.e. a tuple
# of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`.
tables_to_port_info_map = {
r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES
}
tables_to_port_info_map = {r[0]: r[1:] for r in setup_res}
# Step 5. Do the copying.
#
@@ -1092,10 +1074,7 @@ class Porter:
)
async def _setup_sequence(
self,
sequence_name: str,
stream_id_tables: Iterable[str],
column_name: str = "stream_id",
self, sequence_name: str, stream_id_tables: Iterable[str]
) -> None:
"""Set a sequence to the correct value."""
current_stream_ids = []
@@ -1105,7 +1084,7 @@ class Porter:
await self.sqlite_store.db_pool.simple_select_one_onecol(
table=stream_id_table,
keyvalues={},
retcol=f"COALESCE(MAX({column_name}), 1)",
retcol="COALESCE(MAX(stream_id), 1)",
allow_none=True,
),
)
@@ -1390,9 +1369,6 @@ def main() -> None:
sys.stderr.write("Database must use the 'psycopg2' connector.\n")
sys.exit(3)
# Don't run the background tasks that get started by the data stores.
hs_config["run_background_tasks_on"] = "some_other_process"
config = HomeServerConfig()
config.parse_config_dict(hs_config, "", "")
+14
View File
@@ -123,6 +123,10 @@ class EventTypes:
SpaceChild: Final = "m.space.child"
SpaceParent: Final = "m.space.parent"
MSC2716_INSERTION: Final = "org.matrix.msc2716.insertion"
MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
MSC2716_MARKER: Final = "org.matrix.msc2716.marker"
Reaction: Final = "m.reaction"
@@ -218,6 +222,16 @@ class EventContentFields:
# Used in m.room.guest_access events.
GUEST_ACCESS: Final = "guest_access"
# Used on normal messages to indicate they were historically imported after the fact
MSC2716_HISTORICAL: Final = "org.matrix.msc2716.historical"
# For "insertion" events to indicate what the next batch ID should be in
# order to connect to it
MSC2716_NEXT_BATCH_ID: Final = "next_batch_id"
# Used on "batch" events to indicate which insertion event it connects to
MSC2716_BATCH_ID: Final = "batch_id"
# For "marker" events
MSC2716_INSERTION_EVENT_REFERENCE: Final = "insertion_event_reference"
# The authorising user for joining a restricted room.
AUTHORISING_USER: Final = "join_authorised_via_users_server"
-7
View File
@@ -217,13 +217,6 @@ class InvalidAPICallError(SynapseError):
super().__init__(HTTPStatus.BAD_REQUEST, msg, Codes.BAD_JSON)
class InvalidProxyCredentialsError(SynapseError):
"""Error raised when the proxy credentials are invalid."""
def __init__(self, msg: str, errcode: str = Codes.UNKNOWN):
super().__init__(401, msg, errcode)
class ProxiedRequestError(SynapseError):
"""An error from a general matrix endpoint, eg. from a proxied Matrix API call.
+285 -105
View File
@@ -78,29 +78,41 @@ class RoomVersion:
# MSC2209: Check 'notifications' key while verifying
# m.room.power_levels auth rules.
limit_notifications_power_levels: bool
# No longer include the creator in m.room.create events.
implicit_room_creator: bool
# Apply updated redaction rules algorithm from room version 11.
updated_redaction_rules: bool
# Support the 'restricted' join rule.
restricted_join_rule: bool
# Support for the proper redaction rules for the restricted join rule. This requires
# restricted_join_rule to be enabled.
restricted_join_rule_fix: bool
# Support the 'knock' join rule.
knock_join_rule: bool
# MSC2175: No longer include the creator in m.room.create events.
msc2175_implicit_room_creator: bool
# MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to
# content property.
msc2176_redaction_rules: bool
# MSC3083: Support the 'restricted' join_rule.
msc3083_join_rules: bool
# MSC3375: Support for the proper redaction rules for MSC3083. This mustn't
# be enabled if MSC3083 is not.
msc3375_redaction_rules: bool
# MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending
# m.room.membership event with membership 'knock'.
msc2403_knocking: bool
# MSC2716: Adds m.room.power_levels -> content.historical field to control
# whether "insertion", "chunk", "marker" events can be sent
msc2716_historical: bool
# MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
msc2716_redactions: bool
# MSC3389: Protect relation information from redaction.
msc3389_relation_redactions: bool
# Support the 'knock_restricted' join rule.
knock_restricted_join_rule: bool
# Enforce integer power levels
enforce_int_power_levels: bool
# MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
# knocks and restricted join rules into the same join condition.
msc3787_knock_restricted_join_rule: bool
# MSC3667: Enforce integer power levels
msc3667_int_only_power_levels: bool
# MSC3821: Do not redact the third_party_invite content field for membership events.
msc3821_redaction_rules: bool
# MSC3931: Adds a push rule condition for "room version feature flags", making
# some push rules room version dependent. Note that adding a flag to this list
# is not enough to mark it "supported": the push rule evaluator also needs to
# support the flag. Unknown flags are ignored by the evaluator, making conditions
# fail if used.
msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag
# MSC3989: Redact the origin field.
msc3989_redaction_rules: bool
class RoomVersions:
@@ -113,15 +125,19 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V2 = RoomVersion(
"2",
@@ -132,15 +148,19 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V3 = RoomVersion(
"3",
@@ -151,15 +171,19 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V4 = RoomVersion(
"4",
@@ -170,15 +194,19 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V5 = RoomVersion(
"5",
@@ -189,15 +217,19 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V6 = RoomVersion(
"6",
@@ -208,15 +240,42 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC2176 = RoomVersion(
"org.matrix.msc2176",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=True,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V7 = RoomVersion(
"7",
@@ -227,15 +286,19 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V8 = RoomVersion(
"8",
@@ -246,15 +309,19 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=True,
restricted_join_rule_fix=False,
knock_join_rule=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=False,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V9 = RoomVersion(
"9",
@@ -265,15 +332,65 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=True,
restricted_join_rule_fix=True,
knock_join_rule=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC3787 = RoomVersion(
"org.matrix.msc3787",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC3821 = RoomVersion(
"org.matrix.msc3821.opt1",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=True,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V10 = RoomVersion(
"10",
@@ -284,15 +401,42 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=True,
restricted_join_rule_fix=True,
knock_join_rule=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=True,
enforce_int_power_levels=True,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC2716v4 = RoomVersion(
"org.matrix.msc2716v4",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=True,
msc2716_historical=True,
msc2716_redactions=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC1767v10 = RoomVersion(
# MSC1767 (Extensible Events) based on room version "10"
@@ -304,34 +448,66 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=True,
restricted_join_rule_fix=True,
knock_join_rule=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=True,
enforce_int_power_levels=True,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=False,
msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
msc3989_redaction_rules=False,
)
V11 = RoomVersion(
"11",
RoomDisposition.STABLE,
MSC3989 = RoomVersion(
"org.matrix.msc3989",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
implicit_room_creator=True, # Used by MSC3820
updated_redaction_rules=True, # Used by MSC3820
restricted_join_rule=True,
restricted_join_rule_fix=True,
knock_join_rule=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
knock_restricted_join_rule=True,
enforce_int_power_levels=True,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=True,
)
MSC3820opt2 = RoomVersion(
# Based upon v10
"org.matrix.msc3820.opt2",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=True, # Used by MSC3820
msc2176_redaction_rules=True, # Used by MSC3820
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=True, # Used by MSC3820
msc3931_push_features=(),
msc3989_redaction_rules=True, # Used by MSC3820
)
@@ -344,11 +520,15 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
RoomVersions.V4,
RoomVersions.V5,
RoomVersions.V6,
RoomVersions.MSC2176,
RoomVersions.V7,
RoomVersions.V8,
RoomVersions.V9,
RoomVersions.MSC3787,
RoomVersions.V10,
RoomVersions.V11,
RoomVersions.MSC2716v4,
RoomVersions.MSC3989,
RoomVersions.MSC3820opt2,
)
}
@@ -377,12 +557,12 @@ MSC3244_CAPABILITIES = {
RoomVersionCapability(
"knock",
RoomVersions.V7,
lambda room_version: room_version.knock_join_rule,
lambda room_version: room_version.msc2403_knocking,
),
RoomVersionCapability(
"restricted",
RoomVersions.V9,
lambda room_version: room_version.restricted_join_rule,
lambda room_version: room_version.msc3083_join_rules,
),
)
}
-2
View File
@@ -386,7 +386,6 @@ def listen_unix(
def listen_http(
hs: "HomeServer",
listener_config: ListenerConfig,
root_resource: Resource,
version_string: str,
@@ -407,7 +406,6 @@ def listen_http(
version_string,
max_request_body_size=max_request_body_size,
reactor=reactor,
hs=hs,
)
if isinstance(listener_config, TCPListenerConfig):
+2 -1
View File
@@ -83,6 +83,7 @@ from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.room_batch import RoomBatchStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
@@ -119,6 +120,7 @@ class GenericWorkerStore(
# the races it creates aren't too bad.
KeyStore,
RoomWorkerStore,
RoomBatchStore,
DirectoryWorkerStore,
PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
@@ -221,7 +223,6 @@ class GenericWorkerServer(HomeServer):
root_resource = create_resource_tree(resources, OptionsResource())
_base.listen_http(
self,
listener_config,
root_resource,
self.version_string,
-1
View File
@@ -139,7 +139,6 @@ class SynapseHomeServer(HomeServer):
root_resource = OptionsResource()
ports = listen_http(
self,
listener_config,
create_resource_tree(resources, root_resource),
self.version_string,
+80 -36
View File
@@ -16,6 +16,9 @@ import logging
import urllib.parse
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Iterable,
List,
@@ -24,11 +27,10 @@ from typing import (
Sequence,
Tuple,
TypeVar,
Union,
)
from prometheus_client import Counter
from typing_extensions import ParamSpec, TypeGuard
from typing_extensions import Concatenate, ParamSpec, TypeGuard
from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException, HttpResponseException
@@ -78,7 +80,9 @@ sent_todevice_counter = Counter(
HOUR_IN_MS = 60 * 60 * 1000
APP_SERVICE_PREFIX = "/_matrix/app/v1"
APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable"
P = ParamSpec("P")
R = TypeVar("R")
@@ -119,12 +123,52 @@ class ApplicationServiceApi(SimpleHttpClient):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.clock = hs.get_clock()
self.config = hs.config.appservice
self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
)
async def _send_with_fallbacks(
self,
service: "ApplicationService",
prefixes: List[str],
path: str,
func: Callable[Concatenate[str, P], Awaitable[R]],
*args: P.args,
**kwargs: P.kwargs,
) -> R:
"""
Attempt to call an application service with multiple paths, falling back
until one succeeds.
Args:
service: The application service; this provides the base URL.
prefixes: A list of paths to try in order for the requests.
path: A suffix to append to each prefix.
func: The function to call; the first argument will be the full
endpoint to fetch. Other arguments are provided by args/kwargs.
Returns:
The return value of func.
"""
for i, prefix in enumerate(prefixes, start=1):
uri = f"{service.url}{prefix}{path}"
try:
return await func(uri, *args, **kwargs)
except HttpResponseException as e:
# If an error is received that is due to an unrecognised path,
# fall back to the next path (if one exists). Otherwise, consider it
# a legitimate error and raise.
if i < len(prefixes) and is_unknown_endpoint(e):
continue
raise
except Exception:
# Unexpected exceptions get sent to the caller.
raise
# The function should always exit via the return or raise above this.
raise RuntimeError("Unexpected fallback behaviour. This should never be seen.")
async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
if service.url is None:
return False
@@ -133,12 +177,12 @@ class ApplicationServiceApi(SimpleHttpClient):
assert service.hs_token is not None
try:
args = None
if self.config.use_appservice_legacy_authorization:
args = {"access_token": service.hs_token}
response = await self.get_json(
f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}",
args,
response = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, ""],
f"/users/{urllib.parse.quote(user_id)}",
self.get_json,
{"access_token": service.hs_token},
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
@@ -159,12 +203,12 @@ class ApplicationServiceApi(SimpleHttpClient):
assert service.hs_token is not None
try:
args = None
if self.config.use_appservice_legacy_authorization:
args = {"access_token": service.hs_token}
response = await self.get_json(
f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}",
args,
response = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, ""],
f"/rooms/{urllib.parse.quote(alias)}",
self.get_json,
{"access_token": service.hs_token},
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
@@ -197,14 +241,15 @@ class ApplicationServiceApi(SimpleHttpClient):
assert service.hs_token is not None
try:
args: Mapping[bytes, Union[List[bytes], str]] = fields
if self.config.use_appservice_legacy_authorization:
args = {
**fields,
b"access_token": service.hs_token,
}
response = await self.get_json(
f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
args: Mapping[Any, Any] = {
**fields,
b"access_token": service.hs_token,
}
response = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
self.get_json,
args=args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
@@ -240,12 +285,12 @@ class ApplicationServiceApi(SimpleHttpClient):
# This is required by the configuration.
assert service.hs_token is not None
try:
args = None
if self.config.use_appservice_legacy_authorization:
args = {"access_token": service.hs_token}
info = await self.get_json(
f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}",
args,
info = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
f"/thirdparty/protocol/{urllib.parse.quote(protocol)}",
self.get_json,
{"access_token": service.hs_token},
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
@@ -356,14 +401,13 @@ class ApplicationServiceApi(SimpleHttpClient):
}
try:
args = None
if self.config.use_appservice_legacy_authorization:
args = {"access_token": service.hs_token}
await self.put_json(
f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}",
await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, ""],
f"/transactions/{urllib.parse.quote(str(txn_id))}",
self.put_json,
json_body=body,
args=args,
args={"access_token": service.hs_token},
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if logger.isEnabledFor(logging.DEBUG):
-8
View File
@@ -43,14 +43,6 @@ class AppServiceConfig(Config):
)
self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
self.use_appservice_legacy_authorization = config.get(
"use_appservice_legacy_authorization", False
)
if self.use_appservice_legacy_authorization:
logger.warning(
"The use of appservice legacy authorization via query params is deprecated"
" and should be considered insecure."
)
def load_appservices(
+1 -1
View File
@@ -31,7 +31,7 @@ class AuthConfig(Config):
# The default value of password_config.enabled is True, unless msc3861 is enabled.
msc3861_enabled = (
(config.get("experimental_features") or {})
config.get("experimental_features", {})
.get("msc3861", {})
.get("enabled", False)
)
+14 -20
View File
@@ -216,6 +216,12 @@ class MSC3861:
("session_lifetime",),
)
if not root.experimental.msc3970_enabled:
raise ConfigError(
"experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled",
("experimental_features", "msc3970_enabled"),
)
@attr.s(auto_attribs=True, frozen=True, slots=True)
class MSC3866Config:
@@ -241,26 +247,8 @@ class ExperimentalConfig(Config):
# MSC3026 (busy presence state)
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
# MSC2697 (device dehydration)
# Enabled by default since this option was added after adding the feature.
# It is not recommended that MSC2697 and MSC3814 both be enabled at
# once.
self.msc2697_enabled: bool = experimental.get("msc2697_enabled", True)
# MSC3814 (dehydrated devices with SSSS)
# This is an alternative method to achieve the same goals as MSC2697.
# It is not recommended that MSC2697 and MSC3814 both be enabled at
# once.
self.msc3814_enabled: bool = experimental.get("msc3814_enabled", False)
if self.msc2697_enabled and self.msc3814_enabled:
raise ConfigError(
"MSC2697 and MSC3814 should not both be enabled.",
(
"experimental_features",
"msc3814_enabled",
),
)
# MSC2716 (importing historical messages)
self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)
# MSC3244 (room version capabilities)
self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)
@@ -391,9 +379,15 @@ class ExperimentalConfig(Config):
"Invalid MSC3861 configuration", ("experimental", "msc3861")
) from exc
# MSC3970: Scope transaction IDs to devices
self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled)
# Check that none of the other config options conflict with MSC3861 when enabled
self.msc3861.check_config_conflicts(self.root)
# MSC4009: E.164 Matrix IDs
self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False)
# MSC4010: Do not allow setting m.push_rules account data.
self.msc4010_push_rules_account_data = experimental.get(
"msc4010_push_rules_account_data", False
-34
View File
@@ -22,8 +22,6 @@ class FederationConfig(Config):
section = "federation"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
federation_config = config.setdefault("federation", {})
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist: Optional[dict] = None
federation_domain_whitelist = config.get("federation_domain_whitelist", None)
@@ -51,37 +49,5 @@ class FederationConfig(Config):
"allow_device_name_lookup_over_federation", False
)
# Allow for the configuration of timeout, max request retries
# and min/max retry delays in the matrix federation client.
self.client_timeout_ms = Config.parse_duration(
federation_config.get("client_timeout", "60s")
)
self.max_long_retry_delay_ms = Config.parse_duration(
federation_config.get("max_long_retry_delay", "60s")
)
self.max_short_retry_delay_ms = Config.parse_duration(
federation_config.get("max_short_retry_delay", "2s")
)
self.max_long_retries = federation_config.get("max_long_retries", 10)
self.max_short_retries = federation_config.get("max_short_retries", 3)
# Allow for the configuration of the backoff algorithm used
# when trying to reach an unavailable destination.
# Unlike the previous configuration, these values apply across
# multiple requests and the state of the backoff is stored in the DB.
self.destination_min_retry_interval_ms = Config.parse_duration(
federation_config.get("destination_min_retry_interval", "10m")
)
self.destination_retry_multiplier = federation_config.get(
"destination_retry_multiplier", 2
)
self.destination_max_retry_interval_ms = min(
Config.parse_duration(
federation_config.get("destination_max_retry_interval", "7d")
),
# Set a hard-limit to not overflow the database column.
2**62,
)
_METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}
+24 -95
View File
@@ -15,7 +15,7 @@
import argparse
import logging
from typing import Any, Dict, List, Optional, Union
from typing import Any, Dict, List, Union
import attr
from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr
@@ -41,17 +41,11 @@ Synapse version. Please use ``%s: name_of_worker`` instead.
_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """
Missing data for a worker to connect to the main process. Please include '%s' in the
`instance_map` declared in your shared yaml configuration as defined in configuration
documentation here:
`https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#instance_map`
"""
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE = """
'%s' is no longer a supported worker setting, please place '%s' onto your shared
configuration under `main` inside the `instance_map`. See workers documentation here:
`instance_map` declared in your shared yaml configuration, or optionally (as a deprecated
solution) in every worker's yaml as various `worker_replication_*` settings as defined
in workers documentation here:
`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration`
"""
# This allows for a handy knob when it's time to change from 'master' to
# something with less 'history'
MAIN_PROCESS_INSTANCE_NAME = "master"
@@ -94,7 +88,7 @@ class ConfigModel(BaseModel):
allow_mutation = False
class InstanceTcpLocationConfig(ConfigModel):
class InstanceLocationConfig(ConfigModel):
"""The host and port to talk to an instance via HTTP replication."""
host: StrictStr
@@ -110,23 +104,6 @@ class InstanceTcpLocationConfig(ConfigModel):
return f"{self.host}:{self.port}"
class InstanceUnixLocationConfig(ConfigModel):
"""The socket file to talk to an instance via HTTP replication."""
path: StrictStr
def scheme(self) -> str:
"""Hardcode a retrievable scheme"""
return "unix"
def netloc(self) -> str:
"""Nicely format the address location data"""
return f"{self.path}"
InstanceLocationConfig = Union[InstanceTcpLocationConfig, InstanceUnixLocationConfig]
@attr.s
class WriterLocations:
"""Specifies the instances that write various streams.
@@ -171,27 +148,6 @@ class WriterLocations:
)
@attr.s(auto_attribs=True)
class OutboundFederationRestrictedTo:
"""Whether we limit outbound federation to a certain set of instances.
Attributes:
instances: optional list of instances that can make outbound federation
requests. If None then all instances can make federation requests.
locations: list of instance locations to connect to proxy via.
"""
instances: Optional[List[str]]
locations: List[InstanceLocationConfig] = attr.Factory(list)
def __contains__(self, instance: str) -> bool:
# It feels a bit dirty to return `True` if `instances` is `None`, but it makes
# sense in downstream usage in the sense that if
# `outbound_federation_restricted_to` is not configured, then any instance can
# talk to federation (no restrictions so always return `True`).
return self.instances is None or instance in self.instances
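(A hedged illustration of the `__contains__` semantics described above; the instance names are invented.)
# No restriction configured: every instance may make federation requests.
unrestricted = OutboundFederationRestrictedTo(instances=None)
assert "any_worker" in unrestricted
# Restriction configured: only the listed instances may.
restricted = OutboundFederationRestrictedTo(instances=["federation_sender1"])
assert "federation_sender1" in restricted
assert "event_persister1" not in restricted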
class WorkerConfig(Config):
"""The workers are processes run separately to the main synapse process.
They have their own pid_file and listener configuration. They use the
@@ -260,37 +216,22 @@ class WorkerConfig(Config):
)
# A map from instance name to host/port of their HTTP replication endpoint.
# Check if the main process is declared. The main process itself doesn't need
# this data as it would never have to talk to itself.
# Check if the main process is declared. Inject it into the map if it's not,
# based first on if a 'main' block is declared then on 'worker_replication_*'
# data. If both are available, default to instance_map. The main process
# itself doesn't need this data as it would never have to talk to itself.
instance_map: Dict[str, Any] = config.get("instance_map", {})
if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME:
# TODO: The next 3 condition blocks can be deleted after some time has
# passed and we're ready to stop checking for these settings.
# The host used to connect to the main synapse
main_host = config.get("worker_replication_host", None)
if main_host:
raise ConfigError(
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
% ("worker_replication_host", main_host)
)
# The port on the main synapse for HTTP replication endpoint
main_port = config.get("worker_replication_http_port")
if main_port:
raise ConfigError(
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
% ("worker_replication_http_port", main_port)
)
# The tls mode on the main synapse for HTTP replication endpoint.
# For backward compatibility this defaults to False.
main_tls = config.get("worker_replication_http_tls", False)
if main_tls:
raise ConfigError(
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
% ("worker_replication_http_tls", main_tls)
)
# For now, accept 'main' in the instance_map, but the replication system
# expects 'master', force that into being until it's changed later.
@@ -300,20 +241,30 @@ class WorkerConfig(Config):
]
del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME]
# This is the backwards compatibility bit that handles the
# worker_replication_* bits using setdefault() to not overwrite anything.
elif main_host is not None and main_port is not None:
instance_map.setdefault(
MAIN_PROCESS_INSTANCE_NAME,
{
"host": main_host,
"port": main_port,
"tls": main_tls,
},
)
else:
# If we've gotten here, it means that the main process is not on the
# instance_map.
# instance_map and that not enough worker_replication_* variables
# were declared in the worker's yaml.
raise ConfigError(
_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA
% MAIN_PROCESS_INSTANCE_MAP_NAME
)
# type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently
self.instance_map: Dict[
str, InstanceLocationConfig
] = parse_and_validate_mapping(
instance_map, InstanceLocationConfig # type: ignore[arg-type]
)
] = parse_and_validate_mapping(instance_map, InstanceLocationConfig)
# Map from type of streams to source, c.f. WriterLocations.
writers = config.get("stream_writers") or {}
@@ -406,28 +357,6 @@ class WorkerConfig(Config):
new_option_name="update_user_directory_from_worker",
)
outbound_federation_restricted_to = config.get(
"outbound_federation_restricted_to", None
)
self.outbound_federation_restricted_to = OutboundFederationRestrictedTo(
outbound_federation_restricted_to
)
if outbound_federation_restricted_to:
if not self.worker_replication_secret:
raise ConfigError(
"`worker_replication_secret` must be configured when using `outbound_federation_restricted_to`."
)
for instance in outbound_federation_restricted_to:
if instance not in self.instance_map:
raise ConfigError(
"Instance %r is configured in 'outbound_federation_restricted_to' but does not appear in `instance_map` config."
% (instance,)
)
self.outbound_federation_restricted_to.locations.append(
self.instance_map[instance]
)
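(As a sketch, a shared-configuration fragment, expressed as a Python dict with invented values, that would pass both checks above.)
shared_config = {
    # required whenever outbound_federation_restricted_to is set
    "worker_replication_secret": "some-secret",
    "instance_map": {
        "federation_sender1": {"host": "localhost", "port": 8034},
    },
    # every entry here must also appear in instance_map
    "outbound_federation_restricted_to": ["federation_sender1"],
}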
def _should_this_worker_perform_duty(
self,
config: Dict[str, Any],
+55 -13
View File
@@ -126,7 +126,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
raise AuthError(403, "Event not signed by sending server")
is_invite_via_allow_rule = (
event.room_version.restricted_join_rule
event.room_version.msc3083_join_rules
and event.type == EventTypes.Member
and event.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in event.content
@@ -339,6 +339,13 @@ def check_state_dependent_auth_rules(
if event.type == EventTypes.Redaction:
check_redaction(event.room_version, event, auth_dict)
if (
event.type == EventTypes.MSC2716_INSERTION
or event.type == EventTypes.MSC2716_BATCH
or event.type == EventTypes.MSC2716_MARKER
):
check_historical(event.room_version, event, auth_dict)
logger.debug("Allowing! %s", event)
@@ -352,10 +359,13 @@ LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = {
RoomVersions.V4,
RoomVersions.V5,
RoomVersions.V6,
RoomVersions.MSC2176,
RoomVersions.V7,
RoomVersions.V8,
RoomVersions.V9,
RoomVersions.MSC3787,
RoomVersions.V10,
RoomVersions.MSC2716v4,
RoomVersions.MSC1767v10,
}
@@ -447,7 +457,7 @@ def _check_create(event: "EventBase") -> None:
# 1.4 If content has no creator field, reject if the room version requires it.
if (
not event.room_version.implicit_room_creator
not event.room_version.msc2175_implicit_room_creator
and EventContentFields.ROOM_CREATOR not in event.content
):
raise AuthError(403, "Create event lacks a 'creator' property")
@@ -484,7 +494,7 @@ def _is_membership_change_allowed(
key = (EventTypes.Create, "")
create = auth_events.get(key)
if create and event.prev_event_ids()[0] == create.event_id:
if room_version.implicit_room_creator:
if room_version.msc2175_implicit_room_creator:
creator = create.sender
else:
creator = create.content[EventContentFields.ROOM_CREATOR]
@@ -507,7 +517,7 @@ def _is_membership_change_allowed(
caller_invited = caller and caller.membership == Membership.INVITE
caller_knocked = (
caller
and room_version.knock_join_rule
and room_version.msc2403_knocking
and caller.membership == Membership.KNOCK
)
@@ -607,9 +617,9 @@ def _is_membership_change_allowed(
elif join_rule == JoinRules.PUBLIC:
pass
elif (
room_version.restricted_join_rule and join_rule == JoinRules.RESTRICTED
room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED
) or (
room_version.knock_restricted_join_rule
room_version.msc3787_knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
):
# This is the same as public, but the event must contain a reference
@@ -639,9 +649,9 @@ def _is_membership_change_allowed(
elif (
join_rule == JoinRules.INVITE
or (room_version.knock_join_rule and join_rule == JoinRules.KNOCK)
or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
or (
room_version.knock_restricted_join_rule
room_version.msc3787_knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
)
):
@@ -675,9 +685,9 @@ def _is_membership_change_allowed(
"You don't have permission to ban",
errcode=Codes.INSUFFICIENT_POWER,
)
elif room_version.knock_join_rule and Membership.KNOCK == membership:
elif room_version.msc2403_knocking and Membership.KNOCK == membership:
if join_rule != JoinRules.KNOCK and (
not room_version.knock_restricted_join_rule
not room_version.msc3787_knock_restricted_join_rule
or join_rule != JoinRules.KNOCK_RESTRICTED
):
raise AuthError(403, "You don't have permission to knock")
@@ -813,6 +823,38 @@ def check_redaction(
raise AuthError(403, "You don't have permission to redact events")
def check_historical(
room_version_obj: RoomVersion,
event: "EventBase",
auth_events: StateMap["EventBase"],
) -> None:
"""Check whether the event sender is allowed to send historical related
events like "insertion", "batch", and "marker".
Returns:
None
Raises:
AuthError if the event sender is not allowed to send historical related events
("insertion", "batch", and "marker").
"""
# Ignore the auth checks in room versions that do not support historical
# events
if not room_version_obj.msc2716_historical:
return
user_level = get_user_power_level(event.user_id, auth_events)
historical_level = get_named_level(auth_events, "historical", 100)
if user_level < historical_level:
raise UnstableSpecAuthError(
403,
'You don\'t have permission to send historical related events ("insertion", "batch", and "marker")',
errcode=Codes.INSUFFICIENT_POWER,
)
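(A worked illustration of the gate above, assuming no power_levels event overrides the default "historical" level of 100.)
user_level = 50          # get_user_power_level(event.user_id, auth_events)
historical_level = 100   # get_named_level(auth_events, "historical", 100)
# 50 < 100, so this sender is rejected with a 403; a room creator at
# power level 100 would be allowed to send the MSC2716 events.
assert user_level < historical_level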
def _check_power_levels(
room_version_obj: RoomVersion,
event: "EventBase",
@@ -834,7 +876,7 @@ def _check_power_levels(
# Reject events with stringy power levels if required by room version
if (
event.type == EventTypes.PowerLevels
and room_version_obj.enforce_int_power_levels
and room_version_obj.msc3667_int_only_power_levels
):
for k, v in event.content.items():
if k in {
@@ -970,7 +1012,7 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in
key = (EventTypes.Create, "")
create_event = auth_events.get(key)
if create_event is not None:
if create_event.room_version.implicit_room_creator:
if create_event.room_version.msc2175_implicit_room_creator:
creator = create_event.sender
else:
creator = create_event.content[EventContentFields.ROOM_CREATOR]
@@ -1108,7 +1150,7 @@ def auth_types_for_event(
)
auth_types.add(key)
if room_version.restricted_join_rule and membership == Membership.JOIN:
if room_version.msc3083_join_rules and membership == Membership.JOIN:
if EventContentFields.AUTHORISING_USER in event.content:
key = (
EventTypes.Member,
+10 -1
View File
@@ -198,6 +198,7 @@ class _EventInternalMetadata:
soft_failed: DictProperty[bool] = DictProperty("soft_failed")
proactively_send: DictProperty[bool] = DictProperty("proactively_send")
redacted: DictProperty[bool] = DictProperty("redacted")
historical: DictProperty[bool] = DictProperty("historical")
txn_id: DictProperty[str] = DictProperty("txn_id")
"""The transaction ID, if it was set when the event was created."""
@@ -287,6 +288,14 @@ class _EventInternalMetadata:
"""
return self._dict.get("redacted", False)
def is_historical(self) -> bool:
"""Whether this is a historical message.
This is used by the batch send historical message endpoint and
is needed to mark the event as backfilled and skip some checks
like push notifications.
"""
return self._dict.get("historical", False)
def is_notifiable(self) -> bool:
"""Whether this event can trigger a push notification"""
return not self.is_outlier() or self.is_out_of_band_membership()
@@ -346,7 +355,7 @@ class EventBase(metaclass=abc.ABCMeta):
@property
def redacts(self) -> Optional[str]:
"""MSC2176 moved the redacts field into the content."""
if self.room_version.updated_redaction_rules:
if self.room_version.msc2176_redaction_rules:
return self.content.get("redacts")
return self.get("redacts")
+1 -1
View File
@@ -175,7 +175,7 @@ class EventBuilder:
# MSC2174 moves the redacts property to the content, it is invalid to
# provide it as a top-level property.
if self._redacts is not None and not self.room_version.updated_redaction_rules:
if self._redacts is not None and not self.room_version.msc2176_redaction_rules:
event_dict["redacts"] = self._redacts
if self._origin_server_ts is not None:
+11 -1
View File
@@ -186,6 +186,9 @@ class EventContext(UnpersistedEventContextBase):
),
"app_service_id": self.app_service.id if self.app_service else None,
"partial_state": self.partial_state,
# add dummy delta_ids and prev_group for backwards compatibility
"delta_ids": None,
"prev_group": None,
}
@staticmethod
@@ -200,6 +203,13 @@ class EventContext(UnpersistedEventContextBase):
Returns:
The event context.
"""
# workaround for backwards/forwards compatibility: if the input doesn't have a value
# for "state_group_deltas" just assign an empty dict
state_group_deltas = input.get("state_group_deltas", None)
if state_group_deltas:
state_group_deltas = _decode_state_group_delta(state_group_deltas)
else:
state_group_deltas = {}
context = EventContext(
# We use the state_group and prev_state_id stuff to pull the
@@ -207,7 +217,7 @@ class EventContext(UnpersistedEventContextBase):
storage=storage,
state_group=input["state_group"],
state_group_before_event=input["state_group_before_event"],
state_group_deltas=_decode_state_group_delta(input["state_group_deltas"]),
state_group_deltas=state_group_deltas,
state_delta_due_to_event=_decode_state_dict(
input["state_delta_due_to_event"]
),
+46 -46
View File
@@ -108,9 +108,13 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
"origin_server_ts",
]
# Earlier room versions had additional allowed keys.
if not room_version.updated_redaction_rules:
allowed_keys.extend(["prev_state", "membership", "origin"])
# Room versions from before MSC2176 had additional allowed keys.
if not room_version.msc2176_redaction_rules:
allowed_keys.extend(["prev_state", "membership"])
# Room versions before MSC3989 kept the origin field.
if not room_version.msc3989_redaction_rules:
allowed_keys.append("origin")
event_type = event_dict["type"]
@@ -123,9 +127,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
if event_type == EventTypes.Member:
add_fields("membership")
if room_version.restricted_join_rule_fix:
if room_version.msc3375_redaction_rules:
add_fields(EventContentFields.AUTHORISING_USER)
if room_version.updated_redaction_rules:
if room_version.msc3821_redaction_rules:
# Preserve the signed field under third_party_invite.
third_party_invite = event_dict["content"].get("third_party_invite")
if isinstance(third_party_invite, collections.abc.Mapping):
@@ -136,16 +140,14 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
]
elif event_type == EventTypes.Create:
if room_version.updated_redaction_rules:
# MSC2176 rules state that create events cannot have their `content` redacted.
new_content = event_dict["content"]
elif not room_version.implicit_room_creator:
# Some room versions give meaning to `creator`
add_fields("creator")
# MSC2176 rules state that create events cannot be redacted.
if room_version.msc2176_redaction_rules:
return event_dict
add_fields("creator")
elif event_type == EventTypes.JoinRules:
add_fields("join_rule")
if room_version.restricted_join_rule:
if room_version.msc3083_join_rules:
add_fields("allow")
elif event_type == EventTypes.PowerLevels:
add_fields(
@@ -159,15 +161,24 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
"redact",
)
if room_version.updated_redaction_rules:
if room_version.msc2176_redaction_rules:
add_fields("invite")
if room_version.msc2716_historical:
add_fields("historical")
elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth:
add_fields("aliases")
elif event_type == EventTypes.RoomHistoryVisibility:
add_fields("history_visibility")
elif event_type == EventTypes.Redaction and room_version.updated_redaction_rules:
elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
add_fields("redacts")
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_INSERTION:
add_fields(EventContentFields.MSC2716_NEXT_BATCH_ID)
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_BATCH:
add_fields(EventContentFields.MSC2716_BATCH_ID)
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE)
# Protect the rel_type and event_id fields under the m.relates_to field.
if room_version.msc3389_relation_redactions:
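(The hunk above applies the redaction algorithm's core idea: keep a version-dependent set of top-level keys plus per-event-type content fields. A minimal hypothetical sketch of that shape, not Synapse's code:)
def prune_sketch(event_dict: dict, allowed_keys: list, content_fields: list) -> dict:
    # keep only the allowed top-level keys ...
    pruned = {k: v for k, v in event_dict.items() if k in allowed_keys}
    # ... and, within content, only the fields allowed for this event type
    content = event_dict.get("content", {})
    pruned["content"] = {k: v for k, v in content.items() if k in content_fields}
    return pruned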
@@ -394,6 +405,7 @@ def serialize_event(
time_now_ms: int,
*,
config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
msc3970_enabled: bool = False,
) -> JsonDict:
"""Serialize event for clients
@@ -401,6 +413,8 @@ def serialize_event(
e
time_now_ms
config: Event serialization config
msc3970_enabled: Whether MSC3970 is enabled. It changes whether we should
include the `transaction_id` in the event's `unsigned` section.
Returns:
The serialized event dictionary.
@@ -426,46 +440,38 @@ def serialize_event(
e.unsigned["redacted_because"],
time_now_ms,
config=config,
msc3970_enabled=msc3970_enabled,
)
# If we have a txn_id saved in the internal_metadata, we should include it in the
# unsigned section of the event if it was sent by the same session as the one
# requesting the event.
txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None)
if (
txn_id is not None
and config.requester is not None
and config.requester.user.to_string() == e.sender
):
# Some events do not have the device ID stored in the internal metadata;
# this includes old events as well as those created by appservices, guests,
# or with tokens minted with the admin API. For those events, fall back
# to using the access token instead.
if txn_id is not None and config.requester is not None:
# For the MSC3970 rules to be applied, we *need* to have the device ID in the
# event's internal metadata. Since we were not recording it before, if it hasn't
# been recorded we fall back to the old behaviour.
event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None)
if event_device_id is not None:
if msc3970_enabled and event_device_id is not None:
if event_device_id == config.requester.device_id:
d["unsigned"]["transaction_id"] = txn_id
else:
# Fallback behaviour: only include the transaction ID if the event
# was sent from the same access token.
#
# For regular users, the access token ID can be used to determine this.
# This includes access tokens minted with the admin API.
#
# For guests and appservice users, we can't check the access token ID
# so assume it is the same session.
# The pre-MSC3970 behaviour is to only include the transaction ID if the
# event was sent from the same access token. For regular users, we can use
# the access token ID to determine this. For guests, we can't, but since
# each guest only has one access token, we can just check that the event was
# sent by the same user as the one requesting the event.
event_token_id: Optional[int] = getattr(
e.internal_metadata, "token_id", None
)
if (
if config.requester.user.to_string() == e.sender and (
(
event_token_id is not None
and config.requester.access_token_id is not None
and event_token_id == config.requester.access_token_id
)
or config.requester.is_guest
or config.requester.app_service
):
d["unsigned"]["transaction_id"] = txn_id
@@ -480,17 +486,6 @@ def serialize_event(
if config.as_client_event:
d = config.event_format(d)
# If the event is a redaction, the field with the redacted event ID appears
# in a different location depending on the room version. e.redacts handles
# fetching from the proper location; copy it to the other location for forwards-
# and backwards-compatibility with clients.
if e.type == EventTypes.Redaction and e.redacts is not None:
if e.room_version.updated_redaction_rules:
d["redacts"] = e.redacts
else:
d["content"] = dict(d["content"])
d["content"]["redacts"] = e.redacts
only_event_fields = config.only_event_fields
if only_event_fields:
if not isinstance(only_event_fields, list) or not all(
@@ -509,6 +504,9 @@ class EventClientSerializer:
clients.
"""
def __init__(self, *, msc3970_enabled: bool = False):
self._msc3970_enabled = msc3970_enabled
def serialize_event(
self,
event: Union[JsonDict, EventBase],
@@ -533,7 +531,9 @@ class EventClientSerializer:
if not isinstance(event, EventBase):
return event
serialized_event = serialize_event(event, time_now, config=config)
serialized_event = serialize_event(
event, time_now, config=config, msc3970_enabled=self._msc3970_enabled
)
# Check if there are any bundled aggregations to include with the event.
if bundle_aggregations:
+1 -1
View File
@@ -231,7 +231,7 @@ async def _check_sigs_on_pdu(
# If this is a join event for a restricted room it may have been authorised
# via a different server from the sending server. Check those signatures.
if (
room_version.restricted_join_rule
room_version.msc3083_join_rules
and pdu.type == EventTypes.Member
and pdu.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in pdu.content
+3 -3
View File
@@ -983,7 +983,7 @@ class FederationClient(FederationBase):
if not room_version:
raise UnsupportedRoomVersionError()
if not room_version.knock_join_rule and membership == Membership.KNOCK:
if not room_version.msc2403_knocking and membership == Membership.KNOCK:
raise SynapseError(
400,
"This room version does not support knocking",
@@ -1069,7 +1069,7 @@ class FederationClient(FederationBase):
# * Ensure the signatures are good.
#
# Otherwise, fallback to the provided event.
if room_version.restricted_join_rule and response.event:
if room_version.msc3083_join_rules and response.event:
event = response.event
valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
@@ -1195,7 +1195,7 @@ class FederationClient(FederationBase):
# MSC3083 defines additional error codes for room joins.
failover_errcodes = None
if room_version.restricted_join_rule:
if room_version.msc3083_join_rules:
failover_errcodes = (
Codes.UNABLE_AUTHORISE_JOIN,
Codes.UNABLE_TO_GRANT_JOIN,
+6 -17
View File
@@ -63,7 +63,6 @@ from synapse.federation.federation_base import (
)
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
make_deferred_yieldable,
@@ -138,7 +137,6 @@ class FederationServer(FederationBase):
self._event_auth_handler = hs.get_event_auth_handler()
self._room_member_handler = hs.get_room_member_handler()
self._e2e_keys_handler = hs.get_e2e_keys_handler()
self._worker_lock_handler = hs.get_worker_locks_handler()
self._state_storage_controller = hs.get_storage_controllers().state
@@ -808,7 +806,7 @@ class FederationServer(FederationBase):
raise IncompatibleRoomVersionError(room_version=room_version.identifier)
# Check that this room supports knocking as defined by its room version
if not room_version.knock_join_rule:
if not room_version.msc2403_knocking:
raise SynapseError(
403,
"This room version does not support knocking",
@@ -911,7 +909,7 @@ class FederationServer(FederationBase):
errcode=Codes.NOT_FOUND,
)
if membership_type == Membership.KNOCK and not room_version.knock_join_rule:
if membership_type == Membership.KNOCK and not room_version.msc2403_knocking:
raise SynapseError(
403,
"This room version does not support knocking",
@@ -935,7 +933,7 @@ class FederationServer(FederationBase):
# the event is valid to be sent into the room. Currently this is only done
# if the user is being joined via restricted join rules.
if (
room_version.restricted_join_rule
room_version.msc3083_join_rules
and event.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in event.content
):
@@ -1238,18 +1236,9 @@ class FederationServer(FederationBase):
logger.info("handling received PDU in room %s: %s", room_id, event)
try:
with nested_logging_context(event.event_id):
# We're taking out a lock within a lock, which could
# lead to deadlocks if we're not careful. However, it is
# safe on this occasion as we only ever take a write
# lock when deleting a room, which we would never do
# while holding the `_INBOUND_EVENT_HANDLING_LOCK_NAME`
# lock.
async with self._worker_lock_handler.acquire_read_write_lock(
DELETE_ROOM_LOCK_NAME, room_id, write=False
):
await self._federation_event_handler.on_receive_pdu(
origin, event
)
await self._federation_event_handler.on_receive_pdu(
origin, event
)
except FederationError as e:
# XXX: Ideally we'd inform the remote we failed to process
# the event, but we can't return an error in the transaction
+18 -16
View File
@@ -109,8 +109,10 @@ was enabled*, Catch-Up Mode is exited and we return to `_transaction_transmissio
If a remote server is unreachable over federation, we back off from that server,
with an exponentially-increasing retry interval.
We automatically retry after the retry interval expires (roughly, the logic to do so
being triggered every minute).
Whilst we don't automatically retry after the interval, we prevent making new attempts
until such time as the back-off has cleared.
Once the back-off is cleared and a new PDU or EDU arrives for transmission, the transmission
loop resumes and empties the queue by making federation requests.
If the backoff grows too large (> 1 hour), the in-memory queue is emptied (to prevent
unbounded growth) and Catch-Up Mode is entered.
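(A worked example of that growth, assuming the defaults configured earlier in this changeset: a 10m minimum interval and a multiplier of 2.)
interval_ms = 600_000        # destination_min_retry_interval: "10m"
ONE_HOUR_MS = 3_600_000
steps = []
while interval_ms <= ONE_HOUR_MS:
    steps.append(interval_ms)
    interval_ms *= 2         # destination_retry_multiplier: 2
# steps == [600000, 1200000, 2400000] (10m, 20m, 40m); the next doubling
# would exceed one hour, so the queue is emptied and Catch-Up Mode begins.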
@@ -143,6 +145,7 @@ from prometheus_client import Counter
from typing_extensions import Literal
from twisted.internet import defer
from twisted.internet.interfaces import IDelayedCall
import synapse.metrics
from synapse.api.presence import UserPresenceState
@@ -181,18 +184,14 @@ sent_pdus_destination_dist_total = Counter(
"Total number of PDUs queued for sending across all destinations",
)
# Time (in s) to wait before trying to wake up destinations that have
# catch-up outstanding. This will also be the delay applied at startup
# before trying the same.
# Please note that rate limiting still applies, so while the loop is
# executed every X seconds the destinations may not be woken up because
# they are being rate limited following previous attempt failures.
WAKEUP_RETRY_PERIOD_SEC = 60
# Time (in s) after Synapse's startup that we will begin to wake up destinations
# that have catch-up outstanding.
CATCH_UP_STARTUP_DELAY_SEC = 15
# Time (in s) to wait in between waking up each destination, i.e. one destination
# will be woken up every <x> seconds until we have woken every destination
# that has outstanding catch-up.
WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC = 5
# will be woken up every <x> seconds after Synapse's startup until we have woken
# every destination that has outstanding catch-up.
CATCH_UP_STARTUP_INTERVAL_SEC = 5
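(A minimal sketch of the staggered wake-up these constants drive; asyncio is used purely for illustration, whereas Synapse itself schedules via Twisted's clock.)
import asyncio
from typing import Callable, Iterable
async def wake_destinations_needing_catchup(
    destinations: Iterable[str],
    wake: Callable[[str], None],
    interval_sec: float = 5,       # CATCH_UP_STARTUP_INTERVAL_SEC
) -> None:
    for destination in destinations:
        wake(destination)                  # kick that destination's loop
        await asyncio.sleep(interval_sec)  # stagger to avoid a stampede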
class AbstractFederationSender(metaclass=abc.ABCMeta):
@@ -416,10 +415,12 @@ class FederationSender(AbstractFederationSender):
/ hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
# Regularly wake up destinations that have outstanding PDUs to be caught up
self.clock.looping_call(
# wake up destinations that have outstanding PDUs to be caught up
self._catchup_after_startup_timer: Optional[
IDelayedCall
] = self.clock.call_later(
CATCH_UP_STARTUP_DELAY_SEC,
run_as_background_process,
WAKEUP_RETRY_PERIOD_SEC * 1000.0,
"wake_destinations_needing_catchup",
self._wake_destinations_needing_catchup,
)
@@ -965,6 +966,7 @@ class FederationSender(AbstractFederationSender):
if not destinations_to_wake:
# finished waking all destinations!
self._catchup_after_startup_timer = None
break
last_processed = destinations_to_wake[-1]
@@ -981,4 +983,4 @@ class FederationSender(AbstractFederationSender):
last_processed,
)
self.wake_destination(destination)
await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC)
await self.clock.sleep(CATCH_UP_STARTUP_INTERVAL_SEC)
@@ -432,6 +432,15 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
PREFIX = FEDERATION_V2_PREFIX
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
async def on_PUT(
self,
origin: str,
+2 -27
View File
@@ -653,7 +653,6 @@ class DeviceHandler(DeviceWorkerHandler):
async def store_dehydrated_device(
self,
user_id: str,
device_id: Optional[str],
device_data: JsonDict,
initial_device_display_name: Optional[str] = None,
) -> str:
@@ -662,7 +661,6 @@ class DeviceHandler(DeviceWorkerHandler):
Args:
user_id: the user that we are storing the device for
device_id: device id supplied by client
device_data: the dehydrated device information
initial_device_display_name: The display name to use for the device
Returns:
@@ -670,7 +668,7 @@ class DeviceHandler(DeviceWorkerHandler):
"""
device_id = await self.check_device_registered(
user_id,
device_id,
None,
initial_device_display_name,
)
old_device_id = await self.store.store_dehydrated_device(
@@ -722,22 +720,6 @@ class DeviceHandler(DeviceWorkerHandler):
return {"success": True}
async def delete_dehydrated_device(self, user_id: str, device_id: str) -> None:
"""
Delete a stored dehydrated device.
Args:
user_id: the user_id to delete the device from
device_id: id of the dehydrated device to delete
"""
success = await self.store.remove_dehydrated_device(user_id, device_id)
if not success:
raise errors.NotFoundError()
await self.delete_devices(user_id, [device_id])
await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
@wrap_as_background_process("_handle_new_device_update_async")
async def _handle_new_device_update_async(self) -> None:
"""Called when we have a new local device list update that we need to
@@ -1142,14 +1124,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
)
if resync:
# We mark as stale up front in case we get restarted.
await self.store.mark_remote_users_device_caches_as_stale([user_id])
run_as_background_process(
"_maybe_retry_device_resync",
self.multi_user_device_resync,
[user_id],
False,
)
await self.multi_user_device_resync([user_id])
else:
# Simply update the single device, since we know that is the only
# change (because of the single prev_id matching the current cache)
+2 -106
View File
@@ -13,11 +13,10 @@
# limitations under the License.
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Dict, Optional
from typing import TYPE_CHECKING, Any, Dict
from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.errors import SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
@@ -49,9 +48,6 @@ class DeviceMessageHandler:
self.store = hs.get_datastores().main
self.notifier = hs.get_notifier()
self.is_mine = hs.is_mine
if hs.config.experimental.msc3814_enabled:
self.event_sources = hs.get_event_sources()
self.device_handler = hs.get_device_handler()
# We only need to poke the federation sender explicitly if its on the
# same instance. Other federation sender instances will get notified by
@@ -307,103 +303,3 @@ class DeviceMessageHandler:
# Enqueue a new federation transaction to send the new
# device messages to each remote destination.
self.federation_sender.send_device_messages(destination)
async def get_events_for_dehydrated_device(
self,
requester: Requester,
device_id: str,
since_token: Optional[str],
limit: int,
) -> JsonDict:
"""Fetches up to `limit` events sent to `device_id` starting from `since_token`
and returns the new since token. If there are no more messages, returns an empty
array.
Args:
requester: the user requesting the messages
device_id: ID of the dehydrated device
since_token: stream id to start from when fetching messages
limit: the number of messages to fetch
Returns:
A dict containing the to-device messages, as well as a token that the client
can provide in the next call to fetch the next batch of messages
"""
user_id = requester.user.to_string()
# only allow fetching messages for the dehydrated device id currently associated
# with the user
dehydrated_device = await self.device_handler.get_dehydrated_device(user_id)
if dehydrated_device is None:
raise SynapseError(
HTTPStatus.FORBIDDEN,
"No dehydrated device exists",
Codes.FORBIDDEN,
)
dehydrated_device_id, _ = dehydrated_device
if device_id != dehydrated_device_id:
raise SynapseError(
HTTPStatus.FORBIDDEN,
"You may only fetch messages for your dehydrated device",
Codes.FORBIDDEN,
)
since_stream_id = 0
if since_token:
if not since_token.startswith("d"):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"from parameter %r has an invalid format" % (since_token,),
errcode=Codes.INVALID_PARAM,
)
try:
since_stream_id = int(since_token[1:])
except Exception:
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"from parameter %r has an invalid format" % (since_token,),
errcode=Codes.INVALID_PARAM,
)
# if we have a since token, delete any to-device messages before that token
# (since we now know that the device has received them)
deleted = await self.store.delete_messages_for_device(
user_id, device_id, since_stream_id
)
logger.debug(
"Deleted %d to-device messages up to %d for user_id %s device_id %s",
deleted,
since_stream_id,
user_id,
device_id,
)
to_token = self.event_sources.get_current_token().to_device_key
messages, stream_id = await self.store.get_messages_for_device(
user_id, device_id, since_stream_id, to_token, limit
)
for message in messages:
# Remove the message id before sending to client
message_id = message.pop("message_id", None)
if message_id:
set_tag(SynapseTags.TO_DEVICE_EDU_ID, message_id)
logger.debug(
"Returning %d to-device messages between %d and %d (current token: %d) for "
"dehydrated device %s, user_id %s",
len(messages),
since_stream_id,
stream_id,
to_token,
device_id,
user_id,
)
return {
"events": messages,
"next_batch": f"d{stream_id}",
}
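(The since_token/next_batch handling above uses a plain "d<stream_id>" format; a self-contained round-trip sketch:)
def encode_to_device_token(stream_id: int) -> str:
    return f"d{stream_id}"
def decode_to_device_token(token: str) -> int:
    if not token.startswith("d"):
        raise ValueError(f"invalid token {token!r}")
    return int(token[1:])  # also raises ValueError on malformed input
assert decode_to_device_token(encode_to_device_token(42)) == 42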
+1 -3
View File
@@ -277,9 +277,7 @@ class DirectoryHandler:
except RequestSendFailed:
raise SynapseError(502, "Failed to fetch alias")
except CodeMessageException as e:
logging.warning(
"Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg
)
logging.warning("Error retrieving alias")
if e.code == 404:
fed_result = None
else:
+2 -2
View File
@@ -277,7 +277,7 @@ class EventAuthHandler:
True if the proper room version and join rules are set for restricted access.
"""
# This only applies to room versions which support the new join rule.
if not room_version.restricted_join_rule:
if not room_version.msc3083_join_rules:
return False
# If there's no join rule, then it defaults to invite (so this doesn't apply).
@@ -292,7 +292,7 @@ class EventAuthHandler:
return True
# also check for MSC3787 behaviour
if room_version.knock_restricted_join_rule:
if room_version.msc3787_knock_restricted_join_rule:
return content_join_rule == JoinRules.KNOCK_RESTRICTED
return False
+35 -6
View File
@@ -105,12 +105,14 @@ backfill_processing_before_timer = Histogram(
)
# TODO: We can refactor this away now that there is only one backfill point again
class _BackfillPointType(Enum):
# a regular backwards extremity (ie, an event which we don't yet have, but which
# is referred to by other events in the DAG)
BACKWARDS_EXTREMITY = enum.auto()
# an MSC2716 "insertion event"
INSERTION_PONT = enum.auto()
@attr.s(slots=True, auto_attribs=True, frozen=True)
class _BackfillPoint:
@@ -271,10 +273,32 @@ class FederationHandler:
)
]
insertion_events_to_be_backfilled: List[_BackfillPoint] = []
if self.hs.config.experimental.msc2716_enabled:
insertion_events_to_be_backfilled = [
_BackfillPoint(event_id, depth, _BackfillPointType.INSERTION_PONT)
for event_id, depth in await self.store.get_insertion_event_backward_extremities_in_room(
room_id=room_id,
current_depth=current_depth,
# We only need to end up with 5 extremities combined with
# the backfill points to make the `/backfill` request ...
# (see the other comment above for more context).
limit=50,
)
]
logger.debug(
"_maybe_backfill_inner: backwards_extremities=%s insertion_events_to_be_backfilled=%s",
backwards_extremities,
insertion_events_to_be_backfilled,
)
# we now have a list of potential places to backpaginate from. We prefer to
# start with the most recent (ie, max depth), so let's sort the list.
sorted_backfill_points: List[_BackfillPoint] = sorted(
backwards_extremities,
itertools.chain(
backwards_extremities,
insertion_events_to_be_backfilled,
),
key=lambda e: -int(e.depth),
)
@@ -387,7 +411,10 @@ class FederationHandler:
# event but not anything before it. This would require looking at the
# state *before* the event, ignoring the special casing certain event
# types have.
event_ids_to_check = await self.store.get_successor_events(bp.event_id)
if bp.type == _BackfillPointType.INSERTION_PONT:
event_ids_to_check = [bp.event_id]
else:
event_ids_to_check = await self.store.get_successor_events(bp.event_id)
events_to_check = await self.store.get_events_as_list(
event_ids_to_check,
@@ -957,7 +984,7 @@ class FederationHandler:
# Note that this requires the /send_join request to come back to the
# same server.
prev_event_ids = None
if room_version.restricted_join_rule:
if room_version.msc3083_join_rules:
# Note that the room's state can change out from under us and render our
# nice join rules-conformant event non-conformant by the time we build the
# event. When this happens, our validation at the end fails and we respond
@@ -1581,7 +1608,9 @@ class FederationHandler:
event.content["third_party_invite"]["signed"]["token"],
)
original_invite = None
prev_state_ids = await context.get_prev_state_ids(StateFilter.from_types([key]))
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)])
)
original_invite_id = prev_state_ids.get(key)
if original_invite_id:
original_invite = await self.store.get_event(
@@ -1634,7 +1663,7 @@ class FederationHandler:
token = signed["token"]
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types([(EventTypes.ThirdPartyInvite, token)])
StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)])
)
invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token))
+109
View File
@@ -601,6 +601,18 @@ class FederationEventHandler:
room_id, [(event, context)]
)
# If we're joining the room again, check if there is new marker
# state indicating that there is new history imported somewhere in
# the DAG. Multiple markers can exist in the current state with
# unique state_keys.
#
# Do this after the state from the remote join was persisted (via
# `persist_events_and_notify`). Otherwise we can run into a
# situation where the create event doesn't exist yet in the
# `current_state_events`
for e in state:
await self._handle_marker_event(origin, e)
return stream_id_after_persist
async def update_state_for_partial_state_event(
@@ -903,6 +915,13 @@ class FederationEventHandler:
)
)
# We construct the event lists in source order from `/backfill` response because
# it's a) easiest, but also b) the order in which we process things matters for
# MSC2716 historical batches because many historical events are all at the same
# `depth` and we rely on the tenuous sort that the other server gave us and hope
# they're doing their best. The brittle nature of this ordering for historical
# messages over federation is one of the reasons why we don't want to continue
# on MSC2716 until we have online topological ordering.
events_with_failed_pull_attempts, fresh_events = partition(
new_events, lambda e: e.event_id in event_ids_with_failed_pull_attempts
)
@@ -1441,6 +1460,8 @@ class FederationEventHandler:
await self._run_push_actions_and_persist_event(event, context, backfilled)
await self._handle_marker_event(origin, event)
if backfilled or context.rejected:
return
@@ -1538,6 +1559,94 @@ class FederationEventHandler:
except Exception:
logger.exception("Failed to resync device for %s", sender)
@trace
async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> None:
"""Handles backfilling the insertion event when we receive a marker
event that points to one.
Args:
origin: Origin of the event. Will be called to get the insertion event
marker_event: The event to process
"""
if marker_event.type != EventTypes.MSC2716_MARKER:
# Not a marker event
return
if marker_event.rejected_reason is not None:
# Rejected event
return
# Skip processing a marker event if the room version doesn't
# support it or the event is not from the room creator.
room_version = await self._store.get_room_version(marker_event.room_id)
create_event = await self._store.get_create_event_for_room(marker_event.room_id)
if not room_version.msc2175_implicit_room_creator:
room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
else:
room_creator = create_event.sender
if not room_version.msc2716_historical and (
not self._config.experimental.msc2716_enabled
or marker_event.sender != room_creator
):
return
logger.debug("_handle_marker_event: received %s", marker_event)
insertion_event_id = marker_event.content.get(
EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE
)
if insertion_event_id is None:
# Nothing to retrieve then (invalid marker)
return
already_seen_insertion_event = await self._store.have_seen_event(
marker_event.room_id, insertion_event_id
)
if already_seen_insertion_event:
# No need to process a marker again if we have already seen the
# insertion event that it was pointing to
return
logger.debug(
"_handle_marker_event: backfilling insertion event %s", insertion_event_id
)
await self._get_events_and_persist(
origin,
marker_event.room_id,
[insertion_event_id],
)
insertion_event = await self._store.get_event(
insertion_event_id, allow_none=True
)
if insertion_event is None:
logger.warning(
"_handle_marker_event: server %s didn't return insertion event %s for marker %s",
origin,
insertion_event_id,
marker_event.event_id,
)
return
logger.debug(
"_handle_marker_event: succesfully backfilled insertion event %s from marker event %s",
insertion_event,
marker_event,
)
await self._store.insert_insertion_extremity(
insertion_event_id, marker_event.room_id
)
logger.debug(
"_handle_marker_event: insertion extremity added for %s from marker event %s",
insertion_event,
marker_event,
)
async def backfill_event_id(
self, destinations: List[str], room_id: str, event_id: str
) -> PulledPduInfo:
+191 -111
View File
@@ -53,7 +53,6 @@ from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field
from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler
from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
from synapse.logging import opentracing
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -61,6 +60,7 @@ from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
MutableStateMap,
PersistedEventPosition,
Requester,
RoomAlias,
@@ -486,7 +486,6 @@ class EventCreationHandler:
self._events_shard_config = self.config.worker.events_shard_config
self._instance_name = hs.get_instance_name()
self._notifier = hs.get_notifier()
self._worker_lock_handler = hs.get_worker_locks_handler()
self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state
@@ -561,6 +560,8 @@ class EventCreationHandler:
expiry_ms=30 * 60 * 1000,
)
self._msc3970_enabled = hs.config.experimental.msc3970_enabled
async def create_event(
self,
requester: Requester,
@@ -572,6 +573,7 @@ class EventCreationHandler:
state_event_ids: Optional[List[str]] = None,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
depth: Optional[int] = None,
state_map: Optional[StateMap[str]] = None,
for_batch: bool = False,
@@ -597,7 +599,7 @@ class EventCreationHandler:
allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
cases like MSC2716.
prev_event_ids:
the forward extremities to use as the prev_events for the
new event.
@@ -612,10 +614,13 @@ class EventCreationHandler:
If non-None, prev_event_ids must also be provided.
state_event_ids:
The full state at a given event. This was previously used particularly
by the MSC2716 /batch_send endpoint. This should normally be left as
None, which will cause the auth_event_ids to be calculated based on the
room state at the prev_events.
The full state at a given event. This is used particularly by the MSC2716
/batch_send endpoint. One use case is with insertion events which float at
the beginning of a historical batch and don't have any `prev_events` to
derive from; we add all of these state events as the explicit state so the
rest of the historical batch can inherit the same state and state_group.
This should normally be left as None, which will cause the auth_event_ids
to be calculated based on the room state at the prev_events.
require_consent: Whether to check if the requester has
consented to the privacy policy.
@@ -624,6 +629,10 @@ class EventCreationHandler:
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -708,6 +717,8 @@ class EventCreationHandler:
builder.internal_metadata.outlier = outlier
builder.internal_metadata.historical = historical
event, unpersisted_context = await self.create_new_client_event(
builder=builder,
requester=requester,
@@ -738,7 +749,7 @@ class EventCreationHandler:
prev_event_id = state_map.get((EventTypes.Member, event.sender))
else:
prev_state_ids = await unpersisted_context.get_prev_state_ids(
StateFilter.from_types([(EventTypes.Member, event.sender)])
StateFilter.from_types([(EventTypes.Member, None)])
)
prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
prev_event = (
@@ -860,7 +871,7 @@ class EventCreationHandler:
return None
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types([(event.type, event.state_key)])
StateFilter.from_types([(event.type, None)])
)
prev_event_id = prev_state_ids.get((event.type, event.state_key))
if not prev_event_id:
@@ -876,53 +887,6 @@ class EventCreationHandler:
return prev_event
return None
async def get_event_id_from_transaction(
self,
requester: Requester,
txn_id: str,
room_id: str,
) -> Optional[str]:
"""For the given transaction ID and room ID, check if there is a matching event ID.
Args:
requester: The requester making the request in the context of which we want
to fetch the event.
txn_id: The transaction ID.
room_id: The room ID.
Returns:
An event ID if one could be found, None otherwise.
"""
existing_event_id = None
# According to the spec, transactions are scoped to a user's device ID.
if requester.device_id:
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_device_id(
room_id,
requester.user.to_string(),
requester.device_id,
txn_id,
)
)
if existing_event_id:
return existing_event_id
# Some requesters don't have device IDs (appservice, guests, and access
# tokens minted with the admin API); fall back to checking the access token
# ID, which should be close enough.
if requester.access_token_id:
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_token_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
)
return existing_event_id
async def get_event_from_transaction(
self,
requester: Requester,
@@ -941,11 +905,35 @@ class EventCreationHandler:
Returns:
An event if one could be found, None otherwise.
"""
existing_event_id = await self.get_event_id_from_transaction(
requester, txn_id, room_id
)
if existing_event_id:
return await self.store.get_event(existing_event_id)
if self._msc3970_enabled and requester.device_id:
# When MSC3970 is enabled, we lookup for events sent by the same device first,
# and fallback to the old behaviour if none were found.
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_device_id(
room_id,
requester.user.to_string(),
requester.device_id,
txn_id,
)
)
if existing_event_id:
return await self.store.get_event(existing_event_id)
# Pre-MSC3970, we looked up for events that were sent by the same session by
# using the access token ID.
if requester.access_token_id:
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_token_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
)
if existing_event_id:
return await self.store.get_event(existing_event_id)
return None
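(Condensing the lookup order above into a hedged sketch; the two store methods are the ones shown in this hunk.)
async def find_event_for_transaction(store, requester, room_id, txn_id, msc3970_enabled):
    if msc3970_enabled and requester.device_id:
        # MSC3970: prefer the device-scoped transaction mapping
        event_id = await store.get_event_id_from_transaction_id_and_device_id(
            room_id, requester.user.to_string(), requester.device_id, txn_id
        )
        if event_id:
            return await store.get_event(event_id)
    if requester.access_token_id:
        # legacy: fall back to the access-token-scoped mapping
        event_id = await store.get_event_id_from_transaction_id_and_token_id(
            room_id, requester.user.to_string(), requester.access_token_id, txn_id
        )
        if event_id:
            return await store.get_event(event_id)
    return None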
async def create_and_send_nonmember_event(
@@ -959,6 +947,7 @@ class EventCreationHandler:
txn_id: Optional[str] = None,
ignore_shadow_ban: bool = False,
outlier: bool = False,
historical: bool = False,
depth: Optional[int] = None,
) -> Tuple[EventBase, int]:
"""
@@ -972,16 +961,19 @@ class EventCreationHandler:
allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
cases like MSC2716.
prev_event_ids:
The event IDs to use as the prev events.
Should normally be left as None to automatically request them
from the database.
state_event_ids:
The full state at a given event. This was previously used particularly
by the MSC2716 /batch_send endpoint. This should normally be left as
None, which will cause the auth_event_ids to be calculated based on the
room state at the prev_events.
The full state at a given event. This is used particularly by the MSC2716
/batch_send endpoint. One use case is with insertion events which float at
the beginning of a historical batch and don't have any `prev_events` to
derive from; we add all of these state events as the explicit state so the
rest of the historical batch can inherit the same state and state_group.
This should normally be left as None, which will cause the auth_event_ids
to be calculated based on the room state at the prev_events.
ratelimit: Whether to rate limit this send.
txn_id: The transaction ID.
ignore_shadow_ban: True if shadow-banned users should be allowed to
@@ -989,6 +981,9 @@ class EventCreationHandler:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -1033,37 +1028,6 @@ class EventCreationHandler:
event.internal_metadata.stream_ordering,
)
async with self._worker_lock_handler.acquire_read_write_lock(
DELETE_ROOM_LOCK_NAME, room_id, write=False
):
return await self._create_and_send_nonmember_event_locked(
requester=requester,
event_dict=event_dict,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
ratelimit=ratelimit,
txn_id=txn_id,
ignore_shadow_ban=ignore_shadow_ban,
outlier=outlier,
depth=depth,
)
async def _create_and_send_nonmember_event_locked(
self,
requester: Requester,
event_dict: dict,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
ratelimit: bool = True,
txn_id: Optional[str] = None,
ignore_shadow_ban: bool = False,
outlier: bool = False,
depth: Optional[int] = None,
) -> Tuple[EventBase, int]:
room_id = event_dict["room_id"]
# If we don't have any prev event IDs specified then we need to
# check that the host is in the room (as otherwise populating the
# prev events will fail), at which point we may as well check the
@@ -1089,6 +1053,7 @@ class EventCreationHandler:
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
outlier=outlier,
historical=historical,
depth=depth,
)
context = await unpersisted_context.persist(event)
@@ -1180,7 +1145,7 @@ class EventCreationHandler:
allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
cases like MSC2716.
prev_event_ids:
the forward extremities to use as the prev_events for the
new event.
@@ -1193,10 +1158,13 @@ class EventCreationHandler:
based on the room state at the prev_events.
state_event_ids:
The full state at a given event. This was previously used particularly
by the MSC2716 /batch_send endpoint. This should normally be left as
None, which will cause the auth_event_ids to be calculated based on the
room state at the prev_events.
The full state at a given event. This is used particularly by the MSC2716
/batch_send endpoint. One use case is with insertion events which float at
the beginning of a historical batch and don't have any `prev_events` to
derive from; we add all of these state events as the explicit state so the
rest of the historical batch can inherit the same state and state_group.
This should normally be left as None, which will cause the auth_event_ids
to be calculated based on the room state at the prev_events.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
@@ -1293,6 +1261,52 @@ class EventCreationHandler:
if builder.internal_metadata.outlier:
event.internal_metadata.outlier = True
context = EventContext.for_outlier(self._storage_controllers)
elif (
event.type == EventTypes.MSC2716_INSERTION
and state_event_ids
and builder.internal_metadata.is_historical()
):
# Add explicit state to the insertion event so it has state to derive
# from even though it's floating with no `prev_events`. The rest of
# the batch can derive from this state and state_group.
#
# TODO(faster_joins): figure out how this works, and make sure that the
# old state is complete.
# https://github.com/matrix-org/synapse/issues/13003
metadata = await self.store.get_metadata_for_events(state_event_ids)
state_map_for_event: MutableStateMap[str] = {}
for state_id in state_event_ids:
data = metadata.get(state_id)
if data is None:
# We're trying to persist a new historical batch of events
# with the given state, e.g. via
# `RoomBatchSendEventRestServlet`. The state can be inferred
# by Synapse or set directly by the client.
#
# Either way, we should have persisted all the state before
# getting here.
raise Exception(
f"State event {state_id} not found in DB,"
" Synapse should have persisted it before using it."
)
if data.state_key is None:
raise Exception(
f"Trying to set non-state event {state_id} as state"
)
state_map_for_event[(data.event_type, data.state_key)] = state_id
# TODO(faster_joins): check how MSC2716 works and whether we can have
# partial state here
# https://github.com/matrix-org/synapse/issues/13003
context = await self.state.calculate_context_info(
event,
state_ids_before_event=state_map_for_event,
partial_state=False,
)
else:
context = await self.state.calculate_context_info(event)
@@ -1619,11 +1633,12 @@ class EventCreationHandler:
if state_entry.state_group in self._external_cache_joined_hosts_updates:
return
state = await state_entry.get_state(
self._storage_controllers.state, StateFilter.all()
)
with opentracing.start_active_span("get_joined_hosts"):
joined_hosts = (
await self._storage_controllers.state.get_joined_hosts(
event.room_id, state_entry
)
joined_hosts = await self.store.get_joined_hosts(
event.room_id, state, state_entry
)
# Note that the expiry times must be larger than the expiry time in
@@ -1861,6 +1876,28 @@ class EventCreationHandler:
403, "Redacting server ACL events is not permitted"
)
# Add a little safety stop-gap to prevent people from trying to
# redact MSC2716 related events when they're in a room version
# which does not support it yet. We allow people to use MSC2716
# events in existing room versions but only from the room
# creator since it does not require any changes to the auth
# rules and, in effect, the redaction algorithm. In the
# supported room version, we add the `historical` power level to
# auth the MSC2716 related events and adjust the redaction
# algorithm to keep the `historical` field around (redacting an
# event should only strip fields which don't affect the
# structural protocol level).
is_msc2716_event = (
original_event.type == EventTypes.MSC2716_INSERTION
or original_event.type == EventTypes.MSC2716_BATCH
or original_event.type == EventTypes.MSC2716_MARKER
)
if not room_version_obj.msc2716_historical and is_msc2716_event:
raise AuthError(
403,
"Redacting MSC2716 events is not supported in this room version",
)
event_types = event_auth.auth_types_for_event(event.room_version, event)
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types(event_types)
@@ -1898,12 +1935,58 @@ class EventCreationHandler:
if prev_state_ids:
raise AuthError(403, "Changing the room create event is forbidden")
if event.type == EventTypes.MSC2716_INSERTION:
room_version = await self.store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
create_event = await self.store.get_create_event_for_room(event.room_id)
if not room_version_obj.msc2175_implicit_room_creator:
room_creator = create_event.content.get(
EventContentFields.ROOM_CREATOR
)
else:
room_creator = create_event.sender
# Only check an insertion event if the room version
# supports it or the event is from the room creator.
if room_version_obj.msc2716_historical or (
self.config.experimental.msc2716_enabled
and event.sender == room_creator
):
next_batch_id = event.content.get(
EventContentFields.MSC2716_NEXT_BATCH_ID
)
conflicting_insertion_event_id = None
if next_batch_id:
conflicting_insertion_event_id = (
await self.store.get_insertion_event_id_by_batch_id(
event.room_id, next_batch_id
)
)
if conflicting_insertion_event_id is not None:
# The current insertion event that we're processing is invalid
# because an insertion event already exists in the room with the
# same next_batch_id. We can't allow multiple insertion events with
# the same next_batch_id because the batch pointing would become
# ambiguous, e.g. we couldn't determine which insertion event a
# given batch event points to.
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Another insertion event already exists with the same next_batch_id",
errcode=Codes.INVALID_PARAM,
)
# Mark any `m.historical` messages as backfilled so they don't appear
# in `/sync` and get the proper decrementing `stream_ordering` as we import them
backfilled = False
if event.internal_metadata.is_historical():
backfilled = True
assert self._storage_controllers.persistence is not None
(
persisted_events,
max_stream_token,
) = await self._storage_controllers.persistence.persist_events(
events_and_context,
events_and_context, backfilled=backfilled
)
events_and_pos = []
@@ -1977,10 +2060,7 @@ class EventCreationHandler:
)
for room_id in room_ids:
async with self._worker_lock_handler.acquire_read_write_lock(
DELETE_ROOM_LOCK_NAME, room_id, write=False
):
dummy_event_sent = await self._send_dummy_event_for_room(room_id)
dummy_event_sent = await self._send_dummy_event_for_room(room_id)
if not dummy_event_sent:
# Did not find a valid user in the room, so remove from future attempts
+4 -19
View File
@@ -46,11 +46,6 @@ logger = logging.getLogger(__name__)
BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3
PURGE_HISTORY_LOCK_NAME = "purge_history_lock"
DELETE_ROOM_LOCK_NAME = "delete_room_lock"
@attr.s(slots=True, auto_attribs=True)
class PurgeStatus:
"""Object tracking the status of a purge request
@@ -147,7 +142,6 @@ class PaginationHandler:
self._server_name = hs.hostname
self._room_shutdown_handler = hs.get_room_shutdown_handler()
self._relations_handler = hs.get_relations_handler()
self._worker_locks = hs.get_worker_locks_handler()
self.pagination_lock = ReadWriteLock()
# IDs of rooms in which there is currently an active purge *or delete* operation.
@@ -362,9 +356,7 @@ class PaginationHandler:
"""
self._purges_in_progress_by_room.add(room_id)
try:
async with self._worker_locks.acquire_read_write_lock(
PURGE_HISTORY_LOCK_NAME, room_id, write=True
):
async with self.pagination_lock.write(room_id):
await self._storage_controllers.purge_events.purge_history(
room_id, token, delete_local_events
)
@@ -420,10 +412,7 @@ class PaginationHandler:
room_id: room to be purged
force: set true to skip checking for joined users.
"""
async with self._worker_locks.acquire_multi_read_write_lock(
[(PURGE_HISTORY_LOCK_NAME, room_id), (DELETE_ROOM_LOCK_NAME, room_id)],
write=True,
):
async with self.pagination_lock.write(room_id):
# first check that we have no users in this room
if not force:
joined = await self.store.is_host_joined(room_id, self._server_name)
@@ -482,9 +471,7 @@ class PaginationHandler:
room_token = from_token.room_key
async with self._worker_locks.acquire_read_write_lock(
PURGE_HISTORY_LOCK_NAME, room_id, write=False
):
async with self.pagination_lock.read(room_id):
(membership, member_event_id) = (None, None)
if not use_admin_priviledge:
(
@@ -760,9 +747,7 @@ class PaginationHandler:
self._purges_in_progress_by_room.add(room_id)
try:
async with self._worker_locks.acquire_read_write_lock(
PURGE_HISTORY_LOCK_NAME, room_id, write=True
):
async with self.pagination_lock.write(room_id):
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
self._delete_by_id[
delete_id
+13 -24
View File
@@ -95,12 +95,13 @@ bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time",
get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"])
notify_reason_counter = Counter(
"synapse_handler_presence_notify_reason", "", ["locality", "reason"]
"synapse_handler_presence_notify_reason", "", ["reason"]
)
state_transition_counter = Counter(
"synapse_handler_presence_state_transition", "", ["locality", "from", "to"]
"synapse_handler_presence_state_transition", "", ["from", "to"]
)
# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000
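A minimal sketch of that rule, assuming millisecond timestamps; this helper is illustrative only and not part of Synapse:

def is_currently_active(now_ms: int, last_active_ts: int) -> bool:
    # A user counts as "currently_active" if their last activity fell
    # within the LAST_ACTIVE_GRANULARITY window defined above.
    return now_ms - last_active_ts < LAST_ACTIVE_GRANULARITY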
@@ -566,8 +567,8 @@ class WorkerPresenceHandler(BasePresenceHandler):
for new_state in states:
old_state = self.user_to_current_state.get(new_state.user_id)
self.user_to_current_state[new_state.user_id] = new_state
is_mine = self.is_mine_id(new_state.user_id)
if not old_state or should_notify(old_state, new_state, is_mine):
if not old_state or should_notify(old_state, new_state):
state_to_notify.append(new_state)
stream_id = token
@@ -1498,31 +1499,23 @@ class PresenceHandler(BasePresenceHandler):
)
def should_notify(
old_state: UserPresenceState, new_state: UserPresenceState, is_mine: bool
) -> bool:
def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool:
"""Decides if a presence state change should be sent to interested parties."""
user_location = "remote"
if is_mine:
user_location = "local"
if old_state == new_state:
return False
if old_state.status_msg != new_state.status_msg:
notify_reason_counter.labels(user_location, "status_msg_change").inc()
notify_reason_counter.labels("status_msg_change").inc()
return True
if old_state.state != new_state.state:
notify_reason_counter.labels(user_location, "state_change").inc()
state_transition_counter.labels(
user_location, old_state.state, new_state.state
).inc()
notify_reason_counter.labels("state_change").inc()
state_transition_counter.labels(old_state.state, new_state.state).inc()
return True
if old_state.state == PresenceState.ONLINE:
if new_state.currently_active != old_state.currently_active:
notify_reason_counter.labels(user_location, "current_active_change").inc()
notify_reason_counter.labels("current_active_change").inc()
return True
if (
@@ -1531,16 +1524,12 @@ def should_notify(
):
# Only notify about last active bumps if we're not currently active
if not new_state.currently_active:
notify_reason_counter.labels(
user_location, "last_active_change_online"
).inc()
notify_reason_counter.labels("last_active_change_online").inc()
return True
elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
# Always notify for a transition where last active gets bumped.
notify_reason_counter.labels(
user_location, "last_active_change_not_online"
).inc()
notify_reason_counter.labels("last_active_change_not_online").inc()
return True
return False
@@ -2000,7 +1989,7 @@ def handle_update(
)
# Check whether the change was something worth notifying about
if should_notify(prev_state, new_state, is_mine):
if should_notify(prev_state, new_state):
new_state = new_state.copy_and_replace(last_federation_update_ts=now)
persist_and_notify = True
+2 -2
View File
@@ -68,7 +68,7 @@ class ProfileHandler:
if self.hs.is_mine(target_user):
profileinfo = await self.store.get_profileinfo(target_user)
if profileinfo.display_name is None and profileinfo.avatar_url is None:
if profileinfo.display_name is None:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
return {
@@ -163,7 +163,7 @@ class ProfileHandler:
400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
)
displayname_to_set: Optional[str] = new_displayname.strip()
displayname_to_set: Optional[str] = new_displayname
if new_displayname == "":
displayname_to_set = None
+7 -2
View File
@@ -143,10 +143,15 @@ class RegistrationHandler:
assigned_user_id: Optional[str] = None,
inhibit_user_in_use_error: bool = False,
) -> None:
if types.contains_invalid_mxid_characters(localpart):
if types.contains_invalid_mxid_characters(
localpart, self.hs.config.experimental.msc4009_e164_mxids
):
extra_chars = (
"=_-./+" if self.hs.config.experimental.msc4009_e164_mxids else "=_-./"
)
raise SynapseError(
400,
"User ID can only contain characters a-z, 0-9, or '=_-./+'",
f"User ID can only contain characters a-z, 0-9, or '{extra_chars}'",
Codes.INVALID_USERNAME,
)
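A hedged sketch of the charset rule behind that error message. The real check is `types.contains_invalid_mxid_characters`; the regex and the `allow_plus` flag below are illustrative stand-ins for the MSC4009 toggle:

import re

def has_invalid_mxid_characters(localpart: str, allow_plus: bool = False) -> bool:
    # Allowed: a-z, 0-9 and '=_-./', plus '+' when MSC4009 E.164 MXIDs
    # are enabled; any other character makes the localpart invalid.
    allowed = r"a-z0-9=_\-./+" if allow_plus else r"a-z0-9=_\-./"
    return re.search(f"[^{allowed}]", localpart) is not None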
+1 -1
View File
@@ -1116,7 +1116,7 @@ class RoomCreationHandler:
preset_config, config = self._room_preset_config(room_config)
# MSC2175 removes the creator field from the create event.
if not room_version.implicit_room_creator:
if not room_version.msc2175_implicit_room_creator:
creation_content["creator"] = creator_id
creation_event, unpersisted_creation_context = await create_event(
EventTypes.Create, creation_content, False
+466
View File
@@ -0,0 +1,466 @@
import logging
from typing import TYPE_CHECKING, List, Tuple
from synapse.api.constants import EventContentFields, EventTypes
from synapse.appservice import ApplicationService
from synapse.http.servlet import assert_params_in_dict
from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.util.stringutils import random_string
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomBatchHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastores().main
self._state_storage_controller = hs.get_storage_controllers().state
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
async def inherit_depth_from_prev_ids(self, prev_event_ids: List[str]) -> int:
"""Finds the depth which would sort it after the most-recent
prev_event_id but before the successors of those events. If no
successors are found, we assume it's an historical extremity part of the
current batch and use the same depth of the prev_event_ids.
Args:
prev_event_ids: List of prev event IDs
Returns:
Inherited depth
"""
(
most_recent_prev_event_id,
most_recent_prev_event_depth,
) = await self.store.get_max_depth_of(prev_event_ids)
# We want to insert the historical event after the `prev_event` but before the successor event
#
# We inherit depth from the successor event instead of the `prev_event`
# because events returned from `/messages` are first sorted by `topological_ordering`
# (which is just the `depth`) and then tie-broken by `stream_ordering`.
#
# We mark these inserted historical events as "backfilled", which gives them a
# negative `stream_ordering`. If we use the same depth as the `prev_event`,
# then our historical event will tie-break and be sorted before the `prev_event`
# when it should come after.
#
# We want to use the successor event's depth so the historical events appear
# after the `prev_event` (their inherited `depth` is larger) but before the
# successor event (their negative `stream_ordering` wins the tie-break).
assert most_recent_prev_event_id is not None
successor_event_ids = await self.store.get_successor_events(
most_recent_prev_event_id
)
# If we can't find any successor events, then it's a forward extremity of
# historical messages and we can just inherit from the previous historical
# event, which we can already assume has the correct depth for where we
# want to insert.
if not successor_event_ids:
depth = most_recent_prev_event_depth
else:
(
_,
oldest_successor_depth,
) = await self.store.get_min_depth_of(successor_event_ids)
depth = oldest_successor_depth
return depth
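A worked example of the rule, with invented depths and stream orderings, showing why the successor's depth is the right one to inherit:

# (event_id, depth, stream_ordering) triples; all values invented.
prev_event = ("$prev", 5, 100)
successor = ("$succ", 6, 101)
historical = ("$hist", 6, -1)  # inherits successor depth, backfilled

# /messages sorts by (topological_ordering, stream_ordering), where
# topological_ordering is the depth. The historical event lands after
# $prev (larger depth) but before $succ (negative stream_ordering).
events = sorted([successor, historical, prev_event], key=lambda e: (e[1], e[2]))
assert events == [prev_event, historical, successor]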
def create_insertion_event_dict(
self, sender: str, room_id: str, origin_server_ts: int
) -> JsonDict:
"""Creates an event dict for an "insertion" event with the proper fields
and a random batch ID.
Args:
sender: The event author MXID
room_id: The room ID that the event belongs to
origin_server_ts: Timestamp when the event was sent
Returns:
The new event dictionary to insert.
"""
next_batch_id = random_string(8)
insertion_event = {
"type": EventTypes.MSC2716_INSERTION,
"sender": sender,
"room_id": room_id,
"content": {
EventContentFields.MSC2716_NEXT_BATCH_ID: next_batch_id,
EventContentFields.MSC2716_HISTORICAL: True,
},
"origin_server_ts": origin_server_ts,
}
return insertion_event
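A hypothetical usage sketch; the handler reference, user ID, room ID, and timestamp are all invented for illustration:

insertion = room_batch_handler.create_insertion_event_dict(
    sender="@historybot:example.org",
    room_id="!history:example.org",
    origin_server_ts=1_600_000_000_000,
)
# insertion["content"] now carries a random 8-character next_batch_id
# that the *next* (older) batch can target via batch_id_to_connect_to.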
async def create_requester_for_user_id_from_app_service(
self, user_id: str, app_service: ApplicationService
) -> Requester:
"""Creates a new requester for the given user_id
and validates that the app service is allowed to control
the given user.
Args:
user_id: The author MXID that the app service is controlling
app_service: The app service that controls the user
Returns:
Requester object
"""
await self.auth.validate_appservice_can_control_user_id(app_service, user_id)
return create_requester(user_id, app_service=app_service)
async def get_most_recent_full_state_ids_from_event_id_list(
self, event_ids: List[str]
) -> List[str]:
"""Find the most recent event_id and grab the full state at that event.
We will use this as a base to auth our historical messages against.
Args:
event_ids: List of event IDs to look at
Returns:
List of event IDs
"""
(
most_recent_event_id,
_,
) = await self.store.get_max_depth_of(event_ids)
# mapping from (type, state_key) -> state_event_id
assert most_recent_event_id is not None
prev_state_map = await self._state_storage_controller.get_state_ids_for_event(
most_recent_event_id
)
# List of state event IDs
full_state_ids = list(prev_state_map.values())
return full_state_ids
async def persist_state_events_at_start(
self,
state_events_at_start: List[JsonDict],
room_id: str,
initial_state_event_ids: List[str],
app_service_requester: Requester,
) -> List[str]:
"""Takes all `state_events_at_start` event dictionaries and creates/persists
them in a floating state event chain which don't resolve into the current room
state. They are floating because they reference no prev_events which disconnects
them from the normal DAG.
Args:
state_events_at_start:
room_id: Room where you want the events persisted.
initial_state_event_ids:
The base set of state for the historical batch which the floating
state chain will derive from. This should probably be the state
from the `prev_event` defined by `/batch_send?prev_event_id=$abc`.
app_service_requester: The requester of an application service.
Returns:
List of state event IDs we just persisted
"""
assert app_service_requester.app_service
state_event_ids_at_start = []
state_event_ids = initial_state_event_ids.copy()
# Make the state events float off on their own by specifying no
# prev_events for the first one in the chain so we don't have a bunch of
# `@mxid joined the room` noise between each batch.
prev_event_ids_for_state_chain: List[str] = []
for index, state_event in enumerate(state_events_at_start):
assert_params_in_dict(
state_event, ["type", "origin_server_ts", "content", "sender"]
)
logger.debug(
"RoomBatchSendEventRestServlet inserting state_event=%s", state_event
)
event_dict = {
"type": state_event["type"],
"origin_server_ts": state_event["origin_server_ts"],
"content": state_event["content"],
"room_id": room_id,
"sender": state_event["sender"],
"state_key": state_event["state_key"],
}
# Mark all events as historical
event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
# TODO: This is pretty much the same as some other code to handle inserting state in this file
if event_dict["type"] == EventTypes.Member:
membership = event_dict["content"].get("membership", None)
event_id, _ = await self.room_member_handler.update_membership(
await self.create_requester_for_user_id_from_app_service(
state_event["sender"], app_service_requester.app_service
),
target=UserID.from_string(event_dict["state_key"]),
room_id=room_id,
action=membership,
content=event_dict["content"],
historical=True,
# Only the first event in the state chain should be floating.
# The rest should hang off each other in a chain.
allow_no_prev_events=index == 0,
prev_event_ids=prev_event_ids_for_state_chain,
# The first event in the state chain is floating with no
# `prev_events` which means it can't derive state from
# anywhere automatically. So we need to set some state
# explicitly.
#
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
# reference and will also be updated in the event when we
# append to it later.
state_event_ids=state_event_ids.copy(),
)
else:
(
event,
_,
) = await self.event_creation_handler.create_and_send_nonmember_event(
await self.create_requester_for_user_id_from_app_service(
state_event["sender"], app_service_requester.app_service
),
event_dict,
historical=True,
# Only the first event in the state chain should be floating.
# The rest should hang off each other in a chain.
allow_no_prev_events=index == 0,
prev_event_ids=prev_event_ids_for_state_chain,
# The first event in the state chain is floating with no
# `prev_events` which means it can't derive state from
# anywhere automatically. So we need to set some state
# explicitly.
#
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
# reference and will also be updated in the event when we append to it later.
state_event_ids=state_event_ids.copy(),
)
event_id = event.event_id
state_event_ids_at_start.append(event_id)
state_event_ids.append(event_id)
# Connect all the state in a floating chain
prev_event_ids_for_state_chain = [event_id]
return state_event_ids_at_start
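To make the chaining concrete, here is the shape of a hypothetical resulting state chain for three membership events (all IDs invented):

# $join_alice   prev_events=[]              (floats; state set explicitly)
# $join_bob     prev_events=[$join_alice]
# $join_carol   prev_events=[$join_bob]
#
# Only the first event floats with no prev_events; each later event
# hangs off the previous one and inherits the accumulated state,
# which is why state_event_ids grows by one ID per iteration above.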
async def persist_historical_events(
self,
events_to_create: List[JsonDict],
room_id: str,
inherited_depth: int,
initial_state_event_ids: List[str],
app_service_requester: Requester,
) -> List[str]:
"""Create and persists all events provided sequentially. Handles the
complexity of creating events in chronological order so they can
reference each other by prev_event but still persists in
reverse-chronoloical order so they have the correct
(topological_ordering, stream_ordering) and sort correctly from
/messages.
Args:
events_to_create: List of historical events to create in JSON
dictionary format.
room_id: Room where you want the events persisted.
inherited_depth: The depth to create the events at (you will
probably get this by calling inherit_depth_from_prev_ids(...)).
initial_state_event_ids:
This is used to set explicit state for the insertion event at
the start of the historical batch since it's floating with no
prev_events to derive state from automatically.
app_service_requester: The requester of an application service.
Returns:
List of persisted event IDs
"""
assert app_service_requester.app_service
# We expect the first event in a historical batch to be an insertion event
assert events_to_create[0]["type"] == EventTypes.MSC2716_INSERTION
# We expect the last event in a historical batch to be a batch event
assert events_to_create[-1]["type"] == EventTypes.MSC2716_BATCH
# Make the historical event chain float off on its own by specifying no
# prev_events for the first event in the chain which causes the HS to
# ask for the state at the start of the batch later.
prev_event_ids: List[str] = []
event_ids = []
events_to_persist = []
for index, ev in enumerate(events_to_create):
assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
assert self.hs.is_mine_id(ev["sender"]), "User must be our own: %s" % (
ev["sender"],
)
event_dict = {
"type": ev["type"],
"origin_server_ts": ev["origin_server_ts"],
"content": ev["content"],
"room_id": room_id,
"sender": ev["sender"], # requester.user.to_string(),
"prev_events": prev_event_ids.copy(),
}
# Mark all events as historical
event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
event, unpersisted_context = await self.event_creation_handler.create_event(
await self.create_requester_for_user_id_from_app_service(
ev["sender"], app_service_requester.app_service
),
event_dict,
# Only the first event (which is the insertion event) in the
# chain should be floating. The rest should hang off each other
# in a chain.
allow_no_prev_events=index == 0,
prev_event_ids=event_dict.get("prev_events"),
# Since the first event (which is the insertion event) in the
# chain is floating with no `prev_events`, it can't derive state
# from anywhere automatically. So we need to set some state
# explicitly.
state_event_ids=initial_state_event_ids if index == 0 else None,
historical=True,
depth=inherited_depth,
)
context = await unpersisted_context.persist(event)
assert context._state_group
# Normally this is done when persisting the event but we have to
# pre-emptively do it here because we create all the events first,
# then persist them in another pass below. And we want to share
# state_groups across the whole batch, so this lookup needs to work
# for the next event of the batch in this loop.
await self.store.store_state_group_id_for_event_id(
event_id=event.event_id,
state_group_id=context._state_group,
)
logger.debug(
"RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s",
event,
prev_event_ids,
)
events_to_persist.append((event, context))
event_id = event.event_id
event_ids.append(event_id)
prev_event_ids = [event_id]
# Persist events in reverse-chronological order so they have the
# correct stream_ordering as they are backfilled (which decrements).
# Events are sorted by (topological_ordering, stream_ordering)
# where topological_ordering is just depth.
for event, context in reversed(events_to_persist):
# This call can't raise `PartialStateConflictError` since we forbid
# use of the historical batch API during partial state
await self.event_creation_handler.handle_new_client_event(
await self.create_requester_for_user_id_from_app_service(
event.sender, app_service_requester.app_service
),
events_and_context=[(event, context)],
)
return event_ids
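An illustrative sketch of the two-pass ordering described in the docstring, with invented event IDs:

# Pass 1 creates events oldest -> newest so prev_events can chain:
created = ["$insertion", "$msg1", "$msg2", "$batch"]
# Pass 2 persists newest -> oldest so backfill's *decrementing*
# stream_ordering assigns the oldest event the most negative value:
persist_order = list(reversed(created))
assert persist_order == ["$batch", "$msg2", "$msg1", "$insertion"]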
async def handle_batch_of_events(
self,
events_to_create: List[JsonDict],
room_id: str,
batch_id_to_connect_to: str,
inherited_depth: int,
initial_state_event_ids: List[str],
app_service_requester: Requester,
) -> Tuple[List[str], str]:
"""
Handles creating and persisting all of the historical events as well as
insertion and batch meta events to make the batch navigable in the DAG.
Args:
events_to_create: List of historical events to create in JSON
dictionary format.
room_id: Room where you want the events created.
batch_id_to_connect_to: The batch_id from the insertion event you
want this batch to connect to.
inherited_depth: The depth to create the events at (you will
probably get this by calling inherit_depth_from_prev_ids(...)).
initial_state_event_ids:
This is used to set explicit state for the insertion event at
the start of the historical batch since it's floating with no
prev_events to derive state from automatically. This should
probably be the state from the `prev_event` defined by
`/batch_send?prev_event_id=$abc` plus the outcome of
`persist_state_events_at_start`
app_service_requester: The requester of an application service.
Returns:
Tuple containing a list of created events and the next_batch_id
"""
# Connect this current batch to the insertion event from the previous batch
last_event_in_batch = events_to_create[-1]
batch_event = {
"type": EventTypes.MSC2716_BATCH,
"sender": app_service_requester.user.to_string(),
"room_id": room_id,
"content": {
EventContentFields.MSC2716_BATCH_ID: batch_id_to_connect_to,
EventContentFields.MSC2716_HISTORICAL: True,
},
# Since the batch event is put at the end of the batch,
# where the newest-in-time event is, copy the origin_server_ts from
# the last event we're inserting
"origin_server_ts": last_event_in_batch["origin_server_ts"],
}
# Add the batch event to the end of the batch (newest-in-time)
events_to_create.append(batch_event)
# Add an "insertion" event to the start of each batch (next to the oldest-in-time
# event in the batch) so the next batch can be connected to this one.
insertion_event = self.create_insertion_event_dict(
sender=app_service_requester.user.to_string(),
room_id=room_id,
# Since the insertion event is put at the start of the batch,
# where the oldest-in-time event is, copy the origin_server_ts from
# the first event we're inserting
origin_server_ts=events_to_create[0]["origin_server_ts"],
)
next_batch_id = insertion_event["content"][
EventContentFields.MSC2716_NEXT_BATCH_ID
]
# Prepend the insertion event to the start of the batch (oldest-in-time)
events_to_create = [insertion_event] + events_to_create
# Create and persist all of the historical events
event_ids = await self.persist_historical_events(
events_to_create=events_to_create,
room_id=room_id,
inherited_depth=inherited_depth,
initial_state_event_ids=initial_state_event_ids,
app_service_requester=app_service_requester,
)
return event_ids, next_batch_id
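A sketch of the DAG layout this method produces for one batch, with invented IDs; arrows point from an event to its prev_event:

# [insertion B] <- $msg1 <- $msg2 <- [batch event -> batch_id A]
#
# The appended batch event carries batch_id_to_connect_to (A), linking
# this batch to the previous insertion event, while the prepended
# insertion event's fresh next_batch_id (B) is returned so a still
# older batch can connect to this one later.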
+89 -53
View File
@@ -39,7 +39,6 @@ from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
from synapse.logging import opentracing
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -95,7 +94,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.event_creation_handler = hs.get_event_creation_handler()
self.account_data_handler = hs.get_account_data_handler()
self.event_auth_handler = hs.get_event_auth_handler()
self._worker_lock_handler = hs.get_worker_locks_handler()
self.member_linearizer: Linearizer = Linearizer(name="member")
self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")
@@ -176,6 +174,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.request_ratelimiter = hs.get_request_ratelimiter()
hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room)
self._msc3970_enabled = hs.config.experimental.msc3970_enabled
def _on_user_joined_room(self, event_id: str, room_id: str) -> None:
"""Notify the rate limiter that a room join has occurred.
@@ -362,6 +362,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""
@@ -377,13 +378,16 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
cases like MSC2716.
prev_event_ids: The event IDs to use as the prev events
state_event_ids:
The full state at a given event. This was previously used particularly
by the MSC2716 /batch_send endpoint. This should normally be left as
None, which will cause the auth_event_ids to be calculated based on the
room state at the prev_events.
The full state at a given event. This is used particularly by the MSC2716
/batch_send endpoint. One use case is the historical `state_events_at_start`;
since each is marked as an `outlier`, the `EventContext.for_outlier()` won't
have any `state_ids` set and therefore can't derive any state even though the
prev_events are set, so we need to set the state ourselves via this argument.
This should normally be left as None, which will cause the auth_event_ids
to be calculated based on the room state at the prev_events.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -396,6 +400,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
the current timestamp if set to None.
@@ -416,11 +423,29 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# do this check just before we persist an event as well, but may as well
# do it up front for efficiency.)
if txn_id:
existing_event_id = (
await self.event_creation_handler.get_event_id_from_transaction(
requester, txn_id, room_id
existing_event_id = None
if self._msc3970_enabled and requester.device_id:
# When MSC3970 is enabled, we look up events sent by the same device
# first, and fall back to the old behaviour if none are found.
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_device_id(
room_id,
requester.user.to_string(),
requester.device_id,
txn_id,
)
)
)
if requester.access_token_id and not existing_event_id:
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_token_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
)
if existing_event_id:
event_pos = await self.store.get_position_for_event(existing_event_id)
return existing_event_id, event_pos.stream
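In summary, the deduplication fallback order sketched by the code above (a description of the flow, not new behaviour):

# 1. With MSC3970 and a device_id: look up by
#    (room_id, user_id, device_id, txn_id).
# 2. Otherwise, or on a miss, with an access_token_id: look up by
#    (room_id, user_id, access_token_id, txn_id).
# A hit in either case short-circuits event creation and returns the
# existing event's stream position instead.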
@@ -452,10 +477,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth=depth,
require_consent=require_consent,
outlier=outlier,
historical=historical,
)
context = await unpersisted_context.persist(event)
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types([(EventTypes.Member, user_id)])
StateFilter.from_types([(EventTypes.Member, None)])
)
prev_member_event_id = prev_state_ids.get(
@@ -559,6 +585,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
@@ -583,16 +610,22 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
cases like MSC2716.
prev_event_ids: The event IDs to use as the prev events
state_event_ids:
The full state at a given event. This was previously used particularly
by the MSC2716 /batch_send endpoint. This should normally be left as
None, which will cause the auth_event_ids to be calculated based on the
room state at the prev_events.
The full state at a given event. This is used particularly by the MSC2716
/batch_send endpoint. One use case is the historical `state_events_at_start`;
since each is marked as an `outlier`, the `EventContext.for_outlier()` won't
have any `state_ids` set and therefore can't derive any state even though the
prev_events are set, so we need to set the state ourselves via this argument.
This should normally be left as None, which will cause the auth_event_ids
to be calculated based on the room state at the prev_events.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -620,29 +653,27 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# by application services), and then by room ID.
async with self.member_as_limiter.queue(as_id):
async with self.member_linearizer.queue(key):
async with self._worker_lock_handler.acquire_read_write_lock(
DELETE_ROOM_LOCK_NAME, room_id, write=False
):
with opentracing.start_active_span("update_membership_locked"):
result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
new_room=new_room,
require_consent=require_consent,
outlier=outlier,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
depth=depth,
origin_server_ts=origin_server_ts,
)
with opentracing.start_active_span("update_membership_locked"):
result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
new_room=new_room,
require_consent=require_consent,
outlier=outlier,
historical=historical,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
depth=depth,
origin_server_ts=origin_server_ts,
)
return result
@@ -660,6 +691,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
@@ -686,16 +718,22 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
cases like MSC2716.
prev_event_ids: The event IDs to use as the prev events
state_event_ids:
The full state at a given event. This was previously used particularly
by the MSC2716 /batch_send endpoint. This should normally be left as
None, which will cause the auth_event_ids to be calculated based on the
room state at the prev_events.
The full state at a given event. This is used particularly by the MSC2716
/batch_send endpoint. One use case is the historical `state_events_at_start`;
since each is marked as an `outlier`, the `EventContext.for_outlier()` won't
have any `state_ids` set and therefore can't derive any state even though the
prev_events are set, so we need to set the state ourselves via this argument.
This should normally be left as None, which will cause the auth_event_ids
to be calculated based on the room state at the prev_events.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -839,6 +877,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content=content,
require_consent=require_consent,
outlier=outlier,
historical=historical,
origin_server_ts=origin_server_ts,
)
@@ -1325,7 +1364,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
requester = types.create_requester(target_user)
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types([(EventTypes.GuestAccess, "")])
StateFilter.from_types([(EventTypes.GuestAccess, None)])
)
if event.membership == Membership.JOIN:
if requester.is_guest:
@@ -1347,14 +1386,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
ratelimit=ratelimit,
)
if event.membership == Membership.LEAVE:
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types([(EventTypes.Member, event.state_key)])
)
prev_member_event_id = prev_state_ids.get(
(EventTypes.Member, event.state_key), None
)
prev_member_event_id = prev_state_ids.get(
(EventTypes.Member, event.state_key), None
)
if event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
+2 -2
View File
@@ -564,9 +564,9 @@ class RoomSummaryHandler:
join_rule = join_rules_event.content.get("join_rule")
if (
join_rule == JoinRules.PUBLIC
or (room_version.knock_join_rule and join_rule == JoinRules.KNOCK)
or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
or (
room_version.knock_restricted_join_rule
room_version.msc3787_knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
)
):
+2 -2
View File
@@ -27,9 +27,9 @@ from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.module_api import ModuleApi
from synapse.types import (
MXID_LOCALPART_ALLOWED_CHARACTERS,
UserID,
map_username_to_mxid_localpart,
mxid_localpart_allowed_characters,
)
from synapse.util.iterutils import chunk_seq
@@ -371,7 +371,7 @@ class SamlHandler:
DOT_REPLACE_PATTERN = re.compile(
"[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS)),)
"[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)
)
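An illustrative use of that pattern for mapping a SAML display name to a localpart; the input value is invented, and the dot-substitution mirrors how such a mapper typically behaves rather than quoting Synapse's exact logic:

username = "First Last"
localpart = DOT_REPLACE_PATTERN.sub(".", username.lower())
assert localpart == "first.last"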
+4 -2
View File
@@ -225,6 +225,8 @@ class SsoHandler:
self._consent_at_registration = hs.config.consent.user_consent_at_registration
self._e164_mxids = hs.config.experimental.msc4009_e164_mxids
def register_identity_provider(self, p: SsoIdentityProvider) -> None:
p_id = p.idp_id
assert p_id not in self._identity_providers
@@ -711,7 +713,7 @@ class SsoHandler:
# Since the localpart is provided via a potentially untrusted module,
# ensure the MXID is valid before registering.
if not attributes.localpart or contains_invalid_mxid_characters(
attributes.localpart
attributes.localpart, self._e164_mxids
):
raise MappingException("localpart is invalid: %s" % (attributes.localpart,))
@@ -944,7 +946,7 @@ class SsoHandler:
localpart,
)
if contains_invalid_mxid_characters(localpart):
if contains_invalid_mxid_characters(localpart, self._e164_mxids):
raise SynapseError(400, "localpart is invalid: %s" % (localpart,))
user_id = UserID(localpart, self._server_name).to_string()
user_infos = await self._store.get_users_by_id_case_insensitive(user_id)

Some files were not shown because too many files have changed in this diff.