From fbb2573525090f18b0f8aa10317a61e8cf819e8d Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Tue, 16 Apr 2024 15:53:30 +0100
Subject: [PATCH 001/503] 1.105.0
---
CHANGES.md | 7 +++++++
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
3 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 2edae64252..ed9cca73bc 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,10 @@
+# Synapse 1.105.0 (2024-04-16)
+
+No significant changes since 1.105.0rc1.
+
+
+
+
# Synapse 1.105.0rc1 (2024-04-11)
### Features
diff --git a/debian/changelog b/debian/changelog
index 1c6a04dd84..49c9b3b497 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.105.0) stable; urgency=medium
+
+ * New Synapse release 1.105.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Apr 2024 15:53:23 +0100
+
matrix-synapse-py3 (1.105.0~rc1) stable; urgency=medium
* New Synapse release 1.105.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index 226b591be4..f0f025645f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.105.0rc1"
+version = "1.105.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From f0d6f140479d24754993b7fcaeb33e07f26e1c88 Mon Sep 17 00:00:00 2001
From: Gordan Trevis
Date: Tue, 16 Apr 2024 21:12:36 +0200
Subject: [PATCH 002/503] Parse Integer negative value validation (#16920)
---
changelog.d/16920.bugfix | 1 +
synapse/http/servlet.py | 90 ++++++++++++++++------
synapse/rest/admin/federation.py | 38 ++-------
synapse/rest/admin/media.py | 54 ++-----------
synapse/rest/admin/statistics.py | 34 +-------
synapse/rest/admin/users.py | 18 +----
synapse/rest/client/room.py | 2 +-
synapse/rest/media/preview_url_resource.py | 5 +-
tests/rest/admin/test_media.py | 5 +-
9 files changed, 89 insertions(+), 158 deletions(-)
create mode 100644 changelog.d/16920.bugfix
diff --git a/changelog.d/16920.bugfix b/changelog.d/16920.bugfix
new file mode 100644
index 0000000000..460f4f7160
--- /dev/null
+++ b/changelog.d/16920.bugfix
@@ -0,0 +1 @@
+Adds validation to ensure that the `limit` parameter on `/publicRooms` is non-negative.
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index b73d06f1d3..0ca08038f4 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -19,7 +19,8 @@
#
#
-""" This module contains base REST classes for constructing REST servlets. """
+"""This module contains base REST classes for constructing REST servlets."""
+
import enum
import logging
from http import HTTPStatus
@@ -65,17 +66,49 @@ def parse_integer(request: Request, name: str, default: int) -> int: ...
@overload
-def parse_integer(request: Request, name: str, *, required: Literal[True]) -> int: ...
+def parse_integer(
+ request: Request, name: str, *, default: int, negative: bool
+) -> int: ...
@overload
def parse_integer(
- request: Request, name: str, default: Optional[int] = None, required: bool = False
+ request: Request, name: str, *, default: int, negative: bool = False
+) -> int: ...
+
+
+@overload
+def parse_integer(
+ request: Request, name: str, *, required: Literal[True], negative: bool = False
+) -> int: ...
+
+
+@overload
+def parse_integer(
+ request: Request, name: str, *, default: Literal[None], negative: bool = False
+) -> None: ...
+
+
+@overload
+def parse_integer(request: Request, name: str, *, negative: bool) -> Optional[int]: ...
+
+
+@overload
+def parse_integer(
+ request: Request,
+ name: str,
+ default: Optional[int] = None,
+ required: bool = False,
+ negative: bool = False,
) -> Optional[int]: ...
def parse_integer(
- request: Request, name: str, default: Optional[int] = None, required: bool = False
+ request: Request,
+ name: str,
+ default: Optional[int] = None,
+ required: bool = False,
+ negative: bool = False,
) -> Optional[int]:
"""Parse an integer parameter from the request string
@@ -85,16 +118,17 @@ def parse_integer(
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
-
+        negative: whether to allow negative integers, defaults to False.
Returns:
An int value or the default.
Raises:
- SynapseError: if the parameter is absent and required, or if the
- parameter is present and not an integer.
+        SynapseError: if the parameter is absent and required, if the
+            parameter is present and not an integer, or if the
+            parameter is illegitimately negative.
"""
args: Mapping[bytes, Sequence[bytes]] = request.args # type: ignore
- return parse_integer_from_args(args, name, default, required)
+ return parse_integer_from_args(args, name, default, required, negative)
@overload
@@ -120,6 +154,7 @@ def parse_integer_from_args(
name: str,
default: Optional[int] = None,
required: bool = False,
+ negative: bool = False,
) -> Optional[int]: ...
@@ -128,6 +163,7 @@ def parse_integer_from_args(
name: str,
default: Optional[int] = None,
required: bool = False,
+ negative: bool = True,
) -> Optional[int]:
"""Parse an integer parameter from the request string
@@ -137,33 +173,37 @@ def parse_integer_from_args(
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
+ negative: whether to allow negative integers, defaults to True.
Returns:
An int value or the default.
Raises:
- SynapseError: if the parameter is absent and required, or if the
- parameter is present and not an integer.
+        SynapseError: if the parameter is absent and required, if the
+            parameter is present and not an integer, or if the
+            parameter is illegitimately negative.
"""
name_bytes = name.encode("ascii")
- if name_bytes in args:
- try:
- return int(args[name_bytes][0])
- except Exception:
- message = "Query parameter %r must be an integer" % (name,)
- raise SynapseError(
- HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM
- )
- else:
- if required:
- message = "Missing integer query parameter %r" % (name,)
- raise SynapseError(
- HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM
- )
- else:
+ if name_bytes not in args:
+ if not required:
return default
+ message = f"Missing required integer query parameter {name}"
+ raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM)
+
+ try:
+ integer = int(args[name_bytes][0])
+ except Exception:
+ message = f"Query parameter {name} must be an integer"
+ raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM)
+
+ if not negative and integer < 0:
+ message = f"Query parameter {name} must be a positive integer."
+ raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM)
+
+ return integer
+
@overload
def parse_boolean(request: Request, name: str, default: bool) -> bool: ...
diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py
index 045153e0cb..14ab4644cb 100644
--- a/synapse/rest/admin/federation.py
+++ b/synapse/rest/admin/federation.py
@@ -23,7 +23,7 @@ from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
from synapse.api.constants import Direction
-from synapse.api.errors import Codes, NotFoundError, SynapseError
+from synapse.api.errors import NotFoundError, SynapseError
from synapse.federation.transport.server import Authenticator
from synapse.http.servlet import RestServlet, parse_enum, parse_integer, parse_string
from synapse.http.site import SynapseRequest
@@ -61,22 +61,8 @@ class ListDestinationsRestServlet(RestServlet):
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self._auth, request)
- start = parse_integer(request, "from", default=0)
- limit = parse_integer(request, "limit", default=100)
-
- if start < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter from must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
-
- if limit < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter limit must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
+ start = parse_integer(request, "from", default=0, negative=False)
+ limit = parse_integer(request, "limit", default=100, negative=False)
destination = parse_string(request, "destination")
@@ -195,22 +181,8 @@ class DestinationMembershipRestServlet(RestServlet):
if not await self._store.is_destination_known(destination):
raise NotFoundError("Unknown destination")
- start = parse_integer(request, "from", default=0)
- limit = parse_integer(request, "limit", default=100)
-
- if start < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter from must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
-
- if limit < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter limit must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
+ start = parse_integer(request, "from", default=0, negative=False)
+ limit = parse_integer(request, "limit", default=100, negative=False)
direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 27f0808658..a05b7252ec 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -311,29 +311,17 @@ class DeleteMediaByDateSize(RestServlet):
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
- before_ts = parse_integer(request, "before_ts", required=True)
- size_gt = parse_integer(request, "size_gt", default=0)
+ before_ts = parse_integer(request, "before_ts", required=True, negative=False)
+ size_gt = parse_integer(request, "size_gt", default=0, negative=False)
keep_profiles = parse_boolean(request, "keep_profiles", default=True)
- if before_ts < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter before_ts must be a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
- elif before_ts < 30000000000: # Dec 1970 in milliseconds, Aug 2920 in seconds
+ if before_ts < 30000000000: # Dec 1970 in milliseconds, Aug 2920 in seconds
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Query parameter before_ts you provided is from the year 1970. "
+ "Double check that you are providing a timestamp in milliseconds.",
errcode=Codes.INVALID_PARAM,
)
- if size_gt < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter size_gt must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
# This check is useless, we keep it for the legacy endpoint only.
if server_name is not None and self.server_name != server_name:
@@ -389,22 +377,8 @@ class UserMediaRestServlet(RestServlet):
if user is None:
raise NotFoundError("Unknown user")
- start = parse_integer(request, "from", default=0)
- limit = parse_integer(request, "limit", default=100)
-
- if start < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter from must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
-
- if limit < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter limit must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
+ start = parse_integer(request, "from", default=0, negative=False)
+ limit = parse_integer(request, "limit", default=100, negative=False)
# If neither `order_by` nor `dir` is set, set the default order
# to newest media is on top for backward compatibility.
@@ -447,22 +421,8 @@ class UserMediaRestServlet(RestServlet):
if user is None:
raise NotFoundError("Unknown user")
- start = parse_integer(request, "from", default=0)
- limit = parse_integer(request, "limit", default=100)
-
- if start < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter from must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
-
- if limit < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter limit must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
+ start = parse_integer(request, "from", default=0, negative=False)
+ limit = parse_integer(request, "limit", default=100, negative=False)
# If neither `order_by` nor `dir` is set, set the default order
# to newest media is on top for backward compatibility.
diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py
index 832f20402e..dc27a41dd9 100644
--- a/synapse/rest/admin/statistics.py
+++ b/synapse/rest/admin/statistics.py
@@ -63,38 +63,12 @@ class UserMediaStatisticsRestServlet(RestServlet):
),
)
- start = parse_integer(request, "from", default=0)
- if start < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter from must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
+ start = parse_integer(request, "from", default=0, negative=False)
+ limit = parse_integer(request, "limit", default=100, negative=False)
+ from_ts = parse_integer(request, "from_ts", default=0, negative=False)
+ until_ts = parse_integer(request, "until_ts", negative=False)
- limit = parse_integer(request, "limit", default=100)
- if limit < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter limit must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
-
- from_ts = parse_integer(request, "from_ts", default=0)
- if from_ts < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter from_ts must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
-
- until_ts = parse_integer(request, "until_ts")
if until_ts is not None:
- if until_ts < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter until_ts must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
if until_ts <= from_ts:
raise SynapseError(
HTTPStatus.BAD_REQUEST,
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 4e34e46512..5bf12c4979 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -90,22 +90,8 @@ class UsersRestServletV2(RestServlet):
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
- start = parse_integer(request, "from", default=0)
- limit = parse_integer(request, "limit", default=100)
-
- if start < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter from must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
-
- if limit < 0:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter limit must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
+ start = parse_integer(request, "from", default=0, negative=False)
+ limit = parse_integer(request, "limit", default=100, negative=False)
user_id = parse_string(request, "user_id")
name = parse_string(request, "name", encoding="utf-8")
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 65dedb8b92..4eeadf8779 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -499,7 +499,7 @@ class PublicRoomListRestServlet(RestServlet):
if server:
raise e
- limit: Optional[int] = parse_integer(request, "limit", 0)
+ limit: Optional[int] = parse_integer(request, "limit", 0, negative=False)
since_token = parse_string(request, "since")
if limit == 0:
diff --git a/synapse/rest/media/preview_url_resource.py b/synapse/rest/media/preview_url_resource.py
index 6724986fcc..bfeff2179b 100644
--- a/synapse/rest/media/preview_url_resource.py
+++ b/synapse/rest/media/preview_url_resource.py
@@ -72,9 +72,6 @@ class PreviewUrlResource(RestServlet):
# XXX: if get_user_by_req fails, what should we do in an async render?
requester = await self.auth.get_user_by_req(request)
url = parse_string(request, "url", required=True)
- ts = parse_integer(request, "ts")
- if ts is None:
- ts = self.clock.time_msec()
-
+ ts = parse_integer(request, "ts", default=self.clock.time_msec())
og = await self.url_previewer.preview(url, requester.user, ts)
respond_with_json_bytes(request, 200, og, send_cors=True)
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index 493e1d1919..f378165513 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -277,7 +277,8 @@ class DeleteMediaByDateSizeTestCase(_AdminMediaTests):
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
self.assertEqual(
- "Missing integer query parameter 'before_ts'", channel.json_body["error"]
+ "Missing required integer query parameter before_ts",
+ channel.json_body["error"],
)
def test_invalid_parameter(self) -> None:
@@ -320,7 +321,7 @@ class DeleteMediaByDateSizeTestCase(_AdminMediaTests):
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
- "Query parameter size_gt must be a string representing a positive integer.",
+ "Query parameter size_gt must be a positive integer.",
channel.json_body["error"],
)
From 28f5ad07d37a9f82c896fa1722d8c47980adc89e Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Wed, 17 Apr 2024 15:44:40 +0200
Subject: [PATCH 003/503] Bump minimum required Rust version to 1.66.0 (#17079)
---
.github/workflows/tests.yml | 18 +++++++++---------
changelog.d/17079.misc | 1 +
rust/Cargo.toml | 2 +-
3 files changed, 11 insertions(+), 10 deletions(-)
create mode 100644 changelog.d/17079.misc
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 295461aad6..20afe311fe 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -81,7 +81,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Install Rust
- uses: dtolnay/rust-toolchain@1.65.0
+ uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
@@ -148,7 +148,7 @@ jobs:
uses: actions/checkout@v4
- name: Install Rust
- uses: dtolnay/rust-toolchain@1.65.0
+ uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
- name: Setup Poetry
@@ -208,7 +208,7 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Rust
- uses: dtolnay/rust-toolchain@1.65.0
+ uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
@@ -225,7 +225,7 @@ jobs:
- uses: actions/checkout@v4
- name: Install Rust
- uses: dtolnay/rust-toolchain@1.65.0
+ uses: dtolnay/rust-toolchain@1.66.0
with:
components: clippy
- uses: Swatinem/rust-cache@v2
@@ -344,7 +344,7 @@ jobs:
postgres:${{ matrix.job.postgres-version }}
- name: Install Rust
- uses: dtolnay/rust-toolchain@1.65.0
+ uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -386,7 +386,7 @@ jobs:
- uses: actions/checkout@v4
- name: Install Rust
- uses: dtolnay/rust-toolchain@1.65.0
+ uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
@@ -498,7 +498,7 @@ jobs:
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
- uses: dtolnay/rust-toolchain@1.65.0
+ uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
@@ -642,7 +642,7 @@ jobs:
path: synapse
- name: Install Rust
- uses: dtolnay/rust-toolchain@1.65.0
+ uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
- name: Prepare Complement's Prerequisites
@@ -674,7 +674,7 @@ jobs:
- uses: actions/checkout@v4
- name: Install Rust
- uses: dtolnay/rust-toolchain@1.65.0
+ uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
- run: cargo test
diff --git a/changelog.d/17079.misc b/changelog.d/17079.misc
new file mode 100644
index 0000000000..340e40d194
--- /dev/null
+++ b/changelog.d/17079.misc
@@ -0,0 +1 @@
+Bump minimum supported Rust version to 1.66.0.
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index d89def1843..ba293f8d4f 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -7,7 +7,7 @@ name = "synapse"
version = "0.1.0"
edition = "2021"
-rust-version = "1.65.0"
+rust-version = "1.66.0"
[lib]
name = "synapse"
From c8e0bed4269106c49ffd733eaad5cdb3576f55d2 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Wed, 17 Apr 2024 16:47:35 +0200
Subject: [PATCH 004/503] Support for MSC4108 via delegation (#17086)
This adds support for MSC4108 via delegation, similar to what has been done for MSC3886
---------
Co-authored-by: Hugh Nimmo-Smith
---
changelog.d/17086.feature | 1 +
synapse/config/experimental.py | 11 +++++++++
synapse/http/server.py | 13 ++++++++++-
synapse/rest/client/rendezvous.py | 30 +++++++++++++++++++++---
synapse/rest/client/versions.py | 3 +++
tests/rest/client/test_rendezvous.py | 34 ++++++++++++++++++++++++----
6 files changed, 84 insertions(+), 8 deletions(-)
create mode 100644 changelog.d/17086.feature
diff --git a/changelog.d/17086.feature b/changelog.d/17086.feature
new file mode 100644
index 0000000000..08b407d316
--- /dev/null
+++ b/changelog.d/17086.feature
@@ -0,0 +1 @@
+Support delegating the rendezvous mechanism described in MSC4108 to an external implementation.
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index fcc78d2d81..353ae23f91 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -411,3 +411,14 @@ class ExperimentalConfig(Config):
self.msc4069_profile_inhibit_propagation = experimental.get(
"msc4069_profile_inhibit_propagation", False
)
+
+ # MSC4108: Mechanism to allow OIDC sign in and E2EE set up via QR code
+ self.msc4108_delegation_endpoint: Optional[str] = experimental.get(
+ "msc4108_delegation_endpoint", None
+ )
+
+ if self.msc4108_delegation_endpoint is not None and not self.msc3861.enabled:
+ raise ConfigError(
+ "MSC4108 requires MSC3861 to be enabled",
+ ("experimental", "msc4108_delegation_endpoint"),
+ )
diff --git a/synapse/http/server.py b/synapse/http/server.py
index c76500e14f..45b2cbffcd 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -909,7 +909,18 @@ def set_cors_headers(request: "SynapseRequest") -> None:
request.setHeader(
b"Access-Control-Allow-Methods", b"GET, HEAD, POST, PUT, DELETE, OPTIONS"
)
- if request.experimental_cors_msc3886:
+ if request.path is not None and request.path.startswith(
+ b"/_matrix/client/unstable/org.matrix.msc4108/rendezvous"
+ ):
+ request.setHeader(
+ b"Access-Control-Allow-Headers",
+ b"Content-Type, If-Match, If-None-Match",
+ )
+ request.setHeader(
+ b"Access-Control-Expose-Headers",
+ b"Synapse-Trace-Id, Server, ETag",
+ )
+ elif request.experimental_cors_msc3886:
request.setHeader(
b"Access-Control-Allow-Headers",
b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match",
diff --git a/synapse/rest/client/rendezvous.py b/synapse/rest/client/rendezvous.py
index dee7c37ec5..ed06a29987 100644
--- a/synapse/rest/client/rendezvous.py
+++ b/synapse/rest/client/rendezvous.py
@@ -2,7 +2,7 @@
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2022 The Matrix.org Foundation C.I.C.
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -34,7 +34,7 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class RendezvousServlet(RestServlet):
+class MSC3886RendezvousServlet(RestServlet):
"""
This is a placeholder implementation of [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
simple client rendezvous capability that is used by the "Sign in with QR" functionality.
@@ -76,6 +76,30 @@ class RendezvousServlet(RestServlet):
# PUT, GET and DELETE are not implemented as they should be fulfilled by the redirect target.
+class MSC4108DelegationRendezvousServlet(RestServlet):
+ PATTERNS = client_patterns(
+ "/org.matrix.msc4108/rendezvous$", releases=[], v1=False, unstable=True
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ redirection_target: Optional[str] = (
+ hs.config.experimental.msc4108_delegation_endpoint
+ )
+ assert (
+ redirection_target is not None
+ ), "Servlet is only registered if there is a delegation target"
+ self.endpoint = redirection_target.encode("utf-8")
+
+ async def on_POST(self, request: SynapseRequest) -> None:
+ respond_with_redirect(
+ request, self.endpoint, statusCode=TEMPORARY_REDIRECT, cors=True
+ )
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
if hs.config.experimental.msc3886_endpoint is not None:
- RendezvousServlet(hs).register(http_server)
+ MSC3886RendezvousServlet(hs).register(http_server)
+
+ if hs.config.experimental.msc4108_delegation_endpoint is not None:
+ MSC4108DelegationRendezvousServlet(hs).register(http_server)
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index c46d4fe8cf..638d4c45ae 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -140,6 +140,9 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc4069": self.config.experimental.msc4069_profile_inhibit_propagation,
# Allows clients to handle push for encrypted events.
"org.matrix.msc4028": self.config.experimental.msc4028_push_encrypted_events,
+ # MSC4108: Mechanism to allow OIDC sign in and E2EE set up via QR code
+ "org.matrix.msc4108": self.config.experimental.msc4108_delegation_endpoint
+ is not None,
},
},
)
diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py
index 294b39f179..c84704c090 100644
--- a/tests/rest/client/test_rendezvous.py
+++ b/tests/rest/client/test_rendezvous.py
@@ -27,8 +27,10 @@ from synapse.util import Clock
from tests import unittest
from tests.unittest import override_config
+from tests.utils import HAS_AUTHLIB
-endpoint = "/_matrix/client/unstable/org.matrix.msc3886/rendezvous"
+msc3886_endpoint = "/_matrix/client/unstable/org.matrix.msc3886/rendezvous"
+msc4108_endpoint = "/_matrix/client/unstable/org.matrix.msc4108/rendezvous"
class RendezvousServletTestCase(unittest.HomeserverTestCase):
@@ -41,11 +43,35 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase):
return self.hs
def test_disabled(self) -> None:
- channel = self.make_request("POST", endpoint, {}, access_token=None)
+ channel = self.make_request("POST", msc3886_endpoint, {}, access_token=None)
+ self.assertEqual(channel.code, 404)
+ channel = self.make_request("POST", msc4108_endpoint, {}, access_token=None)
self.assertEqual(channel.code, 404)
@override_config({"experimental_features": {"msc3886_endpoint": "/asd"}})
- def test_redirect(self) -> None:
- channel = self.make_request("POST", endpoint, {}, access_token=None)
+ def test_msc3886_redirect(self) -> None:
+ channel = self.make_request("POST", msc3886_endpoint, {}, access_token=None)
self.assertEqual(channel.code, 307)
self.assertEqual(channel.headers.getRawHeaders("Location"), ["/asd"])
+
+ @unittest.skip_unless(HAS_AUTHLIB, "requires authlib")
+ @override_config(
+ {
+ "disable_registration": True,
+ "experimental_features": {
+ "msc4108_delegation_endpoint": "https://asd",
+ "msc3861": {
+ "enabled": True,
+ "issuer": "https://issuer",
+ "client_id": "client_id",
+ "client_auth_method": "client_secret_post",
+ "client_secret": "client_secret",
+ "admin_token": "admin_token_value",
+ },
+ },
+ }
+ )
+ def test_msc4108_delegation(self) -> None:
+ channel = self.make_request("POST", msc4108_endpoint, {}, access_token=None)
+ self.assertEqual(channel.code, 307)
+ self.assertEqual(channel.headers.getRawHeaders("Location"), ["https://asd"])
From 803f05f60caab050e68bfc022a6da3dac5a9a75f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 17 Apr 2024 16:08:40 +0100
Subject: [PATCH 005/503] Fix remote receipts for events we don't have (#17096)
Introduced in #17032
---
changelog.d/17096.misc | 1 +
synapse/storage/databases/main/receipts.py | 6 +++++-
2 files changed, 6 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17096.misc
diff --git a/changelog.d/17096.misc b/changelog.d/17096.misc
new file mode 100644
index 0000000000..b03f6f42e5
--- /dev/null
+++ b/changelog.d/17096.misc
@@ -0,0 +1 @@
+Use new receipts column to optimise receipt and push action SQL queries. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 9660fc4699..13387a3839 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -734,9 +734,13 @@ class ReceiptsWorkerStore(SQLBaseStore):
thread_clause = "r.thread_id = ?"
thread_args = (thread_id,)
+ # If the receipt doesn't have a stream ordering it is because we
+ # don't have the associated event, and so must be a remote receipt.
+ # Hence it's safe to just allow new receipts to clobber it.
sql = f"""
SELECT r.event_stream_ordering, r.event_id FROM receipts_linearized AS r
- WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ? AND {thread_clause}
+ WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ?
+ AND r.event_stream_ordering IS NOT NULL AND {thread_clause}
"""
txn.execute(
sql,
From 09f0957b36cf1b4e9a89f5594df51a853d0dfffe Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Thu, 18 Apr 2024 12:20:30 +0200
Subject: [PATCH 006/503] Helpers to transform Twisted requests to Rust http
Requests/Responses (#17081)
This adds functions to transform a Twisted request to the
`http::Request`, and then to send back an `http::Response` through it.
It also imports the SynapseError exception so that we can throw that
from Rust code directly
Example usage of this would be:
```rust
use crate::http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt};
fn handler(twisted_request: &PyAny) -> PyResult<()> {
let request = http_request_from_twisted(twisted_request)?;
let ua: headers::UserAgent = request.headers().typed_get_required()?;
if whatever {
return Err((crate::errors::SynapseError::new(
StatusCode::UNAUTHORIZED,
"Whatever".to_owned
"M_UNAUTHORIZED",
None,
None,
)));
}
let response = Response::new("hello".as_bytes());
http_response_to_twisted(twisted_request, response)?;
Ok(())
}
```
---
Cargo.lock | 92 ++++++++++++++++++++++-
changelog.d/17081.misc | 1 +
rust/Cargo.toml | 3 +
rust/src/errors.rs | 60 +++++++++++++++
rust/src/http.rs | 165 +++++++++++++++++++++++++++++++++++++++++
rust/src/lib.rs | 2 +
6 files changed, 321 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/17081.misc
create mode 100644 rust/src/errors.rs
create mode 100644 rust/src/http.rs
diff --git a/Cargo.lock b/Cargo.lock
index 630d38c2f4..65f4807c65 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -29,6 +29,12 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+[[package]]
+name = "base64"
+version = "0.21.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
+
[[package]]
name = "bitflags"
version = "1.3.2"
@@ -53,12 +59,27 @@ dependencies = [
"generic-array",
]
+[[package]]
+name = "bytes"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+[[package]]
+name = "cpufeatures"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "crypto-common"
version = "0.1.6"
@@ -80,6 +101,12 @@ dependencies = [
"subtle",
]
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
[[package]]
name = "generic-array"
version = "0.14.6"
@@ -90,6 +117,30 @@ dependencies = [
"version_check",
]
+[[package]]
+name = "headers"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9"
+dependencies = [
+ "base64",
+ "bytes",
+ "headers-core",
+ "http",
+ "httpdate",
+ "mime",
+ "sha1",
+]
+
+[[package]]
+name = "headers-core"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4"
+dependencies = [
+ "http",
+]
+
[[package]]
name = "heck"
version = "0.4.1"
@@ -102,6 +153,23 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+[[package]]
+name = "http"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "httpdate"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
+
[[package]]
name = "indoc"
version = "2.0.4"
@@ -122,9 +190,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
-version = "0.2.135"
+version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "lock_api"
@@ -157,6 +225,12 @@ dependencies = [
"autocfg",
]
+[[package]]
+name = "mime"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
+
[[package]]
name = "once_cell"
version = "1.15.0"
@@ -376,6 +450,17 @@ dependencies = [
"serde",
]
+[[package]]
+name = "sha1"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest",
+]
+
[[package]]
name = "smallvec"
version = "1.10.0"
@@ -405,7 +490,10 @@ version = "0.1.0"
dependencies = [
"anyhow",
"blake2",
+ "bytes",
+ "headers",
"hex",
+ "http",
"lazy_static",
"log",
"pyo3",
diff --git a/changelog.d/17081.misc b/changelog.d/17081.misc
new file mode 100644
index 0000000000..d1ab69126c
--- /dev/null
+++ b/changelog.d/17081.misc
@@ -0,0 +1 @@
+Add helpers to transform Twisted requests to Rust http Requests/Responses.
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index ba293f8d4f..9ac766182b 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -23,6 +23,9 @@ name = "synapse.synapse_rust"
[dependencies]
anyhow = "1.0.63"
+bytes = "1.6.0"
+headers = "0.4.0"
+http = "1.1.0"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.20.0", features = [
diff --git a/rust/src/errors.rs b/rust/src/errors.rs
new file mode 100644
index 0000000000..4e580e3e8c
--- /dev/null
+++ b/rust/src/errors.rs
@@ -0,0 +1,60 @@
+/*
+ * This file is licensed under the Affero General Public License (AGPL) version 3.
+ *
+ * Copyright (C) 2024 New Vector, Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * See the GNU Affero General Public License for more details:
+ * <https://www.gnu.org/licenses/agpl-3.0.html>.
+ */
+
+#![allow(clippy::new_ret_no_self)]
+
+use std::collections::HashMap;
+
+use http::{HeaderMap, StatusCode};
+use pyo3::{exceptions::PyValueError, import_exception};
+
+import_exception!(synapse.api.errors, SynapseError);
+
+impl SynapseError {
+ pub fn new(
+ code: StatusCode,
+ message: String,
+ errcode: &'static str,
+        additional_fields: Option<HashMap<String, String>>,
+        headers: Option<HeaderMap>,
+ ) -> pyo3::PyErr {
+ // Transform the HeaderMap into a HashMap
+ let headers = if let Some(headers) = headers {
+ let mut map = HashMap::with_capacity(headers.len());
+ for (key, value) in headers.iter() {
+ let Ok(value) = value.to_str() else {
+ // This should never happen, but we don't want to panic in case it does
+ return PyValueError::new_err(
+ "Could not construct SynapseError: header value is not valid ASCII",
+ );
+ };
+
+ map.insert(key.as_str().to_owned(), value.to_owned());
+ }
+ Some(map)
+ } else {
+ None
+ };
+
+ SynapseError::new_err((code.as_u16(), message, errcode, additional_fields, headers))
+ }
+}
+
+import_exception!(synapse.api.errors, NotFoundError);
+
+impl NotFoundError {
+ pub fn new() -> pyo3::PyErr {
+ NotFoundError::new_err(())
+ }
+}
diff --git a/rust/src/http.rs b/rust/src/http.rs
new file mode 100644
index 0000000000..74098f4c8b
--- /dev/null
+++ b/rust/src/http.rs
@@ -0,0 +1,165 @@
+/*
+ * This file is licensed under the Affero General Public License (AGPL) version 3.
+ *
+ * Copyright (C) 2024 New Vector, Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * See the GNU Affero General Public License for more details:
+ * <https://www.gnu.org/licenses/agpl-3.0.html>.
+ */
+
+use bytes::{Buf, BufMut, Bytes, BytesMut};
+use headers::{Header, HeaderMapExt};
+use http::{HeaderName, HeaderValue, Method, Request, Response, StatusCode, Uri};
+use pyo3::{
+ exceptions::PyValueError,
+ types::{PyBytes, PySequence, PyTuple},
+ PyAny, PyResult,
+};
+
+use crate::errors::SynapseError;
+
+/// Read a file-like Python object by chunks
+///
+/// # Errors
+///
+/// Returns an error if calling the `read` on the Python object failed
+fn read_io_body(body: &PyAny, chunk_size: usize) -> PyResult<Bytes> {
+ let mut buf = BytesMut::new();
+ loop {
+ let bytes: &PyBytes = body.call_method1("read", (chunk_size,))?.downcast()?;
+ if bytes.as_bytes().is_empty() {
+ return Ok(buf.into());
+ }
+ buf.put(bytes.as_bytes());
+ }
+}
+
+/// Transform a Twisted `IRequest` to an [`http::Request`]
+///
+/// It uses the following members of `IRequest`:
+/// - `content`, which is expected to be a file-like object with a `read` method
+/// - `uri`, which is expected to be a valid URI as `bytes`
+/// - `method`, which is expected to be a valid HTTP method as `bytes`
+/// - `requestHeaders`, which is expected to have a `getAllRawHeaders` method
+///
+/// # Errors
+///
+/// Returns an error if the Python object doesn't properly implement `IRequest`
+pub fn http_request_from_twisted(request: &PyAny) -> PyResult<Request<Bytes>> {
+ let content = request.getattr("content")?;
+ let body = read_io_body(content, 4096)?;
+
+ let mut req = Request::new(body);
+
+ let uri: &PyBytes = request.getattr("uri")?.downcast()?;
+ *req.uri_mut() =
+ Uri::try_from(uri.as_bytes()).map_err(|_| PyValueError::new_err("invalid uri"))?;
+
+ let method: &PyBytes = request.getattr("method")?.downcast()?;
+ *req.method_mut() = Method::from_bytes(method.as_bytes())
+ .map_err(|_| PyValueError::new_err("invalid method"))?;
+
+ let headers_iter = request
+ .getattr("requestHeaders")?
+ .call_method0("getAllRawHeaders")?
+ .iter()?;
+
+ for header in headers_iter {
+ let header = header?;
+ let header: &PyTuple = header.downcast()?;
+ let name: &PyBytes = header.get_item(0)?.downcast()?;
+ let name = HeaderName::from_bytes(name.as_bytes())
+ .map_err(|_| PyValueError::new_err("invalid header name"))?;
+
+ let values: &PySequence = header.get_item(1)?.downcast()?;
+ for index in 0..values.len()? {
+ let value: &PyBytes = values.get_item(index)?.downcast()?;
+ let value = HeaderValue::from_bytes(value.as_bytes())
+ .map_err(|_| PyValueError::new_err("invalid header value"))?;
+ req.headers_mut().append(name.clone(), value);
+ }
+ }
+
+ Ok(req)
+}
+
+/// Send an [`http::Response`] through a Twisted `IRequest`
+///
+/// It uses the following members of `IRequest`:
+///
+/// - `responseHeaders`, which is expected to have a `addRawHeader(bytes, bytes)` method
+/// - `setResponseCode(int)` method
+/// - `write(bytes)` method
+/// - `finish()` method
+///
+/// # Errors
+///
+/// Returns an error if the Python object doesn't properly implement `IRequest`
+pub fn http_response_to_twisted<B>(request: &PyAny, response: Response<B>) -> PyResult<()>
+where
+ B: Buf,
+{
+ let (parts, mut body) = response.into_parts();
+
+ request.call_method1("setResponseCode", (parts.status.as_u16(),))?;
+
+ let response_headers = request.getattr("responseHeaders")?;
+ for (name, value) in parts.headers.iter() {
+ response_headers.call_method1("addRawHeader", (name.as_str(), value.as_bytes()))?;
+ }
+
+ while body.remaining() != 0 {
+ let chunk = body.chunk();
+ request.call_method1("write", (chunk,))?;
+ body.advance(chunk.len());
+ }
+
+ request.call_method0("finish")?;
+
+ Ok(())
+}
+
+/// An extension trait for [`HeaderMap`] that provides typed access to headers, and throws the
+/// right python exceptions when the header is missing or fails to parse.
+///
+/// [`HeaderMap`]: headers::HeaderMap
+pub trait HeaderMapPyExt: HeaderMapExt {
+ /// Get a header from the map, returning an error if it is missing or invalid.
+    fn typed_get_required<H>(&self) -> PyResult<H>
+ where
+ H: Header,
+ {
+        self.typed_get_optional::<H>()?.ok_or_else(|| {
+ SynapseError::new(
+ StatusCode::BAD_REQUEST,
+ format!("Missing required header: {}", H::name()),
+ "M_MISSING_PARAM",
+ None,
+ None,
+ )
+ })
+ }
+
+ /// Get a header from the map, returning `None` if it is missing and an error if it is invalid.
+    fn typed_get_optional<H>(&self) -> PyResult<Option<H>>
+ where
+ H: Header,
+ {
+        self.typed_try_get::<H>().map_err(|_| {
+ SynapseError::new(
+ StatusCode::BAD_REQUEST,
+ format!("Invalid header: {}", H::name()),
+ "M_INVALID_PARAM",
+ None,
+ None,
+ )
+ })
+ }
+}
+
+impl<T: HeaderMapExt> HeaderMapPyExt for T {}
diff --git a/rust/src/lib.rs b/rust/src/lib.rs
index 7b3b579e55..36a3d64528 100644
--- a/rust/src/lib.rs
+++ b/rust/src/lib.rs
@@ -3,7 +3,9 @@ use pyo3::prelude::*;
use pyo3_log::ResetHandle;
pub mod acl;
+pub mod errors;
pub mod events;
+pub mod http;
pub mod push;
lazy_static! {
From 1d4753231021cfb3cb8a2af7e4fdef543559851a Mon Sep 17 00:00:00 2001
From: Gordan Trevis
Date: Thu, 18 Apr 2024 14:57:38 +0200
Subject: [PATCH 007/503] Parse json validation (#16923)
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/16923.bugfix | 1 +
synapse/http/servlet.py | 82 +++++++++++++++++++++++++++++++++
synapse/rest/admin/rooms.py | 36 +++++----------
synapse/rest/client/room.py | 35 +++++---------
tests/rest/admin/test_room.py | 61 ++++++++++++++++++++++++
tests/rest/client/test_rooms.py | 52 +++++++++++++++++++++
6 files changed, 220 insertions(+), 47 deletions(-)
create mode 100644 changelog.d/16923.bugfix
diff --git a/changelog.d/16923.bugfix b/changelog.d/16923.bugfix
new file mode 100644
index 0000000000..bd6f24925e
--- /dev/null
+++ b/changelog.d/16923.bugfix
@@ -0,0 +1 @@
+Return `400 M_NOT_JSON` upon receiving invalid JSON in query parameters across various client and admin endpoints, rather than an internal server error.
\ No newline at end of file
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 0ca08038f4..ab12951da8 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -23,6 +23,7 @@
import enum
import logging
+import urllib.parse as urlparse
from http import HTTPStatus
from typing import (
TYPE_CHECKING,
@@ -450,6 +451,87 @@ def parse_string(
)
+def parse_json(
+ request: Request,
+ name: str,
+ default: Optional[dict] = None,
+ required: bool = False,
+ encoding: str = "ascii",
+) -> Optional[JsonDict]:
+ """
+ Parse a JSON parameter from the request query string.
+
+ Args:
+ request: the twisted HTTP request.
+ name: the name of the query parameter.
+ default: value to use if the parameter is absent,
+ defaults to None.
+ required: whether to raise a 400 SynapseError if the
+ parameter is absent, defaults to False.
+ encoding: The encoding to decode the string content with.
+
+ Returns:
+ A JSON value, or `default` if the named query parameter was not found
+ and `required` was False.
+
+ Raises:
+ SynapseError if the parameter is absent and required, or if the
+ parameter is present and not a JSON object.
+ """
+ args: Mapping[bytes, Sequence[bytes]] = request.args # type: ignore
+ return parse_json_from_args(
+ args,
+ name,
+ default,
+ required=required,
+ encoding=encoding,
+ )
+
+
+def parse_json_from_args(
+ args: Mapping[bytes, Sequence[bytes]],
+ name: str,
+ default: Optional[dict] = None,
+ required: bool = False,
+ encoding: str = "ascii",
+) -> Optional[JsonDict]:
+ """
+ Parse a JSON parameter from the request query string.
+
+ Args:
+ args: a mapping of request args as bytes to a list of bytes (e.g. request.args).
+ name: the name of the query parameter.
+ default: value to use if the parameter is absent,
+ defaults to None.
+ required: whether to raise a 400 SynapseError if the
+ parameter is absent, defaults to False.
+ encoding: the encoding to decode the string content with.
+
+    Returns: A JSON value, or `default` if the named query parameter was
+        not found and `required` was False.
+
+ Raises:
+ SynapseError if the parameter is absent and required, or if the
+ parameter is present and not a JSON object.
+ """
+ name_bytes = name.encode("ascii")
+
+ if name_bytes not in args:
+ if not required:
+ return default
+
+        message = f"Missing required query parameter {name}"
+ raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM)
+
+ json_str = parse_string_from_args(args, name, required=True, encoding=encoding)
+
+ try:
+ return json_decoder.decode(urlparse.unquote(json_str))
+ except Exception:
+ message = f"Query parameter {name} must be a valid JSON object"
+ raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.NOT_JSON)
+
+
EnumT = TypeVar("EnumT", bound=enum.Enum)
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index 4252f98a6c..0d86a4e15f 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -21,7 +21,6 @@
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, List, Optional, Tuple, cast
-from urllib import parse as urlparse
import attr
@@ -38,6 +37,7 @@ from synapse.http.servlet import (
assert_params_in_dict,
parse_enum,
parse_integer,
+ parse_json,
parse_json_object_from_request,
parse_string,
)
@@ -51,7 +51,6 @@ from synapse.storage.databases.main.room import RoomSortOrder
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, RoomID, ScheduledTask, UserID, create_requester
from synapse.types.state import StateFilter
-from synapse.util import json_decoder
if TYPE_CHECKING:
from synapse.api.auth import Auth
@@ -776,14 +775,8 @@ class RoomEventContextServlet(RestServlet):
limit = parse_integer(request, "limit", default=10)
# picking the API shape for symmetry with /messages
- filter_str = parse_string(request, "filter", encoding="utf-8")
- if filter_str:
- filter_json = urlparse.unquote(filter_str)
- event_filter: Optional[Filter] = Filter(
- self._hs, json_decoder.decode(filter_json)
- )
- else:
- event_filter = None
+ filter_json = parse_json(request, "filter", encoding="utf-8")
+ event_filter = Filter(self._hs, filter_json) if filter_json else None
event_context = await self.room_context_handler.get_event_context(
requester,
@@ -914,21 +907,16 @@ class RoomMessagesRestServlet(RestServlet):
)
# Twisted will have processed the args by now.
assert request.args is not None
+
+ filter_json = parse_json(request, "filter", encoding="utf-8")
+ event_filter = Filter(self._hs, filter_json) if filter_json else None
+
as_client_event = b"raw" not in request.args
- filter_str = parse_string(request, "filter", encoding="utf-8")
- if filter_str:
- filter_json = urlparse.unquote(filter_str)
- event_filter: Optional[Filter] = Filter(
- self._hs, json_decoder.decode(filter_json)
- )
- if (
- event_filter
- and event_filter.filter_json.get("event_format", "client")
- == "federation"
- ):
- as_client_event = False
- else:
- event_filter = None
+ if (
+ event_filter
+ and event_filter.filter_json.get("event_format", "client") == "federation"
+ ):
+ as_client_event = False
msgs = await self._pagination_handler.get_messages(
room_id=room_id,
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 4eeadf8779..e4c7dd1a58 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -52,6 +52,7 @@ from synapse.http.servlet import (
parse_boolean,
parse_enum,
parse_integer,
+ parse_json,
parse_json_object_from_request,
parse_string,
parse_strings_from_args,
@@ -65,7 +66,6 @@ from synapse.rest.client.transactions import HttpTransactionCache
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID
from synapse.types.state import StateFilter
-from synapse.util import json_decoder
from synapse.util.cancellation import cancellable
from synapse.util.stringutils import parse_and_validate_server_name, random_string
@@ -703,21 +703,16 @@ class RoomMessageListRestServlet(RestServlet):
)
# Twisted will have processed the args by now.
assert request.args is not None
+
+ filter_json = parse_json(request, "filter", encoding="utf-8")
+ event_filter = Filter(self._hs, filter_json) if filter_json else None
+
as_client_event = b"raw" not in request.args
- filter_str = parse_string(request, "filter", encoding="utf-8")
- if filter_str:
- filter_json = urlparse.unquote(filter_str)
- event_filter: Optional[Filter] = Filter(
- self._hs, json_decoder.decode(filter_json)
- )
- if (
- event_filter
- and event_filter.filter_json.get("event_format", "client")
- == "federation"
- ):
- as_client_event = False
- else:
- event_filter = None
+ if (
+ event_filter
+ and event_filter.filter_json.get("event_format", "client") == "federation"
+ ):
+ as_client_event = False
msgs = await self.pagination_handler.get_messages(
room_id=room_id,
@@ -898,14 +893,8 @@ class RoomEventContextServlet(RestServlet):
limit = parse_integer(request, "limit", default=10)
# picking the API shape for symmetry with /messages
- filter_str = parse_string(request, "filter", encoding="utf-8")
- if filter_str:
- filter_json = urlparse.unquote(filter_str)
- event_filter: Optional[Filter] = Filter(
- self._hs, json_decoder.decode(filter_json)
- )
- else:
- event_filter = None
+ filter_json = parse_json(request, "filter", encoding="utf-8")
+ event_filter = Filter(self._hs, filter_json) if filter_json else None
event_context = await self.room_context_handler.get_event_context(
requester, room_id, event_id, limit, event_filter
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 0b669b6ee7..7562747260 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -21,6 +21,7 @@
import json
import time
import urllib.parse
+from http import HTTPStatus
from typing import List, Optional
from unittest.mock import AsyncMock, Mock
@@ -2190,6 +2191,33 @@ class RoomMessagesTestCase(unittest.HomeserverTestCase):
chunk = channel.json_body["chunk"]
self.assertEqual(len(chunk), 0, [event["content"] for event in chunk])
+ def test_room_message_filter_query_validation(self) -> None:
+ # Test json validation in (filter) query parameter.
+ # Does not test the validity of the filter, only the json validation.
+
+ # Check Get with valid json filter parameter, expect 200.
+ valid_filter_str = '{"types": ["m.room.message"]}'
+ channel = self.make_request(
+ "GET",
+ f"/_synapse/admin/v1/rooms/{self.room_id}/messages?dir=b&filter={valid_filter_str}",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+
+ # Check Get with invalid json filter parameter, expect 400 NOT_JSON.
+ invalid_filter_str = "}}}{}"
+ channel = self.make_request(
+ "GET",
+ f"/_synapse/admin/v1/rooms/{self.room_id}/messages?dir=b&filter={invalid_filter_str}",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.json_body)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.NOT_JSON, channel.json_body
+ )
+
class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
servlets = [
@@ -2522,6 +2550,39 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
else:
self.fail("Event %s from events_after not found" % j)
+ def test_room_event_context_filter_query_validation(self) -> None:
+ # Test json validation in (filter) query parameter.
+ # Does not test the validity of the filter, only the json validation.
+
+ # Create a user with room and event_id.
+ user_id = self.register_user("test", "test")
+ user_tok = self.login("test", "test")
+ room_id = self.helper.create_room_as(user_id, tok=user_tok)
+ event_id = self.helper.send(room_id, "message 1", tok=user_tok)["event_id"]
+
+ # Check Get with valid json filter parameter, expect 200.
+ valid_filter_str = '{"types": ["m.room.message"]}'
+ channel = self.make_request(
+ "GET",
+ f"/_synapse/admin/v1/rooms/{room_id}/context/{event_id}?filter={valid_filter_str}",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+
+ # Check Get with invalid json filter parameter, expect 400 NOT_JSON.
+ invalid_filter_str = "}}}{}"
+ channel = self.make_request(
+ "GET",
+ f"/_synapse/admin/v1/rooms/{room_id}/context/{event_id}?filter={invalid_filter_str}",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.json_body)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.NOT_JSON, channel.json_body
+ )
+
class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
servlets = [
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index 1364615085..b796163dcb 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -2175,6 +2175,31 @@ class RoomMessageListTestCase(RoomBase):
chunk = channel.json_body["chunk"]
self.assertEqual(len(chunk), 0, [event["content"] for event in chunk])
+ def test_room_message_filter_query_validation(self) -> None:
+ # Test json validation in (filter) query parameter.
+ # Does not test the validity of the filter, only the json validation.
+
+ # Check Get with valid json filter parameter, expect 200.
+ valid_filter_str = '{"types": ["m.room.message"]}'
+ channel = self.make_request(
+ "GET",
+ f"/rooms/{self.room_id}/messages?access_token=x&dir=b&filter={valid_filter_str}",
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+
+ # Check Get with invalid json filter parameter, expect 400 NOT_JSON.
+ invalid_filter_str = "}}}{}"
+ channel = self.make_request(
+ "GET",
+ f"/rooms/{self.room_id}/messages?access_token=x&dir=b&filter={invalid_filter_str}",
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.json_body)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.NOT_JSON, channel.json_body
+ )
+
class RoomMessageFilterTestCase(RoomBase):
"""Tests /rooms/$room_id/messages REST events."""
@@ -3213,6 +3238,33 @@ class ContextTestCase(unittest.HomeserverTestCase):
self.assertDictEqual(events_after[0].get("content"), {}, events_after[0])
self.assertEqual(events_after[1].get("content"), {}, events_after[1])
+ def test_room_event_context_filter_query_validation(self) -> None:
+ # Test json validation in (filter) query parameter.
+ # Does not test the validity of the filter, only the json validation.
+ event_id = self.helper.send(self.room_id, "message 7", tok=self.tok)["event_id"]
+
+ # Check Get with valid json filter parameter, expect 200.
+ valid_filter_str = '{"types": ["m.room.message"]}'
+ channel = self.make_request(
+ "GET",
+ f"/rooms/{self.room_id}/context/{event_id}?filter={valid_filter_str}",
+ access_token=self.tok,
+ )
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+
+ # Check Get with invalid json filter parameter, expect 400 NOT_JSON.
+ invalid_filter_str = "}}}{}"
+ channel = self.make_request(
+ "GET",
+ f"/rooms/{self.room_id}/context/{event_id}?filter={invalid_filter_str}",
+ access_token=self.tok,
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.json_body)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.NOT_JSON, channel.json_body
+ )
+
class RoomAliasListTestCase(unittest.HomeserverTestCase):
servlets = [
From 6d64f1b2b89c3b4efdefbb5748443533f4377e5a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:40:55 +0100
Subject: [PATCH 008/503] Bump anyhow from 1.0.81 to 1.0.82 (#17095)
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.81 to 1.0.82.
Release notes
Sourced from anyhow's
releases .
1.0.82
Documentation improvements
Commits
074bdea
Release 1.0.82
47a4fbf
Merge pull request #360
from dtolnay/docensure
c5af1db
Make ensure's doc comment apply to the cfg(not(doc)) macro too
bebc7a2
Revert "Temporarily disable miri on doctests"
f2c4db9
Update ui test suite to nightly-2024-03-31
028cbee
Explicitly install a Rust toolchain for cargo-outdated job
7a4cac5
Merge pull request #358
from dtolnay/workspacewrapper
939db01
Apply RUSTC_WORKSPACE_WRAPPER
9f84a37
Temporarily disable miri on doctests
45e5a58
Ignore dead code lint in test
Additional commits viewable in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 65f4807c65..faac6b3c8a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.81"
+version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247"
+checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
[[package]]
name = "arc-swap"
From 47f3870894847d6f29a6b9d7ee049f1ec69aecf0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:41:03 +0100
Subject: [PATCH 009/503] Bump ruff from 0.3.5 to 0.3.7 (#17094)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [ruff](https://github.com/astral-sh/ruff) from 0.3.5 to 0.3.7.
Release notes
Sourced from ruff's
releases .
v0.3.7
Changes
Preview features
[flake8-bugbear] Implement
loop-iterator-mutation (B909) (#9578 )
[pylint] Implement rule to prefer augmented assignment
(PLR6104) (#9932 )
Bug fixes
Avoid TOCTOU errors in cache initialization (#10884 )
[pylint] Recode nan-comparison rule to
W0177 (#10894 )
[pylint] Reverse min-max logic in
if-stmt-min-max (#10890 )
Contributors
v0.3.6
Changes
Preview features
[pylint] Implement
bad-staticmethod-argument (PLW0211) (#10781 )
[pylint] Implement if-stmt-min-max
(PLR1730, PLR1731) (#10002 )
[pyupgrade] Replace str,Enum multiple
inheritance with StrEnum UP042 (#10713 )
[refurb] Implement
if-expr-instead-of-or-operator (FURB110) (#10687 )
[refurb] Implement int-on-sliced-str
(FURB166) (#10650 )
[refurb] Implement write-whole-file
(FURB103) (#10802 )
[refurb] Support itemgetter in
reimplemented-operator (FURB118) (#10526 )
[flake8_comprehensions] Add
sum/min/max to unnecessary
comprehension check (C419) (#10759 )
Rule changes
[pydocstyle] Require capitalizing docstrings where the
first sentence is a single word (D403) (#10776 )
[pycodestyle] Ignore annotated lambdas in class scopes
(E731) (#10720 )
[flake8-pyi] Various improvements to PYI034 (#10807 )
[flake8-slots] Flag subclasses of call-based
typing.NamedTuples as well as subclasses of
collections.namedtuple() (SLOT002) (#10808 )
[pyflakes] Allow forward references in class bases in
stub files (F821) (#10779 )
[pygrep-hooks] Improve blanket-noqa error
message (PGH004) (#10851 )
CLI
Support FORCE_COLOR env var (#10839 )
Configuration
Support negated patterns in [extend-]per-file-ignores
(#10852 )
... (truncated)
Changelog
Sourced from ruff's
changelog .
0.3.7
Preview features
[flake8-bugbear] Implement
loop-iterator-mutation (B909) (#9578 )
[pylint] Implement rule to prefer augmented assignment
(PLR6104) (#9932 )
Bug fixes
Avoid TOCTOU errors in cache initialization (#10884 )
[pylint] Recode nan-comparison rule to
W0177 (#10894 )
[pylint] Reverse min-max logic in
if-stmt-min-max (#10890 )
0.3.6
Preview features
[pylint] Implement
bad-staticmethod-argument (PLW0211) (#10781 )
[pylint] Implement if-stmt-min-max
(PLR1730, PLR1731) (#10002 )
[pyupgrade] Replace str,Enum multiple
inheritance with StrEnum UP042 (#10713 )
[refurb] Implement
if-expr-instead-of-or-operator (FURB110) (#10687 )
[refurb] Implement int-on-sliced-str
(FURB166) (#10650 )
[refurb] Implement write-whole-file
(FURB103) (#10802 )
[refurb] Support itemgetter in
reimplemented-operator (FURB118) (#10526 )
[flake8_comprehensions] Add
sum/min/max to unnecessary
comprehension check (C419) (#10759 )
Rule changes
[pydocstyle] Require capitalizing docstrings where the
first sentence is a single word (D403) (#10776 )
[pycodestyle] Ignore annotated lambdas in class scopes
(E731) (#10720 )
[flake8-pyi] Various improvements to PYI034 (#10807 )
[flake8-slots] Flag subclasses of call-based
typing.NamedTuples as well as subclasses of
collections.namedtuple() (SLOT002) (#10808 )
[pyflakes] Allow forward references in class bases in
stub files (F821) (#10779 )
[pygrep-hooks] Improve blanket-noqa error
message (PGH004) (#10851 )
CLI
Support FORCE_COLOR env var (#10839 )
Configuration
Support negated patterns in [extend-]per-file-ignores
(#10852 )
Bug fixes
[flake8-import-conventions] Accept non-aliased (but
correct) import in unconventional-import-alias
(ICN001) (#10729 )
[flake8-quotes] Add semantic model flag when inside
f-string replacement field (#10766 )
[pep8-naming] Recursively resolve
TypeDicts for N815 violations (#10719 )
[flake8-quotes] Respect Q00* ignores in
flake8-quotes rules (#10728 )
[flake8-simplify] Show negated condition in
needless-bool diagnostics (SIM103) (#10854 )
... (truncated)
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 38 +++++++++++++++++++-------------------
pyproject.toml | 2 +-
2 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 814877b70a..6431975923 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2444,28 +2444,28 @@ files = [
[[package]]
name = "ruff"
-version = "0.3.5"
+version = "0.3.7"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
- {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:aef5bd3b89e657007e1be6b16553c8813b221ff6d92c7526b7e0227450981eac"},
- {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:89b1e92b3bd9fca249153a97d23f29bed3992cff414b222fcd361d763fc53f12"},
- {file = "ruff-0.3.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e55771559c89272c3ebab23326dc23e7f813e492052391fe7950c1a5a139d89"},
- {file = "ruff-0.3.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dabc62195bf54b8a7876add6e789caae0268f34582333cda340497c886111c39"},
- {file = "ruff-0.3.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a05f3793ba25f194f395578579c546ca5d83e0195f992edc32e5907d142bfa3"},
- {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dfd3504e881082959b4160ab02f7a205f0fadc0a9619cc481982b6837b2fd4c0"},
- {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87258e0d4b04046cf1d6cc1c56fadbf7a880cc3de1f7294938e923234cf9e498"},
- {file = "ruff-0.3.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:712e71283fc7d9f95047ed5f793bc019b0b0a29849b14664a60fd66c23b96da1"},
- {file = "ruff-0.3.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532a90b4a18d3f722c124c513ffb5e5eaff0cc4f6d3aa4bda38e691b8600c9f"},
- {file = "ruff-0.3.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:122de171a147c76ada00f76df533b54676f6e321e61bd8656ae54be326c10296"},
- {file = "ruff-0.3.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d80a6b18a6c3b6ed25b71b05eba183f37d9bc8b16ace9e3d700997f00b74660b"},
- {file = "ruff-0.3.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a7b6e63194c68bca8e71f81de30cfa6f58ff70393cf45aab4c20f158227d5936"},
- {file = "ruff-0.3.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a759d33a20c72f2dfa54dae6e85e1225b8e302e8ac655773aff22e542a300985"},
- {file = "ruff-0.3.5-py3-none-win32.whl", hash = "sha256:9d8605aa990045517c911726d21293ef4baa64f87265896e491a05461cae078d"},
- {file = "ruff-0.3.5-py3-none-win_amd64.whl", hash = "sha256:dc56bb16a63c1303bd47563c60482a1512721053d93231cf7e9e1c6954395a0e"},
- {file = "ruff-0.3.5-py3-none-win_arm64.whl", hash = "sha256:faeeae9905446b975dcf6d4499dc93439b131f1443ee264055c5716dd947af55"},
- {file = "ruff-0.3.5.tar.gz", hash = "sha256:a067daaeb1dc2baf9b82a32dae67d154d95212080c80435eb052d95da647763d"},
+ {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0e8377cccb2f07abd25e84fc5b2cbe48eeb0fea9f1719cad7caedb061d70e5ce"},
+ {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:15a4d1cc1e64e556fa0d67bfd388fed416b7f3b26d5d1c3e7d192c897e39ba4b"},
+ {file = "ruff-0.3.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d28bdf3d7dc71dd46929fafeec98ba89b7c3550c3f0978e36389b5631b793663"},
+ {file = "ruff-0.3.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:379b67d4f49774ba679593b232dcd90d9e10f04d96e3c8ce4a28037ae473f7bb"},
+ {file = "ruff-0.3.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c060aea8ad5ef21cdfbbe05475ab5104ce7827b639a78dd55383a6e9895b7c51"},
+ {file = "ruff-0.3.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:ebf8f615dde968272d70502c083ebf963b6781aacd3079081e03b32adfe4d58a"},
+ {file = "ruff-0.3.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d48098bd8f5c38897b03604f5428901b65e3c97d40b3952e38637b5404b739a2"},
+ {file = "ruff-0.3.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8a4fda219bf9024692b1bc68c9cff4b80507879ada8769dc7e985755d662ea"},
+ {file = "ruff-0.3.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c44e0149f1d8b48c4d5c33d88c677a4aa22fd09b1683d6a7ff55b816b5d074f"},
+ {file = "ruff-0.3.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3050ec0af72b709a62ecc2aca941b9cd479a7bf2b36cc4562f0033d688e44fa1"},
+ {file = "ruff-0.3.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a29cc38e4c1ab00da18a3f6777f8b50099d73326981bb7d182e54a9a21bb4ff7"},
+ {file = "ruff-0.3.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5b15cc59c19edca917f51b1956637db47e200b0fc5e6e1878233d3a938384b0b"},
+ {file = "ruff-0.3.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e491045781b1e38b72c91247cf4634f040f8d0cb3e6d3d64d38dcf43616650b4"},
+ {file = "ruff-0.3.7-py3-none-win32.whl", hash = "sha256:bc931de87593d64fad3a22e201e55ad76271f1d5bfc44e1a1887edd0903c7d9f"},
+ {file = "ruff-0.3.7-py3-none-win_amd64.whl", hash = "sha256:5ef0e501e1e39f35e03c2acb1d1238c595b8bb36cf7a170e7c1df1b73da00e74"},
+ {file = "ruff-0.3.7-py3-none-win_arm64.whl", hash = "sha256:789e144f6dc7019d1f92a812891c645274ed08af6037d11fc65fcbc183b7d59f"},
+ {file = "ruff-0.3.7.tar.gz", hash = "sha256:d5c1aebee5162c2226784800ae031f660c350e7a3402c4d1f8ea4e97e232e3ba"},
]
[[package]]
@@ -3451,4 +3451,4 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8.0"
-content-hash = "4abda113a01f162bb3978b0372956d569364533aa39f57863c234363f8449a4f"
+content-hash = "1951f2b4623138d47db08a405edd970e67599d05804bb459af21a085e1665f69"
diff --git a/pyproject.toml b/pyproject.toml
index f0f025645f..fb310589f7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -321,7 +321,7 @@ all = [
# This helps prevents merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
-ruff = "0.3.5"
+ruff = "0.3.7"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
From f5b6005559784df9383bcc4218375b27d64e4651 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:41:15 +0100
Subject: [PATCH 010/503] Bump pyasn1-modules from 0.3.0 to 0.4.0 (#17093)
Bumps [pyasn1-modules](https://github.com/pyasn1/pyasn1-modules) from
0.3.0 to 0.4.0.
Release notes
Sourced from pyasn1-modules's
releases .
Release 0.4.0
It's a major release where we drop Python 2 support entirely.
The most significant changes are:
Added support for Python 3.11, 3.12
Removed support for EOL Pythons 2.7, 3.6, 3.7
A full list of changes can be seen in the CHANGELOG .
Changelog
Sourced from pyasn1-modules's
changelog .
Revision 0.4.0, released 26-03-2024
Added support for Python 3.11, 3.12
Removed support for EOL Pythons 2.7, 3.6, 3.7
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 6431975923..90f592f53f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1848,17 +1848,17 @@ files = [
[[package]]
name = "pyasn1-modules"
-version = "0.3.0"
+version = "0.4.0"
description = "A collection of ASN.1-based protocols modules"
optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
+python-versions = ">=3.8"
files = [
- {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"},
- {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"},
+ {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"},
+ {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"},
]
[package.dependencies]
-pyasn1 = ">=0.4.6,<0.6.0"
+pyasn1 = ">=0.4.6,<0.7.0"
[[package]]
name = "pycparser"
From 98f57ea3f2ffa94bf66310674bea5ff554df277f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:41:24 +0100
Subject: [PATCH 011/503] Bump pygithub from 2.2.0 to 2.3.0 (#17092)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [pygithub](https://github.com/pygithub/pygithub) from 2.2.0 to
2.3.0.
Release notes
Sourced from pygithub's
releases .
v2.3.0
New features
Improvements
Bug Fixes
Maintenance
Changelog
Sourced from pygithub's
changelog .
Version 2.3.0 (March 21, 2024)
New features
^^^^^^^^^^^^
Support OAuth for enterprise (#2780 )
(e4106e00)
Support creation of Dependabot Organization and Repository Secrets
(#2874 )
(0784f835)
Improvements
^^^^^^^^^^^^
Create release with optional name and message when
generate_release_notes is true (#2868 )
(d65fc30d)
Add missing attributes to WorkflowJob (#2921 )
(9e092458)
Add created and check_suite_id filter for
Repository WorkflowRuns (#2891 )
(c788985c)
Assert requester argument type in Auth (#2912 )
(0b8435fc)
Bug Fixes
^^^^^^^^^
Revert having allowed values for add_to_collaborators (#2905 )
(b542438e)
Maintenance
^^^^^^^^^^^
Fix imports in authentication docs (#2923 )
(e3d36535)
CI: add docformatter to precommit (#2614 )
(96ad19ae)
Add .swp files to gitignore (#2903 )
(af529abe)
Fix instructions building docs in CONTRIBUTING.md (#2900 )
(cd8e528d)
Explicitly name the modules built in pyproject.toml (#2894 )
(4d461734)
Commits
7266e81
Release v2.3.0 (#2926 )
e4106e0
Support oauth for enterprise (#2780 )
d65fc30
Create release with optional name and message when
generate_release_notes is ...
0784f83
Support creation of Dependabot Organization and Repository Secrets (#2874 )
9e09245
Add missing attributes to WorkflowJob (#2921 )
e3d3653
Fix imports in authentication docs (#2923 )
c788985
Add created and check_suite_id filter for
Repository WorkflowRuns (#2891 )
0b8435f
Assert requester argument type in Auth (#2912 )
96ad19a
CI: add docformatter to precommit (#2614 )
b542438
Revert having allowed values for add_to_collaborators (#2905 )
Additional commits viewable in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 90f592f53f..52351a5692 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1983,13 +1983,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pygithub"
-version = "2.2.0"
+version = "2.3.0"
description = "Use the full Github API v3"
optional = false
python-versions = ">=3.7"
files = [
- {file = "PyGithub-2.2.0-py3-none-any.whl", hash = "sha256:41042ea53e4c372219db708c38d2ca1fd4fadab75475bac27d89d339596cfad1"},
- {file = "PyGithub-2.2.0.tar.gz", hash = "sha256:e39be7c4dc39418bdd6e3ecab5931c636170b8b21b4d26f9ecf7e6102a3b51c3"},
+ {file = "PyGithub-2.3.0-py3-none-any.whl", hash = "sha256:65b499728be3ce7b0cd2cd760da3b32f0f4d7bc55e5e0677617f90f6564e793e"},
+ {file = "PyGithub-2.3.0.tar.gz", hash = "sha256:0148d7347a1cdeed99af905077010aef81a4dad988b0ba51d4108bf66b443f7e"},
]
[package.dependencies]
From dcae2b4ba445f519ed1b3f3369a4661920f6752e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:41:45 +0100
Subject: [PATCH 012/503] Bump twine from 4.0.2 to 5.0.0 (#17091)
Bumps [twine](https://github.com/pypa/twine) from 4.0.2 to 5.0.0.
Changelog
Sourced from twine's
changelog .
Twine 5.0.0 (2024-02-10)
Bugfixes
^^^^^^^^
Use email.message instead of cgi as
cgi has been deprecated
([#969](https://github.com/pypa/twine/issues/969)
<https://github.com/pypa/twine/issues/969>_)
Misc
^^^^
[#931](https://github.com/pypa/twine/issues/931)
<https://github.com/pypa/twine/issues/931>,
[#991](https://github.com/pypa/twine/issues/991)
<https://github.com/pypa/twine/issues/991> ,
[#1028](https://github.com/pypa/twine/issues/1028)
<https://github.com/pypa/twine/issues/1028>,
[#1040](https://github.com/pypa/twine/issues/1040)
<https://github.com/pypa/twine/issues/1040>
Commits
94f810c
Merge pull request #1047 from
pypa/new-release
09d993a
Update linkcheck_ignore setting for docs
ab0ed19
Apply 2024 black format
407e6cc
Build changelog for 5.0.0
6644b86
Add missing changelog entries
fe1885f
Merge pull request #1034 from
DimitriPapadopoulos/codespell
694bdcf
Fix typos found by codespell
89ec78c
Merge pull request #1040 from
woodruffw-forks/ww/pypi-mandatory-api-tokens
b3b363a
tests: lintage
6e94d20
tests: more non-PyPI tests
Additional commits viewable in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 52351a5692..eddeee3018 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2954,13 +2954,13 @@ docs = ["sphinx (<7.0.0)"]
[[package]]
name = "twine"
-version = "4.0.2"
+version = "5.0.0"
description = "Collection of utilities for publishing packages on PyPI"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "twine-4.0.2-py3-none-any.whl", hash = "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8"},
- {file = "twine-4.0.2.tar.gz", hash = "sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8"},
+ {file = "twine-5.0.0-py3-none-any.whl", hash = "sha256:a262933de0b484c53408f9edae2e7821c1c45a3314ff2df9bdd343aa7ab8edc0"},
+ {file = "twine-5.0.0.tar.gz", hash = "sha256:89b0cc7d370a4b66421cc6102f269aa910fe0f1861c124f573cf2ddedbc10cf4"},
]
[package.dependencies]
From 20c8991a94afb21ad176ef30089235b6051cc43e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:42:21 +0100
Subject: [PATCH 013/503] Bump peaceiris/actions-mdbook from 1.2.0 to 2.0.0
(#17089)
Bumps
[peaceiris/actions-mdbook](https://github.com/peaceiris/actions-mdbook)
from 1.2.0 to 2.0.0.
Release notes
Sourced from peaceiris/actions-mdbook's
releases .
actions-mdbook v2.0.0
See CHANGELOG.md
for more details.
Changelog
Sourced from peaceiris/actions-mdbook's
changelog .
Changelog
All notable changes to this project will be documented in this file.
See standard-version
for commit guidelines.
2.0.0
(2024-04-08)
build
chore
ci
bump actions/checkout from 3 to 4 (#487 )
(c0c1ffe ),
closes #487
bump actions/dependency-review-action from 2.5.0 to 2.5.1 (#470 )
(e8a2552 ),
closes #470
#290
#300
#299
bump actions/dependency-review-action from 2.5.1 to 3.0.0 (#472 )
(9a6ded1 ),
closes #472
#327
#324
#325
#326
bump actions/dependency-review-action from 3.0.0 to 3.0.1 (#473 )
(939fe76 ),
closes #473
bump actions/dependency-review-action from 3.0.1 to 3.0.2 (#474 )
(404c95a ),
closes #474
bump actions/dependency-review-action from 3.0.2 to 3.0.3 (#476 )
(665e827 ),
closes #476
bump actions/dependency-review-action from 3.0.3 to 3.0.4 (#479 )Co-authored-by:
dependabot[bot] (9d85c8a ),
closes #479
bump actions/dependency-review-action from 3.0.4 to 3.0.6 (#480 )
(a1c0a09 ),
closes #480
bump actions/dependency-review-action from 3.0.6 to 3.0.7 (#483 )
(2987c69 ),
closes #483
bump actions/dependency-review-action from 3.0.7 to 3.0.8 (#485 )
(162a198 ),
closes #485
bump actions/dependency-review-action from 3.0.8 to 3.1.0 (#488 )
(60cc2ff ),
closes #488
bump actions/setup-node from 3.5.1 to 3.6.0 (#475 )
(10da3f5 ),
closes #475
bump actions/setup-node from 3.6.0 to 3.7.0 (#481 )
(334df4e ),
closes #481
bump actions/setup-node from 3.7.0 to 3.8.0 (#484 )
(fe51920 ),
closes #484
bump actions/setup-node from 3.8.0 to 3.8.1 (#486 )
(c6c9e0f ),
closes #486
bump codecov/codecov-action from 3 to 4 (#490 )
(7b0c98f ),
closes #490
bump github/codeql-action from 1 to 2 (#440 )
(7ce6923 ),
closes #440
bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0 (#469 )
(59732c8 ),
closes #469
#397
#397
#385
#385
#407
#407
#409
#409
#424
#424
#463
#463
#393
#393
#395
#395
#399
#399
#400
#400
#405
#405
#411
#411
#412
#412
#416
#416
#435
#435
#438
#438
#456
#456
#460
#460
#462
#462
#371
#371
#437
#437
#392
#392
#394
#394
#396
#396
#402
#402
#404
#404
#436
#436
#373
#373
#374
#374
#377
#377
#380
#380
#381
#381
#383
#383
#384
#384
#382
#382
#466
#463
#462
#460
#456
#438
#437
feat
1.2.0
(2022-10-23)
chore
... (truncated)
Commits
ee69d23
chore(release): 2.0.0
2d79d45
chore(release): Add build assets
c95f05c
chore: revert build
cb4d902
build: bump node to 20.12.1 (#504 )
46c97c2
feat: bump to node20 runtime (#500 )
7b0c98f
ci: bump codecov/codecov-action from 3 to 4 (#490 )
60cc2ff
ci: bump actions/dependency-review-action from 3.0.8 to 3.1.0 (#488 )
c0c1ffe
ci: bump actions/checkout from 3 to 4 (#487 )
c6c9e0f
ci: bump actions/setup-node from 3.8.0 to 3.8.1 (#486 )
162a198
ci: bump actions/dependency-review-action from 3.0.7 to 3.0.8 (#485 )
Additional commits viewable in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/docs-pr.yaml | 4 ++--
.github/workflows/docs.yaml | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
index 652ef90095..07dc301b1a 100644
--- a/.github/workflows/docs-pr.yaml
+++ b/.github/workflows/docs-pr.yaml
@@ -19,7 +19,7 @@ jobs:
fetch-depth: 0
- name: Setup mdbook
- uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+ uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
with:
mdbook-version: '0.4.17'
@@ -53,7 +53,7 @@ jobs:
- uses: actions/checkout@v4
- name: Setup mdbook
- uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+ uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
with:
mdbook-version: '0.4.17'
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index d611fdc924..4ddee9ad0a 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -56,7 +56,7 @@ jobs:
fetch-depth: 0
- name: Setup mdbook
- uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+ uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
with:
mdbook-version: '0.4.17'
From 14e9ab19be56f6daa429b36c215db22079f0f111 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:42:35 +0100
Subject: [PATCH 014/503] Bump sigstore/cosign-installer from 3.4.0 to 3.5.0
(#17088)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps
[sigstore/cosign-installer](https://github.com/sigstore/cosign-installer)
from 3.4.0 to 3.5.0.
Release notes
Sourced from sigstore/cosign-installer's
releases .
v3.5.0
What's Changed
Full Changelog : https://github.com/sigstore/cosign-installer/compare/v3.4.0...v3.5.0
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/docker.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 6574550447..391e9c96ff 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -30,7 +30,7 @@ jobs:
run: docker buildx inspect
- name: Install Cosign
- uses: sigstore/cosign-installer@v3.4.0
+ uses: sigstore/cosign-installer@v3.5.0
- name: Checkout repository
uses: actions/checkout@v4
From 8c667759ad7983774b3937778731fd485af54417 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:43:04 +0100
Subject: [PATCH 015/503] Bump peaceiris/actions-gh-pages from 3.9.3 to 4.0.0
(#17087)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps
[peaceiris/actions-gh-pages](https://github.com/peaceiris/actions-gh-pages)
from 3.9.3 to 4.0.0.
Release notes
Sourced from peaceiris/actions-gh-pages's
releases .
actions-github-pages v4.0.0
See CHANGELOG.md
for more details.
Changelog
Sourced from peaceiris/actions-gh-pages's
changelog .
Changelog
All notable changes to this project will be documented in this file.
See standard-version
for commit guidelines.
4.0.0
(2024-04-08)
build
chore
ci
docs
3.9.3
(2023-03-30)
docs
fix
3.9.2
(2023-01-17)
chore
... (truncated)
Commits
4f9cc66
chore(release): 4.0.0
9c75028
chore(release): Add build assets
5049354
build: node 20.11.1
4eb285e
chore: bump node16 to node20 (#1067 )
cdc09a3
chore(deps): update dependency @types/node to v16.18.77
(#1065 )
d830378
chore(deps): update dependency @types/node to v16.18.76
(#1063 )
80daa1d
chore(deps): update dependency @types/node to v16.18.75
(#1061 )
108285e
chore(deps): update dependency ts-jest to v29.1.2 (#1060 )
99c95ff
chore(deps): update dependency @types/node to v16.18.74
(#1058 )
1f46537
chore(deps): update dependency @types/node to v16.18.73
(#1057 )
Additional commits viewable in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/docs.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 4ddee9ad0a..fe3212f82a 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -80,7 +80,7 @@ jobs:
# Deploy to the target directory.
- name: Deploy to gh pages
- uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
+ uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
@@ -110,7 +110,7 @@ jobs:
# Deploy to the target directory.
- name: Deploy to gh pages
- uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
+ uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./dev-docs/_build/html
From 800a5b6ef33076e677a8bf6cf6090b213e42855d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:43:25 +0100
Subject: [PATCH 016/503] Bump types-pillow from 10.2.0.20240406 to
10.2.0.20240415 (#17090)
Bumps [types-pillow](https://github.com/python/typeshed) from
10.2.0.20240406 to 10.2.0.20240415.
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index eddeee3018..d916c627a0 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -3109,13 +3109,13 @@ files = [
[[package]]
name = "types-pillow"
-version = "10.2.0.20240406"
+version = "10.2.0.20240415"
description = "Typing stubs for Pillow"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-Pillow-10.2.0.20240406.tar.gz", hash = "sha256:62e0cc1f17caba40e72e7154a483f4c7f3bea0e1c34c0ebba9de3c7745bc306d"},
- {file = "types_Pillow-10.2.0.20240406-py3-none-any.whl", hash = "sha256:5ac182e8afce53de30abca2fdf9cbec7b2500e549d0be84da035a729a84c7c47"},
+ {file = "types-Pillow-10.2.0.20240415.tar.gz", hash = "sha256:dd6058027639bcdc66ba78b228cc25fdae42524c2150c78c804da427e7e76e70"},
+ {file = "types_Pillow-10.2.0.20240415-py3-none-any.whl", hash = "sha256:f933332b7e96010bae9b9cf82a4c9979ff0c270d63f5c5bbffb2d789b85cd00b"},
]
[[package]]
From 301c9771c41108218b0efab43f30982bf76dc349 Mon Sep 17 00:00:00 2001
From: devonh
Date: Fri, 19 Apr 2024 15:26:28 +0000
Subject: [PATCH 017/503] Clarify what part of message retention is still
experimental (#17099)
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [x] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct
(run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---
changelog.d/17099.doc | 1 +
docs/message_retention_policies.md | 6 ++++--
2 files changed, 5 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/17099.doc
diff --git a/changelog.d/17099.doc b/changelog.d/17099.doc
new file mode 100644
index 0000000000..d8d10fa53a
--- /dev/null
+++ b/changelog.d/17099.doc
@@ -0,0 +1 @@
+Clarify what part of message retention is still experimental.
diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md
index 2746a106b3..c64d1539b0 100644
--- a/docs/message_retention_policies.md
+++ b/docs/message_retention_policies.md
@@ -7,8 +7,10 @@ follow the semantics described in
and allow server and room admins to configure how long messages should
be kept in a homeserver's database before being purged from it.
**Please note that, as this feature isn't part of the Matrix
-specification yet, this implementation is to be considered as
-experimental.**
+specification yet, the use of `m.room.retention` events for per-room
+retention policies is to be considered as experimental. However, the use
+of a default message retention policy is considered a stable feature
+in Synapse.**
A message retention policy is mainly defined by its `max_lifetime`
parameter, which defines how long a message can be kept around after
From 074ef4d75f9439036119c9874e42f8a92c9bc4fb Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Fri, 19 Apr 2024 17:10:44 +0100
Subject: [PATCH 018/503] Add an OSX prompt to manually configure icu4c.
(#17069)
Documentation fix.
---
changelog.d/17069.doc | 1 +
docs/development/contributing_guide.md | 2 ++
2 files changed, 3 insertions(+)
create mode 100644 changelog.d/17069.doc
diff --git a/changelog.d/17069.doc b/changelog.d/17069.doc
new file mode 100644
index 0000000000..f5a7f599d1
--- /dev/null
+++ b/changelog.d/17069.doc
@@ -0,0 +1 @@
+Add a prompt in the contributing guide to manually configure icu4c.
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index ac8a7039d1..76c3e790cd 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -86,6 +86,8 @@ poetry install --extras all
This will install the runtime and developer dependencies for the project. Be sure to check
that the `poetry install` step completed cleanly.
+For OSX users, be sure to set `PKG_CONFIG_PATH` to support `icu4c`. Run `brew info icu4c` for more details.
+
## Running Synapse via poetry
To start a local instance of Synapse in the locked poetry environment, create a config file:
From 55b0aa847a61774b6a3acdc4b177a20dc019f01a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 23 Apr 2024 15:24:08 +0100
Subject: [PATCH 019/503] Fix GHSA-3h7q-rfh9-xm4v
Weakness in auth chain indexing allows DoS from remote room members
through disk fill and high CPU usage.
A remote Matrix user with malicious intent, sharing a room with Synapse
instances before 1.104.1, can dispatch specially crafted events to
exploit a weakness in how the auth chain cover index is calculated. This
can induce high CPU consumption and accumulate excessive data in the
database of such instances, resulting in a denial of service.
Servers in private federations, or those that do not federate, are not
affected.
---
changelog.d/17044.misc | 1 +
synapse/storage/databases/main/events.py | 108 ++++++++---------------
synapse/storage/schema/__init__.py | 8 +-
tests/storage/test_event_chain.py | 104 +++++++++++++++-------
4 files changed, 117 insertions(+), 104 deletions(-)
create mode 100644 changelog.d/17044.misc
diff --git a/changelog.d/17044.misc b/changelog.d/17044.misc
new file mode 100644
index 0000000000..a1439752d3
--- /dev/null
+++ b/changelog.d/17044.misc
@@ -0,0 +1 @@
+Refactor auth chain fetching to reduce duplication.
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index a6fda3f43c..1e731d56bd 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -19,6 +19,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
+import collections
import itertools
import logging
from collections import OrderedDict
@@ -53,6 +54,7 @@ from synapse.storage.database import (
LoggingDatabaseConnection,
LoggingTransaction,
)
+from synapse.storage.databases.main.event_federation import EventFederationStore
from synapse.storage.databases.main.events_worker import EventCacheEntry
from synapse.storage.databases.main.search import SearchEntry
from synapse.storage.engines import PostgresEngine
@@ -768,40 +770,26 @@ class PersistEventsStore:
# that have the same chain ID as the event.
# 2. For each retained auth event we:
# a. Add a link from the event's to the auth event's chain
- # ID/sequence number; and
- # b. Add a link from the event to every chain reachable by the
- # auth event.
+ # ID/sequence number
# Step 1, fetch all existing links from all the chains we've seen
# referenced.
chain_links = _LinkMap()
- auth_chain_rows = cast(
- List[Tuple[int, int, int, int]],
- db_pool.simple_select_many_txn(
- txn,
- table="event_auth_chain_links",
- column="origin_chain_id",
- iterable={chain_id for chain_id, _ in chain_map.values()},
- keyvalues={},
- retcols=(
- "origin_chain_id",
- "origin_sequence_number",
- "target_chain_id",
- "target_sequence_number",
- ),
- ),
- )
- for (
- origin_chain_id,
- origin_sequence_number,
- target_chain_id,
- target_sequence_number,
- ) in auth_chain_rows:
- chain_links.add_link(
- (origin_chain_id, origin_sequence_number),
- (target_chain_id, target_sequence_number),
- new=False,
- )
+
+ for links in EventFederationStore._get_chain_links(
+ txn, {chain_id for chain_id, _ in chain_map.values()}
+ ):
+ for origin_chain_id, inner_links in links.items():
+ for (
+ origin_sequence_number,
+ target_chain_id,
+ target_sequence_number,
+ ) in inner_links:
+ chain_links.add_link(
+ (origin_chain_id, origin_sequence_number),
+ (target_chain_id, target_sequence_number),
+ new=False,
+ )
# We do this in toplogical order to avoid adding redundant links.
for event_id in sorted_topologically(
@@ -836,18 +824,6 @@ class PersistEventsStore:
(chain_id, sequence_number), (auth_chain_id, auth_sequence_number)
)
- # Step 2b, add a link to chains reachable from the auth
- # event.
- for target_id, target_seq in chain_links.get_links_from(
- (auth_chain_id, auth_sequence_number)
- ):
- if target_id == chain_id:
- continue
-
- chain_links.add_link(
- (chain_id, sequence_number), (target_id, target_seq)
- )
-
db_pool.simple_insert_many_txn(
txn,
table="event_auth_chain_links",
@@ -2451,31 +2427,6 @@ class _LinkMap:
current_links[src_seq] = target_seq
return True
- def get_links_from(
- self, src_tuple: Tuple[int, int]
- ) -> Generator[Tuple[int, int], None, None]:
- """Gets the chains reachable from the given chain/sequence number.
-
- Yields:
- The chain ID and sequence number the link points to.
- """
- src_chain, src_seq = src_tuple
- for target_id, sequence_numbers in self.maps.get(src_chain, {}).items():
- for link_src_seq, target_seq in sequence_numbers.items():
- if link_src_seq <= src_seq:
- yield target_id, target_seq
-
- def get_links_between(
- self, source_chain: int, target_chain: int
- ) -> Generator[Tuple[int, int], None, None]:
- """Gets the links between two chains.
-
- Yields:
- The source and target sequence numbers.
- """
-
- yield from self.maps.get(source_chain, {}).get(target_chain, {}).items()
-
def get_additions(self) -> Generator[Tuple[int, int, int, int], None, None]:
"""Gets any newly added links.
@@ -2502,9 +2453,24 @@ class _LinkMap:
if src_chain == target_chain:
return target_seq <= src_seq
- links = self.get_links_between(src_chain, target_chain)
- for link_start_seq, link_end_seq in links:
- if link_start_seq <= src_seq and target_seq <= link_end_seq:
- return True
+ # We have to graph traverse the links to check for indirect paths.
+ visited_chains = collections.Counter()
+ search = [(src_chain, src_seq)]
+ while search:
+ chain, seq = search.pop()
+ visited_chains[chain] = max(seq, visited_chains[chain])
+ for tc, links in self.maps.get(chain, {}).items():
+ for ss, ts in links.items():
+ # Don't revisit chains we've already seen, unless the target
+ # sequence number is higher than last time.
+ if ts <= visited_chains.get(tc, 0):
+ continue
+
+ if ss <= seq:
+ if tc == target_chain:
+ if target_seq <= ts:
+ return True
+ else:
+ search.append((tc, ts))
return False
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index c0b925444f..039aa91b92 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -132,12 +132,16 @@ Changes in SCHEMA_VERSION = 82
Changes in SCHEMA_VERSION = 83
- The event_txn_id is no longer used.
+
+Changes in SCHEMA_VERSION = 84
+ - No longer assumes that `event_auth_chain_links` holds transitive links, and
+ so read operations must do graph traversal.
"""
SCHEMA_COMPAT_VERSION = (
- # The event_txn_id table and tables from MSC2716 no longer exist.
- 83
+ # Transitive links are no longer written to `event_auth_chain_links`
+ 84
)
"""Limit on how far the synapse codebase can be rolled back without breaking db compat
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 9e4e73832e..27d5b0125f 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -21,6 +21,8 @@
from typing import Dict, List, Set, Tuple, cast
+from parameterized import parameterized
+
from twisted.test.proto_helpers import MemoryReactor
from twisted.trial import unittest
@@ -45,7 +47,8 @@ class EventChainStoreTestCase(HomeserverTestCase):
self.store = hs.get_datastores().main
self._next_stream_ordering = 1
- def test_simple(self) -> None:
+ @parameterized.expand([(False,), (True,)])
+ def test_simple(self, batched: bool) -> None:
"""Test that the example in `docs/auth_chain_difference_algorithm.md`
works.
"""
@@ -53,6 +56,7 @@ class EventChainStoreTestCase(HomeserverTestCase):
event_factory = self.hs.get_event_builder_factory()
bob = "@creator:test"
alice = "@alice:test"
+ charlie = "@charlie:test"
room_id = "!room:test"
# Ensure that we have a rooms entry so that we generate the chain index.
@@ -191,6 +195,26 @@ class EventChainStoreTestCase(HomeserverTestCase):
)
)
+ charlie_invite = self.get_success(
+ event_factory.for_room_version(
+ RoomVersions.V6,
+ {
+ "type": EventTypes.Member,
+ "state_key": charlie,
+ "sender": alice,
+ "room_id": room_id,
+ "content": {"tag": "charlie_invite"},
+ },
+ ).build(
+ prev_event_ids=[],
+ auth_event_ids=[
+ create.event_id,
+ alice_join2.event_id,
+ power_2.event_id,
+ ],
+ )
+ )
+
events = [
create,
bob_join,
@@ -200,33 +224,41 @@ class EventChainStoreTestCase(HomeserverTestCase):
bob_join_2,
power_2,
alice_join2,
+ charlie_invite,
]
expected_links = [
(bob_join, create),
- (power, create),
(power, bob_join),
- (alice_invite, create),
(alice_invite, power),
- (alice_invite, bob_join),
(bob_join_2, power),
(alice_join2, power_2),
+ (charlie_invite, alice_join2),
]
- self.persist(events)
+ # We either persist as a batch or one-by-one depending on test
+ # parameter.
+ if batched:
+ self.persist(events)
+ else:
+ for event in events:
+ self.persist([event])
+
chain_map, link_map = self.fetch_chains(events)
# Check that the expected links and only the expected links have been
# added.
- self.assertEqual(len(expected_links), len(list(link_map.get_additions())))
+ event_map = {e.event_id: e for e in events}
+ reverse_chain_map = {v: event_map[k] for k, v in chain_map.items()}
- for start, end in expected_links:
- start_id, start_seq = chain_map[start.event_id]
- end_id, end_seq = chain_map[end.event_id]
-
- self.assertIn(
- (start_seq, end_seq), list(link_map.get_links_between(start_id, end_id))
- )
+ self.maxDiff = None
+ self.assertCountEqual(
+ expected_links,
+ [
+ (reverse_chain_map[(s1, s2)], reverse_chain_map[(t1, t2)])
+ for s1, s2, t1, t2 in link_map.get_additions()
+ ],
+ )
# Test that everything can reach the create event, but the create event
# can't reach anything.
@@ -368,24 +400,23 @@ class EventChainStoreTestCase(HomeserverTestCase):
expected_links = [
(bob_join, create),
- (power, create),
(power, bob_join),
- (alice_invite, create),
(alice_invite, power),
- (alice_invite, bob_join),
]
# Check that the expected links and only the expected links have been
# added.
- self.assertEqual(len(expected_links), len(list(link_map.get_additions())))
+ event_map = {e.event_id: e for e in events}
+ reverse_chain_map = {v: event_map[k] for k, v in chain_map.items()}
- for start, end in expected_links:
- start_id, start_seq = chain_map[start.event_id]
- end_id, end_seq = chain_map[end.event_id]
-
- self.assertIn(
- (start_seq, end_seq), list(link_map.get_links_between(start_id, end_id))
- )
+ self.maxDiff = None
+ self.assertCountEqual(
+ expected_links,
+ [
+ (reverse_chain_map[(s1, s2)], reverse_chain_map[(t1, t2)])
+ for s1, s2, t1, t2 in link_map.get_additions()
+ ],
+ )
def persist(
self,
@@ -489,8 +520,6 @@ class LinkMapTestCase(unittest.TestCase):
link_map = _LinkMap()
link_map.add_link((1, 1), (2, 1), new=False)
- self.assertCountEqual(link_map.get_links_between(1, 2), [(1, 1)])
- self.assertCountEqual(link_map.get_links_from((1, 1)), [(2, 1)])
self.assertCountEqual(link_map.get_additions(), [])
self.assertTrue(link_map.exists_path_from((1, 5), (2, 1)))
self.assertFalse(link_map.exists_path_from((1, 5), (2, 2)))
@@ -499,18 +528,31 @@ class LinkMapTestCase(unittest.TestCase):
# Attempting to add a redundant link is ignored.
self.assertFalse(link_map.add_link((1, 4), (2, 1)))
- self.assertCountEqual(link_map.get_links_between(1, 2), [(1, 1)])
+ self.assertCountEqual(link_map.get_additions(), [])
# Adding new non-redundant links works
self.assertTrue(link_map.add_link((1, 3), (2, 3)))
- self.assertCountEqual(link_map.get_links_between(1, 2), [(1, 1), (3, 3)])
+ self.assertCountEqual(link_map.get_additions(), [(1, 3, 2, 3)])
self.assertTrue(link_map.add_link((2, 5), (1, 3)))
- self.assertCountEqual(link_map.get_links_between(2, 1), [(5, 3)])
- self.assertCountEqual(link_map.get_links_between(1, 2), [(1, 1), (3, 3)])
-
self.assertCountEqual(link_map.get_additions(), [(1, 3, 2, 3), (2, 5, 1, 3)])
+ def test_exists_path_from(self) -> None:
+ "Check that `exists_path_from` can handle non-direct links"
+ link_map = _LinkMap()
+
+ link_map.add_link((1, 1), (2, 1), new=False)
+ link_map.add_link((2, 1), (3, 1), new=False)
+
+ self.assertTrue(link_map.exists_path_from((1, 4), (3, 1)))
+ self.assertFalse(link_map.exists_path_from((1, 4), (3, 2)))
+
+ link_map.add_link((1, 5), (2, 3), new=False)
+ link_map.add_link((2, 2), (3, 3), new=False)
+
+ self.assertTrue(link_map.exists_path_from((1, 6), (3, 2)))
+ self.assertFalse(link_map.exists_path_from((1, 4), (3, 2)))
+
class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
servlets = [
From 20c9e195197567c209edf45383e5d0cdd2ef2a5f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 23 Apr 2024 15:57:13 +0100
Subject: [PATCH 020/503] 1.105.1
---
CHANGES.md | 14 ++++++++++++++
changelog.d/17044.misc | 1 -
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
4 files changed, 21 insertions(+), 2 deletions(-)
delete mode 100644 changelog.d/17044.misc
diff --git a/CHANGES.md b/CHANGES.md
index ed9cca73bc..ec5bc22a98 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,17 @@
+# Synapse 1.105.1 (2024-04-23)
+
+## Security advisory
+
+The following issues are fixed in 1.105.1.
+
+- [GHSA-3h7q-rfh9-xm4v](https://github.com/element-hq/synapse/security/advisories/GHSA-3h7q-rfh9-xm4v) / [CVE-2024-31208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-31208) — High Severity
+
+ Weakness in auth chain indexing allows DoS from remote room members through disk fill and high CPU usage.
+
+See the advisories for more details. If you have any questions, email security@element.io.
+
+
+
# Synapse 1.105.0 (2024-04-16)
No significant changes since 1.105.0rc1.
diff --git a/changelog.d/17044.misc b/changelog.d/17044.misc
deleted file mode 100644
index a1439752d3..0000000000
--- a/changelog.d/17044.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor auth chain fetching to reduce duplication.
diff --git a/debian/changelog b/debian/changelog
index 49c9b3b497..214ed59426 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.105.1) stable; urgency=medium
+
+ * New Synapse release 1.105.1.
+
+ -- Synapse Packaging team Tue, 23 Apr 2024 15:56:18 +0100
+
matrix-synapse-py3 (1.105.0) stable; urgency=medium
* New Synapse release 1.105.0.
diff --git a/pyproject.toml b/pyproject.toml
index f0f025645f..508d31d8d7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.105.0"
+version = "1.105.1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From ae181233aa4c296d5d973eedfc599145ac0d5918 Mon Sep 17 00:00:00 2001
From: mcalinghee
Date: Tue, 23 Apr 2024 17:45:24 +0200
Subject: [PATCH 021/503] Send an email if the address is already bound to a
 user account (#16819)
Co-authored-by: Mathieu Velten
Co-authored-by: Olivier D
---
changelog.d/16819.feature | 1 +
synapse/config/emailconfig.py | 12 ++++++++++++
synapse/push/mailer.py | 16 ++++++++++++++++
synapse/res/templates/already_in_use.html | 12 ++++++++++++
synapse/res/templates/already_in_use.txt | 10 ++++++++++
synapse/rest/client/register.py | 12 ++++++++++--
tests/rest/client/test_register.py | 9 +++++++++
7 files changed, 70 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/16819.feature
create mode 100644 synapse/res/templates/already_in_use.html
create mode 100644 synapse/res/templates/already_in_use.txt
diff --git a/changelog.d/16819.feature b/changelog.d/16819.feature
new file mode 100644
index 0000000000..1af6f466b7
--- /dev/null
+++ b/changelog.d/16819.feature
@@ -0,0 +1 @@
+Send an email if the address is already bound to a user account.
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index a4dc9db03e..8033fa2e52 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -52,6 +52,7 @@ DEFAULT_SUBJECTS = {
"invite_from_person_to_space": "[%(app)s] %(person)s has invited you to join the %(space)s space on %(app)s...",
"password_reset": "[%(server_name)s] Password reset",
"email_validation": "[%(server_name)s] Validate your email",
+ "email_already_in_use": "[%(server_name)s] Email already in use",
}
LEGACY_TEMPLATE_DIR_WARNING = """
@@ -76,6 +77,7 @@ class EmailSubjectConfig:
invite_from_person_to_space: str
password_reset: str
email_validation: str
+ email_already_in_use: str
class EmailConfig(Config):
@@ -180,6 +182,12 @@ class EmailConfig(Config):
registration_template_text = email_config.get(
"registration_template_text", "registration.txt"
)
+ already_in_use_template_html = email_config.get(
+ "already_in_use_template_html", "already_in_use.html"
+ )
+ already_in_use_template_text = email_config.get(
+ "already_in_use_template_html", "already_in_use.txt"
+ )
add_threepid_template_html = email_config.get(
"add_threepid_template_html", "add_threepid.html"
)
@@ -215,6 +223,8 @@ class EmailConfig(Config):
self.email_password_reset_template_text,
self.email_registration_template_html,
self.email_registration_template_text,
+ self.email_already_in_use_template_html,
+ self.email_already_in_use_template_text,
self.email_add_threepid_template_html,
self.email_add_threepid_template_text,
self.email_password_reset_template_confirmation_html,
@@ -230,6 +240,8 @@ class EmailConfig(Config):
password_reset_template_text,
registration_template_html,
registration_template_text,
+ already_in_use_template_html,
+ already_in_use_template_text,
add_threepid_template_html,
add_threepid_template_text,
"password_reset_confirmation.html",
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index f1ffc8115f..7c15eb7440 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -205,6 +205,22 @@ class Mailer:
template_vars,
)
+ emails_sent_counter.labels("already_in_use")
+
+ async def send_already_in_use_mail(self, email_address: str) -> None:
+ """Send an email if the address is already bound to an user account
+
+ Args:
+ email_address: Email address we're sending to the "already in use" mail
+ """
+
+ await self.send_email(
+ email_address,
+ self.email_subjects.email_already_in_use
+ % {"server_name": self.hs.config.server.server_name, "app": self.app_name},
+ {},
+ )
+
emails_sent_counter.labels("add_threepid")
async def send_add_threepid_mail(
diff --git a/synapse/res/templates/already_in_use.html b/synapse/res/templates/already_in_use.html
new file mode 100644
index 0000000000..4c4c3c36a7
--- /dev/null
+++ b/synapse/res/templates/already_in_use.html
@@ -0,0 +1,12 @@
+{% extends "_base.html" %}
+{% block title %}Email already in use{% endblock %}
+
+{% block body %}
+You have asked us to register this email with a new Matrix account, but this email is already registered with an existing account.
+
+Please reset your password if needed.
+
+If this was not you, you can safely disregard this email.
+
+Thank you.
+{% endblock %}
diff --git a/synapse/res/templates/already_in_use.txt b/synapse/res/templates/already_in_use.txt
new file mode 100644
index 0000000000..c60401a940
--- /dev/null
+++ b/synapse/res/templates/already_in_use.txt
@@ -0,0 +1,10 @@
+Hello there,
+
+You have asked us to register this email with a new Matrix account,
+but this email is already registered with an existing account.
+
+Please reset your password if needed.
+
+If this was not you, you can safely disregard this email.
+
+Thank you.
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index 634ebed2be..5dddbc69be 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -86,12 +86,18 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
self.config = hs.config
if self.hs.config.email.can_verify_email:
- self.mailer = Mailer(
+ self.registration_mailer = Mailer(
hs=self.hs,
app_name=self.config.email.email_app_name,
template_html=self.config.email.email_registration_template_html,
template_text=self.config.email.email_registration_template_text,
)
+ self.already_in_use_mailer = Mailer(
+ hs=self.hs,
+ app_name=self.config.email.email_app_name,
+ template_html=self.config.email.email_already_in_use_template_html,
+ template_text=self.config.email.email_already_in_use_template_text,
+ )
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.email.can_verify_email:
@@ -139,8 +145,10 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
if self.hs.config.server.request_token_inhibit_3pid_errors:
# Make the client think the operation succeeded. See the rationale in the
# comments for request_token_inhibit_3pid_errors.
+ # Still send an email to warn the user that an account already exists.
# Also wait for some random amount of time between 100ms and 1s to make it
# look like we did something.
+ await self.already_in_use_mailer.send_already_in_use_mail(email)
await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
return 200, {"sid": random_string(16)}
@@ -151,7 +159,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
email,
client_secret,
send_attempt,
- self.mailer.send_registration_mail,
+ self.registration_mailer.send_registration_mail,
next_link,
)
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index 859051cdda..694f143eff 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -22,6 +22,7 @@
import datetime
import os
from typing import Any, Dict, List, Tuple
+from unittest.mock import AsyncMock
import pkg_resources
@@ -42,6 +43,7 @@ from synapse.types import JsonDict
from synapse.util import Clock
from tests import unittest
+from tests.server import ThreadedMemoryReactorClock
from tests.unittest import override_config
@@ -58,6 +60,13 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
config["allow_guest_access"] = True
return config
+ def make_homeserver(
+ self, reactor: ThreadedMemoryReactorClock, clock: Clock
+ ) -> HomeServer:
+ hs = super().make_homeserver(reactor, clock)
+ hs.get_send_email_handler()._sendmail = AsyncMock()
+ return hs
+
def test_POST_appservice_registration_valid(self) -> None:
user_id = "@as_user_kermit:test"
as_token = "i_am_an_app_service"
From 646cb6ff2412bfc5180b5d748b95dbe6ef790a0b Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 25 Apr 2024 13:25:26 +0100
Subject: [PATCH 022/503] Add type annotation to `visited_chains` (#17125)
This should fix CI on `develop`. Broke in
https://github.com/element-hq/synapse/commit/0fe9e1f7dafa80f3e02762f7ae75cefee5b3316c,
presumably due to a `mypy` dependency upgrade.
---
changelog.d/17125.misc | 1 +
synapse/storage/databases/main/events.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17125.misc
diff --git a/changelog.d/17125.misc b/changelog.d/17125.misc
new file mode 100644
index 0000000000..a7d9ce6491
--- /dev/null
+++ b/changelog.d/17125.misc
@@ -0,0 +1 @@
+Fix type annotation for `visited_chains` after `mypy` upgrade.
\ No newline at end of file
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 1e731d56bd..990698aa5c 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -2454,7 +2454,7 @@ class _LinkMap:
return target_seq <= src_seq
# We have to graph traverse the links to check for indirect paths.
- visited_chains = collections.Counter()
+ visited_chains: Dict[int, int] = collections.Counter()
search = [(src_chain, src_seq)]
while search:
chain, seq = search.pop()
From 2e92b718d5ea063af4b2dc9412dcd2ce625b4987 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Thu, 25 Apr 2024 14:50:12 +0200
Subject: [PATCH 023/503] MSC4108 implementation (#17056)
Co-authored-by: Hugh Nimmo-Smith
Co-authored-by: Hugh Nimmo-Smith
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
Cargo.lock | 164 ++++++++-
changelog.d/17056.feature | 1 +
rust/Cargo.toml | 4 +
rust/src/lib.rs | 2 +
rust/src/rendezvous/mod.rs | 315 +++++++++++++++++
rust/src/rendezvous/session.rs | 91 +++++
synapse/config/experimental.py | 12 +-
synapse/http/server.py | 5 +-
synapse/rest/client/rendezvous.py | 16 +
synapse/rest/client/versions.py | 9 +-
synapse/rest/synapse/client/__init__.py | 4 +
synapse/rest/synapse/client/rendezvous.py | 58 ++++
synapse/server.py | 5 +
synapse/synapse_rust/rendezvous.pyi | 30 ++
tests/rest/client/test_rendezvous.py | 401 +++++++++++++++++++++-
tests/server.py | 7 +-
tests/unittest.py | 5 +
17 files changed, 1120 insertions(+), 9 deletions(-)
create mode 100644 changelog.d/17056.feature
create mode 100644 rust/src/rendezvous/mod.rs
create mode 100644 rust/src/rendezvous/session.rs
create mode 100644 synapse/rest/synapse/client/rendezvous.py
create mode 100644 synapse/synapse_rust/rendezvous.pyi
diff --git a/Cargo.lock b/Cargo.lock
index faac6b3c8a..4474dfb903 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -59,6 +59,12 @@ dependencies = [
"generic-array",
]
+[[package]]
+name = "bumpalo"
+version = "3.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
+
[[package]]
name = "bytes"
version = "1.6.0"
@@ -92,9 +98,9 @@ dependencies = [
[[package]]
name = "digest"
-version = "0.10.5"
+version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
"crypto-common",
@@ -117,6 +123,19 @@ dependencies = [
"version_check",
]
+[[package]]
+name = "getrandom"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "libc",
+ "wasi",
+ "wasm-bindgen",
+]
+
[[package]]
name = "headers"
version = "0.4.0"
@@ -182,6 +201,15 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
+[[package]]
+name = "js-sys"
+version = "0.3.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d"
+dependencies = [
+ "wasm-bindgen",
+]
+
[[package]]
name = "lazy_static"
version = "1.4.0"
@@ -266,6 +294,12 @@ version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"
+[[package]]
+name = "ppv-lite86"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
+
[[package]]
name = "proc-macro2"
version = "1.0.76"
@@ -369,6 +403,36 @@ dependencies = [
"proc-macro2",
]
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom",
+]
+
[[package]]
name = "redox_syscall"
version = "0.2.16"
@@ -461,6 +525,17 @@ dependencies = [
"digest",
]
+[[package]]
+name = "sha2"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest",
+]
+
[[package]]
name = "smallvec"
version = "1.10.0"
@@ -489,6 +564,7 @@ name = "synapse"
version = "0.1.0"
dependencies = [
"anyhow",
+ "base64",
"blake2",
"bytes",
"headers",
@@ -496,12 +572,15 @@ dependencies = [
"http",
"lazy_static",
"log",
+ "mime",
"pyo3",
"pyo3-log",
"pythonize",
"regex",
"serde",
"serde_json",
+ "sha2",
+ "ulid",
]
[[package]]
@@ -516,6 +595,17 @@ version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
+[[package]]
+name = "ulid"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259"
+dependencies = [
+ "getrandom",
+ "rand",
+ "web-time",
+]
+
[[package]]
name = "unicode-ident"
version = "1.0.5"
@@ -534,6 +624,76 @@ version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
+
+[[package]]
+name = "web-time"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
[[package]]
name = "windows-sys"
version = "0.36.1"
diff --git a/changelog.d/17056.feature b/changelog.d/17056.feature
new file mode 100644
index 0000000000..b4cbe849e4
--- /dev/null
+++ b/changelog.d/17056.feature
@@ -0,0 +1 @@
+Implement the rendezvous mechanism described by MSC4108.
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index 9ac766182b..d41a216d1c 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -23,11 +23,13 @@ name = "synapse.synapse_rust"
[dependencies]
anyhow = "1.0.63"
+base64 = "0.21.7"
bytes = "1.6.0"
headers = "0.4.0"
http = "1.1.0"
lazy_static = "1.4.0"
log = "0.4.17"
+mime = "0.3.17"
pyo3 = { version = "0.20.0", features = [
"macros",
"anyhow",
@@ -37,8 +39,10 @@ pyo3 = { version = "0.20.0", features = [
pyo3-log = "0.9.0"
pythonize = "0.20.0"
regex = "1.6.0"
+sha2 = "0.10.8"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"
+ulid = "1.1.2"
[features]
extension-module = ["pyo3/extension-module"]
diff --git a/rust/src/lib.rs b/rust/src/lib.rs
index 36a3d64528..9bd1f17ad9 100644
--- a/rust/src/lib.rs
+++ b/rust/src/lib.rs
@@ -7,6 +7,7 @@ pub mod errors;
pub mod events;
pub mod http;
pub mod push;
+pub mod rendezvous;
lazy_static! {
static ref LOGGING_HANDLE: ResetHandle = pyo3_log::init();
@@ -45,6 +46,7 @@ fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
acl::register_module(py, m)?;
push::register_module(py, m)?;
events::register_module(py, m)?;
+ rendezvous::register_module(py, m)?;
Ok(())
}
diff --git a/rust/src/rendezvous/mod.rs b/rust/src/rendezvous/mod.rs
new file mode 100644
index 0000000000..c0f5d8b600
--- /dev/null
+++ b/rust/src/rendezvous/mod.rs
@@ -0,0 +1,315 @@
+/*
+ * This file is licensed under the Affero General Public License (AGPL) version 3.
+ *
+ * Copyright (C) 2024 New Vector, Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * See the GNU Affero General Public License for more details:
+ * <https://www.gnu.org/licenses/agpl-3.0.html>.
+ *
+ */
+
+use std::{
+ collections::{BTreeMap, HashMap},
+ time::{Duration, SystemTime},
+};
+
+use bytes::Bytes;
+use headers::{
+ AccessControlAllowOrigin, AccessControlExposeHeaders, CacheControl, ContentLength, ContentType,
+ HeaderMapExt, IfMatch, IfNoneMatch, Pragma,
+};
+use http::{header::ETAG, HeaderMap, Response, StatusCode, Uri};
+use mime::Mime;
+use pyo3::{
+ exceptions::PyValueError, pyclass, pymethods, types::PyModule, Py, PyAny, PyObject, PyResult,
+ Python, ToPyObject,
+};
+use ulid::Ulid;
+
+use self::session::Session;
+use crate::{
+ errors::{NotFoundError, SynapseError},
+ http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt},
+};
+
+mod session;
+
+// n.b. Because OPTIONS requests are handled by the Python code, we don't need to set Access-Control-Allow-Headers.
+fn prepare_headers(headers: &mut HeaderMap, session: &Session) {
+ headers.typed_insert(AccessControlAllowOrigin::ANY);
+ headers.typed_insert(AccessControlExposeHeaders::from_iter([ETAG]));
+ headers.typed_insert(Pragma::no_cache());
+ headers.typed_insert(CacheControl::new().with_no_store());
+ headers.typed_insert(session.etag());
+ headers.typed_insert(session.expires());
+ headers.typed_insert(session.last_modified());
+}
+
+#[pyclass]
+struct RendezvousHandler {
+ base: Uri,
+ clock: PyObject,
+ sessions: BTreeMap<Ulid, Session>,
+ capacity: usize,
+ max_content_length: u64,
+ ttl: Duration,
+}
+
+impl RendezvousHandler {
+ /// Check the input headers of a request which sets data for a session, and return the content type.
+ fn check_input_headers(&self, headers: &HeaderMap) -> PyResult<Mime> {
+ let ContentLength(content_length) = headers.typed_get_required()?;
+
+ if content_length > self.max_content_length {
+ return Err(SynapseError::new(
+ StatusCode::PAYLOAD_TOO_LARGE,
+ "Payload too large".to_owned(),
+ "M_TOO_LARGE",
+ None,
+ None,
+ ));
+ }
+
+ let content_type: ContentType = headers.typed_get_required()?;
+
+ // Content-Type must be text/plain
+ if content_type != ContentType::text() {
+ return Err(SynapseError::new(
+ StatusCode::BAD_REQUEST,
+ "Content-Type must be text/plain".to_owned(),
+ "M_INVALID_PARAM",
+ None,
+ None,
+ ));
+ }
+
+ Ok(content_type.into())
+ }
+
+ /// Evict expired sessions and remove the oldest sessions until we're under the capacity.
+ fn evict(&mut self, now: SystemTime) {
+ // First remove all the entries which expired
+ self.sessions.retain(|_, session| !session.expired(now));
+
+ // Then we remove the oldest entries until we're under the limit
+ while self.sessions.len() > self.capacity {
+ self.sessions.pop_first();
+ }
+ }
+}
+
+#[pymethods]
+impl RendezvousHandler {
+ #[new]
+ #[pyo3(signature = (homeserver, /, capacity=100, max_content_length=4*1024, eviction_interval=60*1000, ttl=60*1000))]
+ fn new(
+ py: Python<'_>,
+ homeserver: &PyAny,
+ capacity: usize,
+ max_content_length: u64,
+ eviction_interval: u64,
+ ttl: u64,
+ ) -> PyResult<Py<Self>> {
+ let base: String = homeserver
+ .getattr("config")?
+ .getattr("server")?
+ .getattr("public_baseurl")?
+ .extract()?;
+ let base = Uri::try_from(format!("{base}_synapse/client/rendezvous"))
+ .map_err(|_| PyValueError::new_err("Invalid base URI"))?;
+
+ let clock = homeserver.call_method0("get_clock")?.to_object(py);
+
+ // Construct a Python object so that we can get a reference to the
+ // evict method and schedule it to run.
+ let self_ = Py::new(
+ py,
+ Self {
+ base,
+ clock,
+ sessions: BTreeMap::new(),
+ capacity,
+ max_content_length,
+ ttl: Duration::from_millis(ttl),
+ },
+ )?;
+
+ let evict = self_.getattr(py, "_evict")?;
+ homeserver.call_method0("get_clock")?.call_method(
+ "looping_call",
+ (evict, eviction_interval),
+ None,
+ )?;
+
+ Ok(self_)
+ }
+
+ fn _evict(&mut self, py: Python<'_>) -> PyResult<()> {
+ let clock = self.clock.as_ref(py);
+ let now: u64 = clock.call_method0("time_msec")?.extract()?;
+ let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);
+ self.evict(now);
+
+ Ok(())
+ }
+
+ fn handle_post(&mut self, py: Python<'_>, twisted_request: &PyAny) -> PyResult<()> {
+ let request = http_request_from_twisted(twisted_request)?;
+
+ let content_type = self.check_input_headers(request.headers())?;
+
+ let clock = self.clock.as_ref(py);
+ let now: u64 = clock.call_method0("time_msec")?.extract()?;
+ let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);
+
+ // We trigger an immediate eviction if we're at 2x the capacity
+ if self.sessions.len() >= self.capacity * 2 {
+ self.evict(now);
+ }
+
+ // Generate a new ULID for the session from the current time.
+ let id = Ulid::from_datetime(now);
+
+ let uri = format!("{base}/{id}", base = self.base);
+
+ let body = request.into_body();
+
+ let session = Session::new(body, content_type, now, self.ttl);
+
+ let response = serde_json::json!({
+ "url": uri,
+ })
+ .to_string();
+
+ let mut response = Response::new(response.as_bytes());
+ *response.status_mut() = StatusCode::CREATED;
+ response.headers_mut().typed_insert(ContentType::json());
+ prepare_headers(response.headers_mut(), &session);
+ http_response_to_twisted(twisted_request, response)?;
+
+ self.sessions.insert(id, session);
+
+ Ok(())
+ }
+
+ fn handle_get(&mut self, py: Python<'_>, twisted_request: &PyAny, id: &str) -> PyResult<()> {
+ let request = http_request_from_twisted(twisted_request)?;
+
+ let if_none_match: Option<IfNoneMatch> = request.headers().typed_get_optional()?;
+
+ let now: u64 = self.clock.call_method0(py, "time_msec")?.extract(py)?;
+ let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);
+
+ let id: Ulid = id.parse().map_err(|_| NotFoundError::new())?;
+ let session = self
+ .sessions
+ .get(&id)
+ .filter(|s| !s.expired(now))
+ .ok_or_else(NotFoundError::new)?;
+
+ if let Some(if_none_match) = if_none_match {
+ if !if_none_match.precondition_passes(&session.etag()) {
+ let mut response = Response::new(Bytes::new());
+ *response.status_mut() = StatusCode::NOT_MODIFIED;
+ prepare_headers(response.headers_mut(), session);
+ http_response_to_twisted(twisted_request, response)?;
+ return Ok(());
+ }
+ }
+
+ let mut response = Response::new(session.data());
+ *response.status_mut() = StatusCode::OK;
+ let headers = response.headers_mut();
+ prepare_headers(headers, session);
+ headers.typed_insert(session.content_type());
+ headers.typed_insert(session.content_length());
+ http_response_to_twisted(twisted_request, response)?;
+
+ Ok(())
+ }
+
+ fn handle_put(&mut self, py: Python<'_>, twisted_request: &PyAny, id: &str) -> PyResult<()> {
+ let request = http_request_from_twisted(twisted_request)?;
+
+ let content_type = self.check_input_headers(request.headers())?;
+
+ let if_match: IfMatch = request.headers().typed_get_required()?;
+
+ let data = request.into_body();
+
+ let now: u64 = self.clock.call_method0(py, "time_msec")?.extract(py)?;
+ let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);
+
+ let id: Ulid = id.parse().map_err(|_| NotFoundError::new())?;
+ let session = self
+ .sessions
+ .get_mut(&id)
+ .filter(|s| !s.expired(now))
+ .ok_or_else(NotFoundError::new)?;
+
+ if !if_match.precondition_passes(&session.etag()) {
+ let mut headers = HeaderMap::new();
+ prepare_headers(&mut headers, session);
+
+ let mut additional_fields = HashMap::with_capacity(1);
+ additional_fields.insert(
+ String::from("org.matrix.msc4108.errcode"),
+ String::from("M_CONCURRENT_WRITE"),
+ );
+
+ return Err(SynapseError::new(
+ StatusCode::PRECONDITION_FAILED,
+ "ETag does not match".to_owned(),
+ "M_UNKNOWN", // Would be M_CONCURRENT_WRITE
+ Some(additional_fields),
+ Some(headers),
+ ));
+ }
+
+ session.update(data, content_type, now);
+
+ let mut response = Response::new(Bytes::new());
+ *response.status_mut() = StatusCode::ACCEPTED;
+ prepare_headers(response.headers_mut(), session);
+ http_response_to_twisted(twisted_request, response)?;
+
+ Ok(())
+ }
+
+ fn handle_delete(&mut self, twisted_request: &PyAny, id: &str) -> PyResult<()> {
+ let _request = http_request_from_twisted(twisted_request)?;
+
+ let id: Ulid = id.parse().map_err(|_| NotFoundError::new())?;
+ let _session = self.sessions.remove(&id).ok_or_else(NotFoundError::new)?;
+
+ let mut response = Response::new(Bytes::new());
+ *response.status_mut() = StatusCode::NO_CONTENT;
+ response
+ .headers_mut()
+ .typed_insert(AccessControlAllowOrigin::ANY);
+ http_response_to_twisted(twisted_request, response)?;
+
+ Ok(())
+ }
+}
+
+pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
+ let child_module = PyModule::new(py, "rendezvous")?;
+
+ child_module.add_class::<RendezvousHandler>()?;
+
+ m.add_submodule(child_module)?;
+
+ // We need to manually add the module to sys.modules to make `from
+ // synapse.synapse_rust import rendezvous` work.
+ py.import("sys")?
+ .getattr("modules")?
+ .set_item("synapse.synapse_rust.rendezvous", child_module)?;
+
+ Ok(())
+}
diff --git a/rust/src/rendezvous/session.rs b/rust/src/rendezvous/session.rs
new file mode 100644
index 0000000000..179304edfe
--- /dev/null
+++ b/rust/src/rendezvous/session.rs
@@ -0,0 +1,91 @@
+/*
+ * This file is licensed under the Affero General Public License (AGPL) version 3.
+ *
+ * Copyright (C) 2024 New Vector, Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * See the GNU Affero General Public License for more details:
+ * <https://www.gnu.org/licenses/agpl-3.0.html>.
+ */
+
+use std::time::{Duration, SystemTime};
+
+use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _};
+use bytes::Bytes;
+use headers::{ContentLength, ContentType, ETag, Expires, LastModified};
+use mime::Mime;
+use sha2::{Digest, Sha256};
+
+/// A single session, containing data, metadata, and expiry information.
+pub struct Session {
+ hash: [u8; 32],
+ data: Bytes,
+ content_type: Mime,
+ last_modified: SystemTime,
+ expires: SystemTime,
+}
+
+impl Session {
+ /// Create a new session with the given data, content type, and time-to-live.
+ pub fn new(data: Bytes, content_type: Mime, now: SystemTime, ttl: Duration) -> Self {
+ let hash = Sha256::digest(&data).into();
+ Self {
+ hash,
+ data,
+ content_type,
+ expires: now + ttl,
+ last_modified: now,
+ }
+ }
+
+ /// Returns true if the session has expired at the given time.
+ pub fn expired(&self, now: SystemTime) -> bool {
+ self.expires <= now
+ }
+
+ /// Update the session with new data, content type, and last modified time.
+ pub fn update(&mut self, data: Bytes, content_type: Mime, now: SystemTime) {
+ self.hash = Sha256::digest(&data).into();
+ self.data = data;
+ self.content_type = content_type;
+ self.last_modified = now;
+ }
+
+ /// Returns the Content-Type header of the session.
+ pub fn content_type(&self) -> ContentType {
+ self.content_type.clone().into()
+ }
+
+ /// Returns the Content-Length header of the session.
+ pub fn content_length(&self) -> ContentLength {
+ ContentLength(self.data.len() as _)
+ }
+
+ /// Returns the ETag header of the session.
+ pub fn etag(&self) -> ETag {
+ let encoded = URL_SAFE_NO_PAD.encode(self.hash);
+ // SAFETY: Base64 encoding is URL-safe, so ETag-safe
+ format!("\"{encoded}\"")
+ .parse()
+ .expect("base64-encoded hash should be URL-safe")
+ }
+
+ /// Returns the Last-Modified header of the session.
+ pub fn last_modified(&self) -> LastModified {
+ self.last_modified.into()
+ }
+
+ /// Returns the Expires header of the session.
+ pub fn expires(&self) -> Expires {
+ self.expires.into()
+ }
+
+ /// Returns the current data stored in the session.
+ pub fn data(&self) -> Bytes {
+ self.data.clone()
+ }
+}
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 353ae23f91..baa3580f29 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -413,12 +413,22 @@ class ExperimentalConfig(Config):
)
# MSC4108: Mechanism to allow OIDC sign in and E2EE set up via QR code
+ self.msc4108_enabled = experimental.get("msc4108_enabled", False)
+
self.msc4108_delegation_endpoint: Optional[str] = experimental.get(
"msc4108_delegation_endpoint", None
)
- if self.msc4108_delegation_endpoint is not None and not self.msc3861.enabled:
+ if (
+ self.msc4108_enabled or self.msc4108_delegation_endpoint is not None
+ ) and not self.msc3861.enabled:
raise ConfigError(
"MSC4108 requires MSC3861 to be enabled",
("experimental", "msc4108_delegation_endpoint"),
)
+
+ if self.msc4108_delegation_endpoint is not None and self.msc4108_enabled:
+ raise ConfigError(
+ "You cannot have MSC4108 both enabled and delegated at the same time",
+ ("experimental", "msc4108_delegation_endpoint"),
+ )
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 45b2cbffcd..211795dc39 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -909,8 +909,9 @@ def set_cors_headers(request: "SynapseRequest") -> None:
request.setHeader(
b"Access-Control-Allow-Methods", b"GET, HEAD, POST, PUT, DELETE, OPTIONS"
)
- if request.path is not None and request.path.startswith(
- b"/_matrix/client/unstable/org.matrix.msc4108/rendezvous"
+ if request.path is not None and (
+ request.path == b"/_matrix/client/unstable/org.matrix.msc4108/rendezvous"
+ or request.path.startswith(b"/_synapse/client/rendezvous")
):
request.setHeader(
b"Access-Control-Allow-Headers",
diff --git a/synapse/rest/client/rendezvous.py b/synapse/rest/client/rendezvous.py
index ed06a29987..143f057651 100644
--- a/synapse/rest/client/rendezvous.py
+++ b/synapse/rest/client/rendezvous.py
@@ -97,9 +97,25 @@ class MSC4108DelegationRendezvousServlet(RestServlet):
)
+class MSC4108RendezvousServlet(RestServlet):
+ PATTERNS = client_patterns(
+ "/org.matrix.msc4108/rendezvous$", releases=[], v1=False, unstable=True
+ )
+
+ def __init__(self, hs: "HomeServer") -> None:
+ super().__init__()
+ self._handler = hs.get_rendezvous_handler()
+
+ def on_POST(self, request: SynapseRequest) -> None:
+ self._handler.handle_post(request)
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
if hs.config.experimental.msc3886_endpoint is not None:
MSC3886RendezvousServlet(hs).register(http_server)
+ if hs.config.experimental.msc4108_enabled:
+ MSC4108RendezvousServlet(hs).register(http_server)
+
if hs.config.experimental.msc4108_delegation_endpoint is not None:
MSC4108DelegationRendezvousServlet(hs).register(http_server)
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 638d4c45ae..fa453a3b02 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -141,8 +141,13 @@ class VersionsRestServlet(RestServlet):
# Allows clients to handle push for encrypted events.
"org.matrix.msc4028": self.config.experimental.msc4028_push_encrypted_events,
# MSC4108: Mechanism to allow OIDC sign in and E2EE set up via QR code
- "org.matrix.msc4108": self.config.experimental.msc4108_delegation_endpoint
- is not None,
+ "org.matrix.msc4108": (
+ self.config.experimental.msc4108_enabled
+ or (
+ self.config.experimental.msc4108_delegation_endpoint
+ is not None
+ )
+ ),
},
},
)
diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py
index 31544867d4..ba6576d4db 100644
--- a/synapse/rest/synapse/client/__init__.py
+++ b/synapse/rest/synapse/client/__init__.py
@@ -26,6 +26,7 @@ from twisted.web.resource import Resource
from synapse.rest.synapse.client.new_user_consent import NewUserConsentResource
from synapse.rest.synapse.client.pick_idp import PickIdpResource
from synapse.rest.synapse.client.pick_username import pick_username_resource
+from synapse.rest.synapse.client.rendezvous import MSC4108RendezvousSessionResource
from synapse.rest.synapse.client.sso_register import SsoRegisterResource
from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource
@@ -76,6 +77,9 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc
# To be removed in Synapse v1.32.0.
resources["/_matrix/saml2"] = res
+ if hs.config.experimental.msc4108_enabled:
+ resources["/_synapse/client/rendezvous"] = MSC4108RendezvousSessionResource(hs)
+
return resources
diff --git a/synapse/rest/synapse/client/rendezvous.py b/synapse/rest/synapse/client/rendezvous.py
new file mode 100644
index 0000000000..5216d30d1f
--- /dev/null
+++ b/synapse/rest/synapse/client/rendezvous.py
@@ -0,0 +1,58 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+#
+
+import logging
+from typing import TYPE_CHECKING, List
+
+from synapse.api.errors import UnrecognizedRequestError
+from synapse.http.server import DirectServeJsonResource
+from synapse.http.site import SynapseRequest
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class MSC4108RendezvousSessionResource(DirectServeJsonResource):
+ isLeaf = True
+
+ def __init__(self, hs: "HomeServer") -> None:
+ super().__init__()
+ self._handler = hs.get_rendezvous_handler()
+
+ async def _async_render_GET(self, request: SynapseRequest) -> None:
+ postpath: List[bytes] = request.postpath # type: ignore
+ if len(postpath) != 1:
+ raise UnrecognizedRequestError()
+ session_id = postpath[0].decode("ascii")
+
+ self._handler.handle_get(request, session_id)
+
+ def _async_render_PUT(self, request: SynapseRequest) -> None:
+ postpath: List[bytes] = request.postpath # type: ignore
+ if len(postpath) != 1:
+ raise UnrecognizedRequestError()
+ session_id = postpath[0].decode("ascii")
+
+ self._handler.handle_put(request, session_id)
+
+ def _async_render_DELETE(self, request: SynapseRequest) -> None:
+ postpath: List[bytes] = request.postpath # type: ignore
+ if len(postpath) != 1:
+ raise UnrecognizedRequestError()
+ session_id = postpath[0].decode("ascii")
+
+ self._handler.handle_delete(request, session_id)
diff --git a/synapse/server.py b/synapse/server.py
index 6d5a18fb1d..95e319d2e6 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -143,6 +143,7 @@ from synapse.state import StateHandler, StateResolutionHandler
from synapse.storage import Databases
from synapse.storage.controllers import StorageControllers
from synapse.streams.events import EventSources
+from synapse.synapse_rust.rendezvous import RendezvousHandler
from synapse.types import DomainSpecificString, ISynapseReactor
from synapse.util import Clock
from synapse.util.distributor import Distributor
@@ -859,6 +860,10 @@ class HomeServer(metaclass=abc.ABCMeta):
def get_room_forgetter_handler(self) -> RoomForgetterHandler:
return RoomForgetterHandler(self)
+ @cache_in_self
+ def get_rendezvous_handler(self) -> RendezvousHandler:
+ return RendezvousHandler(self)
+
@cache_in_self
def get_outbound_redis_connection(self) -> "ConnectionHandler":
"""
diff --git a/synapse/synapse_rust/rendezvous.pyi b/synapse/synapse_rust/rendezvous.pyi
new file mode 100644
index 0000000000..03eae3a196
--- /dev/null
+++ b/synapse/synapse_rust/rendezvous.pyi
@@ -0,0 +1,30 @@
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+from twisted.web.iweb import IRequest
+
+from synapse.server import HomeServer
+
+class RendezvousHandler:
+ def __init__(
+ self,
+ homeserver: HomeServer,
+ /,
+ capacity: int = 100,
+ max_content_length: int = 4 * 1024, # MSC4108 specifies 4KB
+ eviction_interval: int = 60 * 1000,
+ ttl: int = 60 * 1000,
+ ) -> None: ...
+ def handle_post(self, request: IRequest) -> None: ...
+ def handle_get(self, request: IRequest, session_id: str) -> None: ...
+ def handle_put(self, request: IRequest, session_id: str) -> None: ...
+ def handle_delete(self, request: IRequest, session_id: str) -> None: ...
diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py
index c84704c090..0ab754a11a 100644
--- a/tests/rest/client/test_rendezvous.py
+++ b/tests/rest/client/test_rendezvous.py
@@ -2,7 +2,7 @@
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2022 The Matrix.org Foundation C.I.C.
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -19,9 +19,14 @@
#
#
+from typing import Dict
+from urllib.parse import urlparse
+
from twisted.test.proto_helpers import MemoryReactor
+from twisted.web.resource import Resource
from synapse.rest.client import rendezvous
+from synapse.rest.synapse.client.rendezvous import MSC4108RendezvousSessionResource
from synapse.server import HomeServer
from synapse.util import Clock
@@ -42,6 +47,12 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase):
self.hs = self.setup_test_homeserver()
return self.hs
+ def create_resource_dict(self) -> Dict[str, Resource]:
+ return {
+ **super().create_resource_dict(),
+ "/_synapse/client/rendezvous": MSC4108RendezvousSessionResource(self.hs),
+ }
+
def test_disabled(self) -> None:
channel = self.make_request("POST", msc3886_endpoint, {}, access_token=None)
self.assertEqual(channel.code, 404)
@@ -75,3 +86,391 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", msc4108_endpoint, {}, access_token=None)
self.assertEqual(channel.code, 307)
self.assertEqual(channel.headers.getRawHeaders("Location"), ["https://asd"])
+
+ @unittest.skip_unless(HAS_AUTHLIB, "requires authlib")
+ @override_config(
+ {
+ "disable_registration": True,
+ "experimental_features": {
+ "msc4108_enabled": True,
+ "msc3861": {
+ "enabled": True,
+ "issuer": "https://issuer",
+ "client_id": "client_id",
+ "client_auth_method": "client_secret_post",
+ "client_secret": "client_secret",
+ "admin_token": "admin_token_value",
+ },
+ },
+ }
+ )
+ def test_msc4108(self) -> None:
+ """
+ Test the MSC4108 rendezvous endpoint, including:
+ - Creating a session
+ - Getting the data back
+ - Updating the data
+ - Deleting the data
+ - ETag handling
+ """
+ # We can post arbitrary data to the endpoint
+ channel = self.make_request(
+ "POST",
+ msc4108_endpoint,
+ "foo=bar",
+ content_type=b"text/plain",
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 201)
+ self.assertSubstring("/_synapse/client/rendezvous/", channel.json_body["url"])
+ headers = dict(channel.headers.getAllRawHeaders())
+ self.assertIn(b"ETag", headers)
+ self.assertIn(b"Expires", headers)
+ self.assertEqual(headers[b"Content-Type"], [b"application/json"])
+ self.assertEqual(headers[b"Access-Control-Allow-Origin"], [b"*"])
+ self.assertEqual(headers[b"Access-Control-Expose-Headers"], [b"etag"])
+ self.assertEqual(headers[b"Cache-Control"], [b"no-store"])
+ self.assertEqual(headers[b"Pragma"], [b"no-cache"])
+ self.assertIn("url", channel.json_body)
+ self.assertTrue(channel.json_body["url"].startswith("https://"))
+
+ url = urlparse(channel.json_body["url"])
+ session_endpoint = url.path
+ etag = headers[b"ETag"][0]
+
+ # We can get the data back
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ )
+
+ self.assertEqual(channel.code, 200)
+ headers = dict(channel.headers.getAllRawHeaders())
+ self.assertEqual(headers[b"ETag"], [etag])
+ self.assertIn(b"Expires", headers)
+ self.assertEqual(headers[b"Content-Type"], [b"text/plain"])
+ self.assertEqual(headers[b"Access-Control-Allow-Origin"], [b"*"])
+ self.assertEqual(headers[b"Access-Control-Expose-Headers"], [b"etag"])
+ self.assertEqual(headers[b"Cache-Control"], [b"no-store"])
+ self.assertEqual(headers[b"Pragma"], [b"no-cache"])
+ self.assertEqual(channel.text_body, "foo=bar")
+
+ # We can make sure the data hasn't changed
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ custom_headers=[("If-None-Match", etag)],
+ )
+
+ self.assertEqual(channel.code, 304)
+
+ # We can update the data
+ channel = self.make_request(
+ "PUT",
+ session_endpoint,
+ "foo=baz",
+ content_type=b"text/plain",
+ access_token=None,
+ custom_headers=[("If-Match", etag)],
+ )
+
+ self.assertEqual(channel.code, 202)
+ headers = dict(channel.headers.getAllRawHeaders())
+ old_etag = etag
+ new_etag = headers[b"ETag"][0]
+
+ # If we try to update it again with the old etag, it should fail
+ channel = self.make_request(
+ "PUT",
+ session_endpoint,
+ "bar=baz",
+ content_type=b"text/plain",
+ access_token=None,
+ custom_headers=[("If-Match", old_etag)],
+ )
+
+ self.assertEqual(channel.code, 412)
+ self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN")
+ self.assertEqual(
+ channel.json_body["org.matrix.msc4108.errcode"], "M_CONCURRENT_WRITE"
+ )
+
+ # If we try to get with the old etag, we should get the updated data
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ custom_headers=[("If-None-Match", old_etag)],
+ )
+
+ self.assertEqual(channel.code, 200)
+ headers = dict(channel.headers.getAllRawHeaders())
+ self.assertEqual(headers[b"ETag"], [new_etag])
+ self.assertEqual(channel.text_body, "foo=baz")
+
+ # We can delete the data
+ channel = self.make_request(
+ "DELETE",
+ session_endpoint,
+ access_token=None,
+ )
+
+ self.assertEqual(channel.code, 204)
+
+ # If we try to get the data again, it should fail
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ )
+
+ self.assertEqual(channel.code, 404)
+ self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND")
+
+ @unittest.skip_unless(HAS_AUTHLIB, "requires authlib")
+ @override_config(
+ {
+ "disable_registration": True,
+ "experimental_features": {
+ "msc4108_enabled": True,
+ "msc3861": {
+ "enabled": True,
+ "issuer": "https://issuer",
+ "client_id": "client_id",
+ "client_auth_method": "client_secret_post",
+ "client_secret": "client_secret",
+ "admin_token": "admin_token_value",
+ },
+ },
+ }
+ )
+ def test_msc4108_expiration(self) -> None:
+ """
+ Test that entries are evicted after a TTL.
+ """
+ # Start a new session
+ channel = self.make_request(
+ "POST",
+ msc4108_endpoint,
+ "foo=bar",
+ content_type=b"text/plain",
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 201)
+ session_endpoint = urlparse(channel.json_body["url"]).path
+
+ # Sanity check that we can get the data back
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.text_body, "foo=bar")
+
+ # Advance the clock, TTL of entries is 1 minute
+ self.reactor.advance(60)
+
+ # Get the data back, it should be gone
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 404)
+
+ @unittest.skip_unless(HAS_AUTHLIB, "requires authlib")
+ @override_config(
+ {
+ "disable_registration": True,
+ "experimental_features": {
+ "msc4108_enabled": True,
+ "msc3861": {
+ "enabled": True,
+ "issuer": "https://issuer",
+ "client_id": "client_id",
+ "client_auth_method": "client_secret_post",
+ "client_secret": "client_secret",
+ "admin_token": "admin_token_value",
+ },
+ },
+ }
+ )
+ def test_msc4108_capacity(self) -> None:
+ """
+ Test that a capacity limit is enforced on the rendezvous sessions, as old
+ entries are evicted at an interval when the limit is reached.
+ """
+ # Start a new session
+ channel = self.make_request(
+ "POST",
+ msc4108_endpoint,
+ "foo=bar",
+ content_type=b"text/plain",
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 201)
+ session_endpoint = urlparse(channel.json_body["url"]).path
+
+ # Sanity check that we can get the data back
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.text_body, "foo=bar")
+
+ # Start a lot of new sessions
+ for _ in range(100):
+ channel = self.make_request(
+ "POST",
+ msc4108_endpoint,
+ "foo=bar",
+ content_type=b"text/plain",
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 201)
+
+ # Get the data back, it should still be there, as the eviction hasn't run yet
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ )
+
+ self.assertEqual(channel.code, 200)
+
+ # Advance the clock, as it will trigger the eviction
+ self.reactor.advance(1)
+
+ # Get the data back, it should be gone
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 404)
+
+ @unittest.skip_unless(HAS_AUTHLIB, "requires authlib")
+ @override_config(
+ {
+ "disable_registration": True,
+ "experimental_features": {
+ "msc4108_enabled": True,
+ "msc3861": {
+ "enabled": True,
+ "issuer": "https://issuer",
+ "client_id": "client_id",
+ "client_auth_method": "client_secret_post",
+ "client_secret": "client_secret",
+ "admin_token": "admin_token_value",
+ },
+ },
+ }
+ )
+ def test_msc4108_hard_capacity(self) -> None:
+ """
+ Test that a hard capacity limit is enforced on the rendezvous sessions, as old
+ entries are evicted immediately when the limit is reached.
+ """
+ # Start a new session
+ channel = self.make_request(
+ "POST",
+ msc4108_endpoint,
+ "foo=bar",
+ content_type=b"text/plain",
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 201)
+ session_endpoint = urlparse(channel.json_body["url"]).path
+ # We advance the clock to make sure that this entry is the "lowest" in the session list
+ self.reactor.advance(1)
+
+ # Sanity check that we can get the data back
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.text_body, "foo=bar")
+
+ # Start a lot of new sessions
+ for _ in range(200):
+ channel = self.make_request(
+ "POST",
+ msc4108_endpoint,
+ "foo=bar",
+ content_type=b"text/plain",
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 201)
+
+ # Get the data back, it should already be gone as we hit the hard limit
+ channel = self.make_request(
+ "GET",
+ session_endpoint,
+ access_token=None,
+ )
+
+ self.assertEqual(channel.code, 404)
+
+ @unittest.skip_unless(HAS_AUTHLIB, "requires authlib")
+ @override_config(
+ {
+ "disable_registration": True,
+ "experimental_features": {
+ "msc4108_enabled": True,
+ "msc3861": {
+ "enabled": True,
+ "issuer": "https://issuer",
+ "client_id": "client_id",
+ "client_auth_method": "client_secret_post",
+ "client_secret": "client_secret",
+ "admin_token": "admin_token_value",
+ },
+ },
+ }
+ )
+ def test_msc4108_content_type(self) -> None:
+ """
+ Test that the content-type is restricted to text/plain.
+ """
+ # We cannot post invalid content-type arbitrary data to the endpoint
+ channel = self.make_request(
+ "POST",
+ msc4108_endpoint,
+ "foo=bar",
+ content_is_form=True,
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 400)
+ self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")
+
+ # Make a valid request
+ channel = self.make_request(
+ "POST",
+ msc4108_endpoint,
+ "foo=bar",
+ content_type=b"text/plain",
+ access_token=None,
+ )
+ self.assertEqual(channel.code, 201)
+ url = urlparse(channel.json_body["url"])
+ session_endpoint = url.path
+ headers = dict(channel.headers.getAllRawHeaders())
+ etag = headers[b"ETag"][0]
+
+ # We can't update the data with invalid content-type
+ channel = self.make_request(
+ "PUT",
+ session_endpoint,
+ "foo=baz",
+ content_is_form=True,
+ access_token=None,
+ custom_headers=[("If-Match", etag)],
+ )
+ self.assertEqual(channel.code, 400)
+ self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")
diff --git a/tests/server.py b/tests/server.py
index 4aaa91e956..434be3d22c 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -351,6 +351,7 @@ def make_request(
request: Type[Request] = SynapseRequest,
shorthand: bool = True,
federation_auth_origin: Optional[bytes] = None,
+ content_type: Optional[bytes] = None,
content_is_form: bool = False,
await_result: bool = True,
custom_headers: Optional[Iterable[CustomHeaderType]] = None,
@@ -373,6 +374,8 @@ def make_request(
with the usual REST API path, if it doesn't contain it.
federation_auth_origin: if set to not-None, we will add a fake
Authorization header pretending to be the given server name.
+ content_type: The content-type to use for the request. If not set, this will default to
+ application/json unless content_is_form is true.
content_is_form: Whether the content is URL encoded form data. Adds the
'Content-Type': 'application/x-www-form-urlencoded' header.
await_result: whether to wait for the request to complete rendering. If true,
@@ -436,7 +439,9 @@ def make_request(
)
if content:
- if content_is_form:
+ if content_type is not None:
+ req.requestHeaders.addRawHeader(b"Content-Type", content_type)
+ elif content_is_form:
req.requestHeaders.addRawHeader(
b"Content-Type", b"application/x-www-form-urlencoded"
)
diff --git a/tests/unittest.py b/tests/unittest.py
index 6fe0cd4a2d..e6aad9ed40 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -523,6 +523,7 @@ class HomeserverTestCase(TestCase):
request: Type[Request] = SynapseRequest,
shorthand: bool = True,
federation_auth_origin: Optional[bytes] = None,
+ content_type: Optional[bytes] = None,
content_is_form: bool = False,
await_result: bool = True,
custom_headers: Optional[Iterable[CustomHeaderType]] = None,
@@ -541,6 +542,9 @@ class HomeserverTestCase(TestCase):
with the usual REST API path, if it doesn't contain it.
federation_auth_origin: if set to not-None, we will add a fake
Authorization header pretending to be the given server name.
+
+ content_type: The content-type to use for the request. If not set, this will default to
+ application/json unless content_is_form is true.
content_is_form: Whether the content is URL encoded form data. Adds the
'Content-Type': 'application/x-www-form-urlencoded' header.
@@ -566,6 +570,7 @@ class HomeserverTestCase(TestCase):
request,
shorthand,
federation_auth_origin,
+ content_type,
content_is_form,
await_result,
custom_headers,
From 47773232b034c0d7b72bb7419a01e772509c8814 Mon Sep 17 00:00:00 2001
From: Till <2353100+S7evinK@users.noreply.github.com>
Date: Thu, 25 Apr 2024 15:25:31 +0200
Subject: [PATCH 024/503] Redact membership events if the user requested
erasure upon deactivating (#17076)
Fixes #15355 by redacting all membership events before leaving rooms.
---
changelog.d/17076.bugfix | 1 +
synapse/handlers/deactivate_account.py | 13 ++++++-
synapse/storage/databases/main/roommember.py | 22 ++++++++++++
tests/handlers/test_deactivate_account.py | 37 ++++++++++++++++++++
4 files changed, 72 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17076.bugfix
diff --git a/changelog.d/17076.bugfix b/changelog.d/17076.bugfix
new file mode 100644
index 0000000000..a111ea2b88
--- /dev/null
+++ b/changelog.d/17076.bugfix
@@ -0,0 +1 @@
+Redact membership events if the user requested erasure upon deactivating.
\ No newline at end of file
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index b13c4b6cb9..11ac377680 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -261,11 +261,22 @@ class DeactivateAccountHandler:
user = UserID.from_string(user_id)
rooms_for_user = await self.store.get_rooms_for_user(user_id)
+ requester = create_requester(user, authenticated_entity=self._server_name)
+ should_erase = await self.store.is_user_erased(user_id)
+
for room_id in rooms_for_user:
logger.info("User parter parting %r from %r", user_id, room_id)
try:
+ # Before parting the user, redact all membership events if requested
+ if should_erase:
+ event_ids = await self.store.get_membership_event_ids_for_user(
+ user_id, room_id
+ )
+ for event_id in event_ids:
+ await self.store.expire_event(event_id)
+
await self._room_member_handler.update_membership(
- create_requester(user, authenticated_entity=self._server_name),
+ requester,
user,
room_id,
"leave",
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 5d51502595..9fddbb2caf 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -1234,6 +1234,28 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
return set(room_ids)
+ async def get_membership_event_ids_for_user(
+ self, user_id: str, room_id: str
+ ) -> Set[str]:
+ """Get all event_ids for the given user and room.
+
+ Args:
+ user_id: The user ID to get the event IDs for.
+ room_id: The room ID to look up events for.
+
+ Returns:
+ Set of event IDs
+ """
+
+ event_ids = await self.db_pool.simple_select_onecol(
+ table="room_memberships",
+ keyvalues={"user_id": user_id, "room_id": room_id},
+ retcol="event_id",
+ desc="get_membership_event_ids_for_user",
+ )
+
+ return set(event_ids)
+
@cached(max_entries=5000)
async def _get_membership_from_event_id(
self, member_event_id: str
diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py
index b3f9e50f0f..c698771a06 100644
--- a/tests/handlers/test_deactivate_account.py
+++ b/tests/handlers/test_deactivate_account.py
@@ -424,3 +424,40 @@ class DeactivateAccountTestCase(HomeserverTestCase):
self._store.get_knocked_at_rooms_for_local_user(self.user)
)
self.assertEqual(len(after_deactivate_knocks), 0)
+
+ def test_membership_is_redacted_upon_deactivation(self) -> None:
+ """
+ Tests that room membership events are redacted if erasure is requested.
+ """
+ # Create a room
+ room_id = self.helper.create_room_as(
+ self.user,
+ is_public=True,
+ tok=self.token,
+ )
+
+ # Change the displayname
+ membership_event, _ = self.get_success(
+ self.handler.update_membership(
+ requester=create_requester(self.user),
+ target=UserID.from_string(self.user),
+ room_id=room_id,
+ action=Membership.JOIN,
+ content={"displayname": "Hello World!"},
+ )
+ )
+
+ # Deactivate the account
+ self._deactivate_my_account()
+
+ # Get all of the membership event IDs
+ membership_event_ids = self.get_success(
+ self._store.get_membership_event_ids_for_user(self.user, room_id=room_id)
+ )
+
+ # Get the events incl. JSON
+ events = self.get_success(self._store.get_events_as_list(membership_event_ids))
+
+ # Validate that there is no displayname in any of the events
+ for event in events:
+ self.assertTrue("displayname" not in event.content)
From 48a90c697b7d6faf1d44273dfe5c4e76467a0bc4 Mon Sep 17 00:00:00 2001
From: Olivier 'reivilibre
Date: Thu, 25 Apr 2024 15:55:18 +0100
Subject: [PATCH 025/503] 1.106.0rc1
---
CHANGES.md | 42 +++++++++++++++++++++++++++++++++++++++
changelog.d/16819.feature | 1 -
changelog.d/16920.bugfix | 1 -
changelog.d/16923.bugfix | 1 -
changelog.d/16943.bugfix | 1 -
changelog.d/17032.misc | 1 -
changelog.d/17036.misc | 1 -
changelog.d/17056.feature | 1 -
changelog.d/17069.doc | 1 -
changelog.d/17076.bugfix | 1 -
changelog.d/17079.misc | 1 -
changelog.d/17081.misc | 1 -
changelog.d/17086.feature | 1 -
changelog.d/17096.misc | 1 -
changelog.d/17099.doc | 1 -
changelog.d/17125.misc | 1 -
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
18 files changed, 49 insertions(+), 16 deletions(-)
delete mode 100644 changelog.d/16819.feature
delete mode 100644 changelog.d/16920.bugfix
delete mode 100644 changelog.d/16923.bugfix
delete mode 100644 changelog.d/16943.bugfix
delete mode 100644 changelog.d/17032.misc
delete mode 100644 changelog.d/17036.misc
delete mode 100644 changelog.d/17056.feature
delete mode 100644 changelog.d/17069.doc
delete mode 100644 changelog.d/17076.bugfix
delete mode 100644 changelog.d/17079.misc
delete mode 100644 changelog.d/17081.misc
delete mode 100644 changelog.d/17086.feature
delete mode 100644 changelog.d/17096.misc
delete mode 100644 changelog.d/17099.doc
delete mode 100644 changelog.d/17125.misc
diff --git a/CHANGES.md b/CHANGES.md
index ec5bc22a98..913e6fbc8c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,45 @@
+# Synapse 1.106.0rc1 (2024-04-25)
+
+### Features
+
+- Send an email if the address is already bound to a user account. ([\#16819](https://github.com/element-hq/synapse/issues/16819))
+- Implement the rendezvous mechanism described by MSC4108. ([\#17056](https://github.com/element-hq/synapse/issues/17056))
+- Support delegating the rendezvous mechanism described MSC4108 to an external implementation. ([\#17086](https://github.com/element-hq/synapse/issues/17086))
+
+### Bugfixes
+
+- Add validation to ensure that the `limit` parameter on `/publicRooms` is non-negative. ([\#16920](https://github.com/element-hq/synapse/issues/16920))
+- Return `400 M_NOT_JSON` upon receiving invalid JSON in query parameters across various client and admin endpoints, rather than an internal server error. ([\#16923](https://github.com/element-hq/synapse/issues/16923))
+- Make the CSAPI endpoint `/keys/device_signing/upload` idempotent. ([\#16943](https://github.com/element-hq/synapse/issues/16943))
+- Redact membership events if the user requested erasure upon deactivating. ([\#17076](https://github.com/element-hq/synapse/issues/17076))
+
+### Improved Documentation
+
+- Add a prompt in the contributing guide to manually configure icu4c. ([\#17069](https://github.com/element-hq/synapse/issues/17069))
+- Clarify what part of message retention is still experimental. ([\#17099](https://github.com/element-hq/synapse/issues/17099))
+
+### Internal Changes
+
+- Use new receipts column to optimise receipt and push action SQL queries. Contributed by Nick @ Beeper (@fizzadar). ([\#17032](https://github.com/element-hq/synapse/issues/17032), [\#17096](https://github.com/element-hq/synapse/issues/17096))
+- Fix mypy with latest Twisted release. ([\#17036](https://github.com/element-hq/synapse/issues/17036))
+- Bump minimum supported Rust version to 1.66.0. ([\#17079](https://github.com/element-hq/synapse/issues/17079))
+- Add helpers to transform Twisted requests to Rust http Requests/Responses. ([\#17081](https://github.com/element-hq/synapse/issues/17081))
+- Fix type annotation for `visited_chains` after `mypy` upgrade. ([\#17125](https://github.com/element-hq/synapse/issues/17125))
+
+
+
+### Updates to locked dependencies
+
+* Bump anyhow from 1.0.81 to 1.0.82. ([\#17095](https://github.com/element-hq/synapse/issues/17095))
+* Bump peaceiris/actions-gh-pages from 3.9.3 to 4.0.0. ([\#17087](https://github.com/element-hq/synapse/issues/17087))
+* Bump peaceiris/actions-mdbook from 1.2.0 to 2.0.0. ([\#17089](https://github.com/element-hq/synapse/issues/17089))
+* Bump pyasn1-modules from 0.3.0 to 0.4.0. ([\#17093](https://github.com/element-hq/synapse/issues/17093))
+* Bump pygithub from 2.2.0 to 2.3.0. ([\#17092](https://github.com/element-hq/synapse/issues/17092))
+* Bump ruff from 0.3.5 to 0.3.7. ([\#17094](https://github.com/element-hq/synapse/issues/17094))
+* Bump sigstore/cosign-installer from 3.4.0 to 3.5.0. ([\#17088](https://github.com/element-hq/synapse/issues/17088))
+* Bump twine from 4.0.2 to 5.0.0. ([\#17091](https://github.com/element-hq/synapse/issues/17091))
+* Bump types-pillow from 10.2.0.20240406 to 10.2.0.20240415. ([\#17090](https://github.com/element-hq/synapse/issues/17090))
+
# Synapse 1.105.1 (2024-04-23)
## Security advisory
diff --git a/changelog.d/16819.feature b/changelog.d/16819.feature
deleted file mode 100644
index 1af6f466b7..0000000000
--- a/changelog.d/16819.feature
+++ /dev/null
@@ -1 +0,0 @@
-Send an email if the address is already bound to an user account.
diff --git a/changelog.d/16920.bugfix b/changelog.d/16920.bugfix
deleted file mode 100644
index 460f4f7160..0000000000
--- a/changelog.d/16920.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Adds validation to ensure that the `limit` parameter on `/publicRooms` is non-negative.
diff --git a/changelog.d/16923.bugfix b/changelog.d/16923.bugfix
deleted file mode 100644
index bd6f24925e..0000000000
--- a/changelog.d/16923.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Return `400 M_NOT_JSON` upon receiving invalid JSON in query parameters across various client and admin endpoints, rather than an internal server error.
\ No newline at end of file
diff --git a/changelog.d/16943.bugfix b/changelog.d/16943.bugfix
deleted file mode 100644
index 4360741132..0000000000
--- a/changelog.d/16943.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Make the CSAPI endpoint `/keys/device_signing/upload` idempotent.
\ No newline at end of file
diff --git a/changelog.d/17032.misc b/changelog.d/17032.misc
deleted file mode 100644
index b03f6f42e5..0000000000
--- a/changelog.d/17032.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use new receipts column to optimise receipt and push action SQL queries. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/changelog.d/17036.misc b/changelog.d/17036.misc
deleted file mode 100644
index 3296668059..0000000000
--- a/changelog.d/17036.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix mypy with latest Twisted release.
diff --git a/changelog.d/17056.feature b/changelog.d/17056.feature
deleted file mode 100644
index b4cbe849e4..0000000000
--- a/changelog.d/17056.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement the rendezvous mechanism described by MSC4108.
diff --git a/changelog.d/17069.doc b/changelog.d/17069.doc
deleted file mode 100644
index f5a7f599d1..0000000000
--- a/changelog.d/17069.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add a prompt in the contributing guide to manually configure icu4c.
diff --git a/changelog.d/17076.bugfix b/changelog.d/17076.bugfix
deleted file mode 100644
index a111ea2b88..0000000000
--- a/changelog.d/17076.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Redact membership events if the user requested erasure upon deactivating.
\ No newline at end of file
diff --git a/changelog.d/17079.misc b/changelog.d/17079.misc
deleted file mode 100644
index 340e40d194..0000000000
--- a/changelog.d/17079.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump minimum supported Rust version to 1.66.0.
diff --git a/changelog.d/17081.misc b/changelog.d/17081.misc
deleted file mode 100644
index d1ab69126c..0000000000
--- a/changelog.d/17081.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add helpers to transform Twisted requests to Rust http Requests/Responses.
diff --git a/changelog.d/17086.feature b/changelog.d/17086.feature
deleted file mode 100644
index 08b407d316..0000000000
--- a/changelog.d/17086.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support delegating the rendezvous mechanism described MSC4108 to an external implementation.
diff --git a/changelog.d/17096.misc b/changelog.d/17096.misc
deleted file mode 100644
index b03f6f42e5..0000000000
--- a/changelog.d/17096.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use new receipts column to optimise receipt and push action SQL queries. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/changelog.d/17099.doc b/changelog.d/17099.doc
deleted file mode 100644
index d8d10fa53a..0000000000
--- a/changelog.d/17099.doc
+++ /dev/null
@@ -1 +0,0 @@
-Clarify what part of message retention is still experimental.
diff --git a/changelog.d/17125.misc b/changelog.d/17125.misc
deleted file mode 100644
index a7d9ce6491..0000000000
--- a/changelog.d/17125.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix type annotation for `visited_chains` after `mypy` upgrade.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index 214ed59426..de912c2ac8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.106.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.106.0rc1.
+
+ -- Synapse Packaging team Thu, 25 Apr 2024 15:54:59 +0100
+
matrix-synapse-py3 (1.105.1) stable; urgency=medium
* New Synapse release 1.105.1.
diff --git a/pyproject.toml b/pyproject.toml
index ed0f5ef4ba..5e47a46cd7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.105.1"
+version = "1.106.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From 30c50e024075f7046baa5465d27a1c490b54dc21 Mon Sep 17 00:00:00 2001
From: Olivier 'reivilibre
Date: Thu, 25 Apr 2024 16:00:37 +0100
Subject: [PATCH 026/503] Tweak changelog
---
CHANGES.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index 913e6fbc8c..451581fa63 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -3,8 +3,8 @@
### Features
- Send an email if the address is already bound to an user account. ([\#16819](https://github.com/element-hq/synapse/issues/16819))
-- Implement the rendezvous mechanism described by MSC4108. ([\#17056](https://github.com/element-hq/synapse/issues/17056))
-- Support delegating the rendezvous mechanism described MSC4108 to an external implementation. ([\#17086](https://github.com/element-hq/synapse/issues/17086))
+- Implement the rendezvous mechanism described by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/issues/4108). ([\#17056](https://github.com/element-hq/synapse/issues/17056))
+- Support delegating the rendezvous mechanism described by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/issues/4108) to an external implementation. ([\#17086](https://github.com/element-hq/synapse/issues/17086))
### Bugfixes
From 922656fc77e03b50daf09f91af5b4d67879c896a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 26 Apr 2024 09:36:21 +0100
Subject: [PATCH 027/503] Bump phonenumbers from 8.13.29 to 8.13.35 (#17106)
Bumps
[phonenumbers](https://github.com/daviddrysdale/python-phonenumbers)
from 8.13.29 to 8.13.35.
Commits
9369ff4
Prep for 8.13.35 release
2e1e133
Generated files for metadata
25a306f
Merge metadata changes from upstream 8.13.35
7105292
Prep for 8.13.34 release
e7b328d
Generated files for metadata
315eb10
Merge metadata changes from upstream 8.13.34
29dab75
Prep for 8.13.33 release
f5b9401
Generated files for metadata
aa21158
Merge metadata changes from upstream 8.13.33
92c242c
Prep for 8.13.32 release
Additional commits viewable in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index d916c627a0..fe56051238 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1638,13 +1638,13 @@ files = [
[[package]]
name = "phonenumbers"
-version = "8.13.29"
+version = "8.13.35"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
files = [
- {file = "phonenumbers-8.13.29-py2.py3-none-any.whl", hash = "sha256:9d7863dc8a37e8127f3c9dde65be93a5b46649b779184f8b0a85bdd043b0b293"},
- {file = "phonenumbers-8.13.29.tar.gz", hash = "sha256:a6c85b53e28410aba2f312255cc8015f384a43e7e241ffb84ca5cde80f094cdf"},
+ {file = "phonenumbers-8.13.35-py2.py3-none-any.whl", hash = "sha256:58286a8e617bd75f541e04313b28c36398be6d4443a778c85e9617a93c391310"},
+ {file = "phonenumbers-8.13.35.tar.gz", hash = "sha256:64f061a967dcdae11e1c59f3688649e697b897110a33bb74d5a69c3e35321245"},
]
[[package]]
From 31664455142ac38bacd4c03958b5f418ac24c8b2 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 26 Apr 2024 09:36:47 +0100
Subject: [PATCH 028/503] Bump pydantic from 2.6.4 to 2.7.0 (#17107)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [pydantic](https://github.com/pydantic/pydantic) from 2.6.4 to
2.7.0.
Release notes
Sourced from pydantic's
releases .
v2.7.0 (2024-04-11)
The code released in v2.7.0 is practically identical to that of
v2.7.0b1.
What's Changed
Packaging
New Features
Finalized in v2.7.0, rather than v2.7.0b1:
Add support for field level number to str coercion option by @NeevCohen in #9137
Update warnings parameter for serialization utilities
to allow raising a warning by @Lance-Drane in
#9166
Changes
Performance
... (truncated)
Changelog
Sourced from pydantic's
changelog .
v2.7.0 (2024-04-11)
GitHub
release
The code released in v2.7.0 is practically identical to that of
v2.7.0b1.
What's Changed
Packaging
New Features
Finalized in v2.7.0, rather than v2.7.0b1:
Add support for field level number to str coercion option by @NeevCohen in #9137
Update warnings parameter for serialization utilities
to allow raising a warning by @Lance-Drane in
#9166
Changes
Performance
... (truncated)
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 170 ++++++++++++++++++++++++++--------------------------
1 file changed, 85 insertions(+), 85 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index fe56051238..4bb9ab590e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1873,18 +1873,18 @@ files = [
[[package]]
name = "pydantic"
-version = "2.6.4"
+version = "2.7.0"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"},
- {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"},
+ {file = "pydantic-2.7.0-py3-none-any.whl", hash = "sha256:9dee74a271705f14f9a1567671d144a851c675b072736f0a7b2608fd9e495352"},
+ {file = "pydantic-2.7.0.tar.gz", hash = "sha256:b5ecdd42262ca2462e2624793551e80911a1e989f462910bb81aef974b4bb383"},
]
[package.dependencies]
annotated-types = ">=0.4.0"
-pydantic-core = "2.16.3"
+pydantic-core = "2.18.1"
typing-extensions = ">=4.6.1"
[package.extras]
@@ -1892,90 +1892,90 @@ email = ["email-validator (>=2.0.0)"]
[[package]]
name = "pydantic-core"
-version = "2.16.3"
-description = ""
+version = "2.18.1"
+description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"},
- {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"},
- {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"},
- {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"},
- {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"},
- {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"},
- {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"},
- {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"},
- {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"},
- {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"},
- {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"},
- {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"},
- {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"},
- {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"},
- {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"},
- {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"},
- {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"},
- {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"},
- {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"},
- {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"},
- {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"},
- {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"},
- {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"},
- {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"},
- {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"},
- {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"},
- {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"},
- {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"},
- {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"},
- {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"},
- {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"},
- {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"},
- {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"},
- {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"},
- {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"},
- {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"},
- {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"},
- {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"},
- {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"},
- {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"},
- {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"},
- {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"},
- {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"},
- {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"},
- {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"},
- {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"},
- {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"},
- {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"},
- {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"},
- {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"},
- {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"},
- {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"},
- {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"},
- {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"},
- {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"},
- {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"},
- {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"},
- {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"},
- {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"},
- {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"},
- {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"},
- {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"},
- {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"},
- {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"},
- {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"},
- {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"},
- {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"},
- {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"},
- {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"},
- {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"},
- {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"},
- {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"},
- {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"},
- {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"},
- {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"},
- {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"},
- {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"},
- {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"},
- {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ee9cf33e7fe14243f5ca6977658eb7d1042caaa66847daacbd2117adb258b226"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6b7bbb97d82659ac8b37450c60ff2e9f97e4eb0f8a8a3645a5568b9334b08b50"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df4249b579e75094f7e9bb4bd28231acf55e308bf686b952f43100a5a0be394c"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d0491006a6ad20507aec2be72e7831a42efc93193d2402018007ff827dc62926"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ae80f72bb7a3e397ab37b53a2b49c62cc5496412e71bc4f1277620a7ce3f52b"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58aca931bef83217fca7a390e0486ae327c4af9c3e941adb75f8772f8eeb03a1"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1be91ad664fc9245404a789d60cba1e91c26b1454ba136d2a1bf0c2ac0c0505a"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:667880321e916a8920ef49f5d50e7983792cf59f3b6079f3c9dac2b88a311d17"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f7054fdc556f5421f01e39cbb767d5ec5c1139ea98c3e5b350e02e62201740c7"},
+ {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:030e4f9516f9947f38179249778709a460a3adb516bf39b5eb9066fcfe43d0e6"},
+ {file = "pydantic_core-2.18.1-cp310-none-win32.whl", hash = "sha256:2e91711e36e229978d92642bfc3546333a9127ecebb3f2761372e096395fc649"},
+ {file = "pydantic_core-2.18.1-cp310-none-win_amd64.whl", hash = "sha256:9a29726f91c6cb390b3c2338f0df5cd3e216ad7a938762d11c994bb37552edb0"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9ece8a49696669d483d206b4474c367852c44815fca23ac4e48b72b339807f80"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a5d83efc109ceddb99abd2c1316298ced2adb4570410defe766851a804fcd5b"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7973c381283783cd1043a8c8f61ea5ce7a3a58b0369f0ee0ee975eaf2f2a1b"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:54c7375c62190a7845091f521add19b0f026bcf6ae674bdb89f296972272e86d"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd63cec4e26e790b70544ae5cc48d11b515b09e05fdd5eff12e3195f54b8a586"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:561cf62c8a3498406495cfc49eee086ed2bb186d08bcc65812b75fda42c38294"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68717c38a68e37af87c4da20e08f3e27d7e4212e99e96c3d875fbf3f4812abfc"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d5728e93d28a3c63ee513d9ffbac9c5989de8c76e049dbcb5bfe4b923a9739d"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f0f17814c505f07806e22b28856c59ac80cee7dd0fbb152aed273e116378f519"},
+ {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d816f44a51ba5175394bc6c7879ca0bd2be560b2c9e9f3411ef3a4cbe644c2e9"},
+ {file = "pydantic_core-2.18.1-cp311-none-win32.whl", hash = "sha256:09f03dfc0ef8c22622eaa8608caa4a1e189cfb83ce847045eca34f690895eccb"},
+ {file = "pydantic_core-2.18.1-cp311-none-win_amd64.whl", hash = "sha256:27f1009dc292f3b7ca77feb3571c537276b9aad5dd4efb471ac88a8bd09024e9"},
+ {file = "pydantic_core-2.18.1-cp311-none-win_arm64.whl", hash = "sha256:48dd883db92e92519201f2b01cafa881e5f7125666141a49ffba8b9facc072b0"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b6b0e4912030c6f28bcb72b9ebe4989d6dc2eebcd2a9cdc35fefc38052dd4fe8"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3202a429fe825b699c57892d4371c74cc3456d8d71b7f35d6028c96dfecad31"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3982b0a32d0a88b3907e4b0dc36809fda477f0757c59a505d4e9b455f384b8b"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25595ac311f20e5324d1941909b0d12933f1fd2171075fcff763e90f43e92a0d"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14fe73881cf8e4cbdaded8ca0aa671635b597e42447fec7060d0868b52d074e6"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca976884ce34070799e4dfc6fbd68cb1d181db1eefe4a3a94798ddfb34b8867f"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684d840d2c9ec5de9cb397fcb3f36d5ebb6fa0d94734f9886032dd796c1ead06"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:54764c083bbe0264f0f746cefcded6cb08fbbaaf1ad1d78fb8a4c30cff999a90"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:201713f2f462e5c015b343e86e68bd8a530a4f76609b33d8f0ec65d2b921712a"},
+ {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd1a9edb9dd9d79fbeac1ea1f9a8dd527a6113b18d2e9bcc0d541d308dae639b"},
+ {file = "pydantic_core-2.18.1-cp312-none-win32.whl", hash = "sha256:d5e6b7155b8197b329dc787356cfd2684c9d6a6b1a197f6bbf45f5555a98d411"},
+ {file = "pydantic_core-2.18.1-cp312-none-win_amd64.whl", hash = "sha256:9376d83d686ec62e8b19c0ac3bf8d28d8a5981d0df290196fb6ef24d8a26f0d6"},
+ {file = "pydantic_core-2.18.1-cp312-none-win_arm64.whl", hash = "sha256:c562b49c96906b4029b5685075fe1ebd3b5cc2601dfa0b9e16c2c09d6cbce048"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3e352f0191d99fe617371096845070dee295444979efb8f27ad941227de6ad09"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0295d52b012cbe0d3059b1dba99159c3be55e632aae1999ab74ae2bd86a33d7"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56823a92075780582d1ffd4489a2e61d56fd3ebb4b40b713d63f96dd92d28144"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd3f79e17b56741b5177bcc36307750d50ea0698df6aa82f69c7db32d968c1c2"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38a5024de321d672a132b1834a66eeb7931959c59964b777e8f32dbe9523f6b1"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2ce426ee691319d4767748c8e0895cfc56593d725594e415f274059bcf3cb76"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2adaeea59849ec0939af5c5d476935f2bab4b7f0335b0110f0f069a41024278e"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9b6431559676a1079eac0f52d6d0721fb8e3c5ba43c37bc537c8c83724031feb"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:85233abb44bc18d16e72dc05bf13848a36f363f83757541f1a97db2f8d58cfd9"},
+ {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:641a018af4fe48be57a2b3d7a1f0f5dbca07c1d00951d3d7463f0ac9dac66622"},
+ {file = "pydantic_core-2.18.1-cp38-none-win32.whl", hash = "sha256:63d7523cd95d2fde0d28dc42968ac731b5bb1e516cc56b93a50ab293f4daeaad"},
+ {file = "pydantic_core-2.18.1-cp38-none-win_amd64.whl", hash = "sha256:907a4d7720abfcb1c81619863efd47c8a85d26a257a2dbebdb87c3b847df0278"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:aad17e462f42ddbef5984d70c40bfc4146c322a2da79715932cd8976317054de"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:94b9769ba435b598b547c762184bcfc4783d0d4c7771b04a3b45775c3589ca44"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80e0e57cc704a52fb1b48f16d5b2c8818da087dbee6f98d9bf19546930dc64b5"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76b86e24039c35280ceee6dce7e62945eb93a5175d43689ba98360ab31eebc4a"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a05db5013ec0ca4a32cc6433f53faa2a014ec364031408540ba858c2172bb0"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:250ae39445cb5475e483a36b1061af1bc233de3e9ad0f4f76a71b66231b07f88"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a32204489259786a923e02990249c65b0f17235073149d0033efcebe80095570"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6395a4435fa26519fd96fdccb77e9d00ddae9dd6c742309bd0b5610609ad7fb2"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2533ad2883f001efa72f3d0e733fb846710c3af6dcdd544fe5bf14fa5fe2d7db"},
+ {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b560b72ed4816aee52783c66854d96157fd8175631f01ef58e894cc57c84f0f6"},
+ {file = "pydantic_core-2.18.1-cp39-none-win32.whl", hash = "sha256:582cf2cead97c9e382a7f4d3b744cf0ef1a6e815e44d3aa81af3ad98762f5a9b"},
+ {file = "pydantic_core-2.18.1-cp39-none-win_amd64.whl", hash = "sha256:ca71d501629d1fa50ea7fa3b08ba884fe10cefc559f5c6c8dfe9036c16e8ae89"},
+ {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e178e5b66a06ec5bf51668ec0d4ac8cfb2bdcb553b2c207d58148340efd00143"},
+ {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:72722ce529a76a4637a60be18bd789d8fb871e84472490ed7ddff62d5fed620d"},
+ {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe0c1ce5b129455e43f941f7a46f61f3d3861e571f2905d55cdbb8b5c6f5e2c"},
+ {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4284c621f06a72ce2cb55f74ea3150113d926a6eb78ab38340c08f770eb9b4d"},
+ {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a0c3e718f4e064efde68092d9d974e39572c14e56726ecfaeebbe6544521f47"},
+ {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2027493cc44c23b598cfaf200936110433d9caa84e2c6cf487a83999638a96ac"},
+ {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:76909849d1a6bffa5a07742294f3fa1d357dc917cb1fe7b470afbc3a7579d539"},
+ {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ee7ccc7fb7e921d767f853b47814c3048c7de536663e82fbc37f5eb0d532224b"},
+ {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ee2794111c188548a4547eccc73a6a8527fe2af6cf25e1a4ebda2fd01cdd2e60"},
+ {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a139fe9f298dc097349fb4f28c8b81cc7a202dbfba66af0e14be5cfca4ef7ce5"},
+ {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d074b07a10c391fc5bbdcb37b2f16f20fcd9e51e10d01652ab298c0d07908ee2"},
+ {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c69567ddbac186e8c0aadc1f324a60a564cfe25e43ef2ce81bcc4b8c3abffbae"},
+ {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:baf1c7b78cddb5af00971ad5294a4583188bda1495b13760d9f03c9483bb6203"},
+ {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2684a94fdfd1b146ff10689c6e4e815f6a01141781c493b97342cdc5b06f4d5d"},
+ {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:73c1bc8a86a5c9e8721a088df234265317692d0b5cd9e86e975ce3bc3db62a59"},
+ {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e60defc3c15defb70bb38dd605ff7e0fae5f6c9c7cbfe0ad7868582cb7e844a6"},
+ {file = "pydantic_core-2.18.1.tar.gz", hash = "sha256:de9d3e8717560eb05e28739d1b35e4eac2e458553a52a301e51352a7ffc86a35"},
]
[package.dependencies]
From 0d4d00a07ce5d1b7b538d16744558a85cc142ff3 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 26 Apr 2024 09:39:30 +0100
Subject: [PATCH 029/503] Bump pyicu from 2.12 to 2.13 (#17109)
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 4bb9ab590e..953e9fe4c8 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2016,12 +2016,12 @@ plugins = ["importlib-metadata"]
[[package]]
name = "pyicu"
-version = "2.12"
+version = "2.13"
description = "Python extension wrapping the ICU C++ API"
optional = true
python-versions = "*"
files = [
- {file = "PyICU-2.12.tar.gz", hash = "sha256:bd7ab5efa93ad692e6daa29cd249364e521218329221726a113ca3cb281c8611"},
+ {file = "PyICU-2.13.tar.gz", hash = "sha256:d481be888975df3097c2790241bbe8518f65c9676a74957cdbe790e559c828f6"},
]
[[package]]
From 947e8a6cb04d3b9cfa75fa7b6623c03fc7fe0e89 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 26 Apr 2024 09:39:36 +0100
Subject: [PATCH 030/503] Bump types-bleach from 6.1.0.1 to 6.1.0.20240331
(#17110)
Bumps [types-bleach](https://github.com/python/typeshed) from 6.1.0.1 to
6.1.0.20240331.
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 22 ++++++++++++++++++----
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 953e9fe4c8..cfe0045afc 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -3051,15 +3051,18 @@ twisted = "*"
[[package]]
name = "types-bleach"
-version = "6.1.0.1"
+version = "6.1.0.20240331"
description = "Typing stubs for bleach"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "types-bleach-6.1.0.1.tar.gz", hash = "sha256:1e43c437e734a90efe4f40ebfe831057599568d3b275939ffbd6094848a18a27"},
- {file = "types_bleach-6.1.0.1-py3-none-any.whl", hash = "sha256:f83f80e0709f13d809a9c79b958a1089df9b99e68059287beb196e38967e4ddf"},
+ {file = "types-bleach-6.1.0.20240331.tar.gz", hash = "sha256:2ee858a84fb06fc2225ff56ba2f7f6c88b65638659efae0d7bfd6b24a1b5a524"},
+ {file = "types_bleach-6.1.0.20240331-py3-none-any.whl", hash = "sha256:399bc59bfd20a36a56595f13f805e56c8a08e5a5c07903e5cf6fafb5a5107dd4"},
]
+[package.dependencies]
+types-html5lib = "*"
+
[[package]]
name = "types-commonmark"
version = "0.9.2.20240106"
@@ -3071,6 +3074,17 @@ files = [
{file = "types_commonmark-0.9.2.20240106-py3-none-any.whl", hash = "sha256:606d9de1e3a96cab0b1c0b6cccf4df099116148d1d864d115fde2e27ad6877c3"},
]
+[[package]]
+name = "types-html5lib"
+version = "1.1.11.20240228"
+description = "Typing stubs for html5lib"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "types-html5lib-1.1.11.20240228.tar.gz", hash = "sha256:22736b7299e605ec4ba539d48691e905fd0c61c3ea610acc59922232dc84cede"},
+ {file = "types_html5lib-1.1.11.20240228-py3-none-any.whl", hash = "sha256:af5de0125cb0fe5667543b158db83849b22e25c0e36c9149836b095548bf1020"},
+]
+
[[package]]
name = "types-jsonschema"
version = "4.21.0.20240311"
From 31742149d42578360ebc828fc8b8c290f6894831 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 26 Apr 2024 09:39:49 +0100
Subject: [PATCH 031/503] Bump serde from 1.0.197 to 1.0.198 (#17111)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.197 to
1.0.198.
Release notes
Sourced from serde's
releases .
v1.0.198
Support serializing and deserializing
Saturating<T> (#2709 ,
thanks @jbethune )
Commits
c4fb923
Release 1.0.198
65b7eea
Merge pull request #2729
from dtolnay/saturating
01cd696
Integrate Saturating<T> deserialization into impl_deserialize_num
macro
c13b3f7
Format PR 2709
a6571ee
Merge pull request #2709
from jbethune/master
6e38aff
Revert "Temporarily disable miri on doctests"
3d1b19e
Implement Ser+De for Saturating\<T>
5b24f88
Resolve legacy_numeric_constants clippy lints
74d0670
Explicitly install a Rust toolchain for cargo-outdated job
3bfab6e
Temporarily disable miri on doctests
Additional commits viewable in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 4474dfb903..a454b6eff5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -485,18 +485,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.197"
+version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
+checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.197"
+version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
+checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9"
dependencies = [
"proc-macro2",
"quote",
From 9985aa682193d7af989f1582e379a8cbf8f5b1bd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 26 Apr 2024 09:39:57 +0100
Subject: [PATCH 032/503] Bump serde_json from 1.0.115 to 1.0.116 (#17112)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.115 to
1.0.116.
Release notes
Sourced from serde_json's
releases .
v1.0.116
Make module structure comprehensible to static analysis (#1124 ,
thanks @mleonhard )
Commits
a3f62bb
Release 1.0.116
12c8ee0
Hide "non-exhaustive patterns" errors when crate fails to
compile
051ce97
Merge pull request 1124 from mleonhard/master
25dc750
Replace features_check mod with a call to
std::compile_error!. Fixes htt...
2e15e3d
Revert "Temporarily disable miri on doctests"
0baba28
Resolve legacy_numeric_constants clippy lints
See full diff in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index a454b6eff5..8cbcdbd470 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -505,9 +505,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.115"
+version = "1.0.116"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd"
+checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813"
dependencies = [
"itoa",
"ryu",
From 59710437e4a885252de5e5555fbcf42d223b092c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Melvyn=20La=C3=AFly?=
Date: Fri, 26 Apr 2024 10:43:52 +0200
Subject: [PATCH 033/503] Return the search terms as search highlights for
SQLite instead of nothing (#17000)
Fixes https://github.com/element-hq/synapse/issues/16999 and
https://github.com/element-hq/element-android/pull/8729 by returning the
search terms as search highlights.
---
changelog.d/17000.bugfix | 1 +
synapse/storage/databases/main/search.py | 31 ++++++++++++++++++------
tests/storage/test_room_search.py | 13 +++++-----
3 files changed, 31 insertions(+), 14 deletions(-)
create mode 100644 changelog.d/17000.bugfix
diff --git a/changelog.d/17000.bugfix b/changelog.d/17000.bugfix
new file mode 100644
index 0000000000..86b21c9615
--- /dev/null
+++ b/changelog.d/17000.bugfix
@@ -0,0 +1 @@
+Fixed search feature of Element Android on homeservers using SQLite by returning search terms as search highlights.
\ No newline at end of file
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index 4a0afb50ac..20fcfd3122 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -470,6 +470,8 @@ class SearchStore(SearchBackgroundUpdateStore):
count_args = args
count_clauses = clauses
+ sqlite_highlights: List[str] = []
+
if isinstance(self.database_engine, PostgresEngine):
search_query = search_term
sql = """
@@ -486,7 +488,7 @@ class SearchStore(SearchBackgroundUpdateStore):
"""
count_args = [search_query] + count_args
elif isinstance(self.database_engine, Sqlite3Engine):
- search_query = _parse_query_for_sqlite(search_term)
+ search_query, sqlite_highlights = _parse_query_for_sqlite(search_term)
sql = """
SELECT rank(matchinfo(event_search)) as rank, room_id, event_id
@@ -531,9 +533,11 @@ class SearchStore(SearchBackgroundUpdateStore):
event_map = {ev.event_id: ev for ev in events}
- highlights = None
+ highlights: Collection[str] = []
if isinstance(self.database_engine, PostgresEngine):
highlights = await self._find_highlights_in_postgres(search_query, events)
+ else:
+ highlights = sqlite_highlights
count_sql += " GROUP BY room_id"
@@ -597,6 +601,8 @@ class SearchStore(SearchBackgroundUpdateStore):
count_args = list(args)
count_clauses = list(clauses)
+ sqlite_highlights: List[str] = []
+
if pagination_token:
try:
origin_server_ts_str, stream_str = pagination_token.split(",")
@@ -647,7 +653,7 @@ class SearchStore(SearchBackgroundUpdateStore):
CROSS JOIN events USING (event_id)
WHERE
"""
- search_query = _parse_query_for_sqlite(search_term)
+ search_query, sqlite_highlights = _parse_query_for_sqlite(search_term)
args = [search_query] + args
count_sql = """
@@ -694,9 +700,11 @@ class SearchStore(SearchBackgroundUpdateStore):
event_map = {ev.event_id: ev for ev in events}
- highlights = None
+ highlights: Collection[str] = []
if isinstance(self.database_engine, PostgresEngine):
highlights = await self._find_highlights_in_postgres(search_query, events)
+ else:
+ highlights = sqlite_highlights
count_sql += " GROUP BY room_id"
@@ -892,19 +900,25 @@ def _tokenize_query(query: str) -> TokenList:
return tokens
-def _tokens_to_sqlite_match_query(tokens: TokenList) -> str:
+def _tokens_to_sqlite_match_query(tokens: TokenList) -> Tuple[str, List[str]]:
"""
Convert the list of tokens to a string suitable for passing to sqlite's MATCH.
Assume sqlite was compiled with enhanced query syntax.
+ Returns the sqlite-formatted query string and the tokenized search terms
+ that can be used as highlights.
+
Ref: https://www.sqlite.org/fts3.html#full_text_index_queries
"""
match_query = []
+ highlights = []
for token in tokens:
if isinstance(token, str):
match_query.append(token)
+ highlights.append(token)
elif isinstance(token, Phrase):
match_query.append('"' + " ".join(token.phrase) + '"')
+ highlights.append(" ".join(token.phrase))
elif token == SearchToken.Not:
# TODO: SQLite treats NOT as a *binary* operator. Hopefully a search
# term has already been added before this.
@@ -916,11 +930,14 @@ def _tokens_to_sqlite_match_query(tokens: TokenList) -> str:
else:
raise ValueError(f"unknown token {token}")
- return "".join(match_query)
+ return "".join(match_query), highlights
-def _parse_query_for_sqlite(search_term: str) -> str:
+def _parse_query_for_sqlite(search_term: str) -> Tuple[str, List[str]]:
"""Takes a plain unicode string from the user and converts it into a form
that can be passed to sqllite's matchinfo().
+
+ Returns the converted query string and the tokenized search terms
+ that can be used as highlights.
"""
return _tokens_to_sqlite_match_query(_tokenize_query(search_term))
diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py
index 1eab89f140..340642b7e7 100644
--- a/tests/storage/test_room_search.py
+++ b/tests/storage/test_room_search.py
@@ -71,17 +71,16 @@ class EventSearchInsertionTest(HomeserverTestCase):
store.search_msgs([room_id], "hi bob", ["content.body"])
)
self.assertEqual(result.get("count"), 1)
- if isinstance(store.database_engine, PostgresEngine):
- self.assertIn("hi", result.get("highlights"))
- self.assertIn("bob", result.get("highlights"))
+ self.assertIn("hi", result.get("highlights"))
+ self.assertIn("bob", result.get("highlights"))
# Check that search works for an unrelated message
result = self.get_success(
store.search_msgs([room_id], "another", ["content.body"])
)
self.assertEqual(result.get("count"), 1)
- if isinstance(store.database_engine, PostgresEngine):
- self.assertIn("another", result.get("highlights"))
+
+ self.assertIn("another", result.get("highlights"))
# Check that search works for a search term that overlaps with the message
# containing a null byte and an unrelated message.
@@ -90,8 +89,8 @@ class EventSearchInsertionTest(HomeserverTestCase):
result = self.get_success(
store.search_msgs([room_id], "hi alice", ["content.body"])
)
- if isinstance(store.database_engine, PostgresEngine):
- self.assertIn("alice", result.get("highlights"))
+
+ self.assertIn("alice", result.get("highlights"))
def test_non_string(self) -> None:
"""Test that non-string `value`s are not inserted into `event_search`.
From 0ef2315a99859217e319e4cb5a29d88a054952ff Mon Sep 17 00:00:00 2001
From: "Amanda H. L. de Andrade Katz"
Date: Fri, 26 Apr 2024 05:44:54 -0300
Subject: [PATCH 034/503] Update event_cache_size and global_factor
configurations documentation (#17071)
### Pull Request Checklist
* [x] Pull request is based on the develop branch
* [x] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [x] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct
(run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---
changelog.d/17071.doc | 1 +
docs/usage/configuration/config_documentation.md | 8 ++++++++
2 files changed, 9 insertions(+)
create mode 100644 changelog.d/17071.doc
diff --git a/changelog.d/17071.doc b/changelog.d/17071.doc
new file mode 100644
index 0000000000..28773414d8
--- /dev/null
+++ b/changelog.d/17071.doc
@@ -0,0 +1 @@
+Update event_cache_size and global_factor configurations documentation.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 985f90c8a1..bcd53145f1 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -1317,6 +1317,12 @@ Options related to caching.
The number of events to cache in memory. Defaults to 10K. Like other caches,
this is affected by `caches.global_factor` (see below).
+For example, the default is 10K and the global_factor default is 0.5.
+
+Since 10K * 0.5 is 5K, the event cache size will be 5K.
+
+The cache affected by this configuration is named "*getEvent*".
+
Note that this option is not part of the `caches` section.
Example configuration:
@@ -1342,6 +1348,8 @@ number of entries that can be stored.
Defaults to 0.5, which will halve the size of all caches.
+ Note that changing this value also affects the HTTP connection pool.
+
* `per_cache_factors`: A dictionary of cache name to cache factor for that individual
cache. Overrides the global cache factor for a given cache.
From 516fd891eeb3cade255298a2239ca607bfbec16a Mon Sep 17 00:00:00 2001
From: Andrew Ferrazzutti
Date: Fri, 26 Apr 2024 17:46:42 +0900
Subject: [PATCH 035/503] Use recommended endpoint for MSC3266 requests
(#17078)
Keep the existing endpoint for backwards compatibility
Signed-off-by: Andrew Ferrazzutti
---
changelog.d/17078.bugfix | 1 +
docs/workers.md | 2 +-
synapse/rest/client/room.py | 6 ++++++
3 files changed, 8 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17078.bugfix
diff --git a/changelog.d/17078.bugfix b/changelog.d/17078.bugfix
new file mode 100644
index 0000000000..286a772a1e
--- /dev/null
+++ b/changelog.d/17078.bugfix
@@ -0,0 +1 @@
+For MSC3266 room summaries, support queries at the recommended endpoint of `/_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`. The existing endpoint of `/_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` is deprecated.
diff --git a/docs/workers.md b/docs/workers.md
index ab9c1db86b..9a0cc9f2f4 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -232,7 +232,7 @@ information.
^/_matrix/client/v1/rooms/.*/hierarchy$
^/_matrix/client/(v1|unstable)/rooms/.*/relations/
^/_matrix/client/v1/rooms/.*/threads$
- ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
+ ^/_matrix/client/unstable/im.nheko.summary/summary/.*$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
^/_matrix/client/(r0|v3|unstable)/account/whoami$
^/_matrix/client/(r0|v3|unstable)/devices$
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index e4c7dd1a58..fb4d44211e 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -1442,10 +1442,16 @@ class RoomHierarchyRestServlet(RestServlet):
class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet):
PATTERNS = (
+ # deprecated endpoint, to be removed
re.compile(
"^/_matrix/client/unstable/im.nheko.summary"
"/rooms/(?P[^/]*)/summary$"
),
+ # recommended endpoint
+ re.compile(
+ "^/_matrix/client/unstable/im.nheko.summary"
+ "/summary/(?P[^/]*)$"
+ ),
)
CATEGORY = "Client API requests"
From 90cc9e5b29bc6c2433df5e62da1ad8bcb83ac038 Mon Sep 17 00:00:00 2001
From: "Amanda H. L. de Andrade Katz"
Date: Fri, 26 Apr 2024 05:52:58 -0300
Subject: [PATCH 036/503] Rephrase enable_notifs configuration (#17116)
---
changelog.d/17116.doc | 1 +
docs/usage/configuration/config_documentation.md | 4 ++--
2 files changed, 3 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/17116.doc
diff --git a/changelog.d/17116.doc b/changelog.d/17116.doc
new file mode 100644
index 0000000000..8712737c05
--- /dev/null
+++ b/changelog.d/17116.doc
@@ -0,0 +1 @@
+Update enable_notifs configuration documentation.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index bcd53145f1..0c582d0387 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -676,8 +676,8 @@ This setting has the following sub-options:
trailing 's'.
* `app_name`: `app_name` defines the default value for '%(app)s' in `notif_from` and email
subjects. It defaults to 'Matrix'.
-* `enable_notifs`: Set to true to enable sending emails for messages that the user
- has missed. Disabled by default.
+* `enable_notifs`: Set to true to allow users to receive e-mail notifications. If this is not set,
+ users can configure e-mail notifications but will not receive them. Disabled by default.
* `notif_for_new_users`: Set to false to disable automatic subscription to email
notifications for new users. Enabled by default.
* `notif_delay_before_mail`: The time to wait before emailing about a notification.
From 41fbe387d64c3b47202926711bb574d4a7b74d47 Mon Sep 17 00:00:00 2001
From: Michael Telatynski <7t3chguy@gmail.com>
Date: Fri, 26 Apr 2024 09:54:30 +0100
Subject: [PATCH 037/503] Improve error message for cross signing reset with
MSC3861 enabled (#17121)
---
changelog.d/17121.bugfix | 1 +
synapse/rest/client/keys.py | 13 ++++++++-----
2 files changed, 9 insertions(+), 5 deletions(-)
create mode 100644 changelog.d/17121.bugfix
diff --git a/changelog.d/17121.bugfix b/changelog.d/17121.bugfix
new file mode 100644
index 0000000000..f160839aac
--- /dev/null
+++ b/changelog.d/17121.bugfix
@@ -0,0 +1 @@
+Improve error message for cross signing reset with MSC3861 enabled.
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index 86c9515854..a0017257ce 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -393,17 +393,20 @@ class SigningKeyUploadServlet(RestServlet):
# time. Because there is no UIA in MSC3861, for now we throw an error if the
# user tries to reset the device signing key when MSC3861 is enabled, but allow
# first-time setup.
- #
- # XXX: We now have a get-out clause by which MAS can temporarily mark the master
- # key as replaceable. It should do its own equivalent of user interactive auth
- # before doing so.
if self.hs.config.experimental.msc3861.enabled:
# The auth service has to explicitly mark the master key as replaceable
# without UIA to reset the device signing key with MSC3861.
if is_cross_signing_setup and not master_key_updatable_without_uia:
+ config = self.hs.config.experimental.msc3861
+ if config.account_management_url is not None:
+ url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset"
+ else:
+ url = config.issuer
+
raise SynapseError(
HTTPStatus.NOT_IMPLEMENTED,
- "Resetting cross signing keys is not yet supported with MSC3861",
+ "To reset your end-to-end encryption cross-signing identity, "
+ f"you first need to approve it at {url} and then try again.",
Codes.UNRECOGNIZED,
)
# But first-time setup is fine
From 9c918739220bb548d9e5e5c2f5692c79bd38668d Mon Sep 17 00:00:00 2001
From: villepeh <100730729+villepeh@users.noreply.github.com>
Date: Fri, 26 Apr 2024 11:56:20 +0300
Subject: [PATCH 038/503] Add RuntimeDirectory to matrix-synapse.service
(#17084)
This makes it easy to store UNIX sockets with correct permissions. Those
would be located in /run/synapse which is the directory used in many
examples in Synapse configuration manual. Additionally, the directory
and sockets are deleted when Synapse is shut down.
---
changelog.d/17084.doc | 1 +
docs/systemd-with-workers/system/matrix-synapse.service | 1 +
2 files changed, 2 insertions(+)
create mode 100644 changelog.d/17084.doc
diff --git a/changelog.d/17084.doc b/changelog.d/17084.doc
new file mode 100644
index 0000000000..8b97c81096
--- /dev/null
+++ b/changelog.d/17084.doc
@@ -0,0 +1 @@
+Add RuntimeDirectory to example matrix-synapse.service systemd unit.
diff --git a/docs/systemd-with-workers/system/matrix-synapse.service b/docs/systemd-with-workers/system/matrix-synapse.service
index 0c73fb55fb..31ceccb77f 100644
--- a/docs/systemd-with-workers/system/matrix-synapse.service
+++ b/docs/systemd-with-workers/system/matrix-synapse.service
@@ -9,6 +9,7 @@ ReloadPropagatedFrom=matrix-synapse.target
Type=notify
NotifyAccess=main
User=matrix-synapse
+RuntimeDirectory=synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=-/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
From 89fc579329d7c81c040b1c178099860e7de37bed Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 26 Apr 2024 10:52:24 +0100
Subject: [PATCH 039/503] Fix filtering of rooms when supplying the
`destination` query parameter to
`/_synapse/admin/v1/federation/destinations//rooms` (#17077)
---
changelog.d/17077.bugfix | 1 +
.../storage/databases/main/transactions.py | 1 +
tests/rest/admin/test_federation.py | 67 ++++++++++++++++++-
3 files changed, 66 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/17077.bugfix
diff --git a/changelog.d/17077.bugfix b/changelog.d/17077.bugfix
new file mode 100644
index 0000000000..7d8ea37406
--- /dev/null
+++ b/changelog.d/17077.bugfix
@@ -0,0 +1 @@
+Fixes a bug introduced in v1.52.0 where the `destination` query parameter for the [Destination Rooms Admin API](https://element-hq.github.io/synapse/v1.105/usage/administration/admin_api/federation.html#destination-rooms) failed to actually filter returned rooms.
\ No newline at end of file
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 08e0241f68..770802483c 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -660,6 +660,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
limit=limit,
retcols=("room_id", "stream_ordering"),
order_direction=order,
+ keyvalues={"destination": destination},
),
)
return rooms, count
diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py
index c1d88f0176..c2015774a1 100644
--- a/tests/rest/admin/test_federation.py
+++ b/tests/rest/admin/test_federation.py
@@ -778,20 +778,81 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
self.assertEqual(number_rooms, len(channel.json_body["rooms"]))
self._check_fields(channel.json_body["rooms"])
- def _create_destination_rooms(self, number_rooms: int) -> None:
- """Create a number rooms for destination
+ def test_room_filtering(self) -> None:
+ """Tests that rooms are correctly filtered"""
+
+ # Create two rooms on the homeserver. Each has a different remote homeserver
+ # participating in it.
+ other_destination = "other.destination.org"
+ room_ids_self_dest = self._create_destination_rooms(2, destination=self.dest)
+ room_ids_other_dest = self._create_destination_rooms(
+ 1, destination=other_destination
+ )
+
+ # Ask for the rooms that `self.dest` is participating in.
+ channel = self.make_request("GET", self.url, access_token=self.admin_user_tok)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ # Verify that we received only the rooms that `self.dest` is participating in.
+ # This assertion method name is a bit misleading. It does check that both lists
+ # contain the same items, and the same counts.
+ self.assertCountEqual(
+ [r["room_id"] for r in channel.json_body["rooms"]], room_ids_self_dest
+ )
+ self.assertEqual(channel.json_body["total"], len(room_ids_self_dest))
+
+ # Ask for the rooms that `other_destination` is participating in.
+ channel = self.make_request(
+ "GET",
+ self.url.replace(self.dest, other_destination),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ # Verify that we received only the rooms that `other_destination` is
+ # participating in.
+ self.assertCountEqual(
+ [r["room_id"] for r in channel.json_body["rooms"]], room_ids_other_dest
+ )
+ self.assertEqual(channel.json_body["total"], len(room_ids_other_dest))
+
+ def _create_destination_rooms(
+ self,
+ number_rooms: int,
+ destination: Optional[str] = None,
+ ) -> List[str]:
+ """
+ Create the given number of rooms. The given `destination` homeserver will
+ be recorded as a participant.
Args:
number_rooms: Number of rooms to be created
+ destination: The domain of the homeserver that will be considered
+ as a participant in the rooms.
+
+ Returns:
+ The IDs of the rooms that have been created.
"""
+ room_ids = []
+
+ # If no destination was provided, default to `self.dest`.
+ if destination is None:
+ destination = self.dest
+
for _ in range(number_rooms):
room_id = self.helper.create_room_as(
self.admin_user, tok=self.admin_user_tok
)
+ room_ids.append(room_id)
+
self.get_success(
- self.store.store_destination_rooms_entries((self.dest,), room_id, 1234)
+ self.store.store_destination_rooms_entries(
+ (destination,), room_id, 1234
+ )
)
+ return room_ids
+
def _check_fields(self, content: List[JsonDict]) -> None:
"""Checks that the expected room attributes are present in content
From 0fd6b269d32340c367a67ee34b963c32da080697 Mon Sep 17 00:00:00 2001
From: devonh
Date: Fri, 26 Apr 2024 18:10:45 +0000
Subject: [PATCH 040/503] Fix various typos in docs (#17114)
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [X] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct
(run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---
changelog.d/17114.doc | 1 +
docs/admin_api/room_membership.md | 2 +-
docs/message_retention_policies.md | 6 +++---
docs/postgres.md | 2 +-
docs/setup/installation.md | 4 ++--
docs/usage/administration/admin_api/background_updates.md | 2 +-
docs/usage/administration/admin_faq.md | 2 +-
docs/user_directory.md | 4 ++--
docs/workers.md | 2 +-
9 files changed, 13 insertions(+), 12 deletions(-)
create mode 100644 changelog.d/17114.doc
diff --git a/changelog.d/17114.doc b/changelog.d/17114.doc
new file mode 100644
index 0000000000..042bd89618
--- /dev/null
+++ b/changelog.d/17114.doc
@@ -0,0 +1 @@
+Fix various small typos throughout the docs.
diff --git a/docs/admin_api/room_membership.md b/docs/admin_api/room_membership.md
index 94bc95a8d5..6cbaba3dcc 100644
--- a/docs/admin_api/room_membership.md
+++ b/docs/admin_api/room_membership.md
@@ -1,6 +1,6 @@
# Edit Room Membership API
-This API allows an administrator to join an user account with a given `user_id`
+This API allows an administrator to join a user account with a given `user_id`
to a room with a given `room_id_or_alias`. You can only modify the membership of
local users. The server administrator must be in the room and have permission to
invite users.
diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md
index c64d1539b0..01f67c952a 100644
--- a/docs/message_retention_policies.md
+++ b/docs/message_retention_policies.md
@@ -51,8 +51,8 @@ clients.
## Server configuration
-Support for this feature can be enabled and configured by adding a the
-`retention` in the Synapse configuration file (see
+Support for this feature can be enabled and configured by adding the
+`retention` option in the Synapse configuration file (see
[configuration manual](usage/configuration/config_documentation.md#retention)).
To enable support for message retention policies, set the setting
@@ -117,7 +117,7 @@ In this example, we define three jobs:
policy's `max_lifetime` is greater than a week.
Note that this example is tailored to show different configurations and
-features slightly more jobs than it's probably necessary (in practice, a
+features slightly more jobs than is probably necessary (in practice, a
server admin would probably consider it better to replace the two last
jobs with one that runs once a day and handles rooms which
policy's `max_lifetime` is greater than 3 days).
diff --git a/docs/postgres.md b/docs/postgres.md
index 921bae9877..ae34f7689b 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -128,7 +128,7 @@ can read more about that [here](https://www.postgresql.org/docs/10/kernel-resour
### Overview
The script `synapse_port_db` allows porting an existing synapse server
-backed by SQLite to using PostgreSQL. This is done in as a two phase
+backed by SQLite to using PostgreSQL. This is done as a two phase
process:
1. Copy the existing SQLite database to a separate location and run
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index 9126874d44..ed3e59c470 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -259,9 +259,9 @@ users, etc.) to the developers via the `--report-stats` argument.
This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your homeserver to
-identify itself to other homeserver, so don't lose or delete them. It would be
+identify itself to other homeservers, so don't lose or delete them. It would be
wise to back them up somewhere safe. (If, for whatever reason, you do need to
-change your homeserver's keys, you may find that other homeserver have the
+change your homeserver's keys, you may find that other homeservers have the
old key cached. If you update the signing key, you should change the name of the
key in the `.signing.key` file (the second word) to something
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).
diff --git a/docs/usage/administration/admin_api/background_updates.md b/docs/usage/administration/admin_api/background_updates.md
index 9f6ac7d567..7b75ee5587 100644
--- a/docs/usage/administration/admin_api/background_updates.md
+++ b/docs/usage/administration/admin_api/background_updates.md
@@ -44,7 +44,7 @@ For each update:
## Enabled
-This API allow pausing background updates.
+This API allows pausing background updates.
Background updates should *not* be paused for significant periods of time, as
this can affect the performance of Synapse.
diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md
index 092dcc1c84..0d98f73fb1 100644
--- a/docs/usage/administration/admin_faq.md
+++ b/docs/usage/administration/admin_faq.md
@@ -241,7 +241,7 @@ in memory constrained environments, or increased if performance starts to
degrade.
However, degraded performance due to a low cache factor, common on
-machines with slow disks, often leads to explosions in memory use due
+machines with slow disks, often leads to explosions in memory use due to
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
diff --git a/docs/user_directory.md b/docs/user_directory.md
index 1271cfb862..be8664a016 100644
--- a/docs/user_directory.md
+++ b/docs/user_directory.md
@@ -86,9 +86,9 @@ The search term is then split into words:
* If unavailable, then runs of ASCII characters, numbers, underscores, and hyphens
are considered words.
-The queries for PostgreSQL and SQLite are detailed below, by their overall goal
+The queries for PostgreSQL and SQLite are detailed below, but their overall goal
is to find matching users, preferring users who are "real" (e.g. not bots,
-not deactivated). It is assumed that real users will have an display name and
+not deactivated). It is assumed that real users will have a display name and
avatar set.
### PostgreSQL
diff --git a/docs/workers.md b/docs/workers.md
index 9a0cc9f2f4..82f4bfc1d1 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -634,7 +634,7 @@ worker application type.
#### Push Notifications
-You can designate generic worker to sending push notifications to
+You can designate generic workers to send push notifications to
a [push gateway](https://spec.matrix.org/v1.5/push-gateway-api/) such as
[sygnal](https://github.com/matrix-org/sygnal) and email.
From 02bda250f8991bd102b9baaee6b5f2820d31d273 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 29 Apr 2024 14:05:24 +0100
Subject: [PATCH 041/503] Bump furo from 2024.1.29 to 2024.4.27 (#17133)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [furo](https://github.com/pradyunsg/furo) from 2024.1.29 to
2024.4.27.
Changelog
Sourced from furo's
changelog .
Changelog
2024.04.27 -- Bold Burgundy
Add a skip to content link.
Add --font-stack--headings.
Add :visited colour and enforce uniform contrast
between light/dark.
Add an offset of :target to reduce back-to-top
overlap.
Improve dark mode colours.
Fix outstanding colour contrast warnings on Firefox.
Fix bad indent in footnotes.
Tweak handling of default configuration options in a more resilient
manner.
Tweak length and sizing of API source links.
Stop search engine indexing on search page.
2024.01.29 -- Amazing Amethyst
Fix canonical url when building with dirhtml.
Relicense the demo module.
2023.09.10 -- Zesty Zaffre
Make asset hash injection idempotent, fixing Sphinx 6
compatibility.
Fix the check for HTML builders, fixing non-HTML Read the Docs
builds.
2023.08.19 -- Xenolithic Xanadu
Fix missing search context with Sphinx 7.2, for dirhtml builds.
Drop support for Python 3.7.
Present configuration errors in a better format -- thanks @AA-Turner !
Bump require_sphinx() to Sphinx 6.0, in line with
dependency changes in Unassuming Ultramarine.
2023.08.17 -- Wonderous White
Fix compatibility with Sphinx 7.2.0 and 7.2.1.
2023.07.26 -- Vigilant Volt
Fix compatibility with Sphinx 7.1.
... (truncated)
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index cfe0045afc..b6b04e4a97 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -572,13 +572,13 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler",
[[package]]
name = "furo"
-version = "2024.1.29"
+version = "2024.4.27"
description = "A clean customisable Sphinx documentation theme."
optional = false
python-versions = ">=3.8"
files = [
- {file = "furo-2024.1.29-py3-none-any.whl", hash = "sha256:3548be2cef45a32f8cdc0272d415fcb3e5fa6a0eb4ddfe21df3ecf1fe45a13cf"},
- {file = "furo-2024.1.29.tar.gz", hash = "sha256:4d6b2fe3f10a6e36eb9cc24c1e7beb38d7a23fc7b3c382867503b7fcac8a1e02"},
+ {file = "furo-2024.4.27-py3-none-any.whl", hash = "sha256:f7eb1b2c2204fd9cbd4af42e027289a67f17a98a4e14f4f9e2f17b96d61bb020"},
+ {file = "furo-2024.4.27.tar.gz", hash = "sha256:15a9b65269038def2cefafb86c71c6616e3969b8f07ba231f588c10c4aee6d88"},
]
[package.dependencies]
From f6437ca1c4115a1dc1684720d23ac6d2466b8678 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 29 Apr 2024 14:05:30 +0100
Subject: [PATCH 042/503] Bump serde from 1.0.198 to 1.0.199 (#17132)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.198 to
1.0.199.
Release notes
Sourced from serde's
releases .
v1.0.199
Fix ambiguous associated item when
forward_to_deserialize_any! is used on an enum with
Error variant (#2732 ,
thanks @aatifsyed )
Commits
1477028
Release 1.0.199
789740b
Merge pull request #2732
from aatifsyed/master
8fe7539
fix: ambiguous associated type in forward_to_deserialize_any!
f6623a3
Ignore cast_precision_loss pedantic clippy lint
See full diff in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 8cbcdbd470..24127c6540 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -485,18 +485,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.198"
+version = "1.0.199"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc"
+checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.198"
+version = "1.0.199"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9"
+checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc"
dependencies = [
"proc-macro2",
"quote",
From 48ee17dc79176094c96145a27e7e3a423cb5efc9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 29 Apr 2024 14:05:53 +0100
Subject: [PATCH 043/503] Bump twisted from 23.10.0 to 24.3.0 (#17135)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [twisted](https://github.com/twisted/twisted) from 23.10.0 to
24.3.0.
Release notes
Sourced from twisted's
releases .
Twisted 24.3.0 (2024-03-01)
This release supports PyPy v7.3.14.
Bugfixes
twisted.logger.formatEvent now honors dotted method names, not just
flat function names, in format strings, as it has long been
explicitly documented to do. So, you will now get the expected
result from [formatEvent("here's the result of calling a method at
log-format time: {obj.method()}", obj=...)]{.title-ref} (#9347 )
twisted.web.http.HTTPChannel now ignores the trailer headers
provided in the last chunk of a chunked encoded response, rather
than raising an exception. (#11997 )
twisted.protocols.tls.BufferingTLSTransport, used by default by
twisted.protocols.tls.TLSMemoryBIOFactory, was refactored for
improved performance when doing a high number of small writes.
(#12011 )
twisted.python.failure.Failure now throws exception for generators
without triggering a deprecation warnings on Python 3.12. (#12026 )
twisted.internet.process.Process, used by
reactor.spawnProcess,
now copies the parent environment when the [env=None]{.title-ref}
argument is passed on Posix systems and os.posix_spawnp is
used
internally. (#12068 )
twisted.internet.defer.inlineCallbacks.returnValue's stack
introspection was adjusted for the latest PyPy 7.3.14 release,
allowing legacy @inlineCallbacks
to run on new PyPY versions.
(#12084 )
Deprecations and Removals
twisted.trial.reporter.TestRun.startTest() is no longer called for
tests with skip annotation or skip attribute for Python 3.12.1 or
newer. This is the result of upstream Python gh-106584
change. The
behavior is not changed in 3.12.0 or older. (#12052 )
Misc
#11902 ,
#12018 ,
#12023 ,
#12031 ,
#12032 ,
#12052 ,
#12056 ,
#12067 ,
#12076 ,
#12078 ,
#12087 ,
#12095
Conch
No significant changes.
... (truncated)
Changelog
Sourced from twisted's
changelog .
Twisted 24.3.0 (2024-03-01)
This release supports PyPy v7.3.14.
Bugfixes
twisted.logger.formatEvent now honors dotted method names, not just
flat
function names, in format strings, as it has long been explicitly
documented to
do. So, you will now get the expected result from
formatEvent("here's the result of calling a method at
log-format time: {obj.method()}", obj=...) (#9347 )
twisted.web.http.HTTPChannel now ignores the trailer headers
provided in the last chunk of a chunked encoded response, rather than
raising an exception. (#11997 )
twisted.protocols.tls.BufferingTLSTransport, used by default by
twisted.protocols.tls.TLSMemoryBIOFactory, was refactored for improved
performance when doing a high number of small writes. (#12011 )
twisted.python.failure.Failure now throws exception for generators
without triggering a deprecation warnings on Python 3.12. (#12026 )
twisted.internet.process.Process, used by
reactor.spawnProcess, now copies the parent environment
when the env=None argument is passed on Posix systems and
os.posix_spawnp is used internally. (#12068 )
twisted.internet.defer.inlineCallbacks.returnValue's stack
introspection was adjusted for the latest PyPy 7.3.14 release, allowing
legacy @inlineCallbacks
to run on new PyPY versions. (#12084 )
Deprecations and Removals
twisted.trial.reporter.TestRun.startTest() is no longer called for
tests
with skip annotation or skip attribute for Python 3.12.1 or newer.
This is the result of upstream Python gh-106584
change.
The behavior is not changed in 3.12.0 or older. (#12052 )
Misc
#11902 ,
#12018 ,
#12023 ,
#12031 ,
#12032 ,
#12052 ,
#12056 ,
#12067 ,
#12076 ,
#12078 ,
#12087 ,
#12095
Conch
No significant changes.
Web
Bugfixes
- The documentation for twisted.web.client.CookieAgent no longer
references
long-deprecated ``cookielib`` and ``urllib2`` standard library modules.
([#12044](https://github.com/twisted/twisted/issues/12044))
</tr></table>
... (truncated)
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index b6b04e4a97..9beeabc684 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2976,13 +2976,13 @@ urllib3 = ">=1.26.0"
[[package]]
name = "twisted"
-version = "23.10.0"
+version = "24.3.0"
description = "An asynchronous networking framework written in Python"
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "twisted-23.10.0-py3-none-any.whl", hash = "sha256:4ae8bce12999a35f7fe6443e7f1893e6fe09588c8d2bed9c35cdce8ff2d5b444"},
- {file = "twisted-23.10.0.tar.gz", hash = "sha256:987847a0790a2c597197613686e2784fd54167df3a55d0fb17c8412305d76ce5"},
+ {file = "twisted-24.3.0-py3-none-any.whl", hash = "sha256:039f2e6a49ab5108abd94de187fa92377abe5985c7a72d68d0ad266ba19eae63"},
+ {file = "twisted-24.3.0.tar.gz", hash = "sha256:6b38b6ece7296b5e122c9eb17da2eeab3d98a198f50ca9efd00fb03e5b4fd4ae"},
]
[package.dependencies]
@@ -3006,7 +3006,7 @@ dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "
gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"]
-mypy = ["mypy (>=1.5.1,<1.6.0)", "mypy-zope (>=1.0.1,<1.1.0)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"]
+mypy = ["mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"]
osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"]
From 6e373468a4655a0e3c79f56e2ce0be8219dbfeb8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 29 Apr 2024 14:06:02 +0100
Subject: [PATCH 044/503] Bump idna from 3.6 to 3.7 (#17136)
Bumps [idna](https://github.com/kjd/idna) from 3.6 to 3.7.
Release notes
Sourced from idna's
releases .
v3.7
What's Changed
Fix issue where specially crafted inputs to encode() could take
exceptionally long amount of time to process. [CVE-2024-3651]
Thanks to Guido Vranken for reporting the issue.
Full Changelog : https://github.com/kjd/idna/compare/v3.6...v3.7
Changelog
Sourced from idna's
changelog .
3.7 (2024-04-11)
++++++++++++++++
Fix issue where specially crafted inputs to encode() could
take exceptionally long amount of time to process. [CVE-2024-3651]
Thanks to Guido Vranken for reporting the issue.
Commits
1d365e1
Release v3.7
c1b3154
Merge pull request #172 from
kjd/optimize-contextj
0394ec7
Merge branch 'master' into optimize-contextj
cd58a23
Merge pull request #152 from
elliotwutingfeng/dev
5beb28b
More efficient resolution of joiner contexts
1b12148
Update ossf/scorecard-action to v2.3.1
d516b87
Update Github actions/checkout to v4
c095c75
Merge branch 'master' into dev
60a0a4c
Fix typo in GitHub Actions workflow key
5918a0e
Merge branch 'master' into dev
Additional commits viewable in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 9beeabc684..bc71294656 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -752,13 +752,13 @@ idna = ">=2.5"
[[package]]
name = "idna"
-version = "3.6"
+version = "3.7"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.5"
files = [
- {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"},
- {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"},
+ {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
+ {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
]
[[package]]
From 6a275828c8c15f03a4a4e26922972bcad265ba9c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 29 Apr 2024 14:06:14 +0100
Subject: [PATCH 045/503] Bump types-setuptools from 69.0.0.20240125 to
69.5.0.20240423 (#17134)
Bumps [types-setuptools](https://github.com/python/typeshed) from
69.0.0.20240125 to 69.5.0.20240423.
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index bc71294656..baa630be8d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -3184,13 +3184,13 @@ urllib3 = ">=2"
[[package]]
name = "types-setuptools"
-version = "69.0.0.20240125"
+version = "69.5.0.20240423"
description = "Typing stubs for setuptools"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-setuptools-69.0.0.20240125.tar.gz", hash = "sha256:22ad498cb585b22ce8c97ada1fccdf294a2e0dd7dc984a28535a84ea82f45b3f"},
- {file = "types_setuptools-69.0.0.20240125-py3-none-any.whl", hash = "sha256:00835f959ff24ebc32c55da8df9d46e8df25e3c4bfacb43e98b61fde51a4bc41"},
+ {file = "types-setuptools-69.5.0.20240423.tar.gz", hash = "sha256:a7ba908f1746c4337d13f027fa0f4a5bcad6d1d92048219ba792b3295c58586d"},
+ {file = "types_setuptools-69.5.0.20240423-py3-none-any.whl", hash = "sha256:a4381e041510755a6c9210e26ad55b1629bc10237aeb9cb8b6bd24996b73db48"},
]
[[package]]
From 38bc7a009d30cedc9dd19cb0e8e45d669f1e9706 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Mon, 29 Apr 2024 09:09:03 -0400
Subject: [PATCH 046/503] Declare support for Matrix v1.10. (#17082)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Pretty straightforward. 😄
Fixes #17021
---
changelog.d/17082.feature | 1 +
synapse/rest/client/versions.py | 1 +
2 files changed, 2 insertions(+)
create mode 100644 changelog.d/17082.feature
diff --git a/changelog.d/17082.feature b/changelog.d/17082.feature
new file mode 100644
index 0000000000..e3990f44e7
--- /dev/null
+++ b/changelog.d/17082.feature
@@ -0,0 +1 @@
+Declare support for [Matrix v1.10](https://matrix.org/blog/2024/03/22/matrix-v1.10-release/). Contributed by @clokep.
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index fa453a3b02..56de6906d0 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -89,6 +89,7 @@ class VersionsRestServlet(RestServlet):
"v1.7",
"v1.8",
"v1.9",
+ "v1.10",
],
# as per MSC1497:
"unstable_features": {
From c897ac63e90e198723baa4bc73574a30fb02176b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 29 Apr 2024 14:11:00 +0100
Subject: [PATCH 047/503] Ensure that incoming to-device messages are not
dropped (#17127)
... when workers are unreachable, etc.
Fixes https://github.com/element-hq/synapse/issues/17117.
The general principle is just to make sure that we propagate any
exceptions to the JsonResource, so that we return an error code to the
sending server. That means that the sending server no longer considers
the message safely sent, so it will retry later.
In the issue, Erik mentions that an alternative solution would be to
persist the to-device messages into a table so that they can be retried.
This might be an improvement for performance, but even if we did that,
we still need this mechanism, since we might be unable to reach the
database. So, if we want to do that, it can be a later follow-up.
---------
Co-authored-by: Erik Johnston
---
changelog.d/17127.bugfix | 1 +
synapse/federation/federation_server.py | 44 +++++++++++++---------
synapse/handlers/devicemessage.py | 3 ++
tests/federation/test_federation_server.py | 17 +++++++++
tests/federation/transport/test_server.py | 9 ++++-
5 files changed, 55 insertions(+), 19 deletions(-)
create mode 100644 changelog.d/17127.bugfix
diff --git a/changelog.d/17127.bugfix b/changelog.d/17127.bugfix
new file mode 100644
index 0000000000..93c7314098
--- /dev/null
+++ b/changelog.d/17127.bugfix
@@ -0,0 +1 @@
+Fix a bug which meant that to-device messages received over federation could be dropped when the server was under load or networking problems caused problems between Synapse processes or the database.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 65d3a661fe..7ffc650aa1 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -546,7 +546,25 @@ class FederationServer(FederationBase):
edu_type=edu_dict["edu_type"],
content=edu_dict["content"],
)
- await self.registry.on_edu(edu.edu_type, origin, edu.content)
+ try:
+ await self.registry.on_edu(edu.edu_type, origin, edu.content)
+ except Exception:
+ # If there was an error handling the EDU, we must reject the
+ # transaction.
+ #
+ # Some EDU types (notably, to-device messages) are, despite their name,
+ # expected to be reliable; if we weren't able to do something with it,
+ # we have to tell the sender that, and the only way the protocol gives
+ # us to do so is by sending an HTTP error back on the transaction.
+ #
+ # We log the exception now, and then raise a new SynapseError to cause
+ # the transaction to be failed.
+ logger.exception("Error handling EDU of type %s", edu.edu_type)
+ raise SynapseError(500, f"Error handing EDU of type {edu.edu_type}")
+
+ # TODO: if the first EDU fails, we should probably abort the whole
+ # thing rather than carrying on with the rest of them. That would
+ # probably be best done inside `concurrently_execute`.
await concurrently_execute(
_process_edu,
@@ -1414,12 +1432,7 @@ class FederationHandlerRegistry:
handler = self.edu_handlers.get(edu_type)
if handler:
with start_active_span_from_edu(content, "handle_edu"):
- try:
- await handler(origin, content)
- except SynapseError as e:
- logger.info("Failed to handle edu %r: %r", edu_type, e)
- except Exception:
- logger.exception("Failed to handle edu %r", edu_type)
+ await handler(origin, content)
return
# Check if we can route it somewhere else that isn't us
@@ -1428,17 +1441,12 @@ class FederationHandlerRegistry:
# Pick an instance randomly so that we don't overload one.
route_to = random.choice(instances)
- try:
- await self._send_edu(
- instance_name=route_to,
- edu_type=edu_type,
- origin=origin,
- content=content,
- )
- except SynapseError as e:
- logger.info("Failed to handle edu %r: %r", edu_type, e)
- except Exception:
- logger.exception("Failed to handle edu %r", edu_type)
+ await self._send_edu(
+ instance_name=route_to,
+ edu_type=edu_type,
+ origin=origin,
+ content=content,
+ )
return
# Oh well, let's just log and move on.
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index 2b034dcbb7..79be7c97c8 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -104,6 +104,9 @@ class DeviceMessageHandler:
"""
Handle receiving to-device messages from remote homeservers.
+ Note that any errors thrown from this method will cause the federation /send
+ request to receive an error response.
+
Args:
origin: The remote homeserver.
content: The JSON dictionary containing the to-device messages.
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 36684c2c91..88261450b1 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -67,6 +67,23 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase):
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON")
+ def test_failed_edu_causes_500(self) -> None:
+ """If the EDU handler fails, /send should return a 500."""
+
+ async def failing_handler(_origin: str, _content: JsonDict) -> None:
+ raise Exception("bleh")
+
+ self.hs.get_federation_registry().register_edu_handler(
+ "FAIL_EDU_TYPE", failing_handler
+ )
+
+ channel = self.make_signed_federation_request(
+ "PUT",
+ "/_matrix/federation/v1/send/txn",
+ {"edus": [{"edu_type": "FAIL_EDU_TYPE", "content": {}}]},
+ )
+ self.assertEqual(500, channel.code, channel.result)
+
class ServerACLsTestCase(unittest.TestCase):
def test_blocked_server(self) -> None:
diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py
index 190b79bf26..0237369998 100644
--- a/tests/federation/transport/test_server.py
+++ b/tests/federation/transport/test_server.py
@@ -59,7 +59,14 @@ class RoomDirectoryFederationTests(unittest.FederatingHomeserverTestCase):
"/_matrix/federation/v1/send/txn_id_1234/",
content={
"edus": [
- {"edu_type": EduTypes.DEVICE_LIST_UPDATE, "content": {"foo": "bar"}}
+ {
+ "edu_type": EduTypes.DEVICE_LIST_UPDATE,
+ "content": {
+ "device_id": "QBUAZIFURK",
+ "stream_id": 0,
+ "user_id": "@user:id",
+ },
+ },
],
"pdus": [],
},
From 758aec6b34958e08f5195266ccc0e62a017bba2a Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 29 Apr 2024 14:33:25 +0100
Subject: [PATCH 048/503] Update tornado 6.2 -> 6.4 (#17131)
---
changelog.d/17131.misc | 1 +
poetry.lock | 37 +++++++++++++------------------------
2 files changed, 14 insertions(+), 24 deletions(-)
create mode 100644 changelog.d/17131.misc
diff --git a/changelog.d/17131.misc b/changelog.d/17131.misc
new file mode 100644
index 0000000000..fe1ecc7688
--- /dev/null
+++ b/changelog.d/17131.misc
@@ -0,0 +1 @@
+Update `tornado` Python dependency from 6.2 to 6.4.
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index baa630be8d..f564fd420a 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2186,7 +2186,6 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
- {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -2194,16 +2193,8 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
- {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
- {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
- {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
- {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@@ -2220,7 +2211,6 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
- {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -2228,7 +2218,6 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
- {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@@ -2891,22 +2880,22 @@ files = [
[[package]]
name = "tornado"
-version = "6.2"
+version = "6.4"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
optional = true
-python-versions = ">= 3.7"
+python-versions = ">= 3.8"
files = [
- {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"},
- {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"},
- {file = "tornado-6.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac"},
- {file = "tornado-6.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75"},
- {file = "tornado-6.2-cp37-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e"},
- {file = "tornado-6.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8"},
- {file = "tornado-6.2-cp37-abi3-musllinux_1_1_i686.whl", hash = "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b"},
- {file = "tornado-6.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca"},
- {file = "tornado-6.2-cp37-abi3-win32.whl", hash = "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23"},
- {file = "tornado-6.2-cp37-abi3-win_amd64.whl", hash = "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"},
- {file = "tornado-6.2.tar.gz", hash = "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13"},
+ {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"},
+ {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"},
+ {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"},
+ {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"},
+ {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"},
+ {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"},
+ {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"},
+ {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"},
+ {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"},
+ {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"},
+ {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"},
]
[[package]]
From b548f7803a9b7ba51a66d47ddb9bb69dce541a48 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 29 Apr 2024 15:22:13 +0100
Subject: [PATCH 049/503] Add support for MSC4115 (#17104)
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/17104.feature | 1 +
.../conf/workers-shared-extra.yaml.j2 | 4 +-
rust/src/events/internal_metadata.rs | 9 +-
synapse/api/constants.py | 7 +
synapse/config/experimental.py | 4 +
synapse/events/utils.py | 30 +-
synapse/handlers/admin.py | 6 +-
synapse/handlers/events.py | 7 +-
synapse/handlers/initial_sync.py | 7 +-
synapse/handlers/pagination.py | 1 +
synapse/handlers/relations.py | 3 +
synapse/handlers/room.py | 1 +
synapse/handlers/search.py | 20 +-
synapse/handlers/sync.py | 2 +
synapse/notifier.py | 1 +
synapse/push/mailer.py | 5 +-
synapse/visibility.py | 73 +++-
tests/events/test_utils.py | 24 ++
tests/rest/client/test_retention.py | 7 +-
tests/test_visibility.py | 320 +++++++++++++-----
20 files changed, 407 insertions(+), 125 deletions(-)
create mode 100644 changelog.d/17104.feature
diff --git a/changelog.d/17104.feature b/changelog.d/17104.feature
new file mode 100644
index 0000000000..1c2355e155
--- /dev/null
+++ b/changelog.d/17104.feature
@@ -0,0 +1 @@
+Add support for MSC4115 (membership metadata on events).
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 32eada4419..a2c378f547 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
- # client-side support for partial state in /send_join responses
- faster_joins: true
# Enable support for polls
msc3381_polls_enabled: true
# Enable deleting device-specific notification settings stored in account data
@@ -105,6 +103,8 @@ experimental_features:
# no UIA for x-signing upload for the first time
msc3967_enabled: true
+ msc4115_membership_on_events: true
+
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
diff --git a/rust/src/events/internal_metadata.rs b/rust/src/events/internal_metadata.rs
index a53601862d..53c7b1ba61 100644
--- a/rust/src/events/internal_metadata.rs
+++ b/rust/src/events/internal_metadata.rs
@@ -20,8 +20,10 @@
//! Implements the internal metadata class attached to events.
//!
-//! The internal metadata is a bit like a `TypedDict`, in that it is stored as a
-//! JSON dict in the DB. Most events have zero, or only a few, of these keys
+//! The internal metadata is a bit like a `TypedDict`, in that most of
+//! it is stored as a JSON dict in the DB (the exceptions being `outlier`
+//! and `stream_ordering` which have their own columns in the database).
+//! Most events have zero, or only a few, of these keys
//! set. Therefore, since we care more about memory size than performance here,
//! we store these fields in a mapping.
//!
@@ -234,6 +236,9 @@ impl EventInternalMetadata {
self.clone()
}
+ /// Get a dict holding the data stored in the `internal_metadata` column in the database.
+ ///
+ /// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here.
fn get_dict(&self, py: Python<'_>) -> PyResult {
let dict = PyDict::new(py);
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 98884b4967..0a9123c56b 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -234,6 +234,13 @@ class EventContentFields:
TO_DEVICE_MSGID: Final = "org.matrix.msgid"
+class EventUnsignedContentFields:
+ """Fields found inside the 'unsigned' data on events"""
+
+ # Requesting user's membership, per MSC4115
+ MSC4115_MEMBERSHIP: Final = "io.element.msc4115.membership"
+
+
class RoomTypes:
"""Understood values of the room_type field of m.room.create events."""
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index baa3580f29..749452ce93 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -432,3 +432,7 @@ class ExperimentalConfig(Config):
"You cannot have MSC4108 both enabled and delegated at the same time",
("experimental", "msc4108_delegation_endpoint"),
)
+
+ self.msc4115_membership_on_events = experimental.get(
+ "msc4115_membership_on_events", False
+ )
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index e0613d0dbc..0772472312 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -49,7 +49,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict, Requester
-from . import EventBase
+from . import EventBase, make_event_from_dict
if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations
@@ -82,17 +82,14 @@ def prune_event(event: EventBase) -> EventBase:
"""
pruned_event_dict = prune_event_dict(event.room_version, event.get_dict())
- from . import make_event_from_dict
-
pruned_event = make_event_from_dict(
pruned_event_dict, event.room_version, event.internal_metadata.get_dict()
)
- # copy the internal fields
+ # Copy the bits of `internal_metadata` that aren't returned by `get_dict`
pruned_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
)
-
pruned_event.internal_metadata.outlier = event.internal_metadata.outlier
# Mark the event as redacted
@@ -101,6 +98,29 @@ def prune_event(event: EventBase) -> EventBase:
return pruned_event
+def clone_event(event: EventBase) -> EventBase:
+ """Take a copy of the event.
+
+ This is mostly useful because it does a *shallow* copy of the `unsigned` data,
+ which means it can then be updated without corrupting the in-memory cache. Note that
+ other properties of the event, such as `content`, are *not* (currently) copied here.
+ """
+ # XXX: We rely on at least one of `event.get_dict()` and `make_event_from_dict()`
+ # making a copy of `unsigned`. Currently, both do, though I don't really know why.
+ # Still, as long as they do, there's not much point doing yet another copy here.
+ new_event = make_event_from_dict(
+ event.get_dict(), event.room_version, event.internal_metadata.get_dict()
+ )
+
+ # Copy the bits of `internal_metadata` that aren't returned by `get_dict`.
+ new_event.internal_metadata.stream_ordering = (
+ event.internal_metadata.stream_ordering
+ )
+ new_event.internal_metadata.outlier = event.internal_metadata.outlier
+
+ return new_event
+
+
def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
"""Redacts the event_dict in the same way as `prune_event`, except it
operates on dicts rather than event objects
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 360614e25b..702d40332c 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -42,6 +42,7 @@ class AdminHandler:
self._device_handler = hs.get_device_handler()
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
+ self._hs_config = hs.config
self._msc3866_enabled = hs.config.experimental.msc3866.enabled
async def get_whois(self, user: UserID) -> JsonMapping:
@@ -217,7 +218,10 @@ class AdminHandler:
)
events = await filter_events_for_client(
- self._storage_controllers, user_id, events
+ self._storage_controllers,
+ user_id,
+ events,
+ msc4115_membership_on_events=self._hs_config.experimental.msc4115_membership_on_events,
)
writer.write_events(room_id, events)
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index c3fee74a98..09d553cff1 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -148,6 +148,7 @@ class EventHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
+ self._config = hs.config
async def get_event(
self,
@@ -189,7 +190,11 @@ class EventHandler:
is_peeking = not is_user_in_room
filtered = await filter_events_for_client(
- self._storage_controllers, user.to_string(), [event], is_peeking=is_peeking
+ self._storage_controllers,
+ user.to_string(),
+ [event],
+ is_peeking=is_peeking,
+ msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)
if not filtered:
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index bcc5b285ac..d99fc4bec0 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -221,7 +221,10 @@ class InitialSyncHandler:
).addErrback(unwrapFirstError)
messages = await filter_events_for_client(
- self._storage_controllers, user_id, messages
+ self._storage_controllers,
+ user_id,
+ messages,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@@ -380,6 +383,7 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
@@ -494,6 +498,7 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index cd3a9088cd..6617105cdb 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -623,6 +623,7 @@ class PaginationHandler:
user_id,
events,
is_peeking=(member_event_id is None),
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
# if after the filter applied there are no more events
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index 931ac0c813..c5cee8860b 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -95,6 +95,7 @@ class RelationsHandler:
self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
self._event_creation_handler = hs.get_event_creation_handler()
+ self._config = hs.config
async def get_relations(
self,
@@ -163,6 +164,7 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
+ msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)
# The relations returned for the requested event do include their
@@ -608,6 +610,7 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
+ msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)
aggregations = await self.get_bundled_aggregations(
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 5e81a51638..51739a2653 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -1476,6 +1476,7 @@ class RoomContextHandler:
user.to_string(),
events,
is_peeking=is_peeking,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
event = await self.store.get_event(
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 19c5a2f257..fdbe98de3b 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -480,7 +480,10 @@ class SearchHandler:
filtered_events = await search_filter.filter([r["event"] for r in results])
events = await filter_events_for_client(
- self._storage_controllers, user.to_string(), filtered_events
+ self._storage_controllers,
+ user.to_string(),
+ filtered_events,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
events.sort(key=lambda e: -rank_map[e.event_id])
@@ -579,7 +582,10 @@ class SearchHandler:
filtered_events = await search_filter.filter([r["event"] for r in results])
events = await filter_events_for_client(
- self._storage_controllers, user.to_string(), filtered_events
+ self._storage_controllers,
+ user.to_string(),
+ filtered_events,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
room_events.extend(events)
@@ -664,11 +670,17 @@ class SearchHandler:
)
events_before = await filter_events_for_client(
- self._storage_controllers, user.to_string(), res.events_before
+ self._storage_controllers,
+ user.to_string(),
+ res.events_before,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
events_after = await filter_events_for_client(
- self._storage_controllers, user.to_string(), res.events_after
+ self._storage_controllers,
+ user.to_string(),
+ res.events_after,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
context: JsonDict = {
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index a6d54ee4b8..8ff45a3353 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -596,6 +596,7 @@ class SyncHandler:
sync_config.user.to_string(),
recents,
always_include_ids=current_state_ids,
+ msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
)
log_kv({"recents_after_visibility_filtering": len(recents)})
else:
@@ -681,6 +682,7 @@ class SyncHandler:
sync_config.user.to_string(),
loaded_recents,
always_include_ids=current_state_ids,
+ msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
)
loaded_recents = []
diff --git a/synapse/notifier.py b/synapse/notifier.py
index e87333a80a..7c1cd3b5f2 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -721,6 +721,7 @@ class Notifier:
user.to_string(),
new_events,
is_peeking=is_peeking,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
elif keyname == StreamKeyType.PRESENCE:
now = self.clock.time_msec()
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 7c15eb7440..49ce9d6dda 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -529,7 +529,10 @@ class Mailer:
}
the_events = await filter_events_for_client(
- self._storage_controllers, user_id, results.events_before
+ self._storage_controllers,
+ user_id,
+ results.events_before,
+ msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
the_events.append(notif_event)
diff --git a/synapse/visibility.py b/synapse/visibility.py
index d1d478129f..09a947ef15 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -36,10 +36,15 @@ from typing import (
import attr
-from synapse.api.constants import EventTypes, HistoryVisibility, Membership
+from synapse.api.constants import (
+ EventTypes,
+ EventUnsignedContentFields,
+ HistoryVisibility,
+ Membership,
+)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
-from synapse.events.utils import prune_event
+from synapse.events.utils import clone_event, prune_event
from synapse.logging.opentracing import trace
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
@@ -77,6 +82,7 @@ async def filter_events_for_client(
is_peeking: bool = False,
always_include_ids: FrozenSet[str] = frozenset(),
filter_send_to_client: bool = True,
+ msc4115_membership_on_events: bool = False,
) -> List[EventBase]:
"""
Check which events a user is allowed to see. If the user can see the event but its
@@ -95,9 +101,12 @@ async def filter_events_for_client(
filter_send_to_client: Whether we're checking an event that's going to be
sent to a client. This might not always be the case since this function can
also be called to check whether a user can see the state at a given point.
+ msc4115_membership_on_events: Whether to include the requesting user's
+ membership in the "unsigned" data, per MSC4115.
Returns:
- The filtered events.
+ The filtered events. If `msc4115_membership_on_events` is true, the `unsigned`
+ data is annotated with the membership state of `user_id` at each event.
"""
# Filter out events that have been soft failed so that we don't relay them
# to clients.
@@ -134,7 +143,8 @@ async def filter_events_for_client(
)
def allowed(event: EventBase) -> Optional[EventBase]:
- return _check_client_allowed_to_see_event(
+ state_after_event = event_id_to_state.get(event.event_id)
+ filtered = _check_client_allowed_to_see_event(
user_id=user_id,
event=event,
clock=storage.main.clock,
@@ -142,13 +152,45 @@ async def filter_events_for_client(
sender_ignored=event.sender in ignore_list,
always_include_ids=always_include_ids,
retention_policy=retention_policies[room_id],
- state=event_id_to_state.get(event.event_id),
+ state=state_after_event,
is_peeking=is_peeking,
sender_erased=erased_senders.get(event.sender, False),
)
+ if filtered is None:
+ return None
- # Check each event: gives an iterable of None or (a potentially modified)
- # EventBase.
+ if not msc4115_membership_on_events:
+ return filtered
+
+ # Annotate the event with the user's membership after the event.
+ #
+ # Normally we just look in `state_after_event`, but if the event is an outlier
+ # we won't have such a state. The only outliers that are returned here are the
+ # user's own membership event, so we can just inspect that.
+
+ user_membership_event: Optional[EventBase]
+ if event.type == EventTypes.Member and event.state_key == user_id:
+ user_membership_event = event
+ elif state_after_event is not None:
+ user_membership_event = state_after_event.get((EventTypes.Member, user_id))
+ else:
+ # unreachable!
+ raise Exception("Missing state for event that is not user's own membership")
+
+ user_membership = (
+ user_membership_event.membership
+ if user_membership_event
+ else Membership.LEAVE
+ )
+
+ # Copy the event before updating the unsigned data: this shouldn't be persisted
+ # to the cache!
+ cloned = clone_event(filtered)
+ cloned.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP] = user_membership
+
+ return cloned
+
+ # Check each event: gives an iterable of None or (a modified) EventBase.
filtered_events = map(allowed, events)
# Turn it into a list and remove None entries before returning.
@@ -396,7 +438,13 @@ def _check_client_allowed_to_see_event(
@attr.s(frozen=True, slots=True, auto_attribs=True)
class _CheckMembershipReturn:
- "Return value of _check_membership"
+ """Return value of `_check_membership`.
+
+ Attributes:
+ allowed: Whether the user should be allowed to see the event.
+ joined: Whether the user was joined to the room at the event.
+ """
+
allowed: bool
joined: bool
@@ -408,12 +456,7 @@ def _check_membership(
state: StateMap[EventBase],
is_peeking: bool,
) -> _CheckMembershipReturn:
- """Check whether the user can see the event due to their membership
-
- Returns:
- True if they can, False if they can't, plus the membership of the user
- at the event.
- """
+ """Check whether the user can see the event due to their membership"""
# If the event is the user's own membership event, use the 'most joined'
# membership
membership = None
@@ -435,7 +478,7 @@ def _check_membership(
if membership == "leave" and (
prev_membership == "join" or prev_membership == "invite"
):
- return _CheckMembershipReturn(True, membership == Membership.JOIN)
+ return _CheckMembershipReturn(True, False)
new_priority = MEMBERSHIP_PRIORITY.index(membership)
old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index cf81bcf52c..d5ac66a6ed 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -32,6 +32,7 @@ from synapse.events.utils import (
PowerLevelsContent,
SerializeEventConfig,
_split_field,
+ clone_event,
copy_and_fixup_power_levels_contents,
maybe_upsert_event_field,
prune_event,
@@ -611,6 +612,29 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
)
+class CloneEventTestCase(stdlib_unittest.TestCase):
+ def test_unsigned_is_copied(self) -> None:
+ original = make_event_from_dict(
+ {
+ "type": "A",
+ "event_id": "$test:domain",
+ "unsigned": {"a": 1, "b": 2},
+ },
+ RoomVersions.V1,
+ {"txn_id": "txn"},
+ )
+ original.internal_metadata.stream_ordering = 1234
+ self.assertEqual(original.internal_metadata.stream_ordering, 1234)
+
+ cloned = clone_event(original)
+ cloned.unsigned["b"] = 3
+
+ self.assertEqual(original.unsigned, {"a": 1, "b": 2})
+ self.assertEqual(cloned.unsigned, {"a": 1, "b": 3})
+ self.assertEqual(cloned.internal_metadata.stream_ordering, 1234)
+ self.assertEqual(cloned.internal_metadata.txn_id, "txn")
+
+
class SerializeEventTestCase(stdlib_unittest.TestCase):
def serialize(self, ev: EventBase, fields: Optional[List[str]]) -> JsonDict:
return serialize_event(
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index 09a5d64349..ceae40498e 100644
--- a/tests/rest/client/test_retention.py
+++ b/tests/rest/client/test_retention.py
@@ -163,7 +163,12 @@ class RetentionTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(2, len(events), "events retrieved from database")
filtered_events = self.get_success(
- filter_events_for_client(storage_controllers, self.user_id, events)
+ filter_events_for_client(
+ storage_controllers,
+ self.user_id,
+ events,
+ msc4115_membership_on_events=True,
+ )
)
# We should only get one event back.
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index e51f72d65f..3e2100eab4 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -21,13 +21,19 @@ import logging
from typing import Optional
from unittest.mock import patch
+from synapse.api.constants import EventUnsignedContentFields
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
-from synapse.types import JsonDict, create_requester
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.types import create_requester
from synapse.visibility import filter_events_for_client, filter_events_for_server
from tests import unittest
+from tests.test_utils.event_injection import inject_event, inject_member_event
+from tests.unittest import HomeserverTestCase
from tests.utils import create_room
logger = logging.getLogger(__name__)
@@ -56,15 +62,31 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
#
# before we do that, we persist some other events to act as state.
- self._inject_visibility("@admin:hs", "joined")
+ self.get_success(
+ inject_visibility_event(self.hs, TEST_ROOM_ID, "@admin:hs", "joined")
+ )
for i in range(10):
- self._inject_room_member("@resident%i:hs" % i)
+ self.get_success(
+ inject_member_event(
+ self.hs,
+ TEST_ROOM_ID,
+ "@resident%i:hs" % i,
+ "join",
+ )
+ )
events_to_filter = []
for i in range(10):
- user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
- evt = self._inject_room_member(user, extra_content={"a": "b"})
+ evt = self.get_success(
+ inject_member_event(
+ self.hs,
+ TEST_ROOM_ID,
+ "@user%i:%s" % (i, "test_server" if i == 5 else "other_server"),
+ "join",
+ extra_content={"a": "b"},
+ )
+ )
events_to_filter.append(evt)
filtered = self.get_success(
@@ -90,8 +112,19 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
def test_filter_outlier(self) -> None:
# outlier events must be returned, for the good of the collective federation
- self._inject_room_member("@resident:remote_hs")
- self._inject_visibility("@resident:remote_hs", "joined")
+ self.get_success(
+ inject_member_event(
+ self.hs,
+ TEST_ROOM_ID,
+ "@resident:remote_hs",
+ "join",
+ )
+ )
+ self.get_success(
+ inject_visibility_event(
+ self.hs, TEST_ROOM_ID, "@resident:remote_hs", "joined"
+ )
+ )
outlier = self._inject_outlier()
self.assertEqual(
@@ -110,7 +143,9 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
)
# it should also work when there are other events in the list
- evt = self._inject_message("@unerased:local_hs")
+ evt = self.get_success(
+ inject_message_event(self.hs, TEST_ROOM_ID, "@unerased:local_hs")
+ )
filtered = self.get_success(
filter_events_for_server(
@@ -150,19 +185,34 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
# change in the middle of them.
events_to_filter = []
- evt = self._inject_message("@unerased:local_hs")
+ evt = self.get_success(
+ inject_message_event(self.hs, TEST_ROOM_ID, "@unerased:local_hs")
+ )
events_to_filter.append(evt)
- evt = self._inject_message("@erased:local_hs")
+ evt = self.get_success(
+ inject_message_event(self.hs, TEST_ROOM_ID, "@erased:local_hs")
+ )
events_to_filter.append(evt)
- evt = self._inject_room_member("@joiner:remote_hs")
+ evt = self.get_success(
+ inject_member_event(
+ self.hs,
+ TEST_ROOM_ID,
+ "@joiner:remote_hs",
+ "join",
+ )
+ )
events_to_filter.append(evt)
- evt = self._inject_message("@unerased:local_hs")
+ evt = self.get_success(
+ inject_message_event(self.hs, TEST_ROOM_ID, "@unerased:local_hs")
+ )
events_to_filter.append(evt)
- evt = self._inject_message("@erased:local_hs")
+ evt = self.get_success(
+ inject_message_event(self.hs, TEST_ROOM_ID, "@erased:local_hs")
+ )
events_to_filter.append(evt)
# the erasey user gets erased
@@ -200,76 +250,6 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
for i in (1, 4):
self.assertNotIn("body", filtered[i].content)
- def _inject_visibility(self, user_id: str, visibility: str) -> EventBase:
- content = {"history_visibility": visibility}
- builder = self.event_builder_factory.for_room_version(
- RoomVersions.V1,
- {
- "type": "m.room.history_visibility",
- "sender": user_id,
- "state_key": "",
- "room_id": TEST_ROOM_ID,
- "content": content,
- },
- )
-
- event, unpersisted_context = self.get_success(
- self.event_creation_handler.create_new_client_event(builder)
- )
- context = self.get_success(unpersisted_context.persist(event))
- self.get_success(self._persistence.persist_event(event, context))
- return event
-
- def _inject_room_member(
- self,
- user_id: str,
- membership: str = "join",
- extra_content: Optional[JsonDict] = None,
- ) -> EventBase:
- content = {"membership": membership}
- content.update(extra_content or {})
- builder = self.event_builder_factory.for_room_version(
- RoomVersions.V1,
- {
- "type": "m.room.member",
- "sender": user_id,
- "state_key": user_id,
- "room_id": TEST_ROOM_ID,
- "content": content,
- },
- )
-
- event, unpersisted_context = self.get_success(
- self.event_creation_handler.create_new_client_event(builder)
- )
- context = self.get_success(unpersisted_context.persist(event))
-
- self.get_success(self._persistence.persist_event(event, context))
- return event
-
- def _inject_message(
- self, user_id: str, content: Optional[JsonDict] = None
- ) -> EventBase:
- if content is None:
- content = {"body": "testytest", "msgtype": "m.text"}
- builder = self.event_builder_factory.for_room_version(
- RoomVersions.V1,
- {
- "type": "m.room.message",
- "sender": user_id,
- "room_id": TEST_ROOM_ID,
- "content": content,
- },
- )
-
- event, unpersisted_context = self.get_success(
- self.event_creation_handler.create_new_client_event(builder)
- )
- context = self.get_success(unpersisted_context.persist(event))
-
- self.get_success(self._persistence.persist_event(event, context))
- return event
-
def _inject_outlier(self) -> EventBase:
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
@@ -292,7 +272,122 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
return event
-class FilterEventsForClientTestCase(unittest.FederatingHomeserverTestCase):
+class FilterEventsForClientTestCase(HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def test_joined_history_visibility(self) -> None:
+ # User joins and leaves room. Should be able to see the join and leave,
+ # and messages sent between the two, but not before or after.
+
+ self.register_user("resident", "p1")
+ resident_token = self.login("resident", "p1")
+ room_id = self.helper.create_room_as("resident", tok=resident_token)
+
+ self.get_success(
+ inject_visibility_event(self.hs, room_id, "@resident:test", "joined")
+ )
+ before_event = self.get_success(
+ inject_message_event(self.hs, room_id, "@resident:test", body="before")
+ )
+ join_event = self.get_success(
+ inject_member_event(self.hs, room_id, "@joiner:test", "join")
+ )
+ during_event = self.get_success(
+ inject_message_event(self.hs, room_id, "@resident:test", body="during")
+ )
+ leave_event = self.get_success(
+ inject_member_event(self.hs, room_id, "@joiner:test", "leave")
+ )
+ after_event = self.get_success(
+ inject_message_event(self.hs, room_id, "@resident:test", body="after")
+ )
+
+ # We have to reload the events from the db, to ensure that prev_content is
+ # populated.
+ events_to_filter = [
+ self.get_success(
+ self.hs.get_storage_controllers().main.get_event(
+ e.event_id,
+ get_prev_content=True,
+ )
+ )
+ for e in [
+ before_event,
+ join_event,
+ during_event,
+ leave_event,
+ after_event,
+ ]
+ ]
+
+ # Now run the events through the filter, and check that we can see the events
+ # we expect, and that the membership prop is as expected.
+ #
+ # We deliberately do the queries for both users upfront; this simulates
+ # concurrent queries on the server, and helps ensure that we aren't
+ # accidentally serving the same event object (with the same unsigned.membership
+ # property) to both users.
+ joiner_filtered_events = self.get_success(
+ filter_events_for_client(
+ self.hs.get_storage_controllers(),
+ "@joiner:test",
+ events_to_filter,
+ msc4115_membership_on_events=True,
+ )
+ )
+ resident_filtered_events = self.get_success(
+ filter_events_for_client(
+ self.hs.get_storage_controllers(),
+ "@resident:test",
+ events_to_filter,
+ msc4115_membership_on_events=True,
+ )
+ )
+
+ # The joiner should be able to see the join and leave,
+ # and messages sent between the two, but not before or after.
+ self.assertEqual(
+ [e.event_id for e in [join_event, during_event, leave_event]],
+ [e.event_id for e in joiner_filtered_events],
+ )
+ self.assertEqual(
+ ["join", "join", "leave"],
+ [
+ e.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP]
+ for e in joiner_filtered_events
+ ],
+ )
+
+ # The resident user should see all the events.
+ self.assertEqual(
+ [
+ e.event_id
+ for e in [
+ before_event,
+ join_event,
+ during_event,
+ leave_event,
+ after_event,
+ ]
+ ],
+ [e.event_id for e in resident_filtered_events],
+ )
+ self.assertEqual(
+ ["join", "join", "join", "join", "join"],
+ [
+ e.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP]
+ for e in resident_filtered_events
+ ],
+ )
+
+
+class FilterEventsOutOfBandEventsForClientTestCase(
+ unittest.FederatingHomeserverTestCase
+):
def test_out_of_band_invite_rejection(self) -> None:
# this is where we have received an invite event over federation, and then
# rejected it.
@@ -341,15 +436,24 @@ class FilterEventsForClientTestCase(unittest.FederatingHomeserverTestCase):
)
# the invited user should be able to see both the invite and the rejection
+ filtered_events = self.get_success(
+ filter_events_for_client(
+ self.hs.get_storage_controllers(),
+ "@user:test",
+ [invite_event, reject_event],
+ msc4115_membership_on_events=True,
+ )
+ )
self.assertEqual(
- self.get_success(
- filter_events_for_client(
- self.hs.get_storage_controllers(),
- "@user:test",
- [invite_event, reject_event],
- )
- ),
- [invite_event, reject_event],
+ [e.event_id for e in filtered_events],
+ [e.event_id for e in [invite_event, reject_event]],
+ )
+ self.assertEqual(
+ ["invite", "leave"],
+ [
+ e.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP]
+ for e in filtered_events
+ ],
)
# other users should see neither
@@ -359,7 +463,39 @@ class FilterEventsForClientTestCase(unittest.FederatingHomeserverTestCase):
self.hs.get_storage_controllers(),
"@other:test",
[invite_event, reject_event],
+ msc4115_membership_on_events=True,
)
),
[],
)
+
+
+async def inject_visibility_event(
+ hs: HomeServer,
+ room_id: str,
+ sender: str,
+ visibility: str,
+) -> EventBase:
+ return await inject_event(
+ hs,
+ type="m.room.history_visibility",
+ sender=sender,
+ state_key="",
+ room_id=room_id,
+ content={"history_visibility": visibility},
+ )
+
+
+async def inject_message_event(
+ hs: HomeServer,
+ room_id: str,
+ sender: str,
+ body: Optional[str] = "testytest",
+) -> EventBase:
+ return await inject_event(
+ hs,
+ type="m.room.message",
+ sender=sender,
+ room_id=room_id,
+ content={"body": body, "msgtype": "m.text"},
+ )
From 7ab0f630da0ab16c4d5dc0603695df888e2a7ab0 Mon Sep 17 00:00:00 2001
From: devonh
Date: Mon, 29 Apr 2024 15:23:05 +0000
Subject: [PATCH 050/503] Apply user `email` & `picture` during OIDC
registration if present & selected (#17120)
This change will apply the `email` & `picture` provided by OIDC to the
new user account when registering a new user via OIDC. If the user is
directed to the account details form, this change makes sure they have
been selected before applying them, otherwise they are omitted. In
particular, this change ensures the values are carried through when
Synapse has consent configured, and the redirect to the consent form/s
are followed.
I have tested everything manually. Including:
- with/without consent configured
- allowing/not allowing the use of email/avatar (via
`sso_auth_account_details.html`)
- with/without automatic account detail population (by un/commenting the
`localpart_template` option in synapse config).
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [X] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct
(run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---
changelog.d/17120.bugfix | 1 +
docs/sso_mapping_providers.md | 1 +
synapse/handlers/sso.py | 10 +
synapse/rest/synapse/client/pick_username.py | 4 +-
tests/rest/client/test_login.py | 204 +++++++++++++++++--
5 files changed, 205 insertions(+), 15 deletions(-)
create mode 100644 changelog.d/17120.bugfix
diff --git a/changelog.d/17120.bugfix b/changelog.d/17120.bugfix
new file mode 100644
index 0000000000..85b34c2e98
--- /dev/null
+++ b/changelog.d/17120.bugfix
@@ -0,0 +1 @@
+Apply user email & picture during OIDC registration if present & selected.
diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md
index 10c695029f..d6c4e860ae 100644
--- a/docs/sso_mapping_providers.md
+++ b/docs/sso_mapping_providers.md
@@ -98,6 +98,7 @@ A custom mapping provider must specify the following methods:
either accept this localpart or pick their own username. Otherwise this
option has no effect. If omitted, defaults to `False`.
- `display_name`: An optional string, the display name for the user.
+ - `picture`: An optional string, the avatar url for the user.
- `emails`: A list of strings, the email address(es) to associate with
this user. If omitted, defaults to an empty list.
* `async def get_extra_attributes(self, userinfo, token)`
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 8e39e76c97..f275d4f35a 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -169,6 +169,7 @@ class UsernameMappingSession:
# attributes returned by the ID mapper
display_name: Optional[str]
emails: StrCollection
+ avatar_url: Optional[str]
# An optional dictionary of extra attributes to be provided to the client in the
# login response.
@@ -183,6 +184,7 @@ class UsernameMappingSession:
# choices made by the user
chosen_localpart: Optional[str] = None
use_display_name: bool = True
+ use_avatar: bool = True
emails_to_use: StrCollection = ()
terms_accepted_version: Optional[str] = None
@@ -660,6 +662,9 @@ class SsoHandler:
remote_user_id=remote_user_id,
display_name=attributes.display_name,
emails=attributes.emails,
+ avatar_url=attributes.picture,
+ # Default to using all mapped emails. Will be overwritten in handle_submit_username_request.
+ emails_to_use=attributes.emails,
client_redirect_url=client_redirect_url,
expiry_time_ms=now + self._MAPPING_SESSION_VALIDITY_PERIOD_MS,
extra_login_attributes=extra_login_attributes,
@@ -966,6 +971,7 @@ class SsoHandler:
session_id: str,
localpart: str,
use_display_name: bool,
+ use_avatar: bool,
emails_to_use: Iterable[str],
) -> None:
"""Handle a request to the username-picker 'submit' endpoint
@@ -988,6 +994,7 @@ class SsoHandler:
# update the session with the user's choices
session.chosen_localpart = localpart
session.use_display_name = use_display_name
+ session.use_avatar = use_avatar
emails_from_idp = set(session.emails)
filtered_emails: Set[str] = set()
@@ -1068,6 +1075,9 @@ class SsoHandler:
if session.use_display_name:
attributes.display_name = session.display_name
+ if session.use_avatar:
+ attributes.picture = session.avatar_url
+
# the following will raise a 400 error if the username has been taken in the
# meantime.
user_id = await self._register_mapped_user(
diff --git a/synapse/rest/synapse/client/pick_username.py b/synapse/rest/synapse/client/pick_username.py
index e671774aeb..7d16b796d4 100644
--- a/synapse/rest/synapse/client/pick_username.py
+++ b/synapse/rest/synapse/client/pick_username.py
@@ -113,6 +113,7 @@ class AccountDetailsResource(DirectServeHtmlResource):
"display_name": session.display_name,
"emails": session.emails,
"localpart": localpart,
+ "avatar_url": session.avatar_url,
},
}
@@ -134,6 +135,7 @@ class AccountDetailsResource(DirectServeHtmlResource):
try:
localpart = parse_string(request, "username", required=True)
use_display_name = parse_boolean(request, "use_display_name", default=False)
+ use_avatar = parse_boolean(request, "use_avatar", default=False)
try:
emails_to_use: List[str] = [
@@ -147,5 +149,5 @@ class AccountDetailsResource(DirectServeHtmlResource):
return
await self._sso_handler.handle_submit_username_request(
- request, session_id, localpart, use_display_name, emails_to_use
+ request, session_id, localpart, use_display_name, use_avatar, emails_to_use
)
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index 3a1f150082..3fb77fd9dd 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -20,7 +20,17 @@
#
import time
import urllib.parse
-from typing import Any, Collection, Dict, List, Optional, Tuple, Union
+from typing import (
+ Any,
+ BinaryIO,
+ Callable,
+ Collection,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Union,
+)
from unittest.mock import Mock
from urllib.parse import urlencode
@@ -34,8 +44,9 @@ import synapse.rest.admin
from synapse.api.constants import ApprovalNoticeMedium, LoginType
from synapse.api.errors import Codes
from synapse.appservice import ApplicationService
+from synapse.http.client import RawHeaders
from synapse.module_api import ModuleApi
-from synapse.rest.client import devices, login, logout, register
+from synapse.rest.client import account, devices, login, logout, profile, register
from synapse.rest.client.account import WhoamiRestServlet
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.server import HomeServer
@@ -48,6 +59,7 @@ from tests.handlers.test_saml import has_saml2
from tests.rest.client.utils import TEST_OIDC_CONFIG
from tests.server import FakeChannel
from tests.test_utils.html_parsers import TestHtmlParser
+from tests.test_utils.oidc import FakeOidcServer
from tests.unittest import HomeserverTestCase, override_config, skip_unless
try:
@@ -1421,7 +1433,19 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
class UsernamePickerTestCase(HomeserverTestCase):
"""Tests for the username picker flow of SSO login"""
- servlets = [login.register_servlets]
+ servlets = [
+ login.register_servlets,
+ profile.register_servlets,
+ account.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ self.http_client = Mock(spec=["get_file"])
+ self.http_client.get_file.side_effect = mock_get_file
+ hs = self.setup_test_homeserver(
+ proxied_blocklisted_http_client=self.http_client
+ )
+ return hs
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
@@ -1430,7 +1454,11 @@ class UsernamePickerTestCase(HomeserverTestCase):
config["oidc_config"] = {}
config["oidc_config"].update(TEST_OIDC_CONFIG)
config["oidc_config"]["user_mapping_provider"] = {
- "config": {"display_name_template": "{{ user.displayname }}"}
+ "config": {
+ "display_name_template": "{{ user.displayname }}",
+ "email_template": "{{ user.email }}",
+ "picture_template": "{{ user.picture }}",
+ }
}
# whitelist this client URI so we redirect straight to it rather than
@@ -1443,15 +1471,22 @@ class UsernamePickerTestCase(HomeserverTestCase):
d.update(build_synapse_client_resource_tree(self.hs))
return d
- def test_username_picker(self) -> None:
- """Test the happy path of a username picker flow."""
-
- fake_oidc_server = self.helper.fake_oidc_server()
-
+ def proceed_to_username_picker_page(
+ self,
+ fake_oidc_server: FakeOidcServer,
+ displayname: str,
+ email: str,
+ picture: str,
+ ) -> Tuple[str, str]:
# do the start of the login flow
channel, _ = self.helper.auth_via_oidc(
fake_oidc_server,
- {"sub": "tester", "displayname": "Jonny"},
+ {
+ "sub": "tester",
+ "displayname": displayname,
+ "picture": picture,
+ "email": email,
+ },
TEST_CLIENT_REDIRECT_URL,
)
@@ -1478,16 +1513,42 @@ class UsernamePickerTestCase(HomeserverTestCase):
)
session = username_mapping_sessions[session_id]
self.assertEqual(session.remote_user_id, "tester")
- self.assertEqual(session.display_name, "Jonny")
+ self.assertEqual(session.display_name, displayname)
+ self.assertEqual(session.emails, [email])
+ self.assertEqual(session.avatar_url, picture)
self.assertEqual(session.client_redirect_url, TEST_CLIENT_REDIRECT_URL)
# the expiry time should be about 15 minutes away
expected_expiry = self.clock.time_msec() + (15 * 60 * 1000)
self.assertApproximates(session.expiry_time_ms, expected_expiry, tolerance=1000)
+ return picker_url, session_id
+
+ def test_username_picker_use_displayname_avatar_and_email(self) -> None:
+ """Test the happy path of a username picker flow with using displayname, avatar and email."""
+
+ fake_oidc_server = self.helper.fake_oidc_server()
+
+ mxid = "@bobby:test"
+ displayname = "Jonny"
+ email = "bobby@test.com"
+ picture = "mxc://test/avatar_url"
+
+ picker_url, session_id = self.proceed_to_username_picker_page(
+ fake_oidc_server, displayname, email, picture
+ )
+
# Now, submit a username to the username picker, which should serve a redirect
- # to the completion page
- content = urlencode({b"username": b"bobby"}).encode("utf8")
+ # to the completion page.
+ # Also specify that we should use the provided displayname, avatar and email.
+ content = urlencode(
+ {
+ b"username": b"bobby",
+ b"use_display_name": b"true",
+ b"use_avatar": b"true",
+ b"use_email": email,
+ }
+ ).encode("utf8")
chan = self.make_request(
"POST",
path=picker_url,
@@ -1536,4 +1597,119 @@ class UsernamePickerTestCase(HomeserverTestCase):
content={"type": "m.login.token", "token": login_token},
)
self.assertEqual(chan.code, 200, chan.result)
- self.assertEqual(chan.json_body["user_id"], "@bobby:test")
+ self.assertEqual(chan.json_body["user_id"], mxid)
+
+ # ensure the displayname and avatar from the OIDC response have been configured for the user.
+ channel = self.make_request(
+ "GET", "/profile/" + mxid, access_token=chan.json_body["access_token"]
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertIn("mxc://test", channel.json_body["avatar_url"])
+ self.assertEqual(displayname, channel.json_body["displayname"])
+
+ # ensure the email from the OIDC response has been configured for the user.
+ channel = self.make_request(
+ "GET", "/account/3pid", access_token=chan.json_body["access_token"]
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertEqual(email, channel.json_body["threepids"][0]["address"])
+
+ def test_username_picker_dont_use_displayname_avatar_or_email(self) -> None:
+ """Test the happy path of a username picker flow without using displayname, avatar or email."""
+
+ fake_oidc_server = self.helper.fake_oidc_server()
+
+ mxid = "@bobby:test"
+ displayname = "Jonny"
+ email = "bobby@test.com"
+ picture = "mxc://test/avatar_url"
+ username = "bobby"
+
+ picker_url, session_id = self.proceed_to_username_picker_page(
+ fake_oidc_server, displayname, email, picture
+ )
+
+ # Now, submit a username to the username picker, which should serve a redirect
+ # to the completion page.
+ # Also specify that we should not use the provided displayname, avatar or email.
+ content = urlencode(
+ {
+ b"username": username,
+ b"use_display_name": b"false",
+ b"use_avatar": b"false",
+ }
+ ).encode("utf8")
+ chan = self.make_request(
+ "POST",
+ path=picker_url,
+ content=content,
+ content_is_form=True,
+ custom_headers=[
+ ("Cookie", "username_mapping_session=" + session_id),
+ # old versions of twisted don't do form-parsing without a valid
+ # content-length header.
+ ("Content-Length", str(len(content))),
+ ],
+ )
+ self.assertEqual(chan.code, 302, chan.result)
+ location_headers = chan.headers.getRawHeaders("Location")
+ assert location_headers
+
+ # send a request to the completion page, which should 302 to the client redirectUrl
+ chan = self.make_request(
+ "GET",
+ path=location_headers[0],
+ custom_headers=[("Cookie", "username_mapping_session=" + session_id)],
+ )
+ self.assertEqual(chan.code, 302, chan.result)
+ location_headers = chan.headers.getRawHeaders("Location")
+ assert location_headers
+
+ # ensure that the returned location matches the requested redirect URL
+ path, query = location_headers[0].split("?", 1)
+ self.assertEqual(path, "https://x")
+
+ # it will have url-encoded the params properly, so we'll have to parse them
+ params = urllib.parse.parse_qsl(
+ query, keep_blank_values=True, strict_parsing=True, errors="strict"
+ )
+ self.assertEqual(params[0:2], EXPECTED_CLIENT_REDIRECT_URL_PARAMS)
+ self.assertEqual(params[2][0], "loginToken")
+
+ # fish the login token out of the returned redirect uri
+ login_token = params[2][1]
+
+ # finally, submit the matrix login token to the login API, which gives us our
+ # matrix access token, mxid, and device id.
+ chan = self.make_request(
+ "POST",
+ "/login",
+ content={"type": "m.login.token", "token": login_token},
+ )
+ self.assertEqual(chan.code, 200, chan.result)
+ self.assertEqual(chan.json_body["user_id"], mxid)
+
+ # ensure the displayname and avatar from the OIDC response have not been configured for the user.
+ channel = self.make_request(
+ "GET", "/profile/" + mxid, access_token=chan.json_body["access_token"]
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertNotIn("avatar_url", channel.json_body)
+ self.assertEqual(username, channel.json_body["displayname"])
+
+ # ensure the email from the OIDC response has not been configured for the user.
+ channel = self.make_request(
+ "GET", "/account/3pid", access_token=chan.json_body["access_token"]
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertListEqual([], channel.json_body["threepids"])
+
+
+async def mock_get_file(
+ url: str,
+ output_stream: BinaryIO,
+ max_size: Optional[int] = None,
+ headers: Optional[RawHeaders] = None,
+ is_allowed_content_type: Optional[Callable[[str], bool]] = None,
+) -> Tuple[int, Dict[bytes, List[bytes]], str, int]:
+ return 0, {b"Content-Type": [b"image/png"]}, "", 200
From e26673fe971a4d0316723b0e9ab487297db359f2 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Tue, 30 Apr 2024 11:51:50 +0100
Subject: [PATCH 051/503] 1.106.0
---
CHANGES.md | 7 +++++++
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
3 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 451581fa63..7263832057 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,10 @@
+# Synapse 1.106.0 (2024-04-30)
+
+No significant changes since 1.106.0rc1.
+
+
+
+
# Synapse 1.106.0rc1 (2024-04-25)
### Features
diff --git a/debian/changelog b/debian/changelog
index de912c2ac8..06d682e722 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.106.0) stable; urgency=medium
+
+ * New Synapse release 1.106.0.
+
+ -- Synapse Packaging team Tue, 30 Apr 2024 11:51:43 +0100
+
matrix-synapse-py3 (1.106.0~rc1) stable; urgency=medium
* New Synapse release 1.106.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index 5e47a46cd7..150084a75e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.106.0rc1"
+version = "1.106.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From 07232e27a892962627071f40d8ce7a38e5bf2553 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 30 Apr 2024 13:57:20 +0100
Subject: [PATCH 052/503] Enable complement tests for MSC4115 support (#17137)
Follow-up to #17137 and
https://github.com/matrix-org/complement/pull/722
---
changelog.d/17137.feature | 1 +
scripts-dev/complement.sh | 16 +++++++++++++---
2 files changed, 14 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/17137.feature
diff --git a/changelog.d/17137.feature b/changelog.d/17137.feature
new file mode 100644
index 0000000000..1c2355e155
--- /dev/null
+++ b/changelog.d/17137.feature
@@ -0,0 +1 @@
+Add support for MSC4115 (membership metadata on events).
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 2a779f8255..b306b80749 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -214,7 +214,17 @@ fi
extra_test_args=()
-test_packages="./tests/csapi ./tests ./tests/msc3874 ./tests/msc3890 ./tests/msc3391 ./tests/msc3930 ./tests/msc3902 ./tests/msc3967"
+test_packages=(
+ ./tests/csapi
+ ./tests
+ ./tests/msc3874
+ ./tests/msc3890
+ ./tests/msc3391
+ ./tests/msc3930
+ ./tests/msc3902
+ ./tests/msc3967
+ ./tests/msc4115
+)
# Enable dirty runs, so tests will reuse the same container where possible.
# This significantly speeds up tests, but increases the possibility of test pollution.
@@ -278,7 +288,7 @@ fi
export PASS_SYNAPSE_LOG_TESTING=1
# Run the tests!
-echo "Images built; running complement with ${extra_test_args[@]} $@ $test_packages"
+echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}"
cd "$COMPLEMENT_DIR"
-go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" $test_packages
+go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}"
From 7254015665daca2e4356409207e345db92bd5112 Mon Sep 17 00:00:00 2001
From: Ben Banfield-Zanin
Date: Wed, 1 May 2024 16:23:42 +0100
Subject: [PATCH 053/503] Correct licensing metadata on the Docker image
(#17141)
### Pull Request Checklist
* [x] Pull request is based on the develop branch
* [x] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [x] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct
(run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---
changelog.d/17141.docker | 1 +
docker/Dockerfile | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17141.docker
diff --git a/changelog.d/17141.docker b/changelog.d/17141.docker
new file mode 100644
index 0000000000..20c30746df
--- /dev/null
+++ b/changelog.d/17141.docker
@@ -0,0 +1 @@
+Correct licensing metadata on Docker image.
diff --git a/docker/Dockerfile b/docker/Dockerfile
index d4cb9414ff..1bef8045ca 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -163,7 +163,7 @@ FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/element-hq/synapse/blob/master/docker/README.md'
LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git'
-LABEL org.opencontainers.image.licenses='Apache-2.0'
+LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later'
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
From 0b358f8643bcec9263d115303ace92e6980d2c6f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 May 2024 17:01:50 +0100
Subject: [PATCH 054/503] Drop sphinx docs (#17073)
It is broken, and we only seemed to have been building it for the
federation sender.
Closes https://github.com/element-hq/synapse/issues/16804
---
.github/workflows/docs.yaml | 30 ----------------
changelog.d/17073.doc | 1 +
dev-docs/Makefile | 20 -----------
dev-docs/conf.py | 50 ---------------------------
dev-docs/index.rst | 22 ------------
dev-docs/modules/federation_sender.md | 5 ---
pyproject.toml | 11 ------
7 files changed, 1 insertion(+), 138 deletions(-)
create mode 100644 changelog.d/17073.doc
delete mode 100644 dev-docs/Makefile
delete mode 100644 dev-docs/conf.py
delete mode 100644 dev-docs/index.rst
delete mode 100644 dev-docs/modules/federation_sender.md
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index fe3212f82a..434dcbb6c7 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -85,33 +85,3 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
destination_dir: ./${{ needs.pre.outputs.branch-version }}
-
-################################################################################
- pages-devdocs:
- name: GitHub Pages (developer docs)
- runs-on: ubuntu-latest
- needs:
- - pre
- steps:
- - uses: actions/checkout@v4
-
- - name: "Set up Sphinx"
- uses: matrix-org/setup-python-poetry@v1
- with:
- python-version: "3.x"
- poetry-version: "1.3.2"
- groups: "dev-docs"
- extras: ""
-
- - name: Build the documentation
- run: |
- cd dev-docs
- poetry run make html
-
- # Deploy to the target directory.
- - name: Deploy to gh pages
- uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
- with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
- publish_dir: ./dev-docs/_build/html
- destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
diff --git a/changelog.d/17073.doc b/changelog.d/17073.doc
new file mode 100644
index 0000000000..bc33887efe
--- /dev/null
+++ b/changelog.d/17073.doc
@@ -0,0 +1 @@
+Remove broken sphinx docs.
diff --git a/dev-docs/Makefile b/dev-docs/Makefile
deleted file mode 100644
index d4bb2cbb9e..0000000000
--- a/dev-docs/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line, and also
-# from the environment for the first two.
-SPHINXOPTS ?=
-SPHINXBUILD ?= sphinx-build
-SOURCEDIR = .
-BUILDDIR = _build
-
-# Put it first so that "make" without argument is like "make help".
-help:
- @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/dev-docs/conf.py b/dev-docs/conf.py
deleted file mode 100644
index 826d578c0b..0000000000
--- a/dev-docs/conf.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Configuration file for the Sphinx documentation builder.
-#
-# For the full list of built-in configuration values, see the documentation:
-# https://www.sphinx-doc.org/en/master/usage/configuration.html
-
-# -- Project information -----------------------------------------------------
-# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
-
-project = "Synapse development"
-copyright = "2023, The Matrix.org Foundation C.I.C."
-author = "The Synapse Maintainers and Community"
-
-# -- General configuration ---------------------------------------------------
-# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
-
-extensions = [
- "autodoc2",
- "myst_parser",
-]
-
-templates_path = ["_templates"]
-exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
-
-
-# -- Options for Autodoc2 ----------------------------------------------------
-
-autodoc2_docstring_parser_regexes = [
- # this will render all docstrings as 'MyST' Markdown
- (r".*", "myst"),
-]
-
-autodoc2_packages = [
- {
- "path": "../synapse",
- # Don't render documentation for everything as a matter of course
- "auto_mode": False,
- },
-]
-
-
-# -- Options for MyST (Markdown) ---------------------------------------------
-
-# myst_heading_anchors = 2
-
-
-# -- Options for HTML output -------------------------------------------------
-# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
-
-html_theme = "furo"
-html_static_path = ["_static"]
diff --git a/dev-docs/index.rst b/dev-docs/index.rst
deleted file mode 100644
index 1ef210460a..0000000000
--- a/dev-docs/index.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-.. Synapse Developer Documentation documentation master file, created by
- sphinx-quickstart on Mon Mar 13 08:59:51 2023.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
-Welcome to the Synapse Developer Documentation!
-===========================================================
-
-.. toctree::
- :maxdepth: 2
- :caption: Contents:
-
- modules/federation_sender
-
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/dev-docs/modules/federation_sender.md b/dev-docs/modules/federation_sender.md
deleted file mode 100644
index dac6852c16..0000000000
--- a/dev-docs/modules/federation_sender.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Federation Sender
-=================
-
-```{autodoc2-docstring} synapse.federation.sender
-```
diff --git a/pyproject.toml b/pyproject.toml
index 150084a75e..9ad674b603 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -364,17 +364,6 @@ towncrier = ">=18.6.0rc1"
tomli = ">=1.2.3"
-# Dependencies for building the development documentation
-[tool.poetry.group.dev-docs]
-optional = true
-
-[tool.poetry.group.dev-docs.dependencies]
-sphinx = {version = "^6.1", python = "^3.8"}
-sphinx-autodoc2 = {version = ">=0.4.2,<0.6.0", python = "^3.8"}
-myst-parser = {version = "^1.0.0", python = "^3.8"}
-furo = ">=2022.12.7,<2025.0.0"
-
-
[build-system]
# The upper bounds here are defensive, intended to prevent situations like
# https://github.com/matrix-org/synapse/issues/13849 and
From 37558d5e4cd22ec8f120d2c0fbb8c9842d6dd131 Mon Sep 17 00:00:00 2001
From: Shay
Date: Wed, 1 May 2024 09:45:17 -0700
Subject: [PATCH 055/503] Add support for MSC3823 - Account Suspension (#17051)
---
changelog.d/17051.feature | 1 +
synapse/_scripts/synapse_port_db.py | 2 +-
synapse/handlers/room_member.py | 30 ++++++++
.../storage/databases/main/registration.py | 55 ++++++++++++++-
synapse/storage/schema/__init__.py | 5 +-
.../schema/main/delta/85/01_add_suspended.sql | 14 ++++
synapse/types/__init__.py | 2 +
tests/rest/client/test_rooms.py | 69 ++++++++++++++++++-
tests/storage/test_registration.py | 2 +-
9 files changed, 173 insertions(+), 7 deletions(-)
create mode 100644 changelog.d/17051.feature
create mode 100644 synapse/storage/schema/main/delta/85/01_add_suspended.sql
diff --git a/changelog.d/17051.feature b/changelog.d/17051.feature
new file mode 100644
index 0000000000..1c41f49f7d
--- /dev/null
+++ b/changelog.d/17051.feature
@@ -0,0 +1 @@
+Add preliminary support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account Suspension.
\ No newline at end of file
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 15507372a4..1e56f46911 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -127,7 +127,7 @@ BOOLEAN_COLUMNS = {
"redactions": ["have_censored"],
"room_stats_state": ["is_federatable"],
"rooms": ["is_public", "has_auth_chain_index"],
- "users": ["shadow_banned", "approved", "locked"],
+ "users": ["shadow_banned", "approved", "locked", "suspended"],
"un_partial_stated_event_stream": ["rejection_status_changed"],
"users_who_share_rooms": ["share_private"],
"per_user_experimental_features": ["enabled"],
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 601d37341b..655c78e150 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -752,6 +752,36 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
and requester.user.to_string() == self._server_notices_mxid
)
+ requester_suspended = await self.store.get_user_suspended_status(
+ requester.user.to_string()
+ )
+ if action == Membership.INVITE and requester_suspended:
+ raise SynapseError(
+ 403,
+ "Sending invites while account is suspended is not allowed.",
+ Codes.USER_ACCOUNT_SUSPENDED,
+ )
+
+ if target.to_string() != requester.user.to_string():
+ target_suspended = await self.store.get_user_suspended_status(
+ target.to_string()
+ )
+ else:
+ target_suspended = requester_suspended
+
+ if action == Membership.JOIN and target_suspended:
+ raise SynapseError(
+ 403,
+ "Joining rooms while account is suspended is not allowed.",
+ Codes.USER_ACCOUNT_SUSPENDED,
+ )
+ if action == Membership.KNOCK and target_suspended:
+ raise SynapseError(
+ 403,
+ "Knocking on rooms while account is suspended is not allowed.",
+ Codes.USER_ACCOUNT_SUSPENDED,
+ )
+
if (
not self.allow_per_room_profiles and not is_requester_server_notices_user
) or requester.shadow_banned:
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 29bf47befc..df7f8a43b7 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -236,7 +236,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
consent_server_notice_sent, appservice_id, creation_ts, user_type,
deactivated, COALESCE(shadow_banned, FALSE) AS shadow_banned,
COALESCE(approved, TRUE) AS approved,
- COALESCE(locked, FALSE) AS locked
+ COALESCE(locked, FALSE) AS locked,
+ suspended
FROM users
WHERE name = ?
""",
@@ -261,6 +262,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
shadow_banned,
approved,
locked,
+ suspended,
) = row
return UserInfo(
@@ -277,6 +279,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
user_type=user_type,
approved=bool(approved),
locked=bool(locked),
+ suspended=bool(suspended),
)
return await self.db_pool.runInteraction(
@@ -1180,6 +1183,27 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
# Convert the potential integer into a boolean.
return bool(res)
+ @cached()
+ async def get_user_suspended_status(self, user_id: str) -> bool:
+ """
+ Determine whether the user's account is suspended.
+ Args:
+ user_id: The user ID of the user in question
+ Returns:
+ True if the user's account is suspended, false if it is not suspended or
+ if the user ID cannot be found.
+ """
+
+ res = await self.db_pool.simple_select_one_onecol(
+ table="users",
+ keyvalues={"name": user_id},
+ retcol="suspended",
+ allow_none=True,
+ desc="get_user_suspended",
+ )
+
+ return bool(res)
+
async def get_threepid_validation_session(
self,
medium: Optional[str],
@@ -2213,6 +2237,35 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
txn.call_after(self.is_guest.invalidate, (user_id,))
+ async def set_user_suspended_status(self, user_id: str, suspended: bool) -> None:
+ """
+ Set whether the user's account is suspended in the `users` table.
+
+ Args:
+ user_id: The user ID of the user in question
+ suspended: True if the user is suspended, false if not
+ """
+ await self.db_pool.runInteraction(
+ "set_user_suspended_status",
+ self.set_user_suspended_status_txn,
+ user_id,
+ suspended,
+ )
+
+ def set_user_suspended_status_txn(
+ self, txn: LoggingTransaction, user_id: str, suspended: bool
+ ) -> None:
+ self.db_pool.simple_update_one_txn(
+ txn=txn,
+ table="users",
+ keyvalues={"name": user_id},
+ updatevalues={"suspended": suspended},
+ )
+ self._invalidate_cache_and_stream(
+ txn, self.get_user_suspended_status, (user_id,)
+ )
+ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
+
async def set_user_locked_status(self, user_id: str, locked: bool) -> None:
"""Set the `locked` property for the provided user to the provided value.
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 039aa91b92..0dc5d24249 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -19,7 +19,7 @@
#
#
-SCHEMA_VERSION = 84 # remember to update the list below when updating
+SCHEMA_VERSION = 85 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -136,6 +136,9 @@ Changes in SCHEMA_VERSION = 83
Changes in SCHEMA_VERSION = 84
- No longer assumes that `event_auth_chain_links` holds transitive links, and
so read operations must do graph traversal.
+
+Changes in SCHEMA_VERSION = 85
+ - Add a column `suspended` to the `users` table
"""
diff --git a/synapse/storage/schema/main/delta/85/01_add_suspended.sql b/synapse/storage/schema/main/delta/85/01_add_suspended.sql
new file mode 100644
index 0000000000..807aad374f
--- /dev/null
+++ b/synapse/storage/schema/main/delta/85/01_add_suspended.sql
@@ -0,0 +1,14 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- .
+
+ALTER TABLE users ADD COLUMN suspended BOOLEAN DEFAULT FALSE NOT NULL;
\ No newline at end of file
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index a88982a04c..509a2d3a0f 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -1156,6 +1156,7 @@ class UserInfo:
user_type: User type (None for normal user, 'support' and 'bot' other options).
approved: If the user has been "approved" to register on the server.
locked: Whether the user's account has been locked
+ suspended: Whether the user's account is currently suspended
"""
user_id: UserID
@@ -1171,6 +1172,7 @@ class UserInfo:
is_shadow_banned: bool
approved: bool
locked: bool
+ suspended: bool
class UserProfile(TypedDict):
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index b796163dcb..d398cead1c 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -48,7 +48,16 @@ from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.rest import admin
-from synapse.rest.client import account, directory, login, profile, register, room, sync
+from synapse.rest.client import (
+ account,
+ directory,
+ knock,
+ login,
+ profile,
+ register,
+ room,
+ sync,
+)
from synapse.server import HomeServer
from synapse.types import JsonDict, RoomAlias, UserID, create_requester
from synapse.util import Clock
@@ -733,7 +742,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
- self.assertEqual(32, channel.resource_usage.db_txn_count)
+ self.assertEqual(33, channel.resource_usage.db_txn_count)
def test_post_room_initial_state(self) -> None:
# POST with initial_state config key, expect new room id
@@ -746,7 +755,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
- self.assertEqual(34, channel.resource_usage.db_txn_count)
+ self.assertEqual(35, channel.resource_usage.db_txn_count)
def test_post_room_visibility_key(self) -> None:
# POST with visibility config key, expect new room id
@@ -1154,6 +1163,7 @@ class RoomJoinTestCase(RoomBase):
admin.register_servlets,
login.register_servlets,
room.register_servlets,
+ knock.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -1167,6 +1177,8 @@ class RoomJoinTestCase(RoomBase):
self.room2 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1)
self.room3 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1)
+ self.store = hs.get_datastores().main
+
def test_spam_checker_may_join_room_deprecated(self) -> None:
"""Tests that the user_may_join_room spam checker callback is correctly called
and blocks room joins when needed.
@@ -1317,6 +1329,57 @@ class RoomJoinTestCase(RoomBase):
expect_additional_fields=return_value[1],
)
+ def test_suspended_user_cannot_join_room(self) -> None:
+ # set the user as suspended
+ self.get_success(self.store.set_user_suspended_status(self.user2, True))
+
+ channel = self.make_request(
+ "POST", f"/join/{self.room1}", access_token=self.tok2
+ )
+ self.assertEqual(channel.code, 403)
+ self.assertEqual(
+ channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+ )
+
+ channel = self.make_request(
+ "POST", f"/rooms/{self.room1}/join", access_token=self.tok2
+ )
+ self.assertEqual(channel.code, 403)
+ self.assertEqual(
+ channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+ )
+
+ def test_suspended_user_cannot_knock_on_room(self) -> None:
+ # set the user as suspended
+ self.get_success(self.store.set_user_suspended_status(self.user2, True))
+
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/v3/knock/{self.room1}",
+ access_token=self.tok2,
+ content={},
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 403)
+ self.assertEqual(
+ channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+ )
+
+ def test_suspended_user_cannot_invite_to_room(self) -> None:
+ # set the user as suspended
+ self.get_success(self.store.set_user_suspended_status(self.user1, True))
+
+ # first user invites second user
+ channel = self.make_request(
+ "POST",
+ f"/rooms/{self.room1}/invite",
+ access_token=self.tok1,
+ content={"user_id": self.user2},
+ )
+ self.assertEqual(
+ channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+ )
+
class RoomAppserviceTsParamTestCase(unittest.HomeserverTestCase):
servlets = [
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 505465d529..14e3871dc1 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -43,7 +43,6 @@ class RegistrationStoreTestCase(HomeserverTestCase):
self.assertEqual(
UserInfo(
- # TODO(paul): Surely this field should be 'user_id', not 'name'
user_id=UserID.from_string(self.user_id),
is_admin=False,
is_guest=False,
@@ -57,6 +56,7 @@ class RegistrationStoreTestCase(HomeserverTestCase):
locked=False,
is_shadow_banned=False,
approved=True,
+ suspended=False,
),
(self.get_success(self.store.get_user_by_id(self.user_id))),
)
From c0ea2bf8005dc18463bded62ef84f8450955bece Mon Sep 17 00:00:00 2001
From: Benjamin Bouvier
Date: Thu, 2 May 2024 12:48:27 +0200
Subject: [PATCH 056/503] synapse complement image: hardcode enabling msc3266
(#17105)
This is an alternative to
https://github.com/matrix-org/matrix-rust-sdk/issues/3340 where we don't
need to change our CI setup too much in the Rust SDK repository, and
still can test MSC3266.
---
changelog.d/17105.misc | 1 +
docker/complement/conf/workers-shared-extra.yaml.j2 | 2 ++
2 files changed, 3 insertions(+)
create mode 100644 changelog.d/17105.misc
diff --git a/changelog.d/17105.misc b/changelog.d/17105.misc
new file mode 100644
index 0000000000..d4443b89cf
--- /dev/null
+++ b/changelog.d/17105.misc
@@ -0,0 +1 @@
+Enabled MSC3266 by default in the synapse complement image.
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index a2c378f547..4c41ee7709 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -102,6 +102,8 @@ experimental_features:
msc3874_enabled: true
# no UIA for x-signing upload for the first time
msc3967_enabled: true
+ # Expose a room summary for public rooms
+ msc3266_enabled: true
msc4115_membership_on_events: true
From 5b6a75935e560945f69af72e9768bbaac10c9b4f Mon Sep 17 00:00:00 2001
From: jahway603 <64485701+jahway603@users.noreply.github.com>
Date: Thu, 2 May 2024 09:57:29 -0400
Subject: [PATCH 057/503] upgrade.md: Bump minimum Rust version to 1.66.0
(element-hq#17079) (#17140)
upgrade.md: Bump minimum Rust version to 1.66.0 (element-hq#17079)
---
changelog.d/17140.doc | 1 +
docs/upgrade.md | 8 ++++++++
2 files changed, 9 insertions(+)
create mode 100644 changelog.d/17140.doc
diff --git a/changelog.d/17140.doc b/changelog.d/17140.doc
new file mode 100644
index 0000000000..2d447ed928
--- /dev/null
+++ b/changelog.d/17140.doc
@@ -0,0 +1 @@
+Update the upgrade.md with the latest minimum supported Rust version of 1.66.0. Contributed by @jahway603.
diff --git a/docs/upgrade.md b/docs/upgrade.md
index e7247676d1..99be4122bb 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -117,6 +117,14 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
+# Upgrading to v1.106.0
+
+## Minimum supported Rust version
+The minimum supported Rust version has been increased from v1.65.0 to v1.66.0.
+Users building from source will need to ensure their `rustc` version is up to
+date.
+
+
# Upgrading to v1.100.0
## Minimum supported Rust version
From 3aadf43122d46359ab00a69d154a6a55ed920bfc Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 3 May 2024 10:55:59 +0100
Subject: [PATCH 058/503] Bump `pillow` from 10.2.0 to 10.3.0 (#17146)
---
changelog.d/17146.misc | 1 +
poetry.lock | 510 ++++++-----------------------------------
2 files changed, 74 insertions(+), 437 deletions(-)
create mode 100644 changelog.d/17146.misc
diff --git a/changelog.d/17146.misc b/changelog.d/17146.misc
new file mode 100644
index 0000000000..dc7f9faa83
--- /dev/null
+++ b/changelog.d/17146.misc
@@ -0,0 +1 @@
+Bump `pillow` from 10.2.0 to 10.3.0.
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index f564fd420a..2c3ae2f88f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,15 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
-
-[[package]]
-name = "alabaster"
-version = "0.7.13"
-description = "A configurable sidebar-enabled Sphinx theme"
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"},
- {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
-]
+# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -25,25 +14,6 @@ files = [
[package.dependencies]
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
-[[package]]
-name = "astroid"
-version = "2.15.0"
-description = "An abstract syntax tree for Python with inference support."
-optional = false
-python-versions = ">=3.7.2"
-files = [
- {file = "astroid-2.15.0-py3-none-any.whl", hash = "sha256:e3e4d0ffc2d15d954065579689c36aac57a339a4679a679579af6401db4d3fdb"},
- {file = "astroid-2.15.0.tar.gz", hash = "sha256:525f126d5dc1b8b0b6ee398b33159105615d92dc4a17f2cd064125d57f6186fa"},
-]
-
-[package.dependencies]
-lazy-object-proxy = ">=1.4.0"
-typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
-wrapt = [
- {version = ">=1.11,<2", markers = "python_version < \"3.11\""},
- {version = ">=1.14,<2", markers = "python_version >= \"3.11\""},
-]
-
[[package]]
name = "attrs"
version = "23.2.0"
@@ -95,20 +65,6 @@ six = "*"
[package.extras]
visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
-[[package]]
-name = "babel"
-version = "2.12.1"
-description = "Internationalization utilities"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"},
- {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"},
-]
-
-[package.dependencies]
-pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""}
-
[[package]]
name = "bcrypt"
version = "4.1.2"
@@ -149,24 +105,6 @@ files = [
tests = ["pytest (>=3.2.1,!=3.3.0)"]
typecheck = ["mypy"]
-[[package]]
-name = "beautifulsoup4"
-version = "4.12.0"
-description = "Screen-scraping library"
-optional = false
-python-versions = ">=3.6.0"
-files = [
- {file = "beautifulsoup4-4.12.0-py3-none-any.whl", hash = "sha256:2130a5ad7f513200fae61a17abb5e338ca980fa28c439c0571014bc0217e9591"},
- {file = "beautifulsoup4-4.12.0.tar.gz", hash = "sha256:c5fceeaec29d09c84970e47c65f2f0efe57872f7cff494c9691a26ec0ff13234"},
-]
-
-[package.dependencies]
-soupsieve = ">1.2"
-
-[package.extras]
-html5lib = ["html5lib"]
-lxml = ["lxml"]
-
[[package]]
name = "black"
version = "24.2.0"
@@ -570,23 +508,6 @@ files = [
[package.extras]
dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", "memray", "mypy", "tox", "xmlschema (>=2.0.0)"]
-[[package]]
-name = "furo"
-version = "2024.4.27"
-description = "A clean customisable Sphinx documentation theme."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "furo-2024.4.27-py3-none-any.whl", hash = "sha256:f7eb1b2c2204fd9cbd4af42e027289a67f17a98a4e14f4f9e2f17b96d61bb020"},
- {file = "furo-2024.4.27.tar.gz", hash = "sha256:15a9b65269038def2cefafb86c71c6616e3969b8f07ba231f588c10c4aee6d88"},
-]
-
-[package.dependencies]
-beautifulsoup4 = "*"
-pygments = ">=2.7"
-sphinx = ">=6.0,<8.0"
-sphinx-basic-ng = "*"
-
[[package]]
name = "gitdb"
version = "4.0.10"
@@ -859,17 +780,6 @@ files = [
{file = "ijson-3.2.3.tar.gz", hash = "sha256:10294e9bf89cb713da05bc4790bdff616610432db561964827074898e174f917"},
]
-[[package]]
-name = "imagesize"
-version = "1.4.1"
-description = "Getting image size from png/jpeg/jpeg2000/gif file"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-files = [
- {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"},
- {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"},
-]
-
[[package]]
name = "immutabledict"
version = "4.1.0"
@@ -1078,51 +988,6 @@ completion = ["shtab"]
docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"]
testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
-[[package]]
-name = "lazy-object-proxy"
-version = "1.9.0"
-description = "A fast and thorough lazy object proxy."
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "lazy-object-proxy-1.9.0.tar.gz", hash = "sha256:659fb5809fa4629b8a1ac5106f669cfc7bef26fbb389dda53b3e010d1ac4ebae"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b40387277b0ed2d0602b8293b94d7257e17d1479e257b4de114ea11a8cb7f2d7"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8c6cfb338b133fbdbc5cfaa10fe3c6aeea827db80c978dbd13bc9dd8526b7d4"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:721532711daa7db0d8b779b0bb0318fa87af1c10d7fe5e52ef30f8eff254d0cd"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66a3de4a3ec06cd8af3f61b8e1ec67614fbb7c995d02fa224813cb7afefee701"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1aa3de4088c89a1b69f8ec0dcc169aa725b0ff017899ac568fe44ddc1396df46"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-win32.whl", hash = "sha256:f0705c376533ed2a9e5e97aacdbfe04cecd71e0aa84c7c0595d02ef93b6e4455"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:ea806fd4c37bf7e7ad82537b0757999264d5f70c45468447bb2b91afdbe73a6e"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:946d27deaff6cf8452ed0dba83ba38839a87f4f7a9732e8f9fd4107b21e6ff07"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79a31b086e7e68b24b99b23d57723ef7e2c6d81ed21007b6281ebcd1688acb0a"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f699ac1c768270c9e384e4cbd268d6e67aebcfae6cd623b4d7c3bfde5a35db59"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bfb38f9ffb53b942f2b5954e0f610f1e721ccebe9cce9025a38c8ccf4a5183a4"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:189bbd5d41ae7a498397287c408617fe5c48633e7755287b21d741f7db2706a9"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-win32.whl", hash = "sha256:81fc4d08b062b535d95c9ea70dbe8a335c45c04029878e62d744bdced5141586"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:f2457189d8257dd41ae9b434ba33298aec198e30adf2dcdaaa3a28b9994f6adb"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d9e25ef10a39e8afe59a5c348a4dbf29b4868ab76269f81ce1674494e2565a6e"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cbf9b082426036e19c6924a9ce90c740a9861e2bdc27a4834fd0a910742ac1e8"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f5fa4a61ce2438267163891961cfd5e32ec97a2c444e5b842d574251ade27d2"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8fa02eaab317b1e9e03f69aab1f91e120e7899b392c4fc19807a8278a07a97e8"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e7c21c95cae3c05c14aafffe2865bbd5e377cfc1348c4f7751d9dc9a48ca4bda"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win32.whl", hash = "sha256:f12ad7126ae0c98d601a7ee504c1122bcef553d1d5e0c3bfa77b16b3968d2734"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:edd20c5a55acb67c7ed471fa2b5fb66cb17f61430b7a6b9c3b4a1e40293b1671"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0daa332786cf3bb49e10dc6a17a52f6a8f9601b4cf5c295a4f85854d61de63"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cd077f3d04a58e83d04b20e334f678c2b0ff9879b9375ed107d5d07ff160171"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c94ea760b3ce47d1855a30984c78327500493d396eac4dfd8bd82041b22be"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:212774e4dfa851e74d393a2370871e174d7ff0ebc980907723bb67d25c8a7c30"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0117049dd1d5635bbff65444496c90e0baa48ea405125c088e93d9cf4525b11"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-win32.whl", hash = "sha256:0a891e4e41b54fd5b8313b96399f8b0e173bbbfc03c7631f01efbe29bb0bcf82"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:9990d8e71b9f6488e91ad25f322898c136b008d87bf852ff65391b004da5e17b"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e7551208b2aded9c1447453ee366f1c4070602b3d932ace044715d89666899b"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f83ac4d83ef0ab017683d715ed356e30dd48a93746309c8f3517e1287523ef4"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7322c3d6f1766d4ef1e51a465f47955f1e8123caee67dd641e67d539a534d006"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:18b78ec83edbbeb69efdc0e9c1cb41a3b1b1ed11ddd8ded602464c3fc6020494"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:09763491ce220c0299688940f8dc2c5d05fd1f45af1e42e636b2e8b2303e4382"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-win32.whl", hash = "sha256:9090d8e53235aa280fc9239a86ae3ea8ac58eff66a705fa6aa2ec4968b95c821"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:db1c1722726f47e10e0b5fdbf15ac3b8adb58c091d12b3ab713965795036985f"},
-]
-
[[package]]
name = "ldap3"
version = "2.9.1"
@@ -1364,25 +1229,6 @@ Twisted = ">=15.1.0"
[package.extras]
dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "matrix-synapse", "mypy (==0.910)", "tox", "types-setuptools"]
-[[package]]
-name = "mdit-py-plugins"
-version = "0.3.5"
-description = "Collection of plugins for markdown-it-py"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "mdit-py-plugins-0.3.5.tar.gz", hash = "sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a"},
- {file = "mdit_py_plugins-0.3.5-py3-none-any.whl", hash = "sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e"},
-]
-
-[package.dependencies]
-markdown-it-py = ">=1.0.0,<3.0.0"
-
-[package.extras]
-code-style = ["pre-commit"]
-rtd = ["attrs", "myst-parser (>=0.16.1,<0.17.0)", "sphinx-book-theme (>=0.1.0,<0.2.0)"]
-testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
-
[[package]]
name = "mdurl"
version = "0.1.2"
@@ -1547,32 +1393,6 @@ mypy = ">=1.0.0,<1.9.0"
[package.extras]
test = ["lxml", "pytest (>=4.6)", "pytest-cov"]
-[[package]]
-name = "myst-parser"
-version = "1.0.0"
-description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser,"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "myst-parser-1.0.0.tar.gz", hash = "sha256:502845659313099542bd38a2ae62f01360e7dd4b1310f025dd014dfc0439cdae"},
- {file = "myst_parser-1.0.0-py3-none-any.whl", hash = "sha256:69fb40a586c6fa68995e6521ac0a525793935db7e724ca9bac1d33be51be9a4c"},
-]
-
-[package.dependencies]
-docutils = ">=0.15,<0.20"
-jinja2 = "*"
-markdown-it-py = ">=1.0.0,<3.0.0"
-mdit-py-plugins = ">=0.3.4,<0.4.0"
-pyyaml = "*"
-sphinx = ">=5,<7"
-
-[package.extras]
-code-style = ["pre-commit (>=3.0,<4.0)"]
-linkify = ["linkify-it-py (>=1.0,<2.0)"]
-rtd = ["ipython", "pydata-sphinx-theme (==v0.13.0rc4)", "sphinx-autodoc2 (>=0.4.2,<0.5.0)", "sphinx-book-theme (==1.0.0rc2)", "sphinx-copybutton", "sphinx-design2", "sphinx-pyscript", "sphinx-tippy (>=0.3.1)", "sphinx-togglebutton", "sphinxext-opengraph (>=0.7.5,<0.8.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"]
-testing = ["beautifulsoup4", "coverage[toml]", "pytest (>=7,<8)", "pytest-cov", "pytest-param-files (>=0.3.4,<0.4.0)", "pytest-regressions", "sphinx-pytest"]
-testing-docutils = ["pygments", "pytest (>=7,<8)", "pytest-param-files (>=0.3.4,<0.4.0)"]
-
[[package]]
name = "netaddr"
version = "1.2.1"
@@ -1649,79 +1469,80 @@ files = [
[[package]]
name = "pillow"
-version = "10.2.0"
+version = "10.3.0"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"},
- {file = "pillow-10.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588"},
- {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452"},
- {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4"},
- {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563"},
- {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2"},
- {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c"},
- {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0"},
- {file = "pillow-10.2.0-cp310-cp310-win32.whl", hash = "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023"},
- {file = "pillow-10.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72"},
- {file = "pillow-10.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad"},
- {file = "pillow-10.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5"},
- {file = "pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67"},
- {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61"},
- {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e"},
- {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f"},
- {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311"},
- {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1"},
- {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757"},
- {file = "pillow-10.2.0-cp311-cp311-win32.whl", hash = "sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068"},
- {file = "pillow-10.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56"},
- {file = "pillow-10.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1"},
- {file = "pillow-10.2.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef"},
- {file = "pillow-10.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac"},
- {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c"},
- {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa"},
- {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2"},
- {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04"},
- {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f"},
- {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb"},
- {file = "pillow-10.2.0-cp312-cp312-win32.whl", hash = "sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f"},
- {file = "pillow-10.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9"},
- {file = "pillow-10.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48"},
- {file = "pillow-10.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9"},
- {file = "pillow-10.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483"},
- {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129"},
- {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e"},
- {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213"},
- {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d"},
- {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6"},
- {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe"},
- {file = "pillow-10.2.0-cp38-cp38-win32.whl", hash = "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e"},
- {file = "pillow-10.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39"},
- {file = "pillow-10.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67"},
- {file = "pillow-10.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364"},
- {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb"},
- {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e"},
- {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01"},
- {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13"},
- {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7"},
- {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591"},
- {file = "pillow-10.2.0-cp39-cp39-win32.whl", hash = "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516"},
- {file = "pillow-10.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8"},
- {file = "pillow-10.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869"},
- {file = "pillow-10.2.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a"},
- {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2"},
- {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04"},
- {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2"},
- {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a"},
- {file = "pillow-10.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6"},
- {file = "pillow-10.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7"},
- {file = "pillow-10.2.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f"},
- {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e"},
- {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5"},
- {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b"},
- {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a"},
- {file = "pillow-10.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"},
- {file = "pillow-10.2.0.tar.gz", hash = "sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e"},
+ {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"},
+ {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"},
+ {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"},
+ {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"},
+ {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"},
+ {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"},
+ {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"},
+ {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"},
+ {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"},
+ {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"},
+ {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"},
+ {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"},
+ {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"},
+ {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"},
+ {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"},
+ {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"},
+ {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"},
+ {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"},
+ {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"},
+ {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"},
+ {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"},
+ {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"},
+ {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"},
+ {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"},
+ {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"},
+ {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"},
+ {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"},
+ {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"},
+ {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"},
+ {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"},
+ {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"},
+ {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"},
+ {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"},
+ {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"},
+ {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"},
+ {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"},
+ {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"},
+ {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"},
+ {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"},
+ {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"},
+ {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"},
+ {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"},
+ {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"},
+ {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"},
+ {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"},
+ {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"},
+ {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"},
+ {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"},
+ {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"},
+ {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"},
+ {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"},
+ {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"},
+ {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"},
+ {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"},
+ {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"},
+ {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"},
+ {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"},
+ {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"},
+ {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"},
+ {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"},
+ {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"},
+ {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"},
+ {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"},
+ {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"},
+ {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"},
+ {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"},
+ {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"},
+ {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"},
+ {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"},
]
[package.extras]
@@ -2156,7 +1977,7 @@ six = ">=1.5"
name = "pytz"
version = "2022.7.1"
description = "World timezone definitions, modern and historical"
-optional = false
+optional = true
python-versions = "*"
files = [
{file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
@@ -2629,17 +2450,6 @@ files = [
{file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"},
]
-[[package]]
-name = "snowballstemmer"
-version = "2.2.0"
-description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
-optional = false
-python-versions = "*"
-files = [
- {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
- {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
-]
-
[[package]]
name = "sortedcontainers"
version = "2.4.0"
@@ -2651,180 +2461,6 @@ files = [
{file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"},
]
-[[package]]
-name = "soupsieve"
-version = "2.4"
-description = "A modern CSS selector implementation for Beautiful Soup."
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "soupsieve-2.4-py3-none-any.whl", hash = "sha256:49e5368c2cda80ee7e84da9dbe3e110b70a4575f196efb74e51b94549d921955"},
- {file = "soupsieve-2.4.tar.gz", hash = "sha256:e28dba9ca6c7c00173e34e4ba57448f0688bb681b7c5e8bf4971daafc093d69a"},
-]
-
-[[package]]
-name = "sphinx"
-version = "6.2.1"
-description = "Python documentation generator"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "Sphinx-6.2.1.tar.gz", hash = "sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b"},
- {file = "sphinx-6.2.1-py3-none-any.whl", hash = "sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912"},
-]
-
-[package.dependencies]
-alabaster = ">=0.7,<0.8"
-babel = ">=2.9"
-colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
-docutils = ">=0.18.1,<0.20"
-imagesize = ">=1.3"
-importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""}
-Jinja2 = ">=3.0"
-packaging = ">=21.0"
-Pygments = ">=2.13"
-requests = ">=2.25.0"
-snowballstemmer = ">=2.0"
-sphinxcontrib-applehelp = "*"
-sphinxcontrib-devhelp = "*"
-sphinxcontrib-htmlhelp = ">=2.0.0"
-sphinxcontrib-jsmath = "*"
-sphinxcontrib-qthelp = "*"
-sphinxcontrib-serializinghtml = ">=1.1.5"
-
-[package.extras]
-docs = ["sphinxcontrib-websupport"]
-lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"]
-test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"]
-
-[[package]]
-name = "sphinx-autodoc2"
-version = "0.5.0"
-description = "Analyse a python project and create documentation for it."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "sphinx_autodoc2-0.5.0-py3-none-any.whl", hash = "sha256:e867013b1512f9d6d7e6f6799f8b537d6884462acd118ef361f3f619a60b5c9e"},
- {file = "sphinx_autodoc2-0.5.0.tar.gz", hash = "sha256:7d76044aa81d6af74447080182b6868c7eb066874edc835e8ddf810735b6565a"},
-]
-
-[package.dependencies]
-astroid = ">=2.7,<4"
-tomli = {version = "*", markers = "python_version < \"3.11\""}
-typing-extensions = "*"
-
-[package.extras]
-cli = ["typer[all]"]
-docs = ["furo", "myst-parser", "sphinx (>=4.0.0)"]
-sphinx = ["sphinx (>=4.0.0)"]
-testing = ["pytest", "pytest-cov", "pytest-regressions", "sphinx (>=4.0.0,<7)"]
-
-[[package]]
-name = "sphinx-basic-ng"
-version = "1.0.0b1"
-description = "A modern skeleton for Sphinx themes."
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "sphinx_basic_ng-1.0.0b1-py3-none-any.whl", hash = "sha256:ade597a3029c7865b24ad0eda88318766bcc2f9f4cef60df7e28126fde94db2a"},
- {file = "sphinx_basic_ng-1.0.0b1.tar.gz", hash = "sha256:89374bd3ccd9452a301786781e28c8718e99960f2d4f411845ea75fc7bb5a9b0"},
-]
-
-[package.dependencies]
-sphinx = ">=4.0"
-
-[package.extras]
-docs = ["furo", "ipython", "myst-parser", "sphinx-copybutton", "sphinx-inline-tabs"]
-
-[[package]]
-name = "sphinxcontrib-applehelp"
-version = "1.0.4"
-description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"},
- {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"},
-]
-
-[package.extras]
-lint = ["docutils-stubs", "flake8", "mypy"]
-test = ["pytest"]
-
-[[package]]
-name = "sphinxcontrib-devhelp"
-version = "1.0.2"
-description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document."
-optional = false
-python-versions = ">=3.5"
-files = [
- {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"},
- {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"},
-]
-
-[package.extras]
-lint = ["docutils-stubs", "flake8", "mypy"]
-test = ["pytest"]
-
-[[package]]
-name = "sphinxcontrib-htmlhelp"
-version = "2.0.1"
-description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"},
- {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"},
-]
-
-[package.extras]
-lint = ["docutils-stubs", "flake8", "mypy"]
-test = ["html5lib", "pytest"]
-
-[[package]]
-name = "sphinxcontrib-jsmath"
-version = "1.0.1"
-description = "A sphinx extension which renders display math in HTML via JavaScript"
-optional = false
-python-versions = ">=3.5"
-files = [
- {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"},
- {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"},
-]
-
-[package.extras]
-test = ["flake8", "mypy", "pytest"]
-
-[[package]]
-name = "sphinxcontrib-qthelp"
-version = "1.0.3"
-description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document."
-optional = false
-python-versions = ">=3.5"
-files = [
- {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"},
- {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"},
-]
-
-[package.extras]
-lint = ["docutils-stubs", "flake8", "mypy"]
-test = ["pytest"]
-
-[[package]]
-name = "sphinxcontrib-serializinghtml"
-version = "1.1.5"
-description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)."
-optional = false
-python-versions = ">=3.5"
-files = [
- {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
- {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
-]
-
-[package.extras]
-lint = ["docutils-stubs", "flake8", "mypy"]
-test = ["pytest"]
-
[[package]]
name = "systemd-python"
version = "235"
@@ -3454,4 +3090,4 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8.0"
-content-hash = "1951f2b4623138d47db08a405edd970e67599d05804bb459af21a085e1665f69"
+content-hash = "987f8eccaa222367b1a2e15b0d496586ca50d46ca1277e69694922d31c93ce5b"
From 3818597751701e0d221608afe0a2fe7148a670a4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 May 2024 17:12:03 +0100
Subject: [PATCH 059/503] Fix lint.sh script (#17148)
Broke in #17073
---
changelog.d/17148.doc | 1 +
scripts-dev/lint.sh | 1 -
2 files changed, 1 insertion(+), 1 deletion(-)
create mode 100644 changelog.d/17148.doc
diff --git a/changelog.d/17148.doc b/changelog.d/17148.doc
new file mode 100644
index 0000000000..bc33887efe
--- /dev/null
+++ b/changelog.d/17148.doc
@@ -0,0 +1 @@
+Remove broken sphinx docs.
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 1c0e6582f6..9e4ed3246e 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -91,7 +91,6 @@ else
"synapse" "docker" "tests"
"scripts-dev"
"contrib" "synmark" "stubs" ".ci"
- "dev-docs"
)
fi
fi
From 7c9ac01eb5fd18e564d6650c2c0f853822aec212 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 May 2024 18:00:08 +0100
Subject: [PATCH 060/503] Fix bug where `StreamChangeCache` would not respect
cache factors (#17152)
Annoyingly mypy didn't pick up this typo.
---
changelog.d/17152.bugfix | 1 +
synapse/util/caches/stream_change_cache.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17152.bugfix
diff --git a/changelog.d/17152.bugfix b/changelog.d/17152.bugfix
new file mode 100644
index 0000000000..67aee91672
--- /dev/null
+++ b/changelog.d/17152.bugfix
@@ -0,0 +1 @@
+Fix bug where `StreamChangeCache` would not respect configured cache factors.
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index d8253bd942..2079ca789c 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -115,7 +115,7 @@ class StreamChangeCache:
"""
new_size = math.floor(self._original_max_size * factor)
if new_size != self._max_size:
- self.max_size = new_size
+ self._max_size = new_size
self._evict()
return True
return False
From 3e6ee8ff88c41ad1fca8c055520be952ab21b705 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 6 May 2024 12:56:52 +0100
Subject: [PATCH 061/503] Add optimisation to `StreamChangeCache` (#17130)
When there have been lots of changes compared with the number of
entities, we can do a fast(er) path.
Locally I ran some benchmarking, and the comparison seems to give the
best determination of which method we use.
---
changelog.d/17130.misc | 1 +
synapse/util/caches/stream_change_cache.py | 20 +++++++++++++++++++-
tests/util/test_stream_change_cache.py | 17 ++++++++++++++---
3 files changed, 34 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/17130.misc
diff --git a/changelog.d/17130.misc b/changelog.d/17130.misc
new file mode 100644
index 0000000000..ac20c90bde
--- /dev/null
+++ b/changelog.d/17130.misc
@@ -0,0 +1 @@
+Add optimisation to `StreamChangeCache.get_entities_changed(..)`.
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index 2079ca789c..91c335f85b 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -165,7 +165,7 @@ class StreamChangeCache:
return False
def get_entities_changed(
- self, entities: Collection[EntityType], stream_pos: int
+ self, entities: Collection[EntityType], stream_pos: int, _perf_factor: int = 1
) -> Union[Set[EntityType], FrozenSet[EntityType]]:
"""
Returns the subset of the given entities that have had changes after the given position.
@@ -177,6 +177,8 @@ class StreamChangeCache:
Args:
entities: Entities to check for changes.
stream_pos: The stream position to check for changes after.
+ _perf_factor: Used by unit tests to choose when to use each
+ optimisation.
Return:
A subset of entities which have changed after the given stream position.
@@ -184,6 +186,22 @@ class StreamChangeCache:
This will be all entities if the given stream position is at or earlier
than the earliest known stream position.
"""
+ if not self._cache or stream_pos <= self._earliest_known_stream_pos:
+ self.metrics.inc_misses()
+ return set(entities)
+
+ # If there have been tonnes of changes compared with the number of
+ # entities, it is faster to check each entities stream ordering
+ # one-by-one.
+ max_stream_pos, _ = self._cache.peekitem()
+ if max_stream_pos - stream_pos > _perf_factor * len(entities):
+ self.metrics.inc_hits()
+ return {
+ entity
+ for entity in entities
+ if self._entity_to_key.get(entity, -1) > stream_pos
+ }
+
cache_result = self.get_all_entities_changed(stream_pos)
if cache_result.hit:
# We now do an intersection, trying to do so in the most efficient
diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py
index 3df053493b..5d38718a50 100644
--- a/tests/util/test_stream_change_cache.py
+++ b/tests/util/test_stream_change_cache.py
@@ -1,3 +1,5 @@
+from parameterized import parameterized
+
from synapse.util.caches.stream_change_cache import StreamChangeCache
from tests import unittest
@@ -161,7 +163,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
self.assertFalse(cache.has_any_entity_changed(2))
self.assertFalse(cache.has_any_entity_changed(3))
- def test_get_entities_changed(self) -> None:
+ @parameterized.expand([(0,), (1000000000,)])
+ def test_get_entities_changed(self, perf_factor: int) -> None:
"""
StreamChangeCache.get_entities_changed will return the entities in the
given list that have changed since the provided stream ID. If the
@@ -178,7 +181,9 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
# get the ones after that point.
self.assertEqual(
cache.get_entities_changed(
- ["user@foo.com", "bar@baz.net", "user@elsewhere.org"], stream_pos=2
+ ["user@foo.com", "bar@baz.net", "user@elsewhere.org"],
+ stream_pos=2,
+ _perf_factor=perf_factor,
),
{"bar@baz.net", "user@elsewhere.org"},
)
@@ -195,6 +200,7 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
"not@here.website",
],
stream_pos=2,
+ _perf_factor=perf_factor,
),
{"bar@baz.net", "user@elsewhere.org"},
)
@@ -210,6 +216,7 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
"not@here.website",
],
stream_pos=0,
+ _perf_factor=perf_factor,
),
{"user@foo.com", "bar@baz.net", "user@elsewhere.org", "not@here.website"},
)
@@ -217,7 +224,11 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
# Query a subset of the entries mid-way through the stream. We should
# only get back the subset.
self.assertEqual(
- cache.get_entities_changed(["bar@baz.net"], stream_pos=2),
+ cache.get_entities_changed(
+ ["bar@baz.net"],
+ stream_pos=2,
+ _perf_factor=perf_factor,
+ ),
{"bar@baz.net"},
)
From c3682ff668a5e7846e87d5b82d33fe86036d4bba Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 May 2024 10:34:30 +0100
Subject: [PATCH 062/503] Bump jsonschema from 4.21.1 to 4.22.0 (#17157)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [jsonschema](https://github.com/python-jsonschema/jsonschema) from
4.21.1 to 4.22.0.
Release notes
Sourced from jsonschema's
releases .
v4.22.0
What's Changed
Improve best_match (and thereby error messages from
jsonschema.validate) in cases where there are multiple
sibling errors from applying anyOf /
allOf -- i.e. when multiple elements of a JSON array have
errors, we now do prefer showing errors from earlier elements rather
than simply showing an error for the full array (#1250 ).
(Micro-)optimize equality checks when comparing for JSON Schema
equality by first checking for object identity, as ==
would.
New Contributors
Full Changelog : https://github.com/python-jsonschema/jsonschema/compare/v4.21.1...v4.22.0
Changelog
Sourced from jsonschema's
changelog .
v4.22.0
Improve best_match (and thereby error messages from
jsonschema.validate) in cases where there are multiple
sibling errors from applying anyOf /
allOf -- i.e. when multiple elements of a JSON array have
errors, we now do prefer showing errors from earlier elements rather
than simply showing an error for the full array (#1250 ).
(Micro-)optimize equality checks when comparing for JSON Schema
equality by first checking for object identity, as ==
would.
Commits
9882dbe
Add / ignore the new specification test suite property.
ebc90bb
Merge commit '8fcfc3a674a7188a4fcc822b7a91efb3e0422a20'
8fcfc3a
Squashed 'json/' changes from b41167c74..54f3784a8
30b7537
Pin pyenchant to pre from below until pyenchant/pyenchant#302
is released.
c3729db
Enable doctests for the rest of the referencing page.
70a994c
Remove a now-unneeded noqa since apparently this is fixed in new
ruff.
e6d0ef1
Fix a minor typo in the referencing example docs.
bceaf41
Another placeholder benchmark for future optimization.
b20234e
Consider errors from earlier indices (in instances) to be better
matches
41b49c6
Minor improvement to test failure message when a best match test
fails.
Additional commits viewable in compare
view
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 2c3ae2f88f..0a2e942e29 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -928,13 +928,13 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
-version = "4.21.1"
+version = "4.22.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"},
- {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"},
+ {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"},
+ {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"},
]
[package.dependencies]
@@ -2007,6 +2007,7 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -2014,8 +2015,16 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
+ {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@@ -2032,6 +2041,7 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -2039,6 +2049,7 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
From 792cfe7ba651ff96fcc88ee75a9702f263df1c04 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 May 2024 10:34:46 +0100
Subject: [PATCH 063/503] Bump lxml from 5.1.0 to 5.2.1 (#17158)
---
poetry.lock | 237 ++++++++++++++++++++++++++++++++++------------------
1 file changed, 157 insertions(+), 80 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 0a2e942e29..c0fce0f45e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1004,96 +1004,173 @@ pyasn1 = ">=0.4.6"
[[package]]
name = "lxml"
-version = "5.1.0"
+version = "5.2.1"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
optional = true
python-versions = ">=3.6"
files = [
- {file = "lxml-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:704f5572ff473a5f897745abebc6df40f22d4133c1e0a1f124e4f2bd3330ff7e"},
- {file = "lxml-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9d3c0f8567ffe7502d969c2c1b809892dc793b5d0665f602aad19895f8d508da"},
- {file = "lxml-5.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5fcfbebdb0c5d8d18b84118842f31965d59ee3e66996ac842e21f957eb76138c"},
- {file = "lxml-5.1.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f37c6d7106a9d6f0708d4e164b707037b7380fcd0b04c5bd9cae1fb46a856fb"},
- {file = "lxml-5.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2befa20a13f1a75c751f47e00929fb3433d67eb9923c2c0b364de449121f447c"},
- {file = "lxml-5.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22b7ee4c35f374e2c20337a95502057964d7e35b996b1c667b5c65c567d2252a"},
- {file = "lxml-5.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bf8443781533b8d37b295016a4b53c1494fa9a03573c09ca5104550c138d5c05"},
- {file = "lxml-5.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:82bddf0e72cb2af3cbba7cec1d2fd11fda0de6be8f4492223d4a268713ef2147"},
- {file = "lxml-5.1.0-cp310-cp310-win32.whl", hash = "sha256:b66aa6357b265670bb574f050ffceefb98549c721cf28351b748be1ef9577d93"},
- {file = "lxml-5.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:4946e7f59b7b6a9e27bef34422f645e9a368cb2be11bf1ef3cafc39a1f6ba68d"},
- {file = "lxml-5.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:14deca1460b4b0f6b01f1ddc9557704e8b365f55c63070463f6c18619ebf964f"},
- {file = "lxml-5.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed8c3d2cd329bf779b7ed38db176738f3f8be637bb395ce9629fc76f78afe3d4"},
- {file = "lxml-5.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:436a943c2900bb98123b06437cdd30580a61340fbdb7b28aaf345a459c19046a"},
- {file = "lxml-5.1.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:acb6b2f96f60f70e7f34efe0c3ea34ca63f19ca63ce90019c6cbca6b676e81fa"},
- {file = "lxml-5.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af8920ce4a55ff41167ddbc20077f5698c2e710ad3353d32a07d3264f3a2021e"},
- {file = "lxml-5.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cfced4a069003d8913408e10ca8ed092c49a7f6cefee9bb74b6b3e860683b45"},
- {file = "lxml-5.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9e5ac3437746189a9b4121db2a7b86056ac8786b12e88838696899328fc44bb2"},
- {file = "lxml-5.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f4c9bda132ad108b387c33fabfea47866af87f4ea6ffb79418004f0521e63204"},
- {file = "lxml-5.1.0-cp311-cp311-win32.whl", hash = "sha256:bc64d1b1dab08f679fb89c368f4c05693f58a9faf744c4d390d7ed1d8223869b"},
- {file = "lxml-5.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:a5ab722ae5a873d8dcee1f5f45ddd93c34210aed44ff2dc643b5025981908cda"},
- {file = "lxml-5.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9aa543980ab1fbf1720969af1d99095a548ea42e00361e727c58a40832439114"},
- {file = "lxml-5.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6f11b77ec0979f7e4dc5ae081325a2946f1fe424148d3945f943ceaede98adb8"},
- {file = "lxml-5.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a36c506e5f8aeb40680491d39ed94670487ce6614b9d27cabe45d94cd5d63e1e"},
- {file = "lxml-5.1.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f643ffd2669ffd4b5a3e9b41c909b72b2a1d5e4915da90a77e119b8d48ce867a"},
- {file = "lxml-5.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16dd953fb719f0ffc5bc067428fc9e88f599e15723a85618c45847c96f11f431"},
- {file = "lxml-5.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16018f7099245157564d7148165132c70adb272fb5a17c048ba70d9cc542a1a1"},
- {file = "lxml-5.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:82cd34f1081ae4ea2ede3d52f71b7be313756e99b4b5f829f89b12da552d3aa3"},
- {file = "lxml-5.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:19a1bc898ae9f06bccb7c3e1dfd73897ecbbd2c96afe9095a6026016e5ca97b8"},
- {file = "lxml-5.1.0-cp312-cp312-win32.whl", hash = "sha256:13521a321a25c641b9ea127ef478b580b5ec82aa2e9fc076c86169d161798b01"},
- {file = "lxml-5.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:1ad17c20e3666c035db502c78b86e58ff6b5991906e55bdbef94977700c72623"},
- {file = "lxml-5.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:24ef5a4631c0b6cceaf2dbca21687e29725b7c4e171f33a8f8ce23c12558ded1"},
- {file = "lxml-5.1.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8d2900b7f5318bc7ad8631d3d40190b95ef2aa8cc59473b73b294e4a55e9f30f"},
- {file = "lxml-5.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:601f4a75797d7a770daed8b42b97cd1bb1ba18bd51a9382077a6a247a12aa38d"},
- {file = "lxml-5.1.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4b68c961b5cc402cbd99cca5eb2547e46ce77260eb705f4d117fd9c3f932b95"},
- {file = "lxml-5.1.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:afd825e30f8d1f521713a5669b63657bcfe5980a916c95855060048b88e1adb7"},
- {file = "lxml-5.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:262bc5f512a66b527d026518507e78c2f9c2bd9eb5c8aeeb9f0eb43fcb69dc67"},
- {file = "lxml-5.1.0-cp36-cp36m-win32.whl", hash = "sha256:e856c1c7255c739434489ec9c8aa9cdf5179785d10ff20add308b5d673bed5cd"},
- {file = "lxml-5.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c7257171bb8d4432fe9d6fdde4d55fdbe663a63636a17f7f9aaba9bcb3153ad7"},
- {file = "lxml-5.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b9e240ae0ba96477682aa87899d94ddec1cc7926f9df29b1dd57b39e797d5ab5"},
- {file = "lxml-5.1.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a96f02ba1bcd330807fc060ed91d1f7a20853da6dd449e5da4b09bfcc08fdcf5"},
- {file = "lxml-5.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3898ae2b58eeafedfe99e542a17859017d72d7f6a63de0f04f99c2cb125936"},
- {file = "lxml-5.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61c5a7edbd7c695e54fca029ceb351fc45cd8860119a0f83e48be44e1c464862"},
- {file = "lxml-5.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3aeca824b38ca78d9ee2ab82bd9883083d0492d9d17df065ba3b94e88e4d7ee6"},
- {file = "lxml-5.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8f52fe6859b9db71ee609b0c0a70fea5f1e71c3462ecf144ca800d3f434f0764"},
- {file = "lxml-5.1.0-cp37-cp37m-win32.whl", hash = "sha256:d42e3a3fc18acc88b838efded0e6ec3edf3e328a58c68fbd36a7263a874906c8"},
- {file = "lxml-5.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:eac68f96539b32fce2c9b47eb7c25bb2582bdaf1bbb360d25f564ee9e04c542b"},
- {file = "lxml-5.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ae15347a88cf8af0949a9872b57a320d2605ae069bcdf047677318bc0bba45b1"},
- {file = "lxml-5.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c26aab6ea9c54d3bed716b8851c8bfc40cb249b8e9880e250d1eddde9f709bf5"},
- {file = "lxml-5.1.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:342e95bddec3a698ac24378d61996b3ee5ba9acfeb253986002ac53c9a5f6f84"},
- {file = "lxml-5.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:725e171e0b99a66ec8605ac77fa12239dbe061482ac854d25720e2294652eeaa"},
- {file = "lxml-5.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d184e0d5c918cff04cdde9dbdf9600e960161d773666958c9d7b565ccc60c45"},
- {file = "lxml-5.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:98f3f020a2b736566c707c8e034945c02aa94e124c24f77ca097c446f81b01f1"},
- {file = "lxml-5.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6d48fc57e7c1e3df57be5ae8614bab6d4e7b60f65c5457915c26892c41afc59e"},
- {file = "lxml-5.1.0-cp38-cp38-win32.whl", hash = "sha256:7ec465e6549ed97e9f1e5ed51c657c9ede767bc1c11552f7f4d022c4df4a977a"},
- {file = "lxml-5.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:b21b4031b53d25b0858d4e124f2f9131ffc1530431c6d1321805c90da78388d1"},
- {file = "lxml-5.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:52427a7eadc98f9e62cb1368a5079ae826f94f05755d2d567d93ee1bc3ceb354"},
- {file = "lxml-5.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6a2a2c724d97c1eb8cf966b16ca2915566a4904b9aad2ed9a09c748ffe14f969"},
- {file = "lxml-5.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:843b9c835580d52828d8f69ea4302537337a21e6b4f1ec711a52241ba4a824f3"},
- {file = "lxml-5.1.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9b99f564659cfa704a2dd82d0684207b1aadf7d02d33e54845f9fc78e06b7581"},
- {file = "lxml-5.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f8b0c78e7aac24979ef09b7f50da871c2de2def043d468c4b41f512d831e912"},
- {file = "lxml-5.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9bcf86dfc8ff3e992fed847c077bd875d9e0ba2fa25d859c3a0f0f76f07f0c8d"},
- {file = "lxml-5.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:49a9b4af45e8b925e1cd6f3b15bbba2c81e7dba6dce170c677c9cda547411e14"},
- {file = "lxml-5.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:280f3edf15c2a967d923bcfb1f8f15337ad36f93525828b40a0f9d6c2ad24890"},
- {file = "lxml-5.1.0-cp39-cp39-win32.whl", hash = "sha256:ed7326563024b6e91fef6b6c7a1a2ff0a71b97793ac33dbbcf38f6005e51ff6e"},
- {file = "lxml-5.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:8d7b4beebb178e9183138f552238f7e6613162a42164233e2bda00cb3afac58f"},
- {file = "lxml-5.1.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9bd0ae7cc2b85320abd5e0abad5ccee5564ed5f0cc90245d2f9a8ef330a8deae"},
- {file = "lxml-5.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8c1d679df4361408b628f42b26a5d62bd3e9ba7f0c0e7969f925021554755aa"},
- {file = "lxml-5.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2ad3a8ce9e8a767131061a22cd28fdffa3cd2dc193f399ff7b81777f3520e372"},
- {file = "lxml-5.1.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:304128394c9c22b6569eba2a6d98392b56fbdfbad58f83ea702530be80d0f9df"},
- {file = "lxml-5.1.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d74fcaf87132ffc0447b3c685a9f862ffb5b43e70ea6beec2fb8057d5d2a1fea"},
- {file = "lxml-5.1.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:8cf5877f7ed384dabfdcc37922c3191bf27e55b498fecece9fd5c2c7aaa34c33"},
- {file = "lxml-5.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:877efb968c3d7eb2dad540b6cabf2f1d3c0fbf4b2d309a3c141f79c7e0061324"},
- {file = "lxml-5.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f14a4fb1c1c402a22e6a341a24c1341b4a3def81b41cd354386dcb795f83897"},
- {file = "lxml-5.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:25663d6e99659544ee8fe1b89b1a8c0aaa5e34b103fab124b17fa958c4a324a6"},
- {file = "lxml-5.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8b9f19df998761babaa7f09e6bc169294eefafd6149aaa272081cbddc7ba4ca3"},
- {file = "lxml-5.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e53d7e6a98b64fe54775d23a7c669763451340c3d44ad5e3a3b48a1efbdc96f"},
- {file = "lxml-5.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c3cd1fc1dc7c376c54440aeaaa0dcc803d2126732ff5c6b68ccd619f2e64be4f"},
- {file = "lxml-5.1.0.tar.gz", hash = "sha256:3eea6ed6e6c918e468e693c41ef07f3c3acc310b70ddd9cc72d9ef84bc9564ca"},
+ {file = "lxml-5.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1f7785f4f789fdb522729ae465adcaa099e2a3441519df750ebdccc481d961a1"},
+ {file = "lxml-5.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6cc6ee342fb7fa2471bd9b6d6fdfc78925a697bf5c2bcd0a302e98b0d35bfad3"},
+ {file = "lxml-5.2.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:794f04eec78f1d0e35d9e0c36cbbb22e42d370dda1609fb03bcd7aeb458c6377"},
+ {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817d420c60a5183953c783b0547d9eb43b7b344a2c46f69513d5952a78cddf3"},
+ {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2213afee476546a7f37c7a9b4ad4d74b1e112a6fafffc9185d6d21f043128c81"},
+ {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b070bbe8d3f0f6147689bed981d19bbb33070225373338df755a46893528104a"},
+ {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e02c5175f63effbd7c5e590399c118d5db6183bbfe8e0d118bdb5c2d1b48d937"},
+ {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:3dc773b2861b37b41a6136e0b72a1a44689a9c4c101e0cddb6b854016acc0aa8"},
+ {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:d7520db34088c96cc0e0a3ad51a4fd5b401f279ee112aa2b7f8f976d8582606d"},
+ {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:bcbf4af004f98793a95355980764b3d80d47117678118a44a80b721c9913436a"},
+ {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2b44bec7adf3e9305ce6cbfa47a4395667e744097faed97abb4728748ba7d47"},
+ {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1c5bb205e9212d0ebddf946bc07e73fa245c864a5f90f341d11ce7b0b854475d"},
+ {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2c9d147f754b1b0e723e6afb7ba1566ecb162fe4ea657f53d2139bbf894d050a"},
+ {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:3545039fa4779be2df51d6395e91a810f57122290864918b172d5dc7ca5bb433"},
+ {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a91481dbcddf1736c98a80b122afa0f7296eeb80b72344d7f45dc9f781551f56"},
+ {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2ddfe41ddc81f29a4c44c8ce239eda5ade4e7fc305fb7311759dd6229a080052"},
+ {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a7baf9ffc238e4bf401299f50e971a45bfcc10a785522541a6e3179c83eabf0a"},
+ {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:31e9a882013c2f6bd2f2c974241bf4ba68c85eba943648ce88936d23209a2e01"},
+ {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0a15438253b34e6362b2dc41475e7f80de76320f335e70c5528b7148cac253a1"},
+ {file = "lxml-5.2.1-cp310-cp310-win32.whl", hash = "sha256:6992030d43b916407c9aa52e9673612ff39a575523c5f4cf72cdef75365709a5"},
+ {file = "lxml-5.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:da052e7962ea2d5e5ef5bc0355d55007407087392cf465b7ad84ce5f3e25fe0f"},
+ {file = "lxml-5.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:70ac664a48aa64e5e635ae5566f5227f2ab7f66a3990d67566d9907edcbbf867"},
+ {file = "lxml-5.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1ae67b4e737cddc96c99461d2f75d218bdf7a0c3d3ad5604d1f5e7464a2f9ffe"},
+ {file = "lxml-5.2.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f18a5a84e16886898e51ab4b1d43acb3083c39b14c8caeb3589aabff0ee0b270"},
+ {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6f2c8372b98208ce609c9e1d707f6918cc118fea4e2c754c9f0812c04ca116d"},
+ {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:394ed3924d7a01b5bd9a0d9d946136e1c2f7b3dc337196d99e61740ed4bc6fe1"},
+ {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d077bc40a1fe984e1a9931e801e42959a1e6598edc8a3223b061d30fbd26bbc"},
+ {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:764b521b75701f60683500d8621841bec41a65eb739b8466000c6fdbc256c240"},
+ {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:3a6b45da02336895da82b9d472cd274b22dc27a5cea1d4b793874eead23dd14f"},
+ {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:5ea7b6766ac2dfe4bcac8b8595107665a18ef01f8c8343f00710b85096d1b53a"},
+ {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:e196a4ff48310ba62e53a8e0f97ca2bca83cdd2fe2934d8b5cb0df0a841b193a"},
+ {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:200e63525948e325d6a13a76ba2911f927ad399ef64f57898cf7c74e69b71095"},
+ {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dae0ed02f6b075426accbf6b2863c3d0a7eacc1b41fb40f2251d931e50188dad"},
+ {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:ab31a88a651039a07a3ae327d68ebdd8bc589b16938c09ef3f32a4b809dc96ef"},
+ {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:df2e6f546c4df14bc81f9498bbc007fbb87669f1bb707c6138878c46b06f6510"},
+ {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5dd1537e7cc06efd81371f5d1a992bd5ab156b2b4f88834ca852de4a8ea523fa"},
+ {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9b9ec9c9978b708d488bec36b9e4c94d88fd12ccac3e62134a9d17ddba910ea9"},
+ {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8e77c69d5892cb5ba71703c4057091e31ccf534bd7f129307a4d084d90d014b8"},
+ {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a8d5c70e04aac1eda5c829a26d1f75c6e5286c74743133d9f742cda8e53b9c2f"},
+ {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c94e75445b00319c1fad60f3c98b09cd63fe1134a8a953dcd48989ef42318534"},
+ {file = "lxml-5.2.1-cp311-cp311-win32.whl", hash = "sha256:4951e4f7a5680a2db62f7f4ab2f84617674d36d2d76a729b9a8be4b59b3659be"},
+ {file = "lxml-5.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:5c670c0406bdc845b474b680b9a5456c561c65cf366f8db5a60154088c92d102"},
+ {file = "lxml-5.2.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:abc25c3cab9ec7fcd299b9bcb3b8d4a1231877e425c650fa1c7576c5107ab851"},
+ {file = "lxml-5.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6935bbf153f9a965f1e07c2649c0849d29832487c52bb4a5c5066031d8b44fd5"},
+ {file = "lxml-5.2.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d793bebb202a6000390a5390078e945bbb49855c29c7e4d56a85901326c3b5d9"},
+ {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afd5562927cdef7c4f5550374acbc117fd4ecc05b5007bdfa57cc5355864e0a4"},
+ {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0e7259016bc4345a31af861fdce942b77c99049d6c2107ca07dc2bba2435c1d9"},
+ {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:530e7c04f72002d2f334d5257c8a51bf409db0316feee7c87e4385043be136af"},
+ {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59689a75ba8d7ffca577aefd017d08d659d86ad4585ccc73e43edbfc7476781a"},
+ {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f9737bf36262046213a28e789cc82d82c6ef19c85a0cf05e75c670a33342ac2c"},
+ {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:3a74c4f27167cb95c1d4af1c0b59e88b7f3e0182138db2501c353555f7ec57f4"},
+ {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:68a2610dbe138fa8c5826b3f6d98a7cfc29707b850ddcc3e21910a6fe51f6ca0"},
+ {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f0a1bc63a465b6d72569a9bba9f2ef0334c4e03958e043da1920299100bc7c08"},
+ {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c2d35a1d047efd68027817b32ab1586c1169e60ca02c65d428ae815b593e65d4"},
+ {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:79bd05260359170f78b181b59ce871673ed01ba048deef4bf49a36ab3e72e80b"},
+ {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:865bad62df277c04beed9478fe665b9ef63eb28fe026d5dedcb89b537d2e2ea6"},
+ {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:44f6c7caff88d988db017b9b0e4ab04934f11e3e72d478031efc7edcac6c622f"},
+ {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:71e97313406ccf55d32cc98a533ee05c61e15d11b99215b237346171c179c0b0"},
+ {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:057cdc6b86ab732cf361f8b4d8af87cf195a1f6dc5b0ff3de2dced242c2015e0"},
+ {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f3bbbc998d42f8e561f347e798b85513ba4da324c2b3f9b7969e9c45b10f6169"},
+ {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:491755202eb21a5e350dae00c6d9a17247769c64dcf62d8c788b5c135e179dc4"},
+ {file = "lxml-5.2.1-cp312-cp312-win32.whl", hash = "sha256:8de8f9d6caa7f25b204fc861718815d41cbcf27ee8f028c89c882a0cf4ae4134"},
+ {file = "lxml-5.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:f2a9efc53d5b714b8df2b4b3e992accf8ce5bbdfe544d74d5c6766c9e1146a3a"},
+ {file = "lxml-5.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:70a9768e1b9d79edca17890175ba915654ee1725975d69ab64813dd785a2bd5c"},
+ {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c38d7b9a690b090de999835f0443d8aa93ce5f2064035dfc48f27f02b4afc3d0"},
+ {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5670fb70a828663cc37552a2a85bf2ac38475572b0e9b91283dc09efb52c41d1"},
+ {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:958244ad566c3ffc385f47dddde4145088a0ab893504b54b52c041987a8c1863"},
+ {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6241d4eee5f89453307c2f2bfa03b50362052ca0af1efecf9fef9a41a22bb4f"},
+ {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:2a66bf12fbd4666dd023b6f51223aed3d9f3b40fef06ce404cb75bafd3d89536"},
+ {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:9123716666e25b7b71c4e1789ec829ed18663152008b58544d95b008ed9e21e9"},
+ {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:0c3f67e2aeda739d1cc0b1102c9a9129f7dc83901226cc24dd72ba275ced4218"},
+ {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:5d5792e9b3fb8d16a19f46aa8208987cfeafe082363ee2745ea8b643d9cc5b45"},
+ {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:88e22fc0a6684337d25c994381ed8a1580a6f5ebebd5ad41f89f663ff4ec2885"},
+ {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:21c2e6b09565ba5b45ae161b438e033a86ad1736b8c838c766146eff8ceffff9"},
+ {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_s390x.whl", hash = "sha256:afbbdb120d1e78d2ba8064a68058001b871154cc57787031b645c9142b937a62"},
+ {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:627402ad8dea044dde2eccde4370560a2b750ef894c9578e1d4f8ffd54000461"},
+ {file = "lxml-5.2.1-cp36-cp36m-win32.whl", hash = "sha256:e89580a581bf478d8dcb97d9cd011d567768e8bc4095f8557b21c4d4c5fea7d0"},
+ {file = "lxml-5.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:59565f10607c244bc4c05c0c5fa0c190c990996e0c719d05deec7030c2aa8289"},
+ {file = "lxml-5.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:857500f88b17a6479202ff5fe5f580fc3404922cd02ab3716197adf1ef628029"},
+ {file = "lxml-5.2.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56c22432809085b3f3ae04e6e7bdd36883d7258fcd90e53ba7b2e463efc7a6af"},
+ {file = "lxml-5.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a55ee573116ba208932e2d1a037cc4b10d2c1cb264ced2184d00b18ce585b2c0"},
+ {file = "lxml-5.2.1-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:6cf58416653c5901e12624e4013708b6e11142956e7f35e7a83f1ab02f3fe456"},
+ {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:64c2baa7774bc22dd4474248ba16fe1a7f611c13ac6123408694d4cc93d66dbd"},
+ {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:74b28c6334cca4dd704e8004cba1955af0b778cf449142e581e404bd211fb619"},
+ {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:7221d49259aa1e5a8f00d3d28b1e0b76031655ca74bb287123ef56c3db92f213"},
+ {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3dbe858ee582cbb2c6294dc85f55b5f19c918c2597855e950f34b660f1a5ede6"},
+ {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:04ab5415bf6c86e0518d57240a96c4d1fcfc3cb370bb2ac2a732b67f579e5a04"},
+ {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:6ab833e4735a7e5533711a6ea2df26459b96f9eec36d23f74cafe03631647c41"},
+ {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f443cdef978430887ed55112b491f670bba6462cea7a7742ff8f14b7abb98d75"},
+ {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:9e2addd2d1866fe112bc6f80117bcc6bc25191c5ed1bfbcf9f1386a884252ae8"},
+ {file = "lxml-5.2.1-cp37-cp37m-win32.whl", hash = "sha256:f51969bac61441fd31f028d7b3b45962f3ecebf691a510495e5d2cd8c8092dbd"},
+ {file = "lxml-5.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:b0b58fbfa1bf7367dde8a557994e3b1637294be6cf2169810375caf8571a085c"},
+ {file = "lxml-5.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:804f74efe22b6a227306dd890eecc4f8c59ff25ca35f1f14e7482bbce96ef10b"},
+ {file = "lxml-5.2.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08802f0c56ed150cc6885ae0788a321b73505d2263ee56dad84d200cab11c07a"},
+ {file = "lxml-5.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f8c09ed18ecb4ebf23e02b8e7a22a05d6411911e6fabef3a36e4f371f4f2585"},
+ {file = "lxml-5.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3d30321949861404323c50aebeb1943461a67cd51d4200ab02babc58bd06a86"},
+ {file = "lxml-5.2.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:b560e3aa4b1d49e0e6c847d72665384db35b2f5d45f8e6a5c0072e0283430533"},
+ {file = "lxml-5.2.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:058a1308914f20784c9f4674036527e7c04f7be6fb60f5d61353545aa7fcb739"},
+ {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:adfb84ca6b87e06bc6b146dc7da7623395db1e31621c4785ad0658c5028b37d7"},
+ {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:417d14450f06d51f363e41cace6488519038f940676ce9664b34ebf5653433a5"},
+ {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a2dfe7e2473f9b59496247aad6e23b405ddf2e12ef0765677b0081c02d6c2c0b"},
+ {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bf2e2458345d9bffb0d9ec16557d8858c9c88d2d11fed53998512504cd9df49b"},
+ {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:58278b29cb89f3e43ff3e0c756abbd1518f3ee6adad9e35b51fb101c1c1daaec"},
+ {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:64641a6068a16201366476731301441ce93457eb8452056f570133a6ceb15fca"},
+ {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:78bfa756eab503673991bdcf464917ef7845a964903d3302c5f68417ecdc948c"},
+ {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:11a04306fcba10cd9637e669fd73aa274c1c09ca64af79c041aa820ea992b637"},
+ {file = "lxml-5.2.1-cp38-cp38-win32.whl", hash = "sha256:66bc5eb8a323ed9894f8fa0ee6cb3e3fb2403d99aee635078fd19a8bc7a5a5da"},
+ {file = "lxml-5.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:9676bfc686fa6a3fa10cd4ae6b76cae8be26eb5ec6811d2a325636c460da1806"},
+ {file = "lxml-5.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cf22b41fdae514ee2f1691b6c3cdeae666d8b7fa9434de445f12bbeee0cf48dd"},
+ {file = "lxml-5.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ec42088248c596dbd61d4ae8a5b004f97a4d91a9fd286f632e42e60b706718d7"},
+ {file = "lxml-5.2.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd53553ddad4a9c2f1f022756ae64abe16da1feb497edf4d9f87f99ec7cf86bd"},
+ {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feaa45c0eae424d3e90d78823f3828e7dc42a42f21ed420db98da2c4ecf0a2cb"},
+ {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddc678fb4c7e30cf830a2b5a8d869538bc55b28d6c68544d09c7d0d8f17694dc"},
+ {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:853e074d4931dbcba7480d4dcab23d5c56bd9607f92825ab80ee2bd916edea53"},
+ {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc4691d60512798304acb9207987e7b2b7c44627ea88b9d77489bbe3e6cc3bd4"},
+ {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:beb72935a941965c52990f3a32d7f07ce869fe21c6af8b34bf6a277b33a345d3"},
+ {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:6588c459c5627fefa30139be4d2e28a2c2a1d0d1c265aad2ba1935a7863a4913"},
+ {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:588008b8497667f1ddca7c99f2f85ce8511f8f7871b4a06ceede68ab62dff64b"},
+ {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6787b643356111dfd4032b5bffe26d2f8331556ecb79e15dacb9275da02866e"},
+ {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7c17b64b0a6ef4e5affae6a3724010a7a66bda48a62cfe0674dabd46642e8b54"},
+ {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:27aa20d45c2e0b8cd05da6d4759649170e8dfc4f4e5ef33a34d06f2d79075d57"},
+ {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d4f2cc7060dc3646632d7f15fe68e2fa98f58e35dd5666cd525f3b35d3fed7f8"},
+ {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff46d772d5f6f73564979cd77a4fffe55c916a05f3cb70e7c9c0590059fb29ef"},
+ {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:96323338e6c14e958d775700ec8a88346014a85e5de73ac7967db0367582049b"},
+ {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:52421b41ac99e9d91934e4d0d0fe7da9f02bfa7536bb4431b4c05c906c8c6919"},
+ {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:7a7efd5b6d3e30d81ec68ab8a88252d7c7c6f13aaa875009fe3097eb4e30b84c"},
+ {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ed777c1e8c99b63037b91f9d73a6aad20fd035d77ac84afcc205225f8f41188"},
+ {file = "lxml-5.2.1-cp39-cp39-win32.whl", hash = "sha256:644df54d729ef810dcd0f7732e50e5ad1bd0a135278ed8d6bcb06f33b6b6f708"},
+ {file = "lxml-5.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:9ca66b8e90daca431b7ca1408cae085d025326570e57749695d6a01454790e95"},
+ {file = "lxml-5.2.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9b0ff53900566bc6325ecde9181d89afadc59c5ffa39bddf084aaedfe3b06a11"},
+ {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd6037392f2d57793ab98d9e26798f44b8b4da2f2464388588f48ac52c489ea1"},
+ {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9c07e7a45bb64e21df4b6aa623cb8ba214dfb47d2027d90eac197329bb5e94"},
+ {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3249cc2989d9090eeac5467e50e9ec2d40704fea9ab72f36b034ea34ee65ca98"},
+ {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f42038016852ae51b4088b2862126535cc4fc85802bfe30dea3500fdfaf1864e"},
+ {file = "lxml-5.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:533658f8fbf056b70e434dff7e7aa611bcacb33e01f75de7f821810e48d1bb66"},
+ {file = "lxml-5.2.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:622020d4521e22fb371e15f580d153134bfb68d6a429d1342a25f051ec72df1c"},
+ {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efa7b51824aa0ee957ccd5a741c73e6851de55f40d807f08069eb4c5a26b2baa"},
+ {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c6ad0fbf105f6bcc9300c00010a2ffa44ea6f555df1a2ad95c88f5656104817"},
+ {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e233db59c8f76630c512ab4a4daf5a5986da5c3d5b44b8e9fc742f2a24dbd460"},
+ {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a014510830df1475176466b6087fc0c08b47a36714823e58d8b8d7709132a96"},
+ {file = "lxml-5.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:d38c8f50ecf57f0463399569aa388b232cf1a2ffb8f0a9a5412d0db57e054860"},
+ {file = "lxml-5.2.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5aea8212fb823e006b995c4dda533edcf98a893d941f173f6c9506126188860d"},
+ {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff097ae562e637409b429a7ac958a20aab237a0378c42dabaa1e3abf2f896e5f"},
+ {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f5d65c39f16717a47c36c756af0fb36144069c4718824b7533f803ecdf91138"},
+ {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3d0c3dd24bb4605439bf91068598d00c6370684f8de4a67c2992683f6c309d6b"},
+ {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e32be23d538753a8adb6c85bd539f5fd3b15cb987404327c569dfc5fd8366e85"},
+ {file = "lxml-5.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cc518cea79fd1e2f6c90baafa28906d4309d24f3a63e801d855e7424c5b34144"},
+ {file = "lxml-5.2.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a0af35bd8ebf84888373630f73f24e86bf016642fb8576fba49d3d6b560b7cbc"},
+ {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8aca2e3a72f37bfc7b14ba96d4056244001ddcc18382bd0daa087fd2e68a354"},
+ {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ca1e8188b26a819387b29c3895c47a5e618708fe6f787f3b1a471de2c4a94d9"},
+ {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c8ba129e6d3b0136a0f50345b2cb3db53f6bda5dd8c7f5d83fbccba97fb5dcb5"},
+ {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e998e304036198b4f6914e6a1e2b6f925208a20e2042563d9734881150c6c246"},
+ {file = "lxml-5.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d3be9b2076112e51b323bdf6d5a7f8a798de55fb8d95fcb64bd179460cdc0704"},
+ {file = "lxml-5.2.1.tar.gz", hash = "sha256:3f7765e69bbce0906a7c74d5fe46d2c7a7596147318dbc08e4a2431f3060e306"},
]
[package.extras]
cssselect = ["cssselect (>=0.7)"]
+html-clean = ["lxml-html-clean"]
html5 = ["html5lib"]
htmlsoup = ["BeautifulSoup4"]
-source = ["Cython (>=3.0.7)"]
+source = ["Cython (>=3.0.10)"]
[[package]]
name = "lxml-stubs"
From 1726b494579f8a96a1f474ff692fb7b8642be9aa Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 May 2024 10:34:56 +0100
Subject: [PATCH 064/503] Bump types-pillow from 10.2.0.20240415 to
10.2.0.20240423 (#17159)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index c0fce0f45e..5202c2df36 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2836,13 +2836,13 @@ files = [
[[package]]
name = "types-pillow"
-version = "10.2.0.20240415"
+version = "10.2.0.20240423"
description = "Typing stubs for Pillow"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-Pillow-10.2.0.20240415.tar.gz", hash = "sha256:dd6058027639bcdc66ba78b228cc25fdae42524c2150c78c804da427e7e76e70"},
- {file = "types_Pillow-10.2.0.20240415-py3-none-any.whl", hash = "sha256:f933332b7e96010bae9b9cf82a4c9979ff0c270d63f5c5bbffb2d789b85cd00b"},
+ {file = "types-Pillow-10.2.0.20240423.tar.gz", hash = "sha256:696e68b9b6a58548fc307a8669830469237c5b11809ddf978ac77fafa79251cd"},
+ {file = "types_Pillow-10.2.0.20240423-py3-none-any.whl", hash = "sha256:bd12923093b96c91d523efcdb66967a307f1a843bcfaf2d5a529146c10a9ced3"},
]
[[package]]
From cf30cfe5d1ad585ae0391b2606160b8b7057f853 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 May 2024 10:35:24 +0100
Subject: [PATCH 065/503] Bump pydantic from 2.7.0 to 2.7.1 (#17160)
---
poetry.lock | 168 ++++++++++++++++++++++++++--------------------------
1 file changed, 84 insertions(+), 84 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 5202c2df36..502f45f8bb 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1771,18 +1771,18 @@ files = [
[[package]]
name = "pydantic"
-version = "2.7.0"
+version = "2.7.1"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic-2.7.0-py3-none-any.whl", hash = "sha256:9dee74a271705f14f9a1567671d144a851c675b072736f0a7b2608fd9e495352"},
- {file = "pydantic-2.7.0.tar.gz", hash = "sha256:b5ecdd42262ca2462e2624793551e80911a1e989f462910bb81aef974b4bb383"},
+ {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"},
+ {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"},
]
[package.dependencies]
annotated-types = ">=0.4.0"
-pydantic-core = "2.18.1"
+pydantic-core = "2.18.2"
typing-extensions = ">=4.6.1"
[package.extras]
@@ -1790,90 +1790,90 @@ email = ["email-validator (>=2.0.0)"]
[[package]]
name = "pydantic-core"
-version = "2.18.1"
+version = "2.18.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_core-2.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ee9cf33e7fe14243f5ca6977658eb7d1042caaa66847daacbd2117adb258b226"},
- {file = "pydantic_core-2.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6b7bbb97d82659ac8b37450c60ff2e9f97e4eb0f8a8a3645a5568b9334b08b50"},
- {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df4249b579e75094f7e9bb4bd28231acf55e308bf686b952f43100a5a0be394c"},
- {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d0491006a6ad20507aec2be72e7831a42efc93193d2402018007ff827dc62926"},
- {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ae80f72bb7a3e397ab37b53a2b49c62cc5496412e71bc4f1277620a7ce3f52b"},
- {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58aca931bef83217fca7a390e0486ae327c4af9c3e941adb75f8772f8eeb03a1"},
- {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1be91ad664fc9245404a789d60cba1e91c26b1454ba136d2a1bf0c2ac0c0505a"},
- {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:667880321e916a8920ef49f5d50e7983792cf59f3b6079f3c9dac2b88a311d17"},
- {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f7054fdc556f5421f01e39cbb767d5ec5c1139ea98c3e5b350e02e62201740c7"},
- {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:030e4f9516f9947f38179249778709a460a3adb516bf39b5eb9066fcfe43d0e6"},
- {file = "pydantic_core-2.18.1-cp310-none-win32.whl", hash = "sha256:2e91711e36e229978d92642bfc3546333a9127ecebb3f2761372e096395fc649"},
- {file = "pydantic_core-2.18.1-cp310-none-win_amd64.whl", hash = "sha256:9a29726f91c6cb390b3c2338f0df5cd3e216ad7a938762d11c994bb37552edb0"},
- {file = "pydantic_core-2.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9ece8a49696669d483d206b4474c367852c44815fca23ac4e48b72b339807f80"},
- {file = "pydantic_core-2.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a5d83efc109ceddb99abd2c1316298ced2adb4570410defe766851a804fcd5b"},
- {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7973c381283783cd1043a8c8f61ea5ce7a3a58b0369f0ee0ee975eaf2f2a1b"},
- {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:54c7375c62190a7845091f521add19b0f026bcf6ae674bdb89f296972272e86d"},
- {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd63cec4e26e790b70544ae5cc48d11b515b09e05fdd5eff12e3195f54b8a586"},
- {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:561cf62c8a3498406495cfc49eee086ed2bb186d08bcc65812b75fda42c38294"},
- {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68717c38a68e37af87c4da20e08f3e27d7e4212e99e96c3d875fbf3f4812abfc"},
- {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d5728e93d28a3c63ee513d9ffbac9c5989de8c76e049dbcb5bfe4b923a9739d"},
- {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f0f17814c505f07806e22b28856c59ac80cee7dd0fbb152aed273e116378f519"},
- {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d816f44a51ba5175394bc6c7879ca0bd2be560b2c9e9f3411ef3a4cbe644c2e9"},
- {file = "pydantic_core-2.18.1-cp311-none-win32.whl", hash = "sha256:09f03dfc0ef8c22622eaa8608caa4a1e189cfb83ce847045eca34f690895eccb"},
- {file = "pydantic_core-2.18.1-cp311-none-win_amd64.whl", hash = "sha256:27f1009dc292f3b7ca77feb3571c537276b9aad5dd4efb471ac88a8bd09024e9"},
- {file = "pydantic_core-2.18.1-cp311-none-win_arm64.whl", hash = "sha256:48dd883db92e92519201f2b01cafa881e5f7125666141a49ffba8b9facc072b0"},
- {file = "pydantic_core-2.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b6b0e4912030c6f28bcb72b9ebe4989d6dc2eebcd2a9cdc35fefc38052dd4fe8"},
- {file = "pydantic_core-2.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3202a429fe825b699c57892d4371c74cc3456d8d71b7f35d6028c96dfecad31"},
- {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3982b0a32d0a88b3907e4b0dc36809fda477f0757c59a505d4e9b455f384b8b"},
- {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25595ac311f20e5324d1941909b0d12933f1fd2171075fcff763e90f43e92a0d"},
- {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14fe73881cf8e4cbdaded8ca0aa671635b597e42447fec7060d0868b52d074e6"},
- {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca976884ce34070799e4dfc6fbd68cb1d181db1eefe4a3a94798ddfb34b8867f"},
- {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684d840d2c9ec5de9cb397fcb3f36d5ebb6fa0d94734f9886032dd796c1ead06"},
- {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:54764c083bbe0264f0f746cefcded6cb08fbbaaf1ad1d78fb8a4c30cff999a90"},
- {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:201713f2f462e5c015b343e86e68bd8a530a4f76609b33d8f0ec65d2b921712a"},
- {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd1a9edb9dd9d79fbeac1ea1f9a8dd527a6113b18d2e9bcc0d541d308dae639b"},
- {file = "pydantic_core-2.18.1-cp312-none-win32.whl", hash = "sha256:d5e6b7155b8197b329dc787356cfd2684c9d6a6b1a197f6bbf45f5555a98d411"},
- {file = "pydantic_core-2.18.1-cp312-none-win_amd64.whl", hash = "sha256:9376d83d686ec62e8b19c0ac3bf8d28d8a5981d0df290196fb6ef24d8a26f0d6"},
- {file = "pydantic_core-2.18.1-cp312-none-win_arm64.whl", hash = "sha256:c562b49c96906b4029b5685075fe1ebd3b5cc2601dfa0b9e16c2c09d6cbce048"},
- {file = "pydantic_core-2.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3e352f0191d99fe617371096845070dee295444979efb8f27ad941227de6ad09"},
- {file = "pydantic_core-2.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0295d52b012cbe0d3059b1dba99159c3be55e632aae1999ab74ae2bd86a33d7"},
- {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56823a92075780582d1ffd4489a2e61d56fd3ebb4b40b713d63f96dd92d28144"},
- {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd3f79e17b56741b5177bcc36307750d50ea0698df6aa82f69c7db32d968c1c2"},
- {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38a5024de321d672a132b1834a66eeb7931959c59964b777e8f32dbe9523f6b1"},
- {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2ce426ee691319d4767748c8e0895cfc56593d725594e415f274059bcf3cb76"},
- {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2adaeea59849ec0939af5c5d476935f2bab4b7f0335b0110f0f069a41024278e"},
- {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9b6431559676a1079eac0f52d6d0721fb8e3c5ba43c37bc537c8c83724031feb"},
- {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:85233abb44bc18d16e72dc05bf13848a36f363f83757541f1a97db2f8d58cfd9"},
- {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:641a018af4fe48be57a2b3d7a1f0f5dbca07c1d00951d3d7463f0ac9dac66622"},
- {file = "pydantic_core-2.18.1-cp38-none-win32.whl", hash = "sha256:63d7523cd95d2fde0d28dc42968ac731b5bb1e516cc56b93a50ab293f4daeaad"},
- {file = "pydantic_core-2.18.1-cp38-none-win_amd64.whl", hash = "sha256:907a4d7720abfcb1c81619863efd47c8a85d26a257a2dbebdb87c3b847df0278"},
- {file = "pydantic_core-2.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:aad17e462f42ddbef5984d70c40bfc4146c322a2da79715932cd8976317054de"},
- {file = "pydantic_core-2.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:94b9769ba435b598b547c762184bcfc4783d0d4c7771b04a3b45775c3589ca44"},
- {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80e0e57cc704a52fb1b48f16d5b2c8818da087dbee6f98d9bf19546930dc64b5"},
- {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76b86e24039c35280ceee6dce7e62945eb93a5175d43689ba98360ab31eebc4a"},
- {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a05db5013ec0ca4a32cc6433f53faa2a014ec364031408540ba858c2172bb0"},
- {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:250ae39445cb5475e483a36b1061af1bc233de3e9ad0f4f76a71b66231b07f88"},
- {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a32204489259786a923e02990249c65b0f17235073149d0033efcebe80095570"},
- {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6395a4435fa26519fd96fdccb77e9d00ddae9dd6c742309bd0b5610609ad7fb2"},
- {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2533ad2883f001efa72f3d0e733fb846710c3af6dcdd544fe5bf14fa5fe2d7db"},
- {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b560b72ed4816aee52783c66854d96157fd8175631f01ef58e894cc57c84f0f6"},
- {file = "pydantic_core-2.18.1-cp39-none-win32.whl", hash = "sha256:582cf2cead97c9e382a7f4d3b744cf0ef1a6e815e44d3aa81af3ad98762f5a9b"},
- {file = "pydantic_core-2.18.1-cp39-none-win_amd64.whl", hash = "sha256:ca71d501629d1fa50ea7fa3b08ba884fe10cefc559f5c6c8dfe9036c16e8ae89"},
- {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e178e5b66a06ec5bf51668ec0d4ac8cfb2bdcb553b2c207d58148340efd00143"},
- {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:72722ce529a76a4637a60be18bd789d8fb871e84472490ed7ddff62d5fed620d"},
- {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe0c1ce5b129455e43f941f7a46f61f3d3861e571f2905d55cdbb8b5c6f5e2c"},
- {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4284c621f06a72ce2cb55f74ea3150113d926a6eb78ab38340c08f770eb9b4d"},
- {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a0c3e718f4e064efde68092d9d974e39572c14e56726ecfaeebbe6544521f47"},
- {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2027493cc44c23b598cfaf200936110433d9caa84e2c6cf487a83999638a96ac"},
- {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:76909849d1a6bffa5a07742294f3fa1d357dc917cb1fe7b470afbc3a7579d539"},
- {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ee7ccc7fb7e921d767f853b47814c3048c7de536663e82fbc37f5eb0d532224b"},
- {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ee2794111c188548a4547eccc73a6a8527fe2af6cf25e1a4ebda2fd01cdd2e60"},
- {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a139fe9f298dc097349fb4f28c8b81cc7a202dbfba66af0e14be5cfca4ef7ce5"},
- {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d074b07a10c391fc5bbdcb37b2f16f20fcd9e51e10d01652ab298c0d07908ee2"},
- {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c69567ddbac186e8c0aadc1f324a60a564cfe25e43ef2ce81bcc4b8c3abffbae"},
- {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:baf1c7b78cddb5af00971ad5294a4583188bda1495b13760d9f03c9483bb6203"},
- {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2684a94fdfd1b146ff10689c6e4e815f6a01141781c493b97342cdc5b06f4d5d"},
- {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:73c1bc8a86a5c9e8721a088df234265317692d0b5cd9e86e975ce3bc3db62a59"},
- {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e60defc3c15defb70bb38dd605ff7e0fae5f6c9c7cbfe0ad7868582cb7e844a6"},
- {file = "pydantic_core-2.18.1.tar.gz", hash = "sha256:de9d3e8717560eb05e28739d1b35e4eac2e458553a52a301e51352a7ffc86a35"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"},
+ {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"},
+ {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"},
+ {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"},
+ {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"},
+ {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"},
+ {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"},
+ {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"},
+ {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"},
+ {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"},
+ {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"},
+ {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"},
+ {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"},
+ {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"},
+ {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"},
+ {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"},
+ {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"},
+ {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"},
+ {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"},
+ {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"},
+ {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"},
+ {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"},
+ {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"},
+ {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"},
+ {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"},
+ {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"},
+ {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"},
+ {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"},
+ {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"},
+ {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"},
+ {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"},
+ {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"},
+ {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"},
+ {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"},
+ {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"},
]
[package.dependencies]
From 438bc2356093adabec920c4704a2cd16c0e0418c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 May 2024 10:35:37 +0100
Subject: [PATCH 066/503] Bump serde from 1.0.199 to 1.0.200 (#17161)
---
Cargo.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 24127c6540..ceda2789e2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -485,18 +485,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.199"
+version = "1.0.200"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a"
+checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.199"
+version = "1.0.200"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc"
+checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb"
dependencies = [
"proc-macro2",
"quote",
From dcd03d3b156362d522001e69625fb43ba04e06a2 Mon Sep 17 00:00:00 2001
From: Olivier 'reivilibre
Date: Tue, 7 May 2024 16:30:07 +0100
Subject: [PATCH 067/503] 1.107.0rc1
---
CHANGES.md | 59 +++++++++++++++++++++++++++++++++++++++
changelog.d/17000.bugfix | 1 -
changelog.d/17051.feature | 1 -
changelog.d/17071.doc | 1 -
changelog.d/17073.doc | 1 -
changelog.d/17077.bugfix | 1 -
changelog.d/17078.bugfix | 1 -
changelog.d/17082.feature | 1 -
changelog.d/17084.doc | 1 -
changelog.d/17104.feature | 1 -
changelog.d/17105.misc | 1 -
changelog.d/17114.doc | 1 -
changelog.d/17116.doc | 1 -
changelog.d/17120.bugfix | 1 -
changelog.d/17121.bugfix | 1 -
changelog.d/17127.bugfix | 1 -
changelog.d/17130.misc | 1 -
changelog.d/17131.misc | 1 -
changelog.d/17137.feature | 1 -
changelog.d/17140.doc | 1 -
changelog.d/17141.docker | 1 -
changelog.d/17146.misc | 1 -
changelog.d/17148.doc | 1 -
changelog.d/17152.bugfix | 1 -
debian/changelog | 6 ++++
pyproject.toml | 2 +-
26 files changed, 66 insertions(+), 24 deletions(-)
delete mode 100644 changelog.d/17000.bugfix
delete mode 100644 changelog.d/17051.feature
delete mode 100644 changelog.d/17071.doc
delete mode 100644 changelog.d/17073.doc
delete mode 100644 changelog.d/17077.bugfix
delete mode 100644 changelog.d/17078.bugfix
delete mode 100644 changelog.d/17082.feature
delete mode 100644 changelog.d/17084.doc
delete mode 100644 changelog.d/17104.feature
delete mode 100644 changelog.d/17105.misc
delete mode 100644 changelog.d/17114.doc
delete mode 100644 changelog.d/17116.doc
delete mode 100644 changelog.d/17120.bugfix
delete mode 100644 changelog.d/17121.bugfix
delete mode 100644 changelog.d/17127.bugfix
delete mode 100644 changelog.d/17130.misc
delete mode 100644 changelog.d/17131.misc
delete mode 100644 changelog.d/17137.feature
delete mode 100644 changelog.d/17140.doc
delete mode 100644 changelog.d/17141.docker
delete mode 100644 changelog.d/17146.misc
delete mode 100644 changelog.d/17148.doc
delete mode 100644 changelog.d/17152.bugfix
diff --git a/CHANGES.md b/CHANGES.md
index 7263832057..2e91f15ca0 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,62 @@
+# Synapse 1.107.0rc1 (2024-05-07)
+
+### Features
+
+- Add preliminary support for [MSC3823: Account Suspension](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#17051](https://github.com/element-hq/synapse/issues/17051))
+- Declare support for [Matrix v1.10](https://matrix.org/blog/2024/03/22/matrix-v1.10-release/). Contributed by @clokep. ([\#17082](https://github.com/element-hq/synapse/issues/17082))
+- Add support for MSC4115 (membership metadata on events). ([\#17104](https://github.com/element-hq/synapse/issues/17104), [\#17137](https://github.com/element-hq/synapse/issues/17137))
+
+### Bugfixes
+
+- Fixed search feature of Element Android on homeservers using SQLite by returning search terms as search highlights. ([\#17000](https://github.com/element-hq/synapse/issues/17000))
+- Fixes a bug introduced in v1.52.0 where the `destination` query parameter for the [Destination Rooms Admin API](https://element-hq.github.io/synapse/v1.105/usage/administration/admin_api/federation.html#destination-rooms) failed to actually filter returned rooms. ([\#17077](https://github.com/element-hq/synapse/issues/17077))
+- For MSC3266 room summaries, support queries at the recommended endpoint of `/_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`. The existing endpoint of `/_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` is deprecated. ([\#17078](https://github.com/element-hq/synapse/issues/17078))
+- Apply user email & picture during OIDC registration if present & selected. ([\#17120](https://github.com/element-hq/synapse/issues/17120))
+- Improve error message for cross signing reset with MSC3861 enabled. ([\#17121](https://github.com/element-hq/synapse/issues/17121))
+- Fix a bug which meant that to-device messages received over federation could be dropped when the server was under load or networking problems caused problems between Synapse processes or the database. ([\#17127](https://github.com/element-hq/synapse/issues/17127))
+- Fix bug where `StreamChangeCache` would not respect configured cache factors. ([\#17152](https://github.com/element-hq/synapse/issues/17152))
+
+### Updates to the Docker image
+
+- Correct licensing metadata on Docker image. ([\#17141](https://github.com/element-hq/synapse/issues/17141))
+
+### Improved Documentation
+
+- Update the `event_cache_size` and `global_factor` configuration options' documentation. ([\#17071](https://github.com/element-hq/synapse/issues/17071))
+- Remove broken sphinx docs. ([\#17073](https://github.com/element-hq/synapse/issues/17073), [\#17148](https://github.com/element-hq/synapse/issues/17148))
+- Add RuntimeDirectory to example matrix-synapse.service systemd unit. ([\#17084](https://github.com/element-hq/synapse/issues/17084))
+- Fix various small typos throughout the docs. ([\#17114](https://github.com/element-hq/synapse/issues/17114))
+- Update enable_notifs configuration documentation. ([\#17116](https://github.com/element-hq/synapse/issues/17116))
+- Update the upgrade.md with the latest minimum supported Rust version of 1.66.0. Contributed by @jahway603. ([\#17140](https://github.com/element-hq/synapse/issues/17140))
+
+### Internal Changes
+
+- Enable MSC3266 by default in the Synapse Complement image. ([\#17105](https://github.com/element-hq/synapse/issues/17105))
+- Add optimisation to `StreamChangeCache.get_entities_changed(..)`. ([\#17130](https://github.com/element-hq/synapse/issues/17130))
+- Update `tornado` Python dependency from 6.2 to 6.4. ([\#17131](https://github.com/element-hq/synapse/issues/17131))
+- Bump `pillow` from 10.2.0 to 10.3.0. ([\#17146](https://github.com/element-hq/synapse/issues/17146))
+
+
+
+### Updates to locked dependencies
+
+* Bump furo from 2024.1.29 to 2024.4.27. ([\#17133](https://github.com/element-hq/synapse/issues/17133))
+* Bump idna from 3.6 to 3.7. ([\#17136](https://github.com/element-hq/synapse/issues/17136))
+* Bump jsonschema from 4.21.1 to 4.22.0. ([\#17157](https://github.com/element-hq/synapse/issues/17157))
+* Bump lxml from 5.1.0 to 5.2.1. ([\#17158](https://github.com/element-hq/synapse/issues/17158))
+* Bump phonenumbers from 8.13.29 to 8.13.35. ([\#17106](https://github.com/element-hq/synapse/issues/17106))
+* Bump pydantic from 2.6.4 to 2.7.0. ([\#17107](https://github.com/element-hq/synapse/issues/17107))
+* Bump pydantic from 2.7.0 to 2.7.1. ([\#17160](https://github.com/element-hq/synapse/issues/17160))
+* Bump pyicu from 2.12 to 2.13. ([\#17109](https://github.com/element-hq/synapse/issues/17109))
+* Bump serde from 1.0.197 to 1.0.198. ([\#17111](https://github.com/element-hq/synapse/issues/17111))
+* Bump serde from 1.0.198 to 1.0.199. ([\#17132](https://github.com/element-hq/synapse/issues/17132))
+* Bump serde from 1.0.199 to 1.0.200. ([\#17161](https://github.com/element-hq/synapse/issues/17161))
+* Bump serde_json from 1.0.115 to 1.0.116. ([\#17112](https://github.com/element-hq/synapse/issues/17112))
+* Bump twisted from 23.10.0 to 24.3.0. ([\#17135](https://github.com/element-hq/synapse/issues/17135))
+* Bump types-bleach from 6.1.0.1 to 6.1.0.20240331. ([\#17110](https://github.com/element-hq/synapse/issues/17110))
+* Bump types-pillow from 10.2.0.20240415 to 10.2.0.20240423. ([\#17159](https://github.com/element-hq/synapse/issues/17159))
+* Bump types-setuptools from 69.0.0.20240125 to 69.5.0.20240423. ([\#17134](https://github.com/element-hq/synapse/issues/17134))
+
# Synapse 1.106.0 (2024-04-30)
No significant changes since 1.106.0rc1.
diff --git a/changelog.d/17000.bugfix b/changelog.d/17000.bugfix
deleted file mode 100644
index 86b21c9615..0000000000
--- a/changelog.d/17000.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fixed search feature of Element Android on homesevers using SQLite by returning search terms as search highlights.
\ No newline at end of file
diff --git a/changelog.d/17051.feature b/changelog.d/17051.feature
deleted file mode 100644
index 1c41f49f7d..0000000000
--- a/changelog.d/17051.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add preliminary support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account Suspension.
\ No newline at end of file
diff --git a/changelog.d/17071.doc b/changelog.d/17071.doc
deleted file mode 100644
index 28773414d8..0000000000
--- a/changelog.d/17071.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update event_cache_size and global_factor configurations documentation.
diff --git a/changelog.d/17073.doc b/changelog.d/17073.doc
deleted file mode 100644
index bc33887efe..0000000000
--- a/changelog.d/17073.doc
+++ /dev/null
@@ -1 +0,0 @@
-Remove broken sphinx docs.
diff --git a/changelog.d/17077.bugfix b/changelog.d/17077.bugfix
deleted file mode 100644
index 7d8ea37406..0000000000
--- a/changelog.d/17077.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fixes a bug introduced in v1.52.0 where the `destination` query parameter for the [Destination Rooms Admin API](https://element-hq.github.io/synapse/v1.105/usage/administration/admin_api/federation.html#destination-rooms) failed to actually filter returned rooms.
\ No newline at end of file
diff --git a/changelog.d/17078.bugfix b/changelog.d/17078.bugfix
deleted file mode 100644
index 286a772a1e..0000000000
--- a/changelog.d/17078.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-For MSC3266 room summaries, support queries at the recommended endpoint of `/_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`. The existing endpoint of `/_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` is deprecated.
diff --git a/changelog.d/17082.feature b/changelog.d/17082.feature
deleted file mode 100644
index e3990f44e7..0000000000
--- a/changelog.d/17082.feature
+++ /dev/null
@@ -1 +0,0 @@
-Declare support for [Matrix v1.10](https://matrix.org/blog/2024/03/22/matrix-v1.10-release/). Contributed by @clokep.
diff --git a/changelog.d/17084.doc b/changelog.d/17084.doc
deleted file mode 100644
index 8b97c81096..0000000000
--- a/changelog.d/17084.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add RuntimeDirectory to example matrix-synapse.service systemd unit.
diff --git a/changelog.d/17104.feature b/changelog.d/17104.feature
deleted file mode 100644
index 1c2355e155..0000000000
--- a/changelog.d/17104.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for MSC4115 (membership metadata on events).
diff --git a/changelog.d/17105.misc b/changelog.d/17105.misc
deleted file mode 100644
index d4443b89cf..0000000000
--- a/changelog.d/17105.misc
+++ /dev/null
@@ -1 +0,0 @@
-Enabled MSC3266 by default in the synapse complement image.
diff --git a/changelog.d/17114.doc b/changelog.d/17114.doc
deleted file mode 100644
index 042bd89618..0000000000
--- a/changelog.d/17114.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix various small typos throughout the docs.
diff --git a/changelog.d/17116.doc b/changelog.d/17116.doc
deleted file mode 100644
index 8712737c05..0000000000
--- a/changelog.d/17116.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update enable_notifs configuration documentation.
diff --git a/changelog.d/17120.bugfix b/changelog.d/17120.bugfix
deleted file mode 100644
index 85b34c2e98..0000000000
--- a/changelog.d/17120.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Apply user email & picture during OIDC registration if present & selected.
diff --git a/changelog.d/17121.bugfix b/changelog.d/17121.bugfix
deleted file mode 100644
index f160839aac..0000000000
--- a/changelog.d/17121.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Improve error message for cross signing reset with MSC3861 enabled.
diff --git a/changelog.d/17127.bugfix b/changelog.d/17127.bugfix
deleted file mode 100644
index 93c7314098..0000000000
--- a/changelog.d/17127.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug which meant that to-device messages received over federation could be dropped when the server was under load or networking problems caused problems between Synapse processes or the database.
diff --git a/changelog.d/17130.misc b/changelog.d/17130.misc
deleted file mode 100644
index ac20c90bde..0000000000
--- a/changelog.d/17130.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add optimisation to `StreamChangeCache.get_entities_changed(..)`.
diff --git a/changelog.d/17131.misc b/changelog.d/17131.misc
deleted file mode 100644
index fe1ecc7688..0000000000
--- a/changelog.d/17131.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update `tornado` Python dependency from 6.2 to 6.4.
\ No newline at end of file
diff --git a/changelog.d/17137.feature b/changelog.d/17137.feature
deleted file mode 100644
index 1c2355e155..0000000000
--- a/changelog.d/17137.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for MSC4115 (membership metadata on events).
diff --git a/changelog.d/17140.doc b/changelog.d/17140.doc
deleted file mode 100644
index 2d447ed928..0000000000
--- a/changelog.d/17140.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update the upgrade.md with the latest minimum supported Rust version of 1.66.0. Contributed by @jahway603.
diff --git a/changelog.d/17141.docker b/changelog.d/17141.docker
deleted file mode 100644
index 20c30746df..0000000000
--- a/changelog.d/17141.docker
+++ /dev/null
@@ -1 +0,0 @@
-Correct licensing metadata on Docker image.
diff --git a/changelog.d/17146.misc b/changelog.d/17146.misc
deleted file mode 100644
index dc7f9faa83..0000000000
--- a/changelog.d/17146.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump `pillow` from 10.2.0 to 10.3.0.
\ No newline at end of file
diff --git a/changelog.d/17148.doc b/changelog.d/17148.doc
deleted file mode 100644
index bc33887efe..0000000000
--- a/changelog.d/17148.doc
+++ /dev/null
@@ -1 +0,0 @@
-Remove broken sphinx docs.
diff --git a/changelog.d/17152.bugfix b/changelog.d/17152.bugfix
deleted file mode 100644
index 67aee91672..0000000000
--- a/changelog.d/17152.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where `StreamChangeCache` would not respect configured cache factors.
diff --git a/debian/changelog b/debian/changelog
index 06d682e722..06b61b5d1b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.107.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.107.0rc1.
+
+ -- Synapse Packaging team Tue, 07 May 2024 16:26:26 +0100
+
matrix-synapse-py3 (1.106.0) stable; urgency=medium
* New Synapse release 1.106.0.
diff --git a/pyproject.toml b/pyproject.toml
index 9ad674b603..48aac66f63 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.106.0"
+version = "1.107.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From 522a40c4dedfce543c73a045efe09509a2a3ddad Mon Sep 17 00:00:00 2001
From: Olivier 'reivilibre
Date: Tue, 7 May 2024 17:25:47 +0100
Subject: [PATCH 068/503] Tweak changelog
---
CHANGES.md | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index 2e91f15ca0..fec9581174 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -4,7 +4,7 @@
- Add preliminary support for [MSC3823: Account Suspension](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#17051](https://github.com/element-hq/synapse/issues/17051))
- Declare support for [Matrix v1.10](https://matrix.org/blog/2024/03/22/matrix-v1.10-release/). Contributed by @clokep. ([\#17082](https://github.com/element-hq/synapse/issues/17082))
-- Add support for MSC4115 (membership metadata on events). ([\#17104](https://github.com/element-hq/synapse/issues/17104), [\#17137](https://github.com/element-hq/synapse/issues/17137))
+- Add support for [MSC4115: membership metadata on events](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17104](https://github.com/element-hq/synapse/issues/17104), [\#17137](https://github.com/element-hq/synapse/issues/17137))
### Bugfixes
@@ -12,7 +12,7 @@
- Fixes a bug introduced in v1.52.0 where the `destination` query parameter for the [Destination Rooms Admin API](https://element-hq.github.io/synapse/v1.105/usage/administration/admin_api/federation.html#destination-rooms) failed to actually filter returned rooms. ([\#17077](https://github.com/element-hq/synapse/issues/17077))
- For MSC3266 room summaries, support queries at the recommended endpoint of `/_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`. The existing endpoint of `/_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` is deprecated. ([\#17078](https://github.com/element-hq/synapse/issues/17078))
- Apply user email & picture during OIDC registration if present & selected. ([\#17120](https://github.com/element-hq/synapse/issues/17120))
-- Improve error message for cross signing reset with MSC3861 enabled. ([\#17121](https://github.com/element-hq/synapse/issues/17121))
+- Improve error message for cross signing reset with [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) enabled. ([\#17121](https://github.com/element-hq/synapse/issues/17121))
- Fix a bug which meant that to-device messages received over federation could be dropped when the server was under load or networking problems caused problems between Synapse processes or the database. ([\#17127](https://github.com/element-hq/synapse/issues/17127))
- Fix bug where `StreamChangeCache` would not respect configured cache factors. ([\#17152](https://github.com/element-hq/synapse/issues/17152))
@@ -27,14 +27,12 @@
- Add RuntimeDirectory to example matrix-synapse.service systemd unit. ([\#17084](https://github.com/element-hq/synapse/issues/17084))
- Fix various small typos throughout the docs. ([\#17114](https://github.com/element-hq/synapse/issues/17114))
- Update enable_notifs configuration documentation. ([\#17116](https://github.com/element-hq/synapse/issues/17116))
-- Update the upgrade.md with the latest minimum supported Rust version of 1.66.0. Contributed by @jahway603. ([\#17140](https://github.com/element-hq/synapse/issues/17140))
+- Update the Upgrade Notes with the latest minimum supported Rust version of 1.66.0. Contributed by @jahway603. ([\#17140](https://github.com/element-hq/synapse/issues/17140))
### Internal Changes
-- Enable MSC3266 by default in the Synapse Complement image. ([\#17105](https://github.com/element-hq/synapse/issues/17105))
+- Enable [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) by default in the Synapse Complement image. ([\#17105](https://github.com/element-hq/synapse/issues/17105))
- Add optimisation to `StreamChangeCache.get_entities_changed(..)`. ([\#17130](https://github.com/element-hq/synapse/issues/17130))
-- Update `tornado` Python dependency from 6.2 to 6.4. ([\#17131](https://github.com/element-hq/synapse/issues/17131))
-- Bump `pillow` from 10.2.0 to 10.3.0. ([\#17146](https://github.com/element-hq/synapse/issues/17146))
@@ -45,6 +43,7 @@
* Bump jsonschema from 4.21.1 to 4.22.0. ([\#17157](https://github.com/element-hq/synapse/issues/17157))
* Bump lxml from 5.1.0 to 5.2.1. ([\#17158](https://github.com/element-hq/synapse/issues/17158))
* Bump phonenumbers from 8.13.29 to 8.13.35. ([\#17106](https://github.com/element-hq/synapse/issues/17106))
+- Bump pillow from 10.2.0 to 10.3.0. ([\#17146](https://github.com/element-hq/synapse/issues/17146))
* Bump pydantic from 2.6.4 to 2.7.0. ([\#17107](https://github.com/element-hq/synapse/issues/17107))
* Bump pydantic from 2.7.0 to 2.7.1. ([\#17160](https://github.com/element-hq/synapse/issues/17160))
* Bump pyicu from 2.12 to 2.13. ([\#17109](https://github.com/element-hq/synapse/issues/17109))
@@ -52,6 +51,7 @@
* Bump serde from 1.0.198 to 1.0.199. ([\#17132](https://github.com/element-hq/synapse/issues/17132))
* Bump serde from 1.0.199 to 1.0.200. ([\#17161](https://github.com/element-hq/synapse/issues/17161))
* Bump serde_json from 1.0.115 to 1.0.116. ([\#17112](https://github.com/element-hq/synapse/issues/17112))
+- Update `tornado` Python dependency from 6.2 to 6.4. ([\#17131](https://github.com/element-hq/synapse/issues/17131))
* Bump twisted from 23.10.0 to 24.3.0. ([\#17135](https://github.com/element-hq/synapse/issues/17135))
* Bump types-bleach from 6.1.0.1 to 6.1.0.20240331. ([\#17110](https://github.com/element-hq/synapse/issues/17110))
* Bump types-pillow from 10.2.0.20240415 to 10.2.0.20240423. ([\#17159](https://github.com/element-hq/synapse/issues/17159))
From 1b155362cac535ace799df786ace3c53e816d042 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jacob=20S=C3=A1nchez?=
Date: Tue, 7 May 2024 16:38:29 +0000
Subject: [PATCH 069/503] Add note about external_ids for User Admin API in
documentation (#17139)
---
changelog.d/17139.doc | 1 +
docs/admin_api/user_admin_api.md | 4 ++--
2 files changed, 3 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/17139.doc
diff --git a/changelog.d/17139.doc b/changelog.d/17139.doc
new file mode 100644
index 0000000000..a6d5408cac
--- /dev/null
+++ b/changelog.d/17139.doc
@@ -0,0 +1 @@
+Update User Admin API with note about prefixing OIDC external_id providers.
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 9736fe3021..2281385830 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -141,8 +141,8 @@ Body parameters:
provider for SSO (Single sign-on). More details are in the configuration manual under the
sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
- `auth_provider` - **string**, required. The unique, internal ID of the external identity provider.
- The same as `idp_id` from the homeserver configuration. Note that no error is raised if the
- provided value is not in the homeserver configuration.
+ The same as `idp_id` from the homeserver configuration. If using OIDC, this value should be prefixed
+ with `oidc-`. Note that no error is raised if the provided value is not in the homeserver configuration.
- `external_id` - **string**, required. An identifier for the user in the external identity provider.
When the user logs in to the identity provider, this must be the unique ID that they map to.
- `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
From 212f150208b4d94d597653282b64ff02680e3a28 Mon Sep 17 00:00:00 2001
From: Hugh Nimmo-Smith
Date: Wed, 8 May 2024 12:49:32 +0100
Subject: [PATCH 070/503] Add note about MSC3886 being closed (#17151)
---
changelog.d/17151.misc | 1 +
synapse/rest/client/rendezvous.py | 3 +++
2 files changed, 4 insertions(+)
create mode 100644 changelog.d/17151.misc
diff --git a/changelog.d/17151.misc b/changelog.d/17151.misc
new file mode 100644
index 0000000000..7b23c1e18e
--- /dev/null
+++ b/changelog.d/17151.misc
@@ -0,0 +1 @@
+Add note to reflect that [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) is closed but support will remain for some time.
diff --git a/synapse/rest/client/rendezvous.py b/synapse/rest/client/rendezvous.py
index 143f057651..27bf53314a 100644
--- a/synapse/rest/client/rendezvous.py
+++ b/synapse/rest/client/rendezvous.py
@@ -34,6 +34,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+# n.b [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) has now been closed.
+# However, we want to keep this implementation around for some time.
+# TODO: define an end-of-life date for this implementation.
class MSC3886RendezvousServlet(RestServlet):
"""
This is a placeholder implementation of [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
From 4d408cb4dd3d433c7cf8fbcfd88c4da87af7c70d Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 8 May 2024 13:05:10 +0100
Subject: [PATCH 071/503] Note preset behaviour in
`autocreate_auto_join_room_preset` docs (#17150)
---
changelog.d/17150.doc | 1 +
docs/usage/configuration/config_documentation.md | 5 +++++
2 files changed, 6 insertions(+)
create mode 100644 changelog.d/17150.doc
diff --git a/changelog.d/17150.doc b/changelog.d/17150.doc
new file mode 100644
index 0000000000..109f192818
--- /dev/null
+++ b/changelog.d/17150.doc
@@ -0,0 +1 @@
+Clarify the state of the created room when using the `autocreate_auto_join_room_preset` config option.
\ No newline at end of file
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 0c582d0387..2046bf4564 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -2591,6 +2591,11 @@ Possible values for this option are:
* "trusted_private_chat": an invitation is required to join this room and the invitee is
assigned a power level of 100 upon joining the room.
+Each preset will set up a room in the same manner as if it were provided as the `preset` parameter when
+calling the
+[`POST /_matrix/client/v3/createRoom`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom)
+Client-Server API endpoint.
+
If a value of "private_chat" or "trusted_private_chat" is used then
`auto_join_mxid_localpart` must also be configured.
From 414ddcd45722be8a4a3f70d4b52c3b81be79118f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 May 2024 14:30:06 +0100
Subject: [PATCH 072/503] Update PyO3 to 0.21 (#17162)
This version change requires a migration to a new API. See
https://pyo3.rs/v0.21.2/migration#from-020-to-021
This will fix the annoying warnings added when using the recent rust
nightly:
> warning: non-local `impl` definition, they should be avoided as they
go against expectation
---
Cargo.lock | 203 +++++++++++++++------------
changelog.d/17162.misc | 1 +
rust/Cargo.toml | 6 +-
rust/src/acl/mod.rs | 14 +-
rust/src/events/internal_metadata.rs | 22 +--
rust/src/events/mod.rs | 13 +-
rust/src/http.rs | 35 +++--
rust/src/lib.rs | 2 +-
rust/src/push/mod.rs | 22 +--
rust/src/rendezvous/mod.rs | 38 +++--
10 files changed, 203 insertions(+), 153 deletions(-)
create mode 100644 changelog.d/17162.misc
diff --git a/Cargo.lock b/Cargo.lock
index ceda2789e2..59d43ece2d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4,30 +4,30 @@ version = 3
[[package]]
name = "aho-corasick"
-version = "1.0.2"
+version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
-version = "1.0.82"
+version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
+checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3"
[[package]]
name = "arc-swap"
-version = "1.5.1"
+version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
+checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
[[package]]
name = "autocfg"
-version = "1.1.0"
+version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "base64"
@@ -37,9 +37,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "bitflags"
-version = "1.3.2"
+version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
[[package]]
name = "blake2"
@@ -52,9 +52,9 @@ dependencies = [
[[package]]
name = "block-buffer"
-version = "0.10.3"
+version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
dependencies = [
"generic-array",
]
@@ -115,9 +115,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "generic-array"
-version = "0.14.6"
+version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
"typenum",
"version_check",
@@ -125,9 +125,9 @@ dependencies = [
[[package]]
name = "getrandom"
-version = "0.2.14"
+version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c"
+checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"js-sys",
@@ -191,15 +191,15 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "indoc"
-version = "2.0.4"
+version = "2.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8"
+checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5"
[[package]]
name = "itoa"
-version = "1.0.4"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
[[package]]
name = "js-sys"
@@ -218,15 +218,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
-version = "0.2.153"
+version = "0.2.154"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
+checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
[[package]]
name = "lock_api"
-version = "0.4.9"
+version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
+checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
dependencies = [
"autocfg",
"scopeguard",
@@ -240,15 +240,15 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "memchr"
-version = "2.6.3"
+version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c"
+checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d"
[[package]]
name = "memoffset"
-version = "0.9.0"
+version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
+checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a"
dependencies = [
"autocfg",
]
@@ -261,15 +261,15 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
[[package]]
name = "once_cell"
-version = "1.15.0"
+version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
+checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "parking_lot"
-version = "0.12.1"
+version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
+checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb"
dependencies = [
"lock_api",
"parking_lot_core",
@@ -277,15 +277,15 @@ dependencies = [
[[package]]
name = "parking_lot_core"
-version = "0.9.3"
+version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
+checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
- "windows-sys",
+ "windows-targets",
]
[[package]]
@@ -302,18 +302,18 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "proc-macro2"
-version = "1.0.76"
+version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
+checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
-version = "0.20.3"
+version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53bdbb96d49157e65d45cc287af5f32ffadd5f4761438b527b055fb0d4bb8233"
+checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8"
dependencies = [
"anyhow",
"cfg-if",
@@ -330,9 +330,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
-version = "0.20.3"
+version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "deaa5745de3f5231ce10517a1f5dd97d53e5a2fd77aa6b5842292085831d48d7"
+checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50"
dependencies = [
"once_cell",
"target-lexicon",
@@ -340,9 +340,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
-version = "0.20.3"
+version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62b42531d03e08d4ef1f6e85a2ed422eb678b8cd62b762e53891c05faf0d4afa"
+checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403"
dependencies = [
"libc",
"pyo3-build-config",
@@ -350,9 +350,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
-version = "0.9.0"
+version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c10808ee7250403bedb24bc30c32493e93875fef7ba3e4292226fe924f398bd"
+checksum = "2af49834b8d2ecd555177e63b273b708dea75150abc6f5341d0a6e1a9623976c"
dependencies = [
"arc-swap",
"log",
@@ -361,9 +361,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
-version = "0.20.3"
+version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7305c720fa01b8055ec95e484a6eca7a83c841267f0dd5280f0c8b8551d2c158"
+checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -373,9 +373,9 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
-version = "0.20.3"
+version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c7e9b68bb9c3149c5b0cade5d07f953d6d125eb4337723c4ccdb665f1f96185"
+checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c"
dependencies = [
"heck",
"proc-macro2",
@@ -386,9 +386,9 @@ dependencies = [
[[package]]
name = "pythonize"
-version = "0.20.0"
+version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffd1c3ef39c725d63db5f9bc455461bafd80540cb7824c61afb823501921a850"
+checksum = "9d0664248812c38cc55a4ed07f88e4df516ce82604b93b1ffdc041aa77a6cb3c"
dependencies = [
"pyo3",
"serde",
@@ -396,9 +396,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.35"
+version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
+checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
dependencies = [
"proc-macro2",
]
@@ -435,9 +435,9 @@ dependencies = [
[[package]]
name = "redox_syscall"
-version = "0.2.16"
+version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
+checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e"
dependencies = [
"bitflags",
]
@@ -456,9 +456,9 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.4.4"
+version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a"
+checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea"
dependencies = [
"aho-corasick",
"memchr",
@@ -467,21 +467,21 @@ dependencies = [
[[package]]
name = "regex-syntax"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
+checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
[[package]]
name = "ryu"
-version = "1.0.11"
+version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
+checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
[[package]]
name = "scopeguard"
-version = "1.1.0"
+version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
@@ -516,9 +516,9 @@ dependencies = [
[[package]]
name = "sha1"
-version = "0.10.5"
+version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3"
+checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
dependencies = [
"cfg-if",
"cpufeatures",
@@ -538,21 +538,21 @@ dependencies = [
[[package]]
name = "smallvec"
-version = "1.10.0"
+version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
+checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "subtle"
-version = "2.4.1"
+version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
+checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "syn"
-version = "2.0.48"
+version = "2.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
+checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9"
dependencies = [
"proc-macro2",
"quote",
@@ -585,15 +585,15 @@ dependencies = [
[[package]]
name = "target-lexicon"
-version = "0.12.4"
+version = "0.12.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
+checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f"
[[package]]
name = "typenum"
-version = "1.15.0"
+version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
+checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "ulid"
@@ -608,9 +608,9 @@ dependencies = [
[[package]]
name = "unicode-ident"
-version = "1.0.5"
+version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "unindent"
@@ -695,44 +695,65 @@ dependencies = [
]
[[package]]
-name = "windows-sys"
-version = "0.36.1"
+name = "windows-targets"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
+checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
dependencies = [
+ "windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
+ "windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
-name = "windows_aarch64_msvc"
-version = "0.36.1"
+name = "windows_aarch64_gnullvm"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
+checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
[[package]]
name = "windows_i686_gnu"
-version = "0.36.1"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
+checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
[[package]]
name = "windows_i686_msvc"
-version = "0.36.1"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
+checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
[[package]]
name = "windows_x86_64_gnu"
-version = "0.36.1"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
+checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
[[package]]
name = "windows_x86_64_msvc"
-version = "0.36.1"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
+checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"
diff --git a/changelog.d/17162.misc b/changelog.d/17162.misc
new file mode 100644
index 0000000000..5cbc086e04
--- /dev/null
+++ b/changelog.d/17162.misc
@@ -0,0 +1 @@
+Update dependency PyO3 to 0.21.
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index d41a216d1c..026487275c 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -30,14 +30,14 @@ http = "1.1.0"
lazy_static = "1.4.0"
log = "0.4.17"
mime = "0.3.17"
-pyo3 = { version = "0.20.0", features = [
+pyo3 = { version = "0.21.0", features = [
"macros",
"anyhow",
"abi3",
"abi3-py38",
] }
-pyo3-log = "0.9.0"
-pythonize = "0.20.0"
+pyo3-log = "0.10.0"
+pythonize = "0.21.0"
regex = "1.6.0"
sha2 = "0.10.8"
serde = { version = "1.0.144", features = ["derive"] }
diff --git a/rust/src/acl/mod.rs b/rust/src/acl/mod.rs
index 286574fb49..982720ba90 100644
--- a/rust/src/acl/mod.rs
+++ b/rust/src/acl/mod.rs
@@ -25,21 +25,21 @@ use std::net::Ipv4Addr;
use std::str::FromStr;
use anyhow::Error;
-use pyo3::prelude::*;
+use pyo3::{prelude::*, pybacked::PyBackedStr};
use regex::Regex;
use crate::push::utils::{glob_to_regex, GlobMatchType};
/// Called when registering modules with python.
-pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
- let child_module = PyModule::new(py, "acl")?;
+pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
+ let child_module = PyModule::new_bound(py, "acl")?;
child_module.add_class::<ServerAclEvaluator>()?;
- m.add_submodule(child_module)?;
+ m.add_submodule(&child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import acl` work.
- py.import("sys")?
+ py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.acl", child_module)?;
@@ -59,8 +59,8 @@ impl ServerAclEvaluator {
#[new]
pub fn py_new(
allow_ip_literals: bool,
- allow: Vec<&str>,
- deny: Vec<&str>,
+ allow: Vec<PyBackedStr>,
+ deny: Vec<PyBackedStr>,
) -> Result<Self, Error> {
let allow = allow
.iter()
diff --git a/rust/src/events/internal_metadata.rs b/rust/src/events/internal_metadata.rs
index 53c7b1ba61..63774fbd54 100644
--- a/rust/src/events/internal_metadata.rs
+++ b/rust/src/events/internal_metadata.rs
@@ -38,9 +38,10 @@ use anyhow::Context;
use log::warn;
use pyo3::{
exceptions::PyAttributeError,
+ pybacked::PyBackedStr,
pyclass, pymethods,
- types::{PyDict, PyString},
- IntoPy, PyAny, PyObject, PyResult, Python,
+ types::{PyAnyMethods, PyDict, PyDictMethods, PyString},
+ Bound, IntoPy, PyAny, PyObject, PyResult, Python,
};
/// Definitions of the various fields of the internal metadata.
@@ -59,7 +60,7 @@ enum EventInternalMetadataData {
impl EventInternalMetadataData {
/// Convert the field to its name and python object.
- fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a PyString, PyObject) {
+ fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, PyObject) {
match self {
EventInternalMetadataData::OutOfBandMembership(o) => {
(pyo3::intern!(py, "out_of_band_membership"), o.into_py(py))
@@ -90,10 +91,13 @@ impl EventInternalMetadataData {
/// Converts from python key/values to the field.
///
/// Returns `None` if the key is a valid but unrecognized string.
- fn from_python_pair(key: &PyAny, value: &PyAny) -> PyResult<Option<Self>> {
- let key_str: &str = key.extract()?;
+ fn from_python_pair(
+ key: &Bound<'_, PyAny>,
+ value: &Bound<'_, PyAny>,
+ ) -> PyResult<Option<Self>> {
+ let key_str: PyBackedStr = key.extract()?;
- let e = match key_str {
+ let e = match &*key_str {
"out_of_band_membership" => EventInternalMetadataData::OutOfBandMembership(
value
.extract()
@@ -210,11 +214,11 @@ pub struct EventInternalMetadata {
#[pymethods]
impl EventInternalMetadata {
#[new]
- fn new(dict: &PyDict) -> PyResult<Self> {
+ fn new(dict: &Bound<'_, PyDict>) -> PyResult<Self> {
let mut data = Vec::with_capacity(dict.len());
for (key, value) in dict.iter() {
- match EventInternalMetadataData::from_python_pair(key, value) {
+ match EventInternalMetadataData::from_python_pair(&key, &value) {
Ok(Some(entry)) => data.push(entry),
Ok(None) => {}
Err(err) => {
@@ -240,7 +244,7 @@ impl EventInternalMetadata {
///
/// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here.
fn get_dict(&self, py: Python<'_>) -> PyResult<PyObject> {
- let dict = PyDict::new(py);
+ let dict = PyDict::new_bound(py);
for entry in &self.data {
let (key, value) = entry.to_python_pair(py);
diff --git a/rust/src/events/mod.rs b/rust/src/events/mod.rs
index ee857b3d72..a4ade1a178 100644
--- a/rust/src/events/mod.rs
+++ b/rust/src/events/mod.rs
@@ -20,20 +20,23 @@
//! Classes for representing Events.
-use pyo3::{types::PyModule, PyResult, Python};
+use pyo3::{
+ types::{PyAnyMethods, PyModule, PyModuleMethods},
+ Bound, PyResult, Python,
+};
mod internal_metadata;
/// Called when registering modules with python.
-pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
- let child_module = PyModule::new(py, "events")?;
+pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
+ let child_module = PyModule::new_bound(py, "events")?;
child_module.add_class::<internal_metadata::EventInternalMetadata>()?;
- m.add_submodule(child_module)?;
+ m.add_submodule(&child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import events` work.
- py.import("sys")?
+ py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.events", child_module)?;
diff --git a/rust/src/http.rs b/rust/src/http.rs
index 74098f4c8b..af052ab721 100644
--- a/rust/src/http.rs
+++ b/rust/src/http.rs
@@ -17,8 +17,8 @@ use headers::{Header, HeaderMapExt};
use http::{HeaderName, HeaderValue, Method, Request, Response, StatusCode, Uri};
use pyo3::{
exceptions::PyValueError,
- types::{PyBytes, PySequence, PyTuple},
- PyAny, PyResult,
+ types::{PyAnyMethods, PyBytes, PyBytesMethods, PySequence, PyTuple},
+ Bound, PyAny, PyResult,
};
use crate::errors::SynapseError;
@@ -28,10 +28,11 @@ use crate::errors::SynapseError;
/// # Errors
///
/// Returns an error if calling the `read` on the Python object failed
-fn read_io_body(body: &PyAny, chunk_size: usize) -> PyResult<Bytes> {
+fn read_io_body(body: &Bound<'_, PyAny>, chunk_size: usize) -> PyResult<Bytes> {
let mut buf = BytesMut::new();
loop {
- let bytes: &PyBytes = body.call_method1("read", (chunk_size,))?.downcast()?;
+ let bound = &body.call_method1("read", (chunk_size,))?;
+ let bytes: &Bound<'_, PyBytes> = bound.downcast()?;
if bytes.as_bytes().is_empty() {
return Ok(buf.into());
}
@@ -50,17 +51,19 @@ fn read_io_body(body: &PyAny, chunk_size: usize) -> PyResult {
/// # Errors
///
/// Returns an error if the Python object doesn't properly implement `IRequest`
-pub fn http_request_from_twisted(request: &PyAny) -> PyResult<Request<Bytes>> {
+pub fn http_request_from_twisted(request: &Bound<'_, PyAny>) -> PyResult<Request<Bytes>> {
let content = request.getattr("content")?;
- let body = read_io_body(content, 4096)?;
+ let body = read_io_body(&content, 4096)?;
let mut req = Request::new(body);
- let uri: &PyBytes = request.getattr("uri")?.downcast()?;
+ let bound = &request.getattr("uri")?;
+ let uri: &Bound<'_, PyBytes> = bound.downcast()?;
*req.uri_mut() =
Uri::try_from(uri.as_bytes()).map_err(|_| PyValueError::new_err("invalid uri"))?;
- let method: &PyBytes = request.getattr("method")?.downcast()?;
+ let bound = &request.getattr("method")?;
+ let method: &Bound<'_, PyBytes> = bound.downcast()?;
*req.method_mut() = Method::from_bytes(method.as_bytes())
.map_err(|_| PyValueError::new_err("invalid method"))?;
@@ -71,14 +74,17 @@ pub fn http_request_from_twisted(request: &PyAny) -> PyResult> {
for header in headers_iter {
let header = header?;
- let header: &PyTuple = header.downcast()?;
- let name: &PyBytes = header.get_item(0)?.downcast()?;
+ let header: &Bound<'_, PyTuple> = header.downcast()?;
+ let bound = &header.get_item(0)?;
+ let name: &Bound<'_, PyBytes> = bound.downcast()?;
let name = HeaderName::from_bytes(name.as_bytes())
.map_err(|_| PyValueError::new_err("invalid header name"))?;
- let values: &PySequence = header.get_item(1)?.downcast()?;
+ let bound = &header.get_item(1)?;
+ let values: &Bound<'_, PySequence> = bound.downcast()?;
for index in 0..values.len()? {
- let value: &PyBytes = values.get_item(index)?.downcast()?;
+ let bound = &values.get_item(index)?;
+ let value: &Bound<'_, PyBytes> = bound.downcast()?;
let value = HeaderValue::from_bytes(value.as_bytes())
.map_err(|_| PyValueError::new_err("invalid header value"))?;
req.headers_mut().append(name.clone(), value);
@@ -100,7 +106,10 @@ pub fn http_request_from_twisted(request: &PyAny) -> PyResult> {
/// # Errors
///
/// Returns an error if the Python object doesn't properly implement `IRequest`
-pub fn http_response_to_twisted<B>(request: &PyAny, response: Response<B>) -> PyResult<()>
+pub fn http_response_to_twisted<B>(
+ request: &Bound<'_, PyAny>,
+ response: Response<B>,
+) -> PyResult<()>
where
B: Buf,
{
diff --git a/rust/src/lib.rs b/rust/src/lib.rs
index 9bd1f17ad9..06477880b9 100644
--- a/rust/src/lib.rs
+++ b/rust/src/lib.rs
@@ -38,7 +38,7 @@ fn reset_logging_config() {
/// The entry point for defining the Python module.
#[pymodule]
-fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
+fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;
m.add_function(wrap_pyfunction!(reset_logging_config, m)?)?;
diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs
index 7dedbf10b6..2a452b69a3 100644
--- a/rust/src/push/mod.rs
+++ b/rust/src/push/mod.rs
@@ -66,7 +66,7 @@ use log::warn;
use pyo3::exceptions::PyTypeError;
use pyo3::prelude::*;
use pyo3::types::{PyBool, PyList, PyLong, PyString};
-use pythonize::{depythonize, pythonize};
+use pythonize::{depythonize_bound, pythonize};
use serde::de::Error as _;
use serde::{Deserialize, Serialize};
use serde_json::Value;
@@ -78,19 +78,19 @@ pub mod evaluator;
pub mod utils;
/// Called when registering modules with python.
-pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
- let child_module = PyModule::new(py, "push")?;
+pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
+ let child_module = PyModule::new_bound(py, "push")?;
child_module.add_class::<PushRule>()?;
child_module.add_class::<PushRules>()?;
child_module.add_class::<FilteredPushRules>()?;
child_module.add_class::<PushRuleEvaluator>()?;
child_module.add_function(wrap_pyfunction!(get_base_rule_ids, m)?)?;
- m.add_submodule(child_module)?;
+ m.add_submodule(&child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import push` work.
- py.import("sys")?
+ py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.push", child_module)?;
@@ -271,12 +271,12 @@ pub enum SimpleJsonValue {
impl<'source> FromPyObject<'source> for SimpleJsonValue {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
- if let Ok(s) = <&PyString>::try_from(ob) {
+ if let Ok(s) = ob.downcast::<PyString>() {
Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
// A bool *is* an int, ensure we try bool first.
- } else if let Ok(b) = <&PyBool>::try_from(ob) {
+ } else if let Ok(b) = ob.downcast::<PyBool>() {
Ok(SimpleJsonValue::Bool(b.extract()?))
- } else if let Ok(i) = <&PyLong>::try_from(ob) {
+ } else if let Ok(i) = ob.downcast::<PyLong>() {
Ok(SimpleJsonValue::Int(i.extract()?))
} else if ob.is_none() {
Ok(SimpleJsonValue::Null)
@@ -299,7 +299,7 @@ pub enum JsonValue {
impl<'source> FromPyObject<'source> for JsonValue {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
- if let Ok(l) = <&PyList>::try_from(ob) {
+ if let Ok(l) = ob.downcast::<PyList>() {
match l.iter().map(SimpleJsonValue::extract).collect() {
Ok(a) => Ok(JsonValue::Array(a)),
Err(e) => Err(PyTypeError::new_err(format!(
@@ -370,8 +370,8 @@ impl IntoPy for Condition {
}
impl<'source> FromPyObject<'source> for Condition {
- fn extract(ob: &'source PyAny) -> PyResult<Self> {
- Ok(depythonize(ob)?)
+ fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult {
+ Ok(depythonize_bound(ob.clone())?)
}
}
diff --git a/rust/src/rendezvous/mod.rs b/rust/src/rendezvous/mod.rs
index c0f5d8b600..f69f45490f 100644
--- a/rust/src/rendezvous/mod.rs
+++ b/rust/src/rendezvous/mod.rs
@@ -26,8 +26,10 @@ use headers::{
use http::{header::ETAG, HeaderMap, Response, StatusCode, Uri};
use mime::Mime;
use pyo3::{
- exceptions::PyValueError, pyclass, pymethods, types::PyModule, Py, PyAny, PyObject, PyResult,
- Python, ToPyObject,
+ exceptions::PyValueError,
+ pyclass, pymethods,
+ types::{PyAnyMethods, PyModule, PyModuleMethods},
+ Bound, Py, PyAny, PyObject, PyResult, Python, ToPyObject,
};
use ulid::Ulid;
@@ -109,7 +111,7 @@ impl RendezvousHandler {
#[pyo3(signature = (homeserver, /, capacity=100, max_content_length=4*1024, eviction_interval=60*1000, ttl=60*1000))]
fn new(
py: Python<'_>,
- homeserver: &PyAny,
+ homeserver: &Bound<'_, PyAny>,
capacity: usize,
max_content_length: u64,
eviction_interval: u64,
@@ -150,7 +152,7 @@ impl RendezvousHandler {
}
fn _evict(&mut self, py: Python<'_>) -> PyResult<()> {
- let clock = self.clock.as_ref(py);
+ let clock = self.clock.bind(py);
let now: u64 = clock.call_method0("time_msec")?.extract()?;
let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);
self.evict(now);
@@ -158,12 +160,12 @@ impl RendezvousHandler {
Ok(())
}
- fn handle_post(&mut self, py: Python<'_>, twisted_request: &PyAny) -> PyResult<()> {
+ fn handle_post(&mut self, py: Python<'_>, twisted_request: &Bound<'_, PyAny>) -> PyResult<()> {
let request = http_request_from_twisted(twisted_request)?;
let content_type = self.check_input_headers(request.headers())?;
- let clock = self.clock.as_ref(py);
+ let clock = self.clock.bind(py);
let now: u64 = clock.call_method0("time_msec")?.extract()?;
let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);
@@ -197,7 +199,12 @@ impl RendezvousHandler {
Ok(())
}
- fn handle_get(&mut self, py: Python<'_>, twisted_request: &PyAny, id: &str) -> PyResult<()> {
+ fn handle_get(
+ &mut self,
+ py: Python<'_>,
+ twisted_request: &Bound<'_, PyAny>,
+ id: &str,
+ ) -> PyResult<()> {
let request = http_request_from_twisted(twisted_request)?;
let if_none_match: Option<IfNoneMatch> = request.headers().typed_get_optional()?;
@@ -233,7 +240,12 @@ impl RendezvousHandler {
Ok(())
}
- fn handle_put(&mut self, py: Python<'_>, twisted_request: &PyAny, id: &str) -> PyResult<()> {
+ fn handle_put(
+ &mut self,
+ py: Python<'_>,
+ twisted_request: &Bound<'_, PyAny>,
+ id: &str,
+ ) -> PyResult<()> {
let request = http_request_from_twisted(twisted_request)?;
let content_type = self.check_input_headers(request.headers())?;
@@ -281,7 +293,7 @@ impl RendezvousHandler {
Ok(())
}
- fn handle_delete(&mut self, twisted_request: &PyAny, id: &str) -> PyResult<()> {
+ fn handle_delete(&mut self, twisted_request: &Bound<'_, PyAny>, id: &str) -> PyResult<()> {
let _request = http_request_from_twisted(twisted_request)?;
let id: Ulid = id.parse().map_err(|_| NotFoundError::new())?;
@@ -298,16 +310,16 @@ impl RendezvousHandler {
}
}
-pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
- let child_module = PyModule::new(py, "rendezvous")?;
+pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
+ let child_module = PyModule::new_bound(py, "rendezvous")?;
child_module.add_class::<RendezvousHandler>()?;
- m.add_submodule(child_module)?;
+ m.add_submodule(&child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import rendezvous` work.
- py.import("sys")?
+ py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.rendezvous", child_module)?;
From 34a8652366c60d785e5c0510749e615d0dec9ec0 Mon Sep 17 00:00:00 2001
From: Timshel
Date: Wed, 8 May 2024 15:56:16 +0200
Subject: [PATCH 073/503] Optional whitespace support in Authorization (#1350)
(#17145)
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/17145.bugfix | 1 +
synapse/federation/transport/server/_base.py | 6 +++++-
tests/federation/transport/server/test__base.py | 7 +++++++
3 files changed, 13 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17145.bugfix
diff --git a/changelog.d/17145.bugfix b/changelog.d/17145.bugfix
new file mode 100644
index 0000000000..5c1e600f4e
--- /dev/null
+++ b/changelog.d/17145.bugfix
@@ -0,0 +1 @@
+Add support for optional whitespace around the Federation API's `Authorization` header's parameter commas.
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index 23d1254127..db0f5076a9 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -180,7 +180,11 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str, Optional[str
"""
try:
header_str = header_bytes.decode("utf-8")
- params = re.split(" +", header_str)[1].split(",")
+ space_or_tab = "[ \t]"
+ params = re.split(
+ rf"{space_or_tab}*,{space_or_tab}*",
+ re.split(r"^X-Matrix +", header_str, maxsplit=1)[1],
+ )
param_dict: Dict[str, str] = {
k.lower(): v for k, v in [param.split("=", maxsplit=1) for param in params]
}
diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py
index 065e85957e..0e3b41ec4d 100644
--- a/tests/federation/transport/server/test__base.py
+++ b/tests/federation/transport/server/test__base.py
@@ -147,3 +147,10 @@ class BaseFederationAuthorizationTests(unittest.TestCase):
),
("foo", "ed25519:1", "sig", "bar"),
)
+ # test that "optional whitespace(s)" (space and tabulation) are allowed between comma-separated auth-param components
+ self.assertEqual(
+ _parse_auth_header(
+ b'X-Matrix origin=foo , key="ed25519:1", sig="sig", destination="bar", extra_field=ignored'
+ ),
+ ("foo", "ed25519:1", "sig", "bar"),
+ )
From 393429d6928ce5cef6ced647567b7ec899d69ecc Mon Sep 17 00:00:00 2001
From: devonh
Date: Wed, 8 May 2024 14:57:32 +0000
Subject: [PATCH 074/503] Fix undiscovered linter errors (#17166)
Linter errors are showing up in #17147 that are unrelated to that PR.
The errors do not currently show up on develop.
This PR aims to resolve the linter errors separately from #17147.
---
changelog.d/17166.misc | 1 +
synapse/handlers/profile.py | 14 +++++++++++---
2 files changed, 12 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/17166.misc
diff --git a/changelog.d/17166.misc b/changelog.d/17166.misc
new file mode 100644
index 0000000000..22c1f9922d
--- /dev/null
+++ b/changelog.d/17166.misc
@@ -0,0 +1 @@
+Fixes linter errors found in PR #17147.
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index e51e282a9f..6663d4b271 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -20,7 +20,7 @@
#
import logging
import random
-from typing import TYPE_CHECKING, Optional, Union
+from typing import TYPE_CHECKING, List, Optional, Union
from synapse.api.errors import (
AuthError,
@@ -64,8 +64,10 @@ class ProfileHandler:
self.user_directory_handler = hs.get_user_directory_handler()
self.request_ratelimiter = hs.get_request_ratelimiter()
- self.max_avatar_size = hs.config.server.max_avatar_size
- self.allowed_avatar_mimetypes = hs.config.server.allowed_avatar_mimetypes
+ self.max_avatar_size: Optional[int] = hs.config.server.max_avatar_size
+ self.allowed_avatar_mimetypes: Optional[List[str]] = (
+ hs.config.server.allowed_avatar_mimetypes
+ )
self._is_mine_server_name = hs.is_mine_server_name
@@ -337,6 +339,12 @@ class ProfileHandler:
return False
if self.max_avatar_size:
+ if media_info.media_length is None:
+ logger.warning(
+ "Forbidding avatar change to %s: unknown media size",
+ mxc,
+ )
+ return False
# Ensure avatar does not exceed max allowed avatar size
if media_info.media_length > self.max_avatar_size:
logger.warning(
From ef7e040e5439f99e72b397cb6065f3155b04b7b3 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 8 May 2024 17:02:09 +0100
Subject: [PATCH 075/503] Bump black from 24.2.0 to 24.4.2 (#17170)
---
changelog.d/17170.misc | 1 +
poetry.lock | 57 +++++++++++++++++-------------------------
2 files changed, 24 insertions(+), 34 deletions(-)
create mode 100644 changelog.d/17170.misc
diff --git a/changelog.d/17170.misc b/changelog.d/17170.misc
new file mode 100644
index 0000000000..698a59deaa
--- /dev/null
+++ b/changelog.d/17170.misc
@@ -0,0 +1 @@
+Bump black from 24.2.0 to 24.4.2.
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index 502f45f8bb..ea38c69eb3 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -107,33 +107,33 @@ typecheck = ["mypy"]
[[package]]
name = "black"
-version = "24.2.0"
+version = "24.4.2"
description = "The uncompromising code formatter."
optional = false
python-versions = ">=3.8"
files = [
- {file = "black-24.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29"},
- {file = "black-24.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430"},
- {file = "black-24.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f"},
- {file = "black-24.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a"},
- {file = "black-24.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd"},
- {file = "black-24.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2"},
- {file = "black-24.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92"},
- {file = "black-24.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23"},
- {file = "black-24.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b"},
- {file = "black-24.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9"},
- {file = "black-24.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693"},
- {file = "black-24.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982"},
- {file = "black-24.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4"},
- {file = "black-24.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218"},
- {file = "black-24.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0"},
- {file = "black-24.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d"},
- {file = "black-24.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8"},
- {file = "black-24.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8"},
- {file = "black-24.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540"},
- {file = "black-24.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31"},
- {file = "black-24.2.0-py3-none-any.whl", hash = "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6"},
- {file = "black-24.2.0.tar.gz", hash = "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894"},
+ {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"},
+ {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"},
+ {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"},
+ {file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"},
+ {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"},
+ {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"},
+ {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"},
+ {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"},
+ {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"},
+ {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"},
+ {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"},
+ {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"},
+ {file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"},
+ {file = "black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"},
+ {file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"},
+ {file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"},
+ {file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"},
+ {file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"},
+ {file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"},
+ {file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"},
+ {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"},
+ {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"},
]
[package.dependencies]
@@ -2084,7 +2084,6 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
- {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -2092,16 +2091,8 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
- {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
- {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
- {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
- {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@@ -2118,7 +2109,6 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
- {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -2126,7 +2116,6 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
- {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
From 4cf4a8281b822017527969d7b5077dfc5f7f0b39 Mon Sep 17 00:00:00 2001
From: ll-SKY-ll <140309860+ll-SKY-ll@users.noreply.github.com>
Date: Thu, 9 May 2024 12:50:05 +0200
Subject: [PATCH 076/503] Update docs to bump libjemalloc version on latest
debian; correct "push_rules" stream name (#17171)
---
changelog.d/17171.doc | 1 +
docs/usage/administration/admin_faq.md | 4 ++--
docs/workers.md | 2 +-
3 files changed, 4 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/17171.doc
diff --git a/changelog.d/17171.doc b/changelog.d/17171.doc
new file mode 100644
index 0000000000..ef9f14ac7e
--- /dev/null
+++ b/changelog.d/17171.doc
@@ -0,0 +1 @@
+Update the Admin FAQ with the current libjemalloc version for latest Debian stable. Additionally update the name of the "push_rules" stream in the Workers documentation.
diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md
index 0d98f73fb1..a1184d0375 100644
--- a/docs/usage/administration/admin_faq.md
+++ b/docs/usage/administration/admin_faq.md
@@ -250,10 +250,10 @@ Using [libjemalloc](https://jemalloc.net) can also yield a significant
improvement in overall memory use, and especially in terms of giving back
RAM to the OS. To use it, the library must simply be put in the
LD_PRELOAD environment variable when launching Synapse. On Debian, this
-can be done by installing the `libjemalloc1` package and adding this
+can be done by installing the `libjemalloc2` package and adding this
line to `/etc/default/matrix-synapse`:
- LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
+ LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
This made a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.
diff --git a/docs/workers.md b/docs/workers.md
index 82f4bfc1d1..6cb4416bfc 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -535,7 +535,7 @@ the stream writer for the `presence` stream:
##### The `push_rules` stream
The following endpoints should be routed directly to the worker configured as
-the stream writer for the `push` stream:
+the stream writer for the `push_rules` stream:
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
From a2e6f43f11d56d014f0c09458d42bc9669459dd4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 13 May 2024 12:12:26 +0100
Subject: [PATCH 077/503] Fix bug with creating public rooms on workers
(#17177)
If room publication is disabled then creating public rooms on workers
would not work.
Introduced in #16811.
---
changelog.d/17177.bugfix | 1 +
synapse/storage/databases/main/room.py | 116 +++++++++++--------------
2 files changed, 52 insertions(+), 65 deletions(-)
create mode 100644 changelog.d/17177.bugfix
diff --git a/changelog.d/17177.bugfix b/changelog.d/17177.bugfix
new file mode 100644
index 0000000000..db2334d690
--- /dev/null
+++ b/changelog.d/17177.bugfix
@@ -0,0 +1 @@
+Fix bug where disabling room publication prevented public rooms being created on workers.
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 81c7bf3712..82bff9c9b6 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -21,13 +21,11 @@
#
import logging
-from abc import abstractmethod
from enum import Enum
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
- Awaitable,
Collection,
Dict,
List,
@@ -1935,13 +1933,57 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
return len(rooms)
- @abstractmethod
- def set_room_is_public(self, room_id: str, is_public: bool) -> Awaitable[None]:
- # this will need to be implemented if a background update is performed with
- # existing (tombstoned, public) rooms in the database.
- #
- # It's overridden by RoomStore for the synapse master.
- raise NotImplementedError()
+ async def set_room_is_public(self, room_id: str, is_public: bool) -> None:
+ await self.db_pool.simple_update_one(
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={"is_public": is_public},
+ desc="set_room_is_public",
+ )
+
+ async def set_room_is_public_appservice(
+ self, room_id: str, appservice_id: str, network_id: str, is_public: bool
+ ) -> None:
+ """Edit the appservice/network specific public room list.
+
+ Each appservice can have a number of published room lists associated
+ with them, keyed off of an appservice defined `network_id`, which
+ basically represents a single instance of a bridge to a third party
+ network.
+
+ Args:
+ room_id
+ appservice_id
+ network_id
+ is_public: Whether to publish or unpublish the room from the list.
+ """
+
+ if is_public:
+ await self.db_pool.simple_upsert(
+ table="appservice_room_list",
+ keyvalues={
+ "appservice_id": appservice_id,
+ "network_id": network_id,
+ "room_id": room_id,
+ },
+ values={},
+ insertion_values={
+ "appservice_id": appservice_id,
+ "network_id": network_id,
+ "room_id": room_id,
+ },
+ desc="set_room_is_public_appservice_true",
+ )
+ else:
+ await self.db_pool.simple_delete(
+ table="appservice_room_list",
+ keyvalues={
+ "appservice_id": appservice_id,
+ "network_id": network_id,
+ "room_id": room_id,
+ },
+ desc="set_room_is_public_appservice_false",
+ )
async def has_auth_chain_index(self, room_id: str) -> bool:
"""Check if the room has (or can have) a chain cover index.
@@ -2349,62 +2391,6 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
},
)
- async def set_room_is_public(self, room_id: str, is_public: bool) -> None:
- await self.db_pool.simple_update_one(
- table="rooms",
- keyvalues={"room_id": room_id},
- updatevalues={"is_public": is_public},
- desc="set_room_is_public",
- )
-
- self.hs.get_notifier().on_new_replication_data()
-
- async def set_room_is_public_appservice(
- self, room_id: str, appservice_id: str, network_id: str, is_public: bool
- ) -> None:
- """Edit the appservice/network specific public room list.
-
- Each appservice can have a number of published room lists associated
- with them, keyed off of an appservice defined `network_id`, which
- basically represents a single instance of a bridge to a third party
- network.
-
- Args:
- room_id
- appservice_id
- network_id
- is_public: Whether to publish or unpublish the room from the list.
- """
-
- if is_public:
- await self.db_pool.simple_upsert(
- table="appservice_room_list",
- keyvalues={
- "appservice_id": appservice_id,
- "network_id": network_id,
- "room_id": room_id,
- },
- values={},
- insertion_values={
- "appservice_id": appservice_id,
- "network_id": network_id,
- "room_id": room_id,
- },
- desc="set_room_is_public_appservice_true",
- )
- else:
- await self.db_pool.simple_delete(
- table="appservice_room_list",
- keyvalues={
- "appservice_id": appservice_id,
- "network_id": network_id,
- "room_id": room_id,
- },
- desc="set_room_is_public_appservice_false",
- )
-
- self.hs.get_notifier().on_new_replication_data()
-
async def add_event_report(
self,
room_id: str,
From 59ac5413109751962517257f4b92cf862affe882 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 13 May 2024 13:11:07 +0100
Subject: [PATCH 078/503] Actually fix public rooms (#17184)
See #17177.
I'm an idiot and moved them to the wrong store :facepalm:
---
changelog.d/17184.bugfix | 1 +
synapse/storage/databases/main/room.py | 108 ++++++++++++-------------
2 files changed, 55 insertions(+), 54 deletions(-)
create mode 100644 changelog.d/17184.bugfix
diff --git a/changelog.d/17184.bugfix b/changelog.d/17184.bugfix
new file mode 100644
index 0000000000..db2334d690
--- /dev/null
+++ b/changelog.d/17184.bugfix
@@ -0,0 +1 @@
+Fix bug where disabling room publication prevented public rooms being created on workers.
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 82bff9c9b6..8205109548 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -51,7 +51,7 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.config.homeserver import HomeServerConfig
from synapse.events import EventBase
from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream
-from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
+from synapse.storage._base import db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
@@ -1682,6 +1682,58 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
return True
+ async def set_room_is_public(self, room_id: str, is_public: bool) -> None:
+ await self.db_pool.simple_update_one(
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={"is_public": is_public},
+ desc="set_room_is_public",
+ )
+
+ async def set_room_is_public_appservice(
+ self, room_id: str, appservice_id: str, network_id: str, is_public: bool
+ ) -> None:
+ """Edit the appservice/network specific public room list.
+
+ Each appservice can have a number of published room lists associated
+ with them, keyed off of an appservice defined `network_id`, which
+ basically represents a single instance of a bridge to a third party
+ network.
+
+ Args:
+ room_id
+ appservice_id
+ network_id
+ is_public: Whether to publish or unpublish the room from the list.
+ """
+
+ if is_public:
+ await self.db_pool.simple_upsert(
+ table="appservice_room_list",
+ keyvalues={
+ "appservice_id": appservice_id,
+ "network_id": network_id,
+ "room_id": room_id,
+ },
+ values={},
+ insertion_values={
+ "appservice_id": appservice_id,
+ "network_id": network_id,
+ "room_id": room_id,
+ },
+ desc="set_room_is_public_appservice_true",
+ )
+ else:
+ await self.db_pool.simple_delete(
+ table="appservice_room_list",
+ keyvalues={
+ "appservice_id": appservice_id,
+ "network_id": network_id,
+ "room_id": room_id,
+ },
+ desc="set_room_is_public_appservice_false",
+ )
+
class _BackgroundUpdates:
REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
@@ -1700,7 +1752,7 @@ _REPLACE_ROOM_DEPTH_SQL_COMMANDS = (
)
-class RoomBackgroundUpdateStore(SQLBaseStore):
+class RoomBackgroundUpdateStore(RoomWorkerStore):
def __init__(
self,
database: DatabasePool,
@@ -1933,58 +1985,6 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
return len(rooms)
- async def set_room_is_public(self, room_id: str, is_public: bool) -> None:
- await self.db_pool.simple_update_one(
- table="rooms",
- keyvalues={"room_id": room_id},
- updatevalues={"is_public": is_public},
- desc="set_room_is_public",
- )
-
- async def set_room_is_public_appservice(
- self, room_id: str, appservice_id: str, network_id: str, is_public: bool
- ) -> None:
- """Edit the appservice/network specific public room list.
-
- Each appservice can have a number of published room lists associated
- with them, keyed off of an appservice defined `network_id`, which
- basically represents a single instance of a bridge to a third party
- network.
-
- Args:
- room_id
- appservice_id
- network_id
- is_public: Whether to publish or unpublish the room from the list.
- """
-
- if is_public:
- await self.db_pool.simple_upsert(
- table="appservice_room_list",
- keyvalues={
- "appservice_id": appservice_id,
- "network_id": network_id,
- "room_id": room_id,
- },
- values={},
- insertion_values={
- "appservice_id": appservice_id,
- "network_id": network_id,
- "room_id": room_id,
- },
- desc="set_room_is_public_appservice_true",
- )
- else:
- await self.db_pool.simple_delete(
- table="appservice_room_list",
- keyvalues={
- "appservice_id": appservice_id,
- "network_id": network_id,
- "room_id": room_id,
- },
- desc="set_room_is_public_appservice_false",
- )
-
async def has_auth_chain_index(self, room_id: str) -> bool:
"""Check if the room has (or can have) a chain cover index.
From 038b9ec59a5d2080372aa4b7684e7b6580a79bd8 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 13 May 2024 20:38:45 +0100
Subject: [PATCH 079/503] A federation whitelist query endpoint extension
(#16848)
This is to allow clients to query the configured federation whitelist.
Disabled by default.
---------
Co-authored-by: Devon Hudson
Co-authored-by: devonh
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/16848.feature | 1 +
.../configuration/config_documentation.md | 25 ++++
synapse/config/federation.py | 4 +
synapse/rest/synapse/client/__init__.py | 4 +
.../synapse/client/federation_whitelist.py | 66 ++++++++++
tests/rest/synapse/__init__.py | 12 ++
tests/rest/synapse/client/__init__.py | 12 ++
.../client/test_federation_whitelist.py | 119 ++++++++++++++++++
8 files changed, 243 insertions(+)
create mode 100644 changelog.d/16848.feature
create mode 100644 synapse/rest/synapse/client/federation_whitelist.py
create mode 100644 tests/rest/synapse/__init__.py
create mode 100644 tests/rest/synapse/client/__init__.py
create mode 100644 tests/rest/synapse/client/test_federation_whitelist.py
diff --git a/changelog.d/16848.feature b/changelog.d/16848.feature
new file mode 100644
index 0000000000..1a72bad013
--- /dev/null
+++ b/changelog.d/16848.feature
@@ -0,0 +1 @@
+Add a feature that allows clients to query the configured federation whitelist. Disabled by default.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 2046bf4564..2257318bcc 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -1232,6 +1232,31 @@ federation_domain_whitelist:
- syd.example.com
```
---
+### `federation_whitelist_endpoint_enabled`
+
+Enables an endpoint for fetching the federation whitelist config.
+
+The request method and path is `GET /_synapse/client/config/federation_whitelist`, and the
+response format is:
+
+```json
+{
+ "whitelist_enabled": true, // Whether the federation whitelist is being enforced
+ "whitelist": [ // Which server names are allowed by the whitelist
+ "example.com"
+ ]
+}
+```
+
+If `whitelist_enabled` is `false` then the server is permitted to federate with all others.
+
+The endpoint requires authentication.
+
+Example configuration:
+```yaml
+federation_whitelist_endpoint_enabled: true
+```
+---
### `federation_metrics_domains`
Report prometheus metrics on the age of PDUs being sent to and received from
diff --git a/synapse/config/federation.py b/synapse/config/federation.py
index 9032effac3..cf29fa2562 100644
--- a/synapse/config/federation.py
+++ b/synapse/config/federation.py
@@ -42,6 +42,10 @@ class FederationConfig(Config):
for domain in federation_domain_whitelist:
self.federation_domain_whitelist[domain] = True
+ self.federation_whitelist_endpoint_enabled = config.get(
+ "federation_whitelist_endpoint_enabled", False
+ )
+
federation_metrics_domains = config.get("federation_metrics_domains") or []
validate_config(
_METRICS_FOR_DOMAINS_SCHEMA,
diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py
index ba6576d4db..7b5bfc0421 100644
--- a/synapse/rest/synapse/client/__init__.py
+++ b/synapse/rest/synapse/client/__init__.py
@@ -23,6 +23,7 @@ from typing import TYPE_CHECKING, Mapping
from twisted.web.resource import Resource
+from synapse.rest.synapse.client.federation_whitelist import FederationWhitelistResource
from synapse.rest.synapse.client.new_user_consent import NewUserConsentResource
from synapse.rest.synapse.client.pick_idp import PickIdpResource
from synapse.rest.synapse.client.pick_username import pick_username_resource
@@ -77,6 +78,9 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc
# To be removed in Synapse v1.32.0.
resources["/_matrix/saml2"] = res
+ if hs.config.federation.federation_whitelist_endpoint_enabled:
+ resources[FederationWhitelistResource.PATH] = FederationWhitelistResource(hs)
+
if hs.config.experimental.msc4108_enabled:
resources["/_synapse/client/rendezvous"] = MSC4108RendezvousSessionResource(hs)
diff --git a/synapse/rest/synapse/client/federation_whitelist.py b/synapse/rest/synapse/client/federation_whitelist.py
new file mode 100644
index 0000000000..2b8f0320e0
--- /dev/null
+++ b/synapse/rest/synapse/client/federation_whitelist.py
@@ -0,0 +1,66 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+
+import logging
+from typing import TYPE_CHECKING, Tuple
+
+from synapse.http.server import DirectServeJsonResource
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class FederationWhitelistResource(DirectServeJsonResource):
+ """Custom endpoint (disabled by default) to fetch the federation whitelist
+ config.
+
+ Only enabled if `federation_whitelist_endpoint_enabled` feature is enabled.
+
+ Response format:
+
+ {
+ "whitelist_enabled": true, // Whether the federation whitelist is being enforced
+ "whitelist": [ // Which server names are allowed by the whitelist
+ "example.com"
+ ]
+ }
+ """
+
+ PATH = "/_synapse/client/v1/config/federation_whitelist"
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+
+ self._federation_whitelist = hs.config.federation.federation_domain_whitelist
+
+ self._auth = hs.get_auth()
+
+ async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ await self._auth.get_user_by_req(request)
+
+ whitelist = []
+ if self._federation_whitelist:
+ # federation_whitelist is actually a dict, not a list
+ whitelist = list(self._federation_whitelist)
+
+ return_dict: JsonDict = {
+ "whitelist_enabled": self._federation_whitelist is not None,
+ "whitelist": whitelist,
+ }
+
+ return 200, return_dict
diff --git a/tests/rest/synapse/__init__.py b/tests/rest/synapse/__init__.py
new file mode 100644
index 0000000000..e5138f67e1
--- /dev/null
+++ b/tests/rest/synapse/__init__.py
@@ -0,0 +1,12 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
diff --git a/tests/rest/synapse/client/__init__.py b/tests/rest/synapse/client/__init__.py
new file mode 100644
index 0000000000..e5138f67e1
--- /dev/null
+++ b/tests/rest/synapse/client/__init__.py
@@ -0,0 +1,12 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
diff --git a/tests/rest/synapse/client/test_federation_whitelist.py b/tests/rest/synapse/client/test_federation_whitelist.py
new file mode 100644
index 0000000000..f0067a8f2b
--- /dev/null
+++ b/tests/rest/synapse/client/test_federation_whitelist.py
@@ -0,0 +1,119 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+
+from typing import Dict
+
+from twisted.web.resource import Resource
+
+from synapse.rest import admin
+from synapse.rest.client import login
+from synapse.rest.synapse.client import build_synapse_client_resource_tree
+
+from tests import unittest
+
+
+class FederationWhitelistTests(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ ]
+
+ def create_resource_dict(self) -> Dict[str, Resource]:
+ base = super().create_resource_dict()
+ base.update(build_synapse_client_resource_tree(self.hs))
+ return base
+
+ def test_default(self) -> None:
+ "If the config option is not enabled, the endpoint should 404"
+ channel = self.make_request(
+ "GET", "/_synapse/client/v1/config/federation_whitelist", shorthand=False
+ )
+
+ self.assertEqual(channel.code, 404)
+
+ @unittest.override_config({"federation_whitelist_endpoint_enabled": True})
+ def test_no_auth(self) -> None:
+ "Endpoint requires auth when enabled"
+
+ channel = self.make_request(
+ "GET", "/_synapse/client/v1/config/federation_whitelist", shorthand=False
+ )
+
+ self.assertEqual(channel.code, 401)
+
+ @unittest.override_config({"federation_whitelist_endpoint_enabled": True})
+ def test_no_whitelist(self) -> None:
+ "Test when there is no whitelist configured"
+
+ self.register_user("user", "password")
+ tok = self.login("user", "password")
+
+ channel = self.make_request(
+ "GET",
+ "/_synapse/client/v1/config/federation_whitelist",
+ shorthand=False,
+ access_token=tok,
+ )
+
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"whitelist_enabled": False, "whitelist": []}
+ )
+
+ @unittest.override_config(
+ {
+ "federation_whitelist_endpoint_enabled": True,
+ "federation_domain_whitelist": ["example.com"],
+ }
+ )
+ def test_whitelist(self) -> None:
+ "Test when there is a whitelist configured"
+
+ self.register_user("user", "password")
+ tok = self.login("user", "password")
+
+ channel = self.make_request(
+ "GET",
+ "/_synapse/client/v1/config/federation_whitelist",
+ shorthand=False,
+ access_token=tok,
+ )
+
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"whitelist_enabled": True, "whitelist": ["example.com"]}
+ )
+
+ @unittest.override_config(
+ {
+ "federation_whitelist_endpoint_enabled": True,
+ "federation_domain_whitelist": ["example.com", "example.com"],
+ }
+ )
+ def test_whitelist_no_duplicates(self) -> None:
+ "Test when there is a whitelist configured with duplicates, no duplicates are returned"
+
+ self.register_user("user", "password")
+ tok = self.login("user", "password")
+
+ channel = self.make_request(
+ "GET",
+ "/_synapse/client/v1/config/federation_whitelist",
+ shorthand=False,
+ access_token=tok,
+ )
+
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"whitelist_enabled": True, "whitelist": ["example.com"]}
+ )
From 2b438df9b36a811f6b3f7a94f965fc7aa8591449 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 14 May 2024 09:31:25 +0100
Subject: [PATCH 080/503] Bump sentry-sdk from 1.40.3 to 2.1.1 (#17178)
---
poetry.lock | 26 +++++++++++++++++++++-----
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index ea38c69eb3..f200ad0c55 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2084,6 +2084,7 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -2091,8 +2092,16 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
+ {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@@ -2109,6 +2118,7 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -2116,6 +2126,7 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@@ -2387,26 +2398,28 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "1.40.3"
+version = "2.1.1"
description = "Python client for Sentry (https://sentry.io)"
optional = true
-python-versions = "*"
+python-versions = ">=3.6"
files = [
- {file = "sentry-sdk-1.40.3.tar.gz", hash = "sha256:3c2b027979bb400cd65a47970e64f8cef8acda86b288a27f42a98692505086cd"},
- {file = "sentry_sdk-1.40.3-py2.py3-none-any.whl", hash = "sha256:73383f28311ae55602bb6cc3b013830811135ba5521e41333a6e68f269413502"},
+ {file = "sentry_sdk-2.1.1-py2.py3-none-any.whl", hash = "sha256:99aeb78fb76771513bd3b2829d12613130152620768d00cd3e45ac00cb17950f"},
+ {file = "sentry_sdk-2.1.1.tar.gz", hash = "sha256:95d8c0bb41c8b0bc37ab202c2c4a295bb84398ee05f4cdce55051cd75b926ec1"},
]
[package.dependencies]
certifi = "*"
-urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
+urllib3 = ">=1.26.11"
[package.extras]
aiohttp = ["aiohttp (>=3.5)"]
+anthropic = ["anthropic (>=0.16)"]
arq = ["arq (>=0.23)"]
asyncpg = ["asyncpg (>=0.23)"]
beam = ["apache-beam (>=2.12)"]
bottle = ["bottle (>=0.12.13)"]
celery = ["celery (>=3)"]
+celery-redbeat = ["celery-redbeat (>=2)"]
chalice = ["chalice (>=1.16.0)"]
clickhouse-driver = ["clickhouse-driver (>=0.2.0)"]
django = ["django (>=1.8)"]
@@ -2416,7 +2429,10 @@ flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
grpcio = ["grpcio (>=1.21.1)"]
httpx = ["httpx (>=0.16.0)"]
huey = ["huey (>=2)"]
+huggingface-hub = ["huggingface-hub (>=0.22)"]
+langchain = ["langchain (>=0.0.210)"]
loguru = ["loguru (>=0.5)"]
+openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"]
opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"]
pure-eval = ["asttokens", "executing", "pure-eval"]
From aa6345cb3b96730b6972d468d7f58e40324e5d60 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 14 May 2024 09:31:35 +0100
Subject: [PATCH 081/503] Bump immutabledict from 4.1.0 to 4.2.0 (#17179)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index f200ad0c55..19aab8d23b 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -782,13 +782,13 @@ files = [
[[package]]
name = "immutabledict"
-version = "4.1.0"
+version = "4.2.0"
description = "Immutable wrapper around dictionaries (a fork of frozendict)"
optional = false
python-versions = ">=3.8,<4.0"
files = [
- {file = "immutabledict-4.1.0-py3-none-any.whl", hash = "sha256:c176e99aa90aedb81716ad35218bb2055d049b549626db4523dbe011cf2f32ac"},
- {file = "immutabledict-4.1.0.tar.gz", hash = "sha256:93d100ccd2cd09a1fd3f136b9328c6e59529ba341de8bb499437f6819159fe8a"},
+ {file = "immutabledict-4.2.0-py3-none-any.whl", hash = "sha256:d728b2c2410d698d95e6200237feb50a695584d20289ad3379a439aa3d90baba"},
+ {file = "immutabledict-4.2.0.tar.gz", hash = "sha256:e003fd81aad2377a5a758bf7e1086cf3b70b63e9a5cc2f46bce8d0a2b4727c5f"},
]
[[package]]
From 03a342b049c315458913eac8548b196fa61eabdd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 14 May 2024 09:31:46 +0100
Subject: [PATCH 082/503] Bump cryptography from 42.0.5 to 42.0.7 (#17180)
---
poetry.lock | 66 ++++++++++++++++++++++++++---------------------------
1 file changed, 33 insertions(+), 33 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 19aab8d23b..7922309d27 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -403,43 +403,43 @@ files = [
[[package]]
name = "cryptography"
-version = "42.0.5"
+version = "42.0.7"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = ">=3.7"
files = [
- {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16"},
- {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec"},
- {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb"},
- {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4"},
- {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278"},
- {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7"},
- {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee"},
- {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1"},
- {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d"},
- {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da"},
- {file = "cryptography-42.0.5-cp37-abi3-win32.whl", hash = "sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74"},
- {file = "cryptography-42.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940"},
- {file = "cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8"},
- {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1"},
- {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e"},
- {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc"},
- {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a"},
- {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7"},
- {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922"},
- {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc"},
- {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30"},
- {file = "cryptography-42.0.5-cp39-abi3-win32.whl", hash = "sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413"},
- {file = "cryptography-42.0.5-cp39-abi3-win_amd64.whl", hash = "sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400"},
- {file = "cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8"},
- {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2"},
- {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c"},
- {file = "cryptography-42.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576"},
- {file = "cryptography-42.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6"},
- {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e"},
- {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac"},
- {file = "cryptography-42.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd"},
- {file = "cryptography-42.0.5.tar.gz", hash = "sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1"},
+ {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a987f840718078212fdf4504d0fd4c6effe34a7e4740378e59d47696e8dfb477"},
+ {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd13b5e9b543532453de08bcdc3cc7cebec6f9883e886fd20a92f26940fd3e7a"},
+ {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a79165431551042cc9d1d90e6145d5d0d3ab0f2d66326c201d9b0e7f5bf43604"},
+ {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a47787a5e3649008a1102d3df55424e86606c9bae6fb77ac59afe06d234605f8"},
+ {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:02c0eee2d7133bdbbc5e24441258d5d2244beb31da5ed19fbb80315f4bbbff55"},
+ {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5e44507bf8d14b36b8389b226665d597bc0f18ea035d75b4e53c7b1ea84583cc"},
+ {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7f8b25fa616d8b846aef64b15c606bb0828dbc35faf90566eb139aa9cff67af2"},
+ {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:93a3209f6bb2b33e725ed08ee0991b92976dfdcf4e8b38646540674fc7508e13"},
+ {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e6b8f1881dac458c34778d0a424ae5769de30544fc678eac51c1c8bb2183e9da"},
+ {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3de9a45d3b2b7d8088c3fbf1ed4395dfeff79d07842217b38df14ef09ce1d8d7"},
+ {file = "cryptography-42.0.7-cp37-abi3-win32.whl", hash = "sha256:789caea816c6704f63f6241a519bfa347f72fbd67ba28d04636b7c6b7da94b0b"},
+ {file = "cryptography-42.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:8cb8ce7c3347fcf9446f201dc30e2d5a3c898d009126010cbd1f443f28b52678"},
+ {file = "cryptography-42.0.7-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:a3a5ac8b56fe37f3125e5b72b61dcde43283e5370827f5233893d461b7360cd4"},
+ {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:779245e13b9a6638df14641d029add5dc17edbef6ec915688f3acb9e720a5858"},
+ {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d563795db98b4cd57742a78a288cdbdc9daedac29f2239793071fe114f13785"},
+ {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:31adb7d06fe4383226c3e963471f6837742889b3c4caa55aac20ad951bc8ffda"},
+ {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:efd0bf5205240182e0f13bcaea41be4fdf5c22c5129fc7ced4a0282ac86998c9"},
+ {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a9bc127cdc4ecf87a5ea22a2556cab6c7eda2923f84e4f3cc588e8470ce4e42e"},
+ {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3577d029bc3f4827dd5bf8bf7710cac13527b470bbf1820a3f394adb38ed7d5f"},
+ {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2e47577f9b18723fa294b0ea9a17d5e53a227867a0a4904a1a076d1646d45ca1"},
+ {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1a58839984d9cb34c855197043eaae2c187d930ca6d644612843b4fe8513c886"},
+ {file = "cryptography-42.0.7-cp39-abi3-win32.whl", hash = "sha256:e6b79d0adb01aae87e8a44c2b64bc3f3fe59515280e00fb6d57a7267a2583cda"},
+ {file = "cryptography-42.0.7-cp39-abi3-win_amd64.whl", hash = "sha256:16268d46086bb8ad5bf0a2b5544d8a9ed87a0e33f5e77dd3c3301e63d941a83b"},
+ {file = "cryptography-42.0.7-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2954fccea107026512b15afb4aa664a5640cd0af630e2ee3962f2602693f0c82"},
+ {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:362e7197754c231797ec45ee081f3088a27a47c6c01eff2ac83f60f85a50fe60"},
+ {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f698edacf9c9e0371112792558d2f705b5645076cc0aaae02f816a0171770fd"},
+ {file = "cryptography-42.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5482e789294854c28237bba77c4c83be698be740e31a3ae5e879ee5444166582"},
+ {file = "cryptography-42.0.7-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e9b2a6309f14c0497f348d08a065d52f3020656f675819fc405fb63bbcd26562"},
+ {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d8e3098721b84392ee45af2dd554c947c32cc52f862b6a3ae982dbb90f577f14"},
+ {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c65f96dad14f8528a447414125e1fc8feb2ad5a272b8f68477abbcc1ea7d94b9"},
+ {file = "cryptography-42.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36017400817987670037fbb0324d71489b6ead6231c9604f8fc1f7d008087c68"},
+ {file = "cryptography-42.0.7.tar.gz", hash = "sha256:ecbfbc00bf55888edda9868a4cf927205de8499e7fabe6c050322298382953f2"},
]
[package.dependencies]
From f0c72d8e876550617086af902ca10d7e4eb706dc Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 14 May 2024 09:32:01 +0100
Subject: [PATCH 083/503] Bump gitpython from 3.1.41 to 3.1.43 (#17181)
---
poetry.lock | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 7922309d27..8537f37529 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -524,20 +524,21 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
-version = "3.1.41"
+version = "3.1.43"
description = "GitPython is a Python library used to interact with Git repositories"
optional = false
python-versions = ">=3.7"
files = [
- {file = "GitPython-3.1.41-py3-none-any.whl", hash = "sha256:c36b6634d069b3f719610175020a9aed919421c87552185b085e04fbbdb10b7c"},
- {file = "GitPython-3.1.41.tar.gz", hash = "sha256:ed66e624884f76df22c8e16066d567aaa5a37d5b5fa19db2c6df6f7156db9048"},
+ {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"},
+ {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"},
]
[package.dependencies]
gitdb = ">=4.0.1,<5"
[package.extras]
-test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "sumtypes"]
+doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"]
+test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"]
[[package]]
name = "hiredis"
From caf528477e8320fa1cd4920e105d0aba22250722 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 14 May 2024 09:32:14 +0100
Subject: [PATCH 084/503] Bump serde from 1.0.200 to 1.0.201 (#17183)
---
Cargo.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 59d43ece2d..88d25a7146 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
-version = "1.0.200"
+version = "1.0.201"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f"
+checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.200"
+version = "1.0.201"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb"
+checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865"
dependencies = [
"proc-macro2",
"quote",
From bd8d8865fba3d6c68c9a4d698f42549820fdf12c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 14 May 2024 09:32:23 +0100
Subject: [PATCH 085/503] Bump serde_json from 1.0.116 to 1.0.117 (#17182)
---
Cargo.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 88d25a7146..d6f9000138 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -505,9 +505,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.116"
+version = "1.0.117"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813"
+checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
dependencies = [
"itoa",
"ryu",
From 7d82987b2765b6c203ba12941c844fb7242c6c83 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aur=C3=A9lien=20Grimpard?=
Date: Tue, 14 May 2024 14:55:32 +0200
Subject: [PATCH 086/503] Allows CAS SSO flow to provide user IDs composed of
numbers only (#17098)
---
changelog.d/17098.feature | 1 +
docs/usage/configuration/config_documentation.md | 11 +++++++++++
synapse/config/cas.py | 13 +++++++++++++
synapse/handlers/cas.py | 5 +++++
4 files changed, 30 insertions(+)
create mode 100644 changelog.d/17098.feature
diff --git a/changelog.d/17098.feature b/changelog.d/17098.feature
new file mode 100644
index 0000000000..43e06481b2
--- /dev/null
+++ b/changelog.d/17098.feature
@@ -0,0 +1 @@
+Add the ability to allow numeric user IDs with a specific prefix when in the CAS flow. Contributed by Aurélien Grimpard.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 2257318bcc..f4edbdcc3e 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -3558,6 +3558,15 @@ Has the following sub-options:
users. This allows the CAS SSO flow to be limited to sign in only, rather than
automatically registering users that have a valid SSO login but do not have
a pre-registered account. Defaults to true.
+* `allow_numeric_ids`: set to 'true' to allow numeric user IDs (default false).
+ This allows CAS SSO flow to provide user IDs composed of numbers only.
+ These identifiers will be prefixed by the letter "u" by default.
+ The prefix can be configured using the "numeric_ids_prefix" option.
+ Be careful to choose the prefix correctly to avoid any possible conflicts
+ (e.g. user 1234 becomes u1234 when a user u1234 already exists).
+* `numeric_ids_prefix`: the prefix you wish to add in front of a numeric user ID
+ when the "allow_numeric_ids" option is set to "true".
+ By default, the prefix is the letter "u" and only alphanumeric characters are allowed.
*Added in Synapse 1.93.0.*
@@ -3572,6 +3581,8 @@ cas_config:
userGroup: "staff"
department: None
enable_registration: true
+ allow_numeric_ids: true
+ numeric_ids_prefix: "numericuser"
```
---
### `sso`
diff --git a/synapse/config/cas.py b/synapse/config/cas.py
index d23dcf96b2..fa59c350c1 100644
--- a/synapse/config/cas.py
+++ b/synapse/config/cas.py
@@ -66,6 +66,17 @@ class CasConfig(Config):
self.cas_enable_registration = cas_config.get("enable_registration", True)
+ self.cas_allow_numeric_ids = cas_config.get("allow_numeric_ids")
+ self.cas_numeric_ids_prefix = cas_config.get("numeric_ids_prefix")
+ if (
+ self.cas_numeric_ids_prefix is not None
+ and self.cas_numeric_ids_prefix.isalnum() is False
+ ):
+ raise ConfigError(
+ "Only alphanumeric characters are allowed for numeric IDs prefix",
+ ("cas_config", "numeric_ids_prefix"),
+ )
+
self.idp_name = cas_config.get("idp_name", "CAS")
self.idp_icon = cas_config.get("idp_icon")
self.idp_brand = cas_config.get("idp_brand")
@@ -77,6 +88,8 @@ class CasConfig(Config):
self.cas_displayname_attribute = None
self.cas_required_attributes = []
self.cas_enable_registration = False
+ self.cas_allow_numeric_ids = False
+ self.cas_numeric_ids_prefix = "u"
# CAS uses a legacy required attributes mapping, not the one provided by
diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py
index 153123ee83..cc3d641b7d 100644
--- a/synapse/handlers/cas.py
+++ b/synapse/handlers/cas.py
@@ -78,6 +78,8 @@ class CasHandler:
self._cas_displayname_attribute = hs.config.cas.cas_displayname_attribute
self._cas_required_attributes = hs.config.cas.cas_required_attributes
self._cas_enable_registration = hs.config.cas.cas_enable_registration
+ self._cas_allow_numeric_ids = hs.config.cas.cas_allow_numeric_ids
+ self._cas_numeric_ids_prefix = hs.config.cas.cas_numeric_ids_prefix
self._http_client = hs.get_proxied_http_client()
@@ -188,6 +190,9 @@ class CasHandler:
for child in root[0]:
if child.tag.endswith("user"):
user = child.text
+ # if numeric user IDs are allowed and username is numeric then we add the prefix so Synapse can handle it
+ if self._cas_allow_numeric_ids and user is not None and user.isdigit():
+ user = f"{self._cas_numeric_ids_prefix}{user}"
if child.tag.endswith("attributes"):
for attribute in child:
# ElementTree library expands the namespace in
From ecf4e0674c9a027aec253adaca89ed260700224b Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Tue, 14 May 2024 14:15:41 +0100
Subject: [PATCH 087/503] 1.107.0
---
CHANGES.md | 6 ++++++
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
3 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index fec9581174..85c565a76d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,9 @@
+Synapse 1.107.0 (2024-05-14)
+============================
+
+No significant changes since 1.107.0rc1.
+
+
# Synapse 1.107.0rc1 (2024-05-07)
### Features
diff --git a/debian/changelog b/debian/changelog
index 06b61b5d1b..d228c1cc8d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.107.0) stable; urgency=medium
+
+ * New Synapse release 1.107.0.
+
+ -- Synapse Packaging team Tue, 14 May 2024 14:15:34 +0100
+
matrix-synapse-py3 (1.107.0~rc1) stable; urgency=medium
* New Synapse release 1.107.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index 48aac66f63..dd4521ff71 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.107.0rc1"
+version = "1.107.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From 0b91ccce47bd821ed69ea2628f80c98f83981033 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 14 May 2024 14:39:04 +0100
Subject: [PATCH 088/503] Improve perf of sync device lists (#17191)
It's almost always more efficient to query the rooms that have device
list changes, rather than looking at the list of all users whose devices
have changed and then look for shared rooms.
---
changelog.d/17191.misc | 1 +
synapse/handlers/sync.py | 37 ++++-------------------
synapse/storage/databases/main/devices.py | 17 ++---------
3 files changed, 9 insertions(+), 46 deletions(-)
create mode 100644 changelog.d/17191.misc
diff --git a/changelog.d/17191.misc b/changelog.d/17191.misc
new file mode 100644
index 0000000000..bd55eeaa33
--- /dev/null
+++ b/changelog.d/17191.misc
@@ -0,0 +1 @@
+Improve performance of calculating device lists changes in `/sync`.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 8ff45a3353..0bef58351c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1803,38 +1803,13 @@ class SyncHandler:
# Step 1a, check for changes in devices of users we share a room
# with
- #
- # We do this in two different ways depending on what we have cached.
- # If we already have a list of all the user that have changed since
- # the last sync then it's likely more efficient to compare the rooms
- # they're in with the rooms the syncing user is in.
- #
- # If we don't have that info cached then we get all the users that
- # share a room with our user and check if those users have changed.
- cache_result = self.store.get_cached_device_list_changes(
- since_token.device_list_key
- )
- if cache_result.hit:
- changed_users = cache_result.entities
-
- result = await self.store.get_rooms_for_users(changed_users)
-
- for changed_user_id, entries in result.items():
- # Check if the changed user shares any rooms with the user,
- # or if the changed user is the syncing user (as we always
- # want to include device list updates of their own devices).
- if user_id == changed_user_id or any(
- rid in joined_rooms for rid in entries
- ):
- users_that_have_changed.add(changed_user_id)
- else:
- users_that_have_changed = (
- await self._device_handler.get_device_changes_in_shared_rooms(
- user_id,
- sync_result_builder.joined_room_ids,
- from_token=since_token,
- )
+ users_that_have_changed = (
+ await self._device_handler.get_device_changes_in_shared_rooms(
+ user_id,
+ sync_result_builder.joined_room_ids,
+ from_token=since_token,
)
+ )
# Step 1b, check for newly joined rooms
for room_id in newly_joined_rooms:
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 8dbcb3f5a0..d98f0593bc 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -70,10 +70,7 @@ from synapse.types import (
from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
-from synapse.util.caches.stream_change_cache import (
- AllEntitiesChangedResult,
- StreamChangeCache,
-)
+from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import shortstr
@@ -832,16 +829,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
)
return {device[0]: db_to_json(device[1]) for device in devices}
- def get_cached_device_list_changes(
- self,
- from_key: int,
- ) -> AllEntitiesChangedResult:
- """Get set of users whose devices have changed since `from_key`, or None
- if that information is not in our cache.
- """
-
- return self._device_list_stream_cache.get_all_entities_changed(from_key)
-
@cancellable
async def get_all_devices_changed(
self,
@@ -1475,7 +1462,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
sql = """
SELECT DISTINCT user_id FROM device_lists_changes_in_room
- WHERE {clause} AND stream_id >= ?
+ WHERE {clause} AND stream_id > ?
"""
def _get_device_list_changes_in_rooms_txn(
From ebe77381b0e32a063d615b79fb7cbd727222fc4c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 14 May 2024 14:39:11 +0100
Subject: [PATCH 089/503] Reduce pauses on large device list changes (#17192)
For large accounts waking up all the relevant notifier streams can cause
pauses of the reactor.
---
changelog.d/17192.misc | 1 +
synapse/replication/tcp/client.py | 13 ++++++++++---
2 files changed, 11 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/17192.misc
diff --git a/changelog.d/17192.misc b/changelog.d/17192.misc
new file mode 100644
index 0000000000..25e157a50a
--- /dev/null
+++ b/changelog.d/17192.misc
@@ -0,0 +1 @@
+Improve performance by fixing a reactor pause.
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index ba257d34e6..5e5387fdcb 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -55,6 +55,7 @@ from synapse.replication.tcp.streams.partial_state import (
)
from synapse.types import PersistedEventPosition, ReadReceipt, StreamKeyType, UserID
from synapse.util.async_helpers import Linearizer, timeout_deferred
+from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure
if TYPE_CHECKING:
@@ -150,9 +151,15 @@ class ReplicationDataHandler:
if row.entity.startswith("@") and not row.is_signature:
room_ids = await self.store.get_rooms_for_user(row.entity)
all_room_ids.update(room_ids)
- self.notifier.on_new_event(
- StreamKeyType.DEVICE_LIST, token, rooms=all_room_ids
- )
+
+ # `all_room_ids` can be large, so let's wake up those streams in batches
+ for batched_room_ids in batch_iter(all_room_ids, 100):
+ self.notifier.on_new_event(
+ StreamKeyType.DEVICE_LIST, token, rooms=batched_room_ids
+ )
+
+ # Yield to reactor so that we don't block.
+ await self._clock.sleep(0)
elif stream_name == PushersStream.NAME:
for row in rows:
if row.deleted:
From 284d85dee34d1d79ff92c38799dabdc28a713793 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 14 May 2024 15:08:46 +0100
Subject: [PATCH 090/503] Cache literal sync filter validation (#17186)
The sliding sync proxy (amongst other things) use literal json blobs as
filters, and repeatedly validating them takes a bunch of CPU.
---
changelog.d/17186.misc | 1 +
synapse/rest/client/sync.py | 14 +++++++++++++-
2 files changed, 14 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17186.misc
diff --git a/changelog.d/17186.misc b/changelog.d/17186.misc
new file mode 100644
index 0000000000..358209d5a0
--- /dev/null
+++ b/changelog.d/17186.misc
@@ -0,0 +1 @@
+Cache literal sync filter validation for performance.
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 2b103ca6a8..d19aaf0e22 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -47,6 +47,7 @@ from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace_with_opname
from synapse.types import JsonDict, Requester, StreamToken
from synapse.util import json_decoder
+from synapse.util.caches.lrucache import LruCache
from ._base import client_patterns, set_timeline_upper_limit
@@ -110,6 +111,11 @@ class SyncRestServlet(RestServlet):
self._msc2654_enabled = hs.config.experimental.msc2654_enabled
self._msc3773_enabled = hs.config.experimental.msc3773_enabled
+ self._json_filter_cache: LruCache[str, bool] = LruCache(
+ max_size=1000,
+ cache_name="sync_valid_filter",
+ )
+
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
# This will always be set by the time Twisted calls us.
assert request.args is not None
@@ -177,7 +183,13 @@ class SyncRestServlet(RestServlet):
filter_object = json_decoder.decode(filter_id)
except Exception:
raise SynapseError(400, "Invalid filter JSON", errcode=Codes.NOT_JSON)
- self.filtering.check_valid_filter(filter_object)
+
+ # We cache the validation, as this can get quite expensive if people use
+ # a literal json blob as a query param.
+ if not self._json_filter_cache.get(filter_id):
+ self.filtering.check_valid_filter(filter_object)
+ self._json_filter_cache[filter_id] = True
+
set_timeline_upper_limit(
filter_object, self.hs.config.server.filter_timeline_limit
)
From 2359c64decd6740508b92ef14037df2dde471d66 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 15 May 2024 17:26:22 +0100
Subject: [PATCH 091/503] Fix request path for
`federation_whitelist_endpoint_enabled` option in documentation (#17199)
---
changelog.d/17199.feature | 1 +
docs/usage/configuration/config_documentation.md | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17199.feature
diff --git a/changelog.d/17199.feature b/changelog.d/17199.feature
new file mode 100644
index 0000000000..60d63f1bff
--- /dev/null
+++ b/changelog.d/17199.feature
@@ -0,0 +1 @@
+Add a feature that allows clients to query the configured federation whitelist. Disabled by default.
\ No newline at end of file
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index f4edbdcc3e..e04fdfdfb0 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -1236,7 +1236,7 @@ federation_domain_whitelist:
Enables an endpoint for fetching the federation whitelist config.
-The request method and path is `GET /_synapse/client/config/federation_whitelist`, and the
+The request method and path is `GET /_synapse/client/v1/config/federation_whitelist`, and the
response format is:
```json
From d2d48cce85556753f8443d72aafe697c477c217b Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 16 May 2024 05:36:54 -0500
Subject: [PATCH 092/503] Refactor Sync handler to be able to return different
sync responses (`SyncVersion`) (#17200)
Refactor Sync handler to be able to return different sync
responses (`SyncVersion`). Preparation to be able to support sync v2 and a
new Sliding Sync `/sync/e2ee` endpoint which returns a subset of sync
v2.
Split upon request:
https://github.com/element-hq/synapse/pull/17167#discussion_r1601497279
Split from https://github.com/element-hq/synapse/pull/17167 where we
will add `SyncVersion.E2EE_SYNC` and a new type of sync response.
---
changelog.d/17200.misc | 1 +
synapse/handlers/sync.py | 65 +++++++++++++++++++---
synapse/rest/client/sync.py | 2 +
tests/events/test_presence_router.py | 4 +-
tests/handlers/test_sync.py | 81 ++++++++++++++++++++++------
5 files changed, 128 insertions(+), 25 deletions(-)
create mode 100644 changelog.d/17200.misc
diff --git a/changelog.d/17200.misc b/changelog.d/17200.misc
new file mode 100644
index 0000000000..a02b315041
--- /dev/null
+++ b/changelog.d/17200.misc
@@ -0,0 +1 @@
+Prepare sync handler to be able to return different sync responses (`SyncVersion`).
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 0bef58351c..53fe2a6a53 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -20,6 +20,7 @@
#
import itertools
import logging
+from enum import Enum
from typing import (
TYPE_CHECKING,
AbstractSet,
@@ -112,6 +113,23 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100
SyncRequestKey = Tuple[Any, ...]
+class SyncVersion(Enum):
+ """
+ Enum for specifying the version of sync request. This is used to key which type of
+ sync response that we are generating.
+
+ This is different than the `sync_type` you might see used in other code below; which
+ specifies the sub-type sync request (e.g. initial_sync, full_state_sync,
+ incremental_sync) and is really only relevant for the `/sync` v2 endpoint.
+ """
+
+    # These string values are semantically significant because they are used in the
+ # metrics
+
+ # Traditional `/sync` endpoint
+ SYNC_V2 = "sync_v2"
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SyncConfig:
user: UserID
@@ -309,6 +327,7 @@ class SyncHandler:
self,
requester: Requester,
sync_config: SyncConfig,
+ sync_version: SyncVersion,
since_token: Optional[StreamToken] = None,
timeout: int = 0,
full_state: bool = False,
@@ -316,6 +335,17 @@ class SyncHandler:
"""Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then
return an empty sync result.
+
+ Args:
+ requester: The user requesting the sync response.
+ sync_config: Config/info necessary to process the sync request.
+ sync_version: Determines what kind of sync response to generate.
+ since_token: The point in the stream to sync from.
+ timeout: How long to wait for new data to arrive before giving up.
+ full_state: Whether to return the full state for each room.
+
+ Returns:
+ When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
"""
# If the user is not part of the mau group, then check that limits have
# not been exceeded (if not part of the group by this point, almost certain
@@ -327,6 +357,7 @@ class SyncHandler:
sync_config.request_key,
self._wait_for_sync_for_user,
sync_config,
+ sync_version,
since_token,
timeout,
full_state,
@@ -338,6 +369,7 @@ class SyncHandler:
async def _wait_for_sync_for_user(
self,
sync_config: SyncConfig,
+ sync_version: SyncVersion,
since_token: Optional[StreamToken],
timeout: int,
full_state: bool,
@@ -363,9 +395,11 @@ class SyncHandler:
else:
sync_type = "incremental_sync"
+ sync_label = f"{sync_version}:{sync_type}"
+
context = current_context()
if context:
- context.tag = sync_type
+ context.tag = sync_label
# if we have a since token, delete any to-device messages before that token
# (since we now know that the device has received them)
@@ -384,14 +418,16 @@ class SyncHandler:
# we are going to return immediately, so don't bother calling
# notifier.wait_for_events.
result: SyncResult = await self.current_sync_for_user(
- sync_config, since_token, full_state=full_state
+ sync_config, sync_version, since_token, full_state=full_state
)
else:
# Otherwise, we wait for something to happen and report it to the user.
async def current_sync_callback(
before_token: StreamToken, after_token: StreamToken
) -> SyncResult:
- return await self.current_sync_for_user(sync_config, since_token)
+ return await self.current_sync_for_user(
+ sync_config, sync_version, since_token
+ )
result = await self.notifier.wait_for_events(
sync_config.user.to_string(),
@@ -416,13 +452,14 @@ class SyncHandler:
lazy_loaded = "true"
else:
lazy_loaded = "false"
- non_empty_sync_counter.labels(sync_type, lazy_loaded).inc()
+ non_empty_sync_counter.labels(sync_label, lazy_loaded).inc()
return result
async def current_sync_for_user(
self,
sync_config: SyncConfig,
+ sync_version: SyncVersion,
since_token: Optional[StreamToken] = None,
full_state: bool = False,
) -> SyncResult:
@@ -431,12 +468,26 @@ class SyncHandler:
This is a wrapper around `generate_sync_result` which starts an open tracing
span to track the sync. See `generate_sync_result` for the next part of your
indoctrination.
+
+ Args:
+ sync_config: Config/info necessary to process the sync request.
+ sync_version: Determines what kind of sync response to generate.
+            since_token: The point in the stream to sync from.
+ full_state: Whether to return the full state for each room.
+ Returns:
+ When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
"""
with start_active_span("sync.current_sync_for_user"):
log_kv({"since_token": since_token})
- sync_result = await self.generate_sync_result(
- sync_config, since_token, full_state
- )
+ # Go through the `/sync` v2 path
+ if sync_version == SyncVersion.SYNC_V2:
+ sync_result: SyncResult = await self.generate_sync_result(
+ sync_config, since_token, full_state
+ )
+ else:
+ raise Exception(
+ f"Unknown sync_version (this is a Synapse problem): {sync_version}"
+ )
set_tag(SynapseTags.SYNC_RESULT, bool(sync_result))
return sync_result
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index d19aaf0e22..d0713536e1 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -40,6 +40,7 @@ from synapse.handlers.sync import (
KnockedSyncResult,
SyncConfig,
SyncResult,
+ SyncVersion,
)
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
@@ -232,6 +233,7 @@ class SyncRestServlet(RestServlet):
sync_result = await self.sync_handler.wait_for_sync_for_user(
requester,
sync_config,
+ SyncVersion.SYNC_V2,
since_token=since_token,
timeout=timeout,
full_state=full_state,
diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py
index e51cdf01ab..aa67afa695 100644
--- a/tests/events/test_presence_router.py
+++ b/tests/events/test_presence_router.py
@@ -36,7 +36,7 @@ from synapse.server import HomeServer
from synapse.types import JsonDict, StreamToken, create_requester
from synapse.util import Clock
-from tests.handlers.test_sync import generate_sync_config
+from tests.handlers.test_sync import SyncVersion, generate_sync_config
from tests.unittest import (
FederatingHomeserverTestCase,
HomeserverTestCase,
@@ -521,7 +521,7 @@ def sync_presence(
sync_config = generate_sync_config(requester.user.to_string())
sync_result = testcase.get_success(
testcase.hs.get_sync_handler().wait_for_sync_for_user(
- requester, sync_config, since_token
+ requester, sync_config, SyncVersion.SYNC_V2, since_token
)
)
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 2780d29cad..9c12a11e3a 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -31,7 +31,7 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_base import event_from_pdu_json
-from synapse.handlers.sync import SyncConfig, SyncResult
+from synapse.handlers.sync import SyncConfig, SyncResult, SyncVersion
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
@@ -73,13 +73,21 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Check that the happy case does not throw errors
self.get_success(self.store.upsert_monthly_active_user(user_id1))
self.get_success(
- self.sync_handler.wait_for_sync_for_user(requester, sync_config)
+ self.sync_handler.wait_for_sync_for_user(
+ requester,
+ sync_config,
+ sync_version=SyncVersion.SYNC_V2,
+ )
)
# Test that global lock works
self.auth_blocking._hs_disabled = True
e = self.get_failure(
- self.sync_handler.wait_for_sync_for_user(requester, sync_config),
+ self.sync_handler.wait_for_sync_for_user(
+ requester,
+ sync_config,
+ sync_version=SyncVersion.SYNC_V2,
+ ),
ResourceLimitError,
)
self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
@@ -90,7 +98,11 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester = create_requester(user_id2)
e = self.get_failure(
- self.sync_handler.wait_for_sync_for_user(requester, sync_config),
+ self.sync_handler.wait_for_sync_for_user(
+ requester,
+ sync_config,
+ sync_version=SyncVersion.SYNC_V2,
+ ),
ResourceLimitError,
)
self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
@@ -109,7 +121,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester = create_requester(user)
initial_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- requester, sync_config=generate_sync_config(user, device_id="dev")
+ requester,
+ sync_config=generate_sync_config(user, device_id="dev"),
+ sync_version=SyncVersion.SYNC_V2,
)
)
@@ -140,7 +154,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# The rooms should appear in the sync response.
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- requester, sync_config=generate_sync_config(user)
+ requester,
+ sync_config=generate_sync_config(user),
+ sync_version=SyncVersion.SYNC_V2,
)
)
self.assertIn(joined_room, [r.room_id for r in result.joined])
@@ -152,6 +168,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user, device_id="dev"),
+ sync_version=SyncVersion.SYNC_V2,
since_token=initial_result.next_batch,
)
)
@@ -180,7 +197,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Get a new request key.
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- requester, sync_config=generate_sync_config(user)
+ requester,
+ sync_config=generate_sync_config(user),
+ sync_version=SyncVersion.SYNC_V2,
)
)
self.assertNotIn(joined_room, [r.room_id for r in result.joined])
@@ -192,6 +211,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user, device_id="dev"),
+ sync_version=SyncVersion.SYNC_V2,
since_token=initial_result.next_batch,
)
)
@@ -231,7 +251,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Do a sync as Alice to get the latest event in the room.
alice_sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- create_requester(owner), generate_sync_config(owner)
+ create_requester(owner),
+ generate_sync_config(owner),
+ sync_version=SyncVersion.SYNC_V2,
)
)
self.assertEqual(len(alice_sync_result.joined), 1)
@@ -251,7 +273,11 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
eve_requester = create_requester(eve)
eve_sync_config = generate_sync_config(eve)
eve_sync_after_ban: SyncResult = self.get_success(
- self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config)
+ self.sync_handler.wait_for_sync_for_user(
+ eve_requester,
+ eve_sync_config,
+ sync_version=SyncVersion.SYNC_V2,
+ )
)
# Sanity check this sync result. We shouldn't be joined to the room.
@@ -268,6 +294,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.sync_handler.wait_for_sync_for_user(
eve_requester,
eve_sync_config,
+ sync_version=SyncVersion.SYNC_V2,
since_token=eve_sync_after_ban.next_batch,
)
)
@@ -279,6 +306,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.sync_handler.wait_for_sync_for_user(
eve_requester,
eve_sync_config,
+ sync_version=SyncVersion.SYNC_V2,
since_token=None,
)
)
@@ -310,7 +338,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Do an initial sync as Alice to get a known starting point.
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- alice_requester, generate_sync_config(alice)
+ alice_requester,
+ generate_sync_config(alice),
+ sync_version=SyncVersion.SYNC_V2,
)
)
last_room_creation_event_id = (
@@ -338,6 +368,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.hs, {"room": {"timeline": {"limit": 2}}}
),
),
+ sync_version=SyncVersion.SYNC_V2,
since_token=initial_sync_result.next_batch,
)
)
@@ -380,7 +411,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Do an initial sync as Alice to get a known starting point.
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- alice_requester, generate_sync_config(alice)
+ alice_requester,
+ generate_sync_config(alice),
+ sync_version=SyncVersion.SYNC_V2,
)
)
last_room_creation_event_id = (
@@ -418,6 +451,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
},
),
),
+ sync_version=SyncVersion.SYNC_V2,
since_token=initial_sync_result.next_batch,
)
)
@@ -461,7 +495,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Do an initial sync as Alice to get a known starting point.
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- alice_requester, generate_sync_config(alice)
+ alice_requester,
+ generate_sync_config(alice),
+ sync_version=SyncVersion.SYNC_V2,
)
)
last_room_creation_event_id = (
@@ -486,6 +522,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.hs, {"room": {"timeline": {"limit": 1}}}
),
),
+ sync_version=SyncVersion.SYNC_V2,
since_token=initial_sync_result.next_batch,
)
)
@@ -515,6 +552,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.hs, {"room": {"timeline": {"limit": 1}}}
),
),
+ sync_version=SyncVersion.SYNC_V2,
since_token=incremental_sync.next_batch,
)
)
@@ -574,7 +612,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Do an initial sync to get a known starting point.
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- alice_requester, generate_sync_config(alice)
+ alice_requester,
+ generate_sync_config(alice),
+ sync_version=SyncVersion.SYNC_V2,
)
)
last_room_creation_event_id = (
@@ -598,6 +638,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.hs, {"room": {"timeline": {"limit": 1}}}
),
),
+ sync_version=SyncVersion.SYNC_V2,
)
)
room_sync = initial_sync_result.joined[0]
@@ -618,6 +659,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice),
+ sync_version=SyncVersion.SYNC_V2,
since_token=initial_sync_result.next_batch,
)
)
@@ -668,7 +710,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- bob_requester, generate_sync_config(bob)
+ bob_requester,
+ generate_sync_config(bob),
+ sync_version=SyncVersion.SYNC_V2,
)
)
@@ -699,6 +743,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
generate_sync_config(
bob, filter_collection=FilterCollection(self.hs, filter_dict)
),
+ sync_version=SyncVersion.SYNC_V2,
since_token=None if initial_sync else initial_sync_result.next_batch,
)
).archived[0]
@@ -791,7 +836,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# but that it does not come down /sync in public room
sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- create_requester(user), generate_sync_config(user)
+ create_requester(user),
+ generate_sync_config(user),
+ sync_version=SyncVersion.SYNC_V2,
)
)
event_ids = []
@@ -837,7 +884,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
private_sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- create_requester(user2), generate_sync_config(user2)
+ create_requester(user2),
+ generate_sync_config(user2),
+ sync_version=SyncVersion.SYNC_V2,
)
)
priv_event_ids = []
From 5e892671a74251109bf9cf4a78bebed9d8085979 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 16 May 2024 15:04:14 +0100
Subject: [PATCH 093/503] Fix bug where push rules would be empty in `/sync`
(#17142)
Fixes #16987
Some old accounts seem to have an entry in the global account data table for
push rules, which we should ignore.
---
changelog.d/17142.bugfix | 1 +
synapse/handlers/sync.py | 20 ++++++++------------
tests/handlers/test_sync.py | 29 ++++++++++++++++++++++++++++-
3 files changed, 37 insertions(+), 13 deletions(-)
create mode 100644 changelog.d/17142.bugfix
diff --git a/changelog.d/17142.bugfix b/changelog.d/17142.bugfix
new file mode 100644
index 0000000000..09b617aed1
--- /dev/null
+++ b/changelog.d/17142.bugfix
@@ -0,0 +1 @@
+Fix bug where push rules would be empty in `/sync` for some accounts. Introduced in v1.93.0.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 53fe2a6a53..659499af75 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1971,23 +1971,19 @@ class SyncHandler:
)
if push_rules_changed:
- global_account_data = {
- AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user(
- sync_config.user
- ),
- **global_account_data,
- }
+ global_account_data = dict(global_account_data)
+ global_account_data[AccountDataTypes.PUSH_RULES] = (
+ await self._push_rules_handler.push_rules_for_user(sync_config.user)
+ )
else:
all_global_account_data = await self.store.get_global_account_data_for_user(
user_id
)
- global_account_data = {
- AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user(
- sync_config.user
- ),
- **all_global_account_data,
- }
+ global_account_data = dict(all_global_account_data)
+ global_account_data[AccountDataTypes.PUSH_RULES] = (
+ await self._push_rules_handler.push_rules_for_user(sync_config.user)
+ )
account_data_for_user = (
await sync_config.filter_collection.filter_global_account_data(
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 9c12a11e3a..0299113b95 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -24,7 +24,7 @@ from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
-from synapse.api.constants import EventTypes, JoinRules
+from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules
from synapse.api.errors import Codes, ResourceLimitError
from synapse.api.filtering import FilterCollection, Filtering
from synapse.api.room_versions import RoomVersion, RoomVersions
@@ -895,6 +895,33 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.assertIn(private_call_event.event_id, priv_event_ids)
+ def test_push_rules_with_bad_account_data(self) -> None:
+ """Some old accounts have managed to set a `m.push_rules` account data,
+ which we should ignore in /sync response.
+ """
+
+ user = self.register_user("alice", "password")
+
+ # Insert the bad account data.
+ self.get_success(
+ self.store.add_account_data_for_user(user, AccountDataTypes.PUSH_RULES, {})
+ )
+
+ sync_result: SyncResult = self.get_success(
+ self.sync_handler.wait_for_sync_for_user(
+ create_requester(user), generate_sync_config(user)
+ )
+ )
+
+ for account_dict in sync_result.account_data:
+ if account_dict["type"] == AccountDataTypes.PUSH_RULES:
+ # We should have lots of push rules here, rather than the bad
+ # empty data.
+ self.assertNotEqual(account_dict["content"], {})
+ return
+
+ self.fail("No push rules found")
+
_request_key = 0
From fd1200344112eb28486ee6f82ee341ada8bb4f06 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 16 May 2024 16:07:54 +0100
Subject: [PATCH 094/503] Revert "Improve perf of sync device lists" (#17207)
Reverts element-hq/synapse#17191
---
changelog.d/17191.misc | 1 -
synapse/handlers/sync.py | 37 +++++++++++++++++++----
synapse/storage/databases/main/devices.py | 17 +++++++++--
3 files changed, 46 insertions(+), 9 deletions(-)
delete mode 100644 changelog.d/17191.misc
diff --git a/changelog.d/17191.misc b/changelog.d/17191.misc
deleted file mode 100644
index bd55eeaa33..0000000000
--- a/changelog.d/17191.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of calculating device lists changes in `/sync`.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 659499af75..2bd1b8de88 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1854,13 +1854,38 @@ class SyncHandler:
# Step 1a, check for changes in devices of users we share a room
# with
- users_that_have_changed = (
- await self._device_handler.get_device_changes_in_shared_rooms(
- user_id,
- sync_result_builder.joined_room_ids,
- from_token=since_token,
- )
+ #
+ # We do this in two different ways depending on what we have cached.
+ # If we already have a list of all the user that have changed since
+ # the last sync then it's likely more efficient to compare the rooms
+ # they're in with the rooms the syncing user is in.
+ #
+ # If we don't have that info cached then we get all the users that
+ # share a room with our user and check if those users have changed.
+ cache_result = self.store.get_cached_device_list_changes(
+ since_token.device_list_key
)
+ if cache_result.hit:
+ changed_users = cache_result.entities
+
+ result = await self.store.get_rooms_for_users(changed_users)
+
+ for changed_user_id, entries in result.items():
+ # Check if the changed user shares any rooms with the user,
+ # or if the changed user is the syncing user (as we always
+ # want to include device list updates of their own devices).
+ if user_id == changed_user_id or any(
+ rid in joined_rooms for rid in entries
+ ):
+ users_that_have_changed.add(changed_user_id)
+ else:
+ users_that_have_changed = (
+ await self._device_handler.get_device_changes_in_shared_rooms(
+ user_id,
+ sync_result_builder.joined_room_ids,
+ from_token=since_token,
+ )
+ )
# Step 1b, check for newly joined rooms
for room_id in newly_joined_rooms:
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index d98f0593bc..8dbcb3f5a0 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -70,7 +70,10 @@ from synapse.types import (
from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
-from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.caches.stream_change_cache import (
+ AllEntitiesChangedResult,
+ StreamChangeCache,
+)
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import shortstr
@@ -829,6 +832,16 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
)
return {device[0]: db_to_json(device[1]) for device in devices}
+ def get_cached_device_list_changes(
+ self,
+ from_key: int,
+ ) -> AllEntitiesChangedResult:
+ """Get set of users whose devices have changed since `from_key`, or None
+ if that information is not in our cache.
+ """
+
+ return self._device_list_stream_cache.get_all_entities_changed(from_key)
+
@cancellable
async def get_all_devices_changed(
self,
@@ -1462,7 +1475,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
sql = """
SELECT DISTINCT user_id FROM device_lists_changes_in_room
- WHERE {clause} AND stream_id > ?
+ WHERE {clause} AND stream_id >= ?
"""
def _get_device_list_changes_in_rooms_txn(
From 7cb3f8a97991d7a8765a19c5f744d517b1542a77 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 16 May 2024 17:53:26 +0100
Subject: [PATCH 095/503] Route `/make_knock` and `/send_knock` to workers in
Complement docker image (#17195)
---
changelog.d/17195.misc | 1 +
docker/configure_workers_and_start.py | 2 ++
2 files changed, 3 insertions(+)
create mode 100644 changelog.d/17195.misc
diff --git a/changelog.d/17195.misc b/changelog.d/17195.misc
new file mode 100644
index 0000000000..18b8f1464d
--- /dev/null
+++ b/changelog.d/17195.misc
@@ -0,0 +1 @@
+Route `/make_knock` and `/send_knock` federation APIs to the federation reader worker in Complement test runs.
\ No newline at end of file
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 77534a4f4f..063f3727f9 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -211,6 +211,8 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/federation/(v1|v2)/make_leave/",
"^/_matrix/federation/(v1|v2)/send_join/",
"^/_matrix/federation/(v1|v2)/send_leave/",
+ "^/_matrix/federation/v1/make_knock/",
+ "^/_matrix/federation/v1/send_knock/",
"^/_matrix/federation/(v1|v2)/invite/",
"^/_matrix/federation/(v1|v2)/query_auth/",
"^/_matrix/federation/(v1|v2)/event_auth/",
From 28a948f04f1e04cbcbd68c53a78aa2ada3a791a1 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 16 May 2024 11:54:46 -0500
Subject: [PATCH 096/503] Removed `request_key` from the `SyncConfig` (moved
outside as its own function parameter) (#17201)
Removed `request_key` from the `SyncConfig` (moved outside as its own function parameter) so it doesn't have to flow into `_generate_sync_entry_for_xxx` methods. This way we can separate the concerns of caching from generating the response and reuse the `_generate_sync_entry_for_xxx` functions as we see fit. Plus caching doesn't really have anything to do with the config of sync.
Split from https://github.com/element-hq/synapse/pull/17167
Spawning from https://github.com/element-hq/synapse/pull/17167#discussion_r1601497279
---
changelog.d/17201.misc | 1 +
synapse/handlers/sync.py | 6 ++--
synapse/rest/client/sync.py | 2 +-
tests/events/test_presence_router.py | 17 ++++++++--
tests/handlers/test_sync.py | 47 +++++++++++++++++++++++-----
5 files changed, 59 insertions(+), 14 deletions(-)
create mode 100644 changelog.d/17201.misc
diff --git a/changelog.d/17201.misc b/changelog.d/17201.misc
new file mode 100644
index 0000000000..2bd08d8f06
--- /dev/null
+++ b/changelog.d/17201.misc
@@ -0,0 +1 @@
+Organize the sync cache key parameter outside of the sync config (separate concerns).
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 2bd1b8de88..40e42af1f3 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -135,7 +135,6 @@ class SyncConfig:
user: UserID
filter_collection: FilterCollection
is_guest: bool
- request_key: SyncRequestKey
device_id: Optional[str]
@@ -328,6 +327,7 @@ class SyncHandler:
requester: Requester,
sync_config: SyncConfig,
sync_version: SyncVersion,
+ request_key: SyncRequestKey,
since_token: Optional[StreamToken] = None,
timeout: int = 0,
full_state: bool = False,
@@ -340,10 +340,10 @@ class SyncHandler:
requester: The user requesting the sync response.
sync_config: Config/info necessary to process the sync request.
sync_version: Determines what kind of sync response to generate.
+ request_key: The key to use for caching the response.
since_token: The point in the stream to sync from.
timeout: How long to wait for new data to arrive before giving up.
full_state: Whether to return the full state for each room.
-
Returns:
When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
"""
@@ -354,7 +354,7 @@ class SyncHandler:
await self.auth_blocking.check_auth_blocking(requester=requester)
res = await self.response_cache.wrap(
- sync_config.request_key,
+ request_key,
self._wait_for_sync_for_user,
sync_config,
sync_version,
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index d0713536e1..4a57eaf930 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -210,7 +210,6 @@ class SyncRestServlet(RestServlet):
user=user,
filter_collection=filter_collection,
is_guest=requester.is_guest,
- request_key=request_key,
device_id=device_id,
)
@@ -234,6 +233,7 @@ class SyncRestServlet(RestServlet):
requester,
sync_config,
SyncVersion.SYNC_V2,
+ request_key,
since_token=since_token,
timeout=timeout,
full_state=full_state,
diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py
index aa67afa695..e48983ddfe 100644
--- a/tests/events/test_presence_router.py
+++ b/tests/events/test_presence_router.py
@@ -36,7 +36,7 @@ from synapse.server import HomeServer
from synapse.types import JsonDict, StreamToken, create_requester
from synapse.util import Clock
-from tests.handlers.test_sync import SyncVersion, generate_sync_config
+from tests.handlers.test_sync import SyncRequestKey, SyncVersion, generate_sync_config
from tests.unittest import (
FederatingHomeserverTestCase,
HomeserverTestCase,
@@ -498,6 +498,15 @@ def send_presence_update(
return channel.json_body
+_request_key = 0
+
+
+def generate_request_key() -> SyncRequestKey:
+ global _request_key
+ _request_key += 1
+ return ("request_key", _request_key)
+
+
def sync_presence(
testcase: HomeserverTestCase,
user_id: str,
@@ -521,7 +530,11 @@ def sync_presence(
sync_config = generate_sync_config(requester.user.to_string())
sync_result = testcase.get_success(
testcase.hs.get_sync_handler().wait_for_sync_for_user(
- requester, sync_config, SyncVersion.SYNC_V2, since_token
+ requester,
+ sync_config,
+ SyncVersion.SYNC_V2,
+ generate_request_key(),
+ since_token,
)
)
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 0299113b95..02371ce724 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -31,7 +31,7 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_base import event_from_pdu_json
-from synapse.handlers.sync import SyncConfig, SyncResult, SyncVersion
+from synapse.handlers.sync import SyncConfig, SyncRequestKey, SyncResult, SyncVersion
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
@@ -41,6 +41,14 @@ from synapse.util import Clock
import tests.unittest
import tests.utils
+_request_key = 0
+
+
+def generate_request_key() -> SyncRequestKey:
+ global _request_key
+ _request_key += 1
+ return ("request_key", _request_key)
+
class SyncTestCase(tests.unittest.HomeserverTestCase):
"""Tests Sync Handler."""
@@ -77,6 +85,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester,
sync_config,
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
@@ -87,6 +96,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester,
sync_config,
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
),
ResourceLimitError,
)
@@ -102,6 +112,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester,
sync_config,
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
),
ResourceLimitError,
)
@@ -124,6 +135,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester,
sync_config=generate_sync_config(user, device_id="dev"),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
@@ -157,6 +169,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester,
sync_config=generate_sync_config(user),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
self.assertIn(joined_room, [r.room_id for r in result.joined])
@@ -169,6 +182,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester,
sync_config=generate_sync_config(user, device_id="dev"),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=initial_result.next_batch,
)
)
@@ -200,6 +214,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester,
sync_config=generate_sync_config(user),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
self.assertNotIn(joined_room, [r.room_id for r in result.joined])
@@ -212,6 +227,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
requester,
sync_config=generate_sync_config(user, device_id="dev"),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=initial_result.next_batch,
)
)
@@ -254,6 +270,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
create_requester(owner),
generate_sync_config(owner),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
self.assertEqual(len(alice_sync_result.joined), 1)
@@ -277,6 +294,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
eve_requester,
eve_sync_config,
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
@@ -295,6 +313,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
eve_requester,
eve_sync_config,
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=eve_sync_after_ban.next_batch,
)
)
@@ -307,6 +326,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
eve_requester,
eve_sync_config,
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=None,
)
)
@@ -341,6 +361,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
alice_requester,
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
last_room_creation_event_id = (
@@ -369,6 +390,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
),
),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=initial_sync_result.next_batch,
)
)
@@ -414,6 +436,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
alice_requester,
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
last_room_creation_event_id = (
@@ -452,6 +475,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
),
),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=initial_sync_result.next_batch,
)
)
@@ -498,6 +522,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
alice_requester,
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
last_room_creation_event_id = (
@@ -523,6 +548,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
),
),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=initial_sync_result.next_batch,
)
)
@@ -553,6 +579,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
),
),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=incremental_sync.next_batch,
)
)
@@ -615,6 +642,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
alice_requester,
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
last_room_creation_event_id = (
@@ -639,6 +667,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
),
),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
room_sync = initial_sync_result.joined[0]
@@ -660,6 +689,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
alice_requester,
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=initial_sync_result.next_batch,
)
)
@@ -713,6 +743,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
bob_requester,
generate_sync_config(bob),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
@@ -744,6 +775,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
bob, filter_collection=FilterCollection(self.hs, filter_dict)
),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
since_token=None if initial_sync else initial_sync_result.next_batch,
)
).archived[0]
@@ -839,6 +871,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
create_requester(user),
generate_sync_config(user),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
event_ids = []
@@ -887,6 +920,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
create_requester(user2),
generate_sync_config(user2),
sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
priv_event_ids = []
@@ -909,7 +943,10 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
- create_requester(user), generate_sync_config(user)
+ create_requester(user),
+ generate_sync_config(user),
+ sync_version=SyncVersion.SYNC_V2,
+ request_key=generate_request_key(),
)
)
@@ -923,9 +960,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.fail("No push rules found")
-_request_key = 0
-
-
def generate_sync_config(
user_id: str,
device_id: Optional[str] = "device_id",
@@ -942,12 +976,9 @@ def generate_sync_config(
if filter_collection is None:
filter_collection = Filtering(Mock()).DEFAULT_FILTER_COLLECTION
- global _request_key
- _request_key += 1
return SyncConfig(
user=UserID.from_string(user_id),
filter_collection=filter_collection,
is_guest=False,
- request_key=("request_key", _request_key),
device_id=device_id,
)
From 52a649580f34b4f36dfa21abcd05dad27e28bd1a Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 16 May 2024 11:55:51 -0500
Subject: [PATCH 097/503] Rename to be obvious: `joined_rooms` ->
`joined_room_ids` (#17203)
Split out from https://github.com/element-hq/synapse/pull/17167
---
changelog.d/17203.misc | 1 +
synapse/handlers/sync.py | 4 ++--
2 files changed, 3 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/17203.misc
diff --git a/changelog.d/17203.misc b/changelog.d/17203.misc
new file mode 100644
index 0000000000..142300b1f2
--- /dev/null
+++ b/changelog.d/17203.misc
@@ -0,0 +1 @@
+Rename to be obvious: `joined_rooms` -> `joined_room_ids`.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 40e42af1f3..6d4373008c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1850,7 +1850,7 @@ class SyncHandler:
users_that_have_changed = set()
- joined_rooms = sync_result_builder.joined_room_ids
+ joined_room_ids = sync_result_builder.joined_room_ids
# Step 1a, check for changes in devices of users we share a room
# with
@@ -1909,7 +1909,7 @@ class SyncHandler:
# Remove any users that we still share a room with.
left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
for user_id, entries in left_users_rooms.items():
- if any(rid in joined_rooms for rid in entries):
+ if any(rid in joined_room_ids for rid in entries):
newly_left_users.discard(user_id)
return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users)
From fe07995e691a8f6d84dde4de990f8f53634ec5b5 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 16 May 2024 12:27:38 -0500
Subject: [PATCH 098/503] Fix `joined_rooms`/`joined_room_ids` usage (#17208)
This change was introduced in
https://github.com/element-hq/synapse/pull/17203
But then https://github.com/element-hq/synapse/pull/17207 was reverted,
which brought back a usage of `joined_rooms` that needed to be updated.
This wasn't caught because `develop` wasn't up to date before merging.
---
changelog.d/17208.misc | 1 +
synapse/handlers/sync.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17208.misc
diff --git a/changelog.d/17208.misc b/changelog.d/17208.misc
new file mode 100644
index 0000000000..142300b1f2
--- /dev/null
+++ b/changelog.d/17208.misc
@@ -0,0 +1 @@
+Rename to be obvious: `joined_rooms` -> `joined_room_ids`.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 6d4373008c..6634b3887e 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1875,7 +1875,7 @@ class SyncHandler:
# or if the changed user is the syncing user (as we always
# want to include device list updates of their own devices).
if user_id == changed_user_id or any(
- rid in joined_rooms for rid in entries
+ rid in joined_room_ids for rid in entries
):
users_that_have_changed.add(changed_user_id)
else:
From c856ae47247579446bbe1a1adc1564158e5e0643 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 16 May 2024 13:05:31 -0500
Subject: [PATCH 099/503] Refactor `SyncResultBuilder` assembly to its own
function (#17202)
We will re-use `get_sync_result_builder(...)` in
https://github.com/element-hq/synapse/pull/17167
Split out from https://github.com/element-hq/synapse/pull/17167
---
changelog.d/17202.misc | 1 +
synapse/handlers/sync.py | 266 ++++++++++++++++++++++-----------------
2 files changed, 150 insertions(+), 117 deletions(-)
create mode 100644 changelog.d/17202.misc
diff --git a/changelog.d/17202.misc b/changelog.d/17202.misc
new file mode 100644
index 0000000000..4a558c8bcf
--- /dev/null
+++ b/changelog.d/17202.misc
@@ -0,0 +1 @@
+Refactor `SyncResultBuilder` assembly to its own function.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 6634b3887e..d3d40e8682 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1569,128 +1569,17 @@ class SyncHandler:
# See https://github.com/matrix-org/matrix-doc/issues/1144
raise NotImplementedError()
- # Note: we get the users room list *before* we get the current token, this
- # avoids checking back in history if rooms are joined after the token is fetched.
- token_before_rooms = self.event_sources.get_current_token()
- mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id))
-
- # NB: The now_token gets changed by some of the generate_sync_* methods,
- # this is due to some of the underlying streams not supporting the ability
- # to query up to a given point.
- # Always use the `now_token` in `SyncResultBuilder`
- now_token = self.event_sources.get_current_token()
- log_kv({"now_token": now_token})
-
- # Since we fetched the users room list before the token, there's a small window
- # during which membership events may have been persisted, so we fetch these now
- # and modify the joined room list for any changes between the get_rooms_for_user
- # call and the get_current_token call.
- membership_change_events = []
- if since_token:
- membership_change_events = await self.store.get_membership_changes_for_user(
- user_id,
- since_token.room_key,
- now_token.room_key,
- self.rooms_to_exclude_globally,
- )
-
- mem_last_change_by_room_id: Dict[str, EventBase] = {}
- for event in membership_change_events:
- mem_last_change_by_room_id[event.room_id] = event
-
- # For the latest membership event in each room found, add/remove the room ID
- # from the joined room list accordingly. In this case we only care if the
- # latest change is JOIN.
-
- for room_id, event in mem_last_change_by_room_id.items():
- assert event.internal_metadata.stream_ordering
- if (
- event.internal_metadata.stream_ordering
- < token_before_rooms.room_key.stream
- ):
- continue
-
- logger.info(
- "User membership change between getting rooms and current token: %s %s %s",
- user_id,
- event.membership,
- room_id,
- )
- # User joined a room - we have to then check the room state to ensure we
- # respect any bans if there's a race between the join and ban events.
- if event.membership == Membership.JOIN:
- user_ids_in_room = await self.store.get_users_in_room(room_id)
- if user_id in user_ids_in_room:
- mutable_joined_room_ids.add(room_id)
- # The user left the room, or left and was re-invited but not joined yet
- else:
- mutable_joined_room_ids.discard(room_id)
-
- # Tweak the set of rooms to return to the client for eager (non-lazy) syncs.
- mutable_rooms_to_exclude = set(self.rooms_to_exclude_globally)
- if not sync_config.filter_collection.lazy_load_members():
- # Non-lazy syncs should never include partially stated rooms.
- # Exclude all partially stated rooms from this sync.
- results = await self.store.is_partial_state_room_batched(
- mutable_joined_room_ids
- )
- mutable_rooms_to_exclude.update(
- room_id
- for room_id, is_partial_state in results.items()
- if is_partial_state
- )
- membership_change_events = [
- event
- for event in membership_change_events
- if not results.get(event.room_id, False)
- ]
-
- # Incremental eager syncs should additionally include rooms that
- # - we are joined to
- # - are full-stated
- # - became fully-stated at some point during the sync period
- # (These rooms will have been omitted during a previous eager sync.)
- forced_newly_joined_room_ids: Set[str] = set()
- if since_token and not sync_config.filter_collection.lazy_load_members():
- un_partial_stated_rooms = (
- await self.store.get_un_partial_stated_rooms_between(
- since_token.un_partial_stated_rooms_key,
- now_token.un_partial_stated_rooms_key,
- mutable_joined_room_ids,
- )
- )
- results = await self.store.is_partial_state_room_batched(
- un_partial_stated_rooms
- )
- forced_newly_joined_room_ids.update(
- room_id
- for room_id, is_partial_state in results.items()
- if not is_partial_state
- )
-
- # Now we have our list of joined room IDs, exclude as configured and freeze
- joined_room_ids = frozenset(
- room_id
- for room_id in mutable_joined_room_ids
- if room_id not in mutable_rooms_to_exclude
+ sync_result_builder = await self.get_sync_result_builder(
+ sync_config,
+ since_token,
+ full_state,
)
logger.debug(
"Calculating sync response for %r between %s and %s",
sync_config.user,
- since_token,
- now_token,
- )
-
- sync_result_builder = SyncResultBuilder(
- sync_config,
- full_state,
- since_token=since_token,
- now_token=now_token,
- joined_room_ids=joined_room_ids,
- excluded_room_ids=frozenset(mutable_rooms_to_exclude),
- forced_newly_joined_room_ids=frozenset(forced_newly_joined_room_ids),
- membership_change_events=membership_change_events,
+ sync_result_builder.since_token,
+ sync_result_builder.now_token,
)
logger.debug("Fetching account data")
@@ -1802,6 +1691,149 @@ class SyncHandler:
next_batch=sync_result_builder.now_token,
)
+ async def get_sync_result_builder(
+ self,
+ sync_config: SyncConfig,
+ since_token: Optional[StreamToken] = None,
+ full_state: bool = False,
+ ) -> "SyncResultBuilder":
+ """
+ Assemble a `SyncResultBuilder` with all of the initial context to
+ start building up the sync response:
+
+ - Membership changes between the last sync and the current sync.
+ - Joined room IDs (minus any rooms to exclude).
+ - Rooms that became fully-stated/un-partial stated since the last sync.
+
+ Args:
+ sync_config: Config/info necessary to process the sync request.
+ since_token: The point in the stream to sync from.
+ full_state: Whether to return the full state for each room.
+
+ Returns:
+ `SyncResultBuilder` ready to start generating parts of the sync response.
+ """
+ user_id = sync_config.user.to_string()
+
+ # Note: we get the users room list *before* we get the current token, this
+ # avoids checking back in history if rooms are joined after the token is fetched.
+ token_before_rooms = self.event_sources.get_current_token()
+ mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id))
+
+ # NB: The `now_token` gets changed by some of the `generate_sync_*` methods,
+ # this is due to some of the underlying streams not supporting the ability
+ # to query up to a given point.
+ # Always use the `now_token` in `SyncResultBuilder`
+ now_token = self.event_sources.get_current_token()
+ log_kv({"now_token": now_token})
+
+ # Since we fetched the users room list before the token, there's a small window
+ # during which membership events may have been persisted, so we fetch these now
+ # and modify the joined room list for any changes between the get_rooms_for_user
+ # call and the get_current_token call.
+ membership_change_events = []
+ if since_token:
+ membership_change_events = await self.store.get_membership_changes_for_user(
+ user_id,
+ since_token.room_key,
+ now_token.room_key,
+ self.rooms_to_exclude_globally,
+ )
+
+ mem_last_change_by_room_id: Dict[str, EventBase] = {}
+ for event in membership_change_events:
+ mem_last_change_by_room_id[event.room_id] = event
+
+ # For the latest membership event in each room found, add/remove the room ID
+ # from the joined room list accordingly. In this case we only care if the
+ # latest change is JOIN.
+
+ for room_id, event in mem_last_change_by_room_id.items():
+ assert event.internal_metadata.stream_ordering
+ if (
+ event.internal_metadata.stream_ordering
+ < token_before_rooms.room_key.stream
+ ):
+ continue
+
+ logger.info(
+ "User membership change between getting rooms and current token: %s %s %s",
+ user_id,
+ event.membership,
+ room_id,
+ )
+ # User joined a room - we have to then check the room state to ensure we
+ # respect any bans if there's a race between the join and ban events.
+ if event.membership == Membership.JOIN:
+ user_ids_in_room = await self.store.get_users_in_room(room_id)
+ if user_id in user_ids_in_room:
+ mutable_joined_room_ids.add(room_id)
+ # The user left the room, or left and was re-invited but not joined yet
+ else:
+ mutable_joined_room_ids.discard(room_id)
+
+ # Tweak the set of rooms to return to the client for eager (non-lazy) syncs.
+ mutable_rooms_to_exclude = set(self.rooms_to_exclude_globally)
+ if not sync_config.filter_collection.lazy_load_members():
+ # Non-lazy syncs should never include partially stated rooms.
+ # Exclude all partially stated rooms from this sync.
+ results = await self.store.is_partial_state_room_batched(
+ mutable_joined_room_ids
+ )
+ mutable_rooms_to_exclude.update(
+ room_id
+ for room_id, is_partial_state in results.items()
+ if is_partial_state
+ )
+ membership_change_events = [
+ event
+ for event in membership_change_events
+ if not results.get(event.room_id, False)
+ ]
+
+ # Incremental eager syncs should additionally include rooms that
+ # - we are joined to
+ # - are full-stated
+ # - became fully-stated at some point during the sync period
+ # (These rooms will have been omitted during a previous eager sync.)
+ forced_newly_joined_room_ids: Set[str] = set()
+ if since_token and not sync_config.filter_collection.lazy_load_members():
+ un_partial_stated_rooms = (
+ await self.store.get_un_partial_stated_rooms_between(
+ since_token.un_partial_stated_rooms_key,
+ now_token.un_partial_stated_rooms_key,
+ mutable_joined_room_ids,
+ )
+ )
+ results = await self.store.is_partial_state_room_batched(
+ un_partial_stated_rooms
+ )
+ forced_newly_joined_room_ids.update(
+ room_id
+ for room_id, is_partial_state in results.items()
+ if not is_partial_state
+ )
+
+ # Now we have our list of joined room IDs, exclude as configured and freeze
+ joined_room_ids = frozenset(
+ room_id
+ for room_id in mutable_joined_room_ids
+ if room_id not in mutable_rooms_to_exclude
+ )
+
+ sync_result_builder = SyncResultBuilder(
+ sync_config,
+ full_state,
+ since_token=since_token,
+ now_token=now_token,
+ joined_room_ids=joined_room_ids,
+ excluded_room_ids=frozenset(mutable_rooms_to_exclude),
+ forced_newly_joined_room_ids=frozenset(forced_newly_joined_room_ids),
+ membership_change_events=membership_change_events,
+ )
+
+ return sync_result_builder
+
@measure_func("_generate_sync_entry_for_device_list")
async def _generate_sync_entry_for_device_list(
self,
From 38f03a09ff185296ae622b171254095a27da3ae8 Mon Sep 17 00:00:00 2001
From: Dominic Schubert
Date: Fri, 17 May 2024 11:54:11 +0200
Subject: [PATCH 100/503] Federated Knocking Endpoints added (missing in Docu)
(#17058)
---
changelog.d/17058.doc | 1 +
docs/workers.md | 2 ++
2 files changed, 3 insertions(+)
create mode 100644 changelog.d/17058.doc
diff --git a/changelog.d/17058.doc b/changelog.d/17058.doc
new file mode 100644
index 0000000000..99795bf3da
--- /dev/null
+++ b/changelog.d/17058.doc
@@ -0,0 +1 @@
+Document [`/v1/make_knock`](https://spec.matrix.org/v1.10/server-server-api/#get_matrixfederationv1make_knockroomiduserid) and [`/v1/send_knock`](https://spec.matrix.org/v1.10/server-server-api/#put_matrixfederationv1send_knockroomideventid) federation endpoints as worker-compatible.
\ No newline at end of file
diff --git a/docs/workers.md b/docs/workers.md
index 6cb4416bfc..1f6bfd9e7f 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -211,6 +211,8 @@ information.
^/_matrix/federation/v1/make_leave/
^/_matrix/federation/(v1|v2)/send_join/
^/_matrix/federation/(v1|v2)/send_leave/
+ ^/_matrix/federation/v1/make_knock/
+ ^/_matrix/federation/v1/send_knock/
^/_matrix/federation/(v1|v2)/invite/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/timestamp_to_event/
From 52af16c56175160512420d8654ac558a1e5af541 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Sat, 18 May 2024 12:03:30 +0100
Subject: [PATCH 101/503] Add a short sleep if the request is rate-limited
(#17210)
This helps prevent clients from "tight-looping" retrying their request.
---
changelog.d/17210.misc | 1 +
synapse/api/ratelimiting.py | 4 ++++
tests/api/test_ratelimiting.py | 5 +++--
tests/handlers/test_federation.py | 1 +
tests/handlers/test_room_member.py | 4 ++++
tests/unittest.py | 4 ++--
6 files changed, 15 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/17210.misc
diff --git a/changelog.d/17210.misc b/changelog.d/17210.misc
new file mode 100644
index 0000000000..2059ebea7b
--- /dev/null
+++ b/changelog.d/17210.misc
@@ -0,0 +1 @@
+Add a short pause when rate-limiting a request.
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index a73626bc86..a99a9e09fc 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -316,6 +316,10 @@ class Ratelimiter:
)
if not allowed:
+ # We pause for a bit here to stop clients from "tight-looping" on
+ # retrying their request.
+ await self.clock.sleep(0.5)
+
raise LimitExceededError(
limiter_name=self._limiter_name,
retry_after_ms=int(1000 * (time_allowed - time_now_s)),
diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py
index a24638c9ef..a59e168db1 100644
--- a/tests/api/test_ratelimiting.py
+++ b/tests/api/test_ratelimiting.py
@@ -116,8 +116,9 @@ class TestRatelimiter(unittest.HomeserverTestCase):
# Should raise
with self.assertRaises(LimitExceededError) as context:
self.get_success_or_raise(
- limiter.ratelimit(None, key="test_id", _time_now_s=5)
+ limiter.ratelimit(None, key="test_id", _time_now_s=5), by=0.5
)
+
self.assertEqual(context.exception.retry_after_ms, 5000)
# Shouldn't raise
@@ -192,7 +193,7 @@ class TestRatelimiter(unittest.HomeserverTestCase):
# Second attempt, 1s later, will fail
with self.assertRaises(LimitExceededError) as context:
self.get_success_or_raise(
- limiter.ratelimit(None, key=("test_id",), _time_now_s=1)
+ limiter.ratelimit(None, key=("test_id",), _time_now_s=1), by=0.5
)
self.assertEqual(context.exception.retry_after_ms, 9000)
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index b819b60c5d..3fe5b0a1b4 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -483,6 +483,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
event.room_version,
),
exc=LimitExceededError,
+ by=0.5,
)
def _build_and_send_join_event(
diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py
index 3e28117e2c..df43ce581c 100644
--- a/tests/handlers/test_room_member.py
+++ b/tests/handlers/test_room_member.py
@@ -70,6 +70,7 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase):
action=Membership.JOIN,
),
LimitExceededError,
+ by=0.5,
)
@override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}})
@@ -206,6 +207,7 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase):
remote_room_hosts=[self.OTHER_SERVER_NAME],
),
LimitExceededError,
+ by=0.5,
)
# TODO: test that remote joins to a room are rate limited.
@@ -273,6 +275,7 @@ class TestReplicatedJoinsLimitedByPerRoomRateLimiter(BaseMultiWorkerStreamTestCa
action=Membership.JOIN,
),
LimitExceededError,
+ by=0.5,
)
# Try to join as Chris on the original worker. Should get denied because Alice
@@ -285,6 +288,7 @@ class TestReplicatedJoinsLimitedByPerRoomRateLimiter(BaseMultiWorkerStreamTestCa
action=Membership.JOIN,
),
LimitExceededError,
+ by=0.5,
)
diff --git a/tests/unittest.py b/tests/unittest.py
index e6aad9ed40..18963b9e32 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -637,13 +637,13 @@ class HomeserverTestCase(TestCase):
return self.successResultOf(deferred)
def get_failure(
- self, d: Awaitable[Any], exc: Type[_ExcType]
+ self, d: Awaitable[Any], exc: Type[_ExcType], by: float = 0.0
) -> _TypedFailure[_ExcType]:
"""
Run a Deferred and get a Failure from it. The failure must be of the type `exc`.
"""
deferred: Deferred[Any] = ensureDeferred(d) # type: ignore[arg-type]
- self.pump()
+ self.pump(by)
return self.failureResultOf(deferred, exc)
def get_success_or_raise(self, d: Awaitable[TV], by: float = 0.0) -> TV:
From 8b43cc89fae94030708d20d99ee4c2017f39d95d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 21 May 2024 10:54:19 +0100
Subject: [PATCH 102/503] 1.108.0rc1
---
CHANGES.md | 46 +++++++++++++++++++++++++++++++++++++++
changelog.d/16848.feature | 1 -
changelog.d/17058.doc | 1 -
changelog.d/17098.feature | 1 -
changelog.d/17139.doc | 1 -
changelog.d/17142.bugfix | 1 -
changelog.d/17145.bugfix | 1 -
changelog.d/17150.doc | 1 -
changelog.d/17151.misc | 1 -
changelog.d/17162.misc | 1 -
changelog.d/17166.misc | 1 -
changelog.d/17170.misc | 1 -
changelog.d/17171.doc | 1 -
changelog.d/17177.bugfix | 1 -
changelog.d/17184.bugfix | 1 -
changelog.d/17186.misc | 1 -
changelog.d/17192.misc | 1 -
changelog.d/17195.misc | 1 -
changelog.d/17199.feature | 1 -
changelog.d/17200.misc | 1 -
changelog.d/17201.misc | 1 -
changelog.d/17202.misc | 1 -
changelog.d/17203.misc | 1 -
changelog.d/17208.misc | 1 -
changelog.d/17210.misc | 1 -
debian/changelog | 6 +++++
pyproject.toml | 2 +-
27 files changed, 53 insertions(+), 25 deletions(-)
delete mode 100644 changelog.d/16848.feature
delete mode 100644 changelog.d/17058.doc
delete mode 100644 changelog.d/17098.feature
delete mode 100644 changelog.d/17139.doc
delete mode 100644 changelog.d/17142.bugfix
delete mode 100644 changelog.d/17145.bugfix
delete mode 100644 changelog.d/17150.doc
delete mode 100644 changelog.d/17151.misc
delete mode 100644 changelog.d/17162.misc
delete mode 100644 changelog.d/17166.misc
delete mode 100644 changelog.d/17170.misc
delete mode 100644 changelog.d/17171.doc
delete mode 100644 changelog.d/17177.bugfix
delete mode 100644 changelog.d/17184.bugfix
delete mode 100644 changelog.d/17186.misc
delete mode 100644 changelog.d/17192.misc
delete mode 100644 changelog.d/17195.misc
delete mode 100644 changelog.d/17199.feature
delete mode 100644 changelog.d/17200.misc
delete mode 100644 changelog.d/17201.misc
delete mode 100644 changelog.d/17202.misc
delete mode 100644 changelog.d/17203.misc
delete mode 100644 changelog.d/17208.misc
delete mode 100644 changelog.d/17210.misc
diff --git a/CHANGES.md b/CHANGES.md
index 85c565a76d..09ac761802 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,49 @@
+# Synapse 1.108.0rc1 (2024-05-21)
+
+### Features
+
+- Add a feature that allows clients to query the configured federation whitelist. Disabled by default. ([\#16848](https://github.com/element-hq/synapse/issues/16848), [\#17199](https://github.com/element-hq/synapse/issues/17199))
+- Add the ability to allow numeric user IDs with a specific prefix when in the CAS flow. Contributed by Aurélien Grimpard. ([\#17098](https://github.com/element-hq/synapse/issues/17098))
+
+### Bugfixes
+
+- Fix bug where push rules would be empty in `/sync` for some accounts. Introduced in v1.93.0. ([\#17142](https://github.com/element-hq/synapse/issues/17142))
+- Add support for optional whitespace around the Federation API's `Authorization` header's parameter commas. ([\#17145](https://github.com/element-hq/synapse/issues/17145))
+- Fix bug where disabling room publication prevented public rooms being created on workers. ([\#17177](https://github.com/element-hq/synapse/issues/17177), [\#17184](https://github.com/element-hq/synapse/issues/17184))
+
+### Improved Documentation
+
+- Document [`/v1/make_knock`](https://spec.matrix.org/v1.10/server-server-api/#get_matrixfederationv1make_knockroomiduserid) and [`/v1/send_knock/](https://spec.matrix.org/v1.10/server-server-api/#put_matrixfederationv1send_knockroomideventid) federation endpoints as worker-compatible. ([\#17058](https://github.com/element-hq/synapse/issues/17058))
+- Update User Admin API with note about prefixing OIDC external_id providers. ([\#17139](https://github.com/element-hq/synapse/issues/17139))
+- Clarify the state of the created room when using the `autocreate_auto_join_room_preset` config option. ([\#17150](https://github.com/element-hq/synapse/issues/17150))
+- Update the Admin FAQ with the current libjemalloc version for latest Debian stable. Additionally update the name of the "push_rules" stream in the Workers documentation. ([\#17171](https://github.com/element-hq/synapse/issues/17171))
+
+### Internal Changes
+
+- Add note to reflect that [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) is closed but will support will remain for some time. ([\#17151](https://github.com/element-hq/synapse/issues/17151))
+- Update dependency PyO3 to 0.21. ([\#17162](https://github.com/element-hq/synapse/issues/17162))
+- Fixes linter errors found in PR #17147. ([\#17166](https://github.com/element-hq/synapse/issues/17166))
+- Bump black from 24.2.0 to 24.4.2. ([\#17170](https://github.com/element-hq/synapse/issues/17170))
+- Cache literal sync filter validation for performance. ([\#17186](https://github.com/element-hq/synapse/issues/17186))
+- Improve performance by fixing a reactor pause. ([\#17192](https://github.com/element-hq/synapse/issues/17192))
+- Route `/make_knock` and `/send_knock` federation APIs to the federation reader worker in Complement test runs. ([\#17195](https://github.com/element-hq/synapse/issues/17195))
+- Prepare sync handler to be able to return different sync responses (`SyncVersion`). ([\#17200](https://github.com/element-hq/synapse/issues/17200))
+- Organize the sync cache key parameter outside of the sync config (separate concerns). ([\#17201](https://github.com/element-hq/synapse/issues/17201))
+- Refactor `SyncResultBuilder` assembly to its own function. ([\#17202](https://github.com/element-hq/synapse/issues/17202))
+- Rename to be obvious: `joined_rooms` -> `joined_room_ids`. ([\#17203](https://github.com/element-hq/synapse/issues/17203), [\#17208](https://github.com/element-hq/synapse/issues/17208))
+- Add a short pause when rate-limiting a request. ([\#17210](https://github.com/element-hq/synapse/issues/17210))
+
+
+
+### Updates to locked dependencies
+
+* Bump cryptography from 42.0.5 to 42.0.7. ([\#17180](https://github.com/element-hq/synapse/issues/17180))
+* Bump gitpython from 3.1.41 to 3.1.43. ([\#17181](https://github.com/element-hq/synapse/issues/17181))
+* Bump immutabledict from 4.1.0 to 4.2.0. ([\#17179](https://github.com/element-hq/synapse/issues/17179))
+* Bump sentry-sdk from 1.40.3 to 2.1.1. ([\#17178](https://github.com/element-hq/synapse/issues/17178))
+* Bump serde from 1.0.200 to 1.0.201. ([\#17183](https://github.com/element-hq/synapse/issues/17183))
+* Bump serde_json from 1.0.116 to 1.0.117. ([\#17182](https://github.com/element-hq/synapse/issues/17182))
+
Synapse 1.107.0 (2024-05-14)
============================
diff --git a/changelog.d/16848.feature b/changelog.d/16848.feature
deleted file mode 100644
index 1a72bad013..0000000000
--- a/changelog.d/16848.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a feature that allows clients to query the configured federation whitelist. Disabled by default.
diff --git a/changelog.d/17058.doc b/changelog.d/17058.doc
deleted file mode 100644
index 99795bf3da..0000000000
--- a/changelog.d/17058.doc
+++ /dev/null
@@ -1 +0,0 @@
-Document [`/v1/make_knock`](https://spec.matrix.org/v1.10/server-server-api/#get_matrixfederationv1make_knockroomiduserid) and [`/v1/send_knock/](https://spec.matrix.org/v1.10/server-server-api/#put_matrixfederationv1send_knockroomideventid) federation endpoints as worker-compatible.
\ No newline at end of file
diff --git a/changelog.d/17098.feature b/changelog.d/17098.feature
deleted file mode 100644
index 43e06481b2..0000000000
--- a/changelog.d/17098.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add the ability to allow numeric user IDs with a specific prefix when in the CAS flow. Contributed by Aurélien Grimpard.
diff --git a/changelog.d/17139.doc b/changelog.d/17139.doc
deleted file mode 100644
index a6d5408cac..0000000000
--- a/changelog.d/17139.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update User Admin API with note about prefixing OIDC external_id providers.
diff --git a/changelog.d/17142.bugfix b/changelog.d/17142.bugfix
deleted file mode 100644
index 09b617aed1..0000000000
--- a/changelog.d/17142.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where push rules would be empty in `/sync` for some accounts. Introduced in v1.93.0.
diff --git a/changelog.d/17145.bugfix b/changelog.d/17145.bugfix
deleted file mode 100644
index 5c1e600f4e..0000000000
--- a/changelog.d/17145.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Add support for optional whitespace around the Federation API's `Authorization` header's parameter commas.
diff --git a/changelog.d/17150.doc b/changelog.d/17150.doc
deleted file mode 100644
index 109f192818..0000000000
--- a/changelog.d/17150.doc
+++ /dev/null
@@ -1 +0,0 @@
-Clarify the state of the created room when using the `autocreate_auto_join_room_preset` config option.
\ No newline at end of file
diff --git a/changelog.d/17151.misc b/changelog.d/17151.misc
deleted file mode 100644
index 7b23c1e18e..0000000000
--- a/changelog.d/17151.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add note to reflect that [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) is closed but will support will remain for some time.
diff --git a/changelog.d/17162.misc b/changelog.d/17162.misc
deleted file mode 100644
index 5cbc086e04..0000000000
--- a/changelog.d/17162.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update dependency PyO3 to 0.21.
diff --git a/changelog.d/17166.misc b/changelog.d/17166.misc
deleted file mode 100644
index 22c1f9922d..0000000000
--- a/changelog.d/17166.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fixes linter errors found in PR #17147.
diff --git a/changelog.d/17170.misc b/changelog.d/17170.misc
deleted file mode 100644
index 698a59deaa..0000000000
--- a/changelog.d/17170.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump black from 24.2.0 to 24.4.2.
\ No newline at end of file
diff --git a/changelog.d/17171.doc b/changelog.d/17171.doc
deleted file mode 100644
index ef9f14ac7e..0000000000
--- a/changelog.d/17171.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update the Admin FAQ with the current libjemalloc version for latest Debian stable. Additionally update the name of the "push_rules" stream in the Workers documentation.
diff --git a/changelog.d/17177.bugfix b/changelog.d/17177.bugfix
deleted file mode 100644
index db2334d690..0000000000
--- a/changelog.d/17177.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where disabling room publication prevented public rooms being created on workers.
diff --git a/changelog.d/17184.bugfix b/changelog.d/17184.bugfix
deleted file mode 100644
index db2334d690..0000000000
--- a/changelog.d/17184.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where disabling room publication prevented public rooms being created on workers.
diff --git a/changelog.d/17186.misc b/changelog.d/17186.misc
deleted file mode 100644
index 358209d5a0..0000000000
--- a/changelog.d/17186.misc
+++ /dev/null
@@ -1 +0,0 @@
-Cache literal sync filter validation for performance.
diff --git a/changelog.d/17192.misc b/changelog.d/17192.misc
deleted file mode 100644
index 25e157a50a..0000000000
--- a/changelog.d/17192.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance by fixing a reactor pause.
diff --git a/changelog.d/17195.misc b/changelog.d/17195.misc
deleted file mode 100644
index 18b8f1464d..0000000000
--- a/changelog.d/17195.misc
+++ /dev/null
@@ -1 +0,0 @@
-Route `/make_knock` and `/send_knock` federation APIs to the federation reader worker in Complement test runs.
\ No newline at end of file
diff --git a/changelog.d/17199.feature b/changelog.d/17199.feature
deleted file mode 100644
index 60d63f1bff..0000000000
--- a/changelog.d/17199.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a feature that allows clients to query the configured federation whitelist. Disabled by default.
\ No newline at end of file
diff --git a/changelog.d/17200.misc b/changelog.d/17200.misc
deleted file mode 100644
index a02b315041..0000000000
--- a/changelog.d/17200.misc
+++ /dev/null
@@ -1 +0,0 @@
-Prepare sync handler to be able to return different sync responses (`SyncVersion`).
diff --git a/changelog.d/17201.misc b/changelog.d/17201.misc
deleted file mode 100644
index 2bd08d8f06..0000000000
--- a/changelog.d/17201.misc
+++ /dev/null
@@ -1 +0,0 @@
-Organize the sync cache key parameter outside of the sync config (separate concerns).
diff --git a/changelog.d/17202.misc b/changelog.d/17202.misc
deleted file mode 100644
index 4a558c8bcf..0000000000
--- a/changelog.d/17202.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor `SyncResultBuilder` assembly to its own function.
diff --git a/changelog.d/17203.misc b/changelog.d/17203.misc
deleted file mode 100644
index 142300b1f2..0000000000
--- a/changelog.d/17203.misc
+++ /dev/null
@@ -1 +0,0 @@
-Rename to be obvious: `joined_rooms` -> `joined_room_ids`.
diff --git a/changelog.d/17208.misc b/changelog.d/17208.misc
deleted file mode 100644
index 142300b1f2..0000000000
--- a/changelog.d/17208.misc
+++ /dev/null
@@ -1 +0,0 @@
-Rename to be obvious: `joined_rooms` -> `joined_room_ids`.
diff --git a/changelog.d/17210.misc b/changelog.d/17210.misc
deleted file mode 100644
index 2059ebea7b..0000000000
--- a/changelog.d/17210.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a short pause when rate-limiting a request.
diff --git a/debian/changelog b/debian/changelog
index d228c1cc8d..a9a5011f76 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.108.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.108.0rc1.
+
+ -- Synapse Packaging team Tue, 21 May 2024 10:54:13 +0100
+
matrix-synapse-py3 (1.107.0) stable; urgency=medium
* New Synapse release 1.107.0.
diff --git a/pyproject.toml b/pyproject.toml
index dd4521ff71..00366ebb6b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.107.0"
+version = "1.108.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From f4ce0306082c3575ab0cef51984736beeb87b93e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 21 May 2024 10:55:22 +0100
Subject: [PATCH 103/503] Fixup changelog
---
CHANGES.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 09ac761802..5c27dceccf 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -13,7 +13,7 @@
### Improved Documentation
-- Document [`/v1/make_knock`](https://spec.matrix.org/v1.10/server-server-api/#get_matrixfederationv1make_knockroomiduserid) and [`/v1/send_knock/](https://spec.matrix.org/v1.10/server-server-api/#put_matrixfederationv1send_knockroomideventid) federation endpoints as worker-compatible. ([\#17058](https://github.com/element-hq/synapse/issues/17058))
+- Document [`/v1/make_knock`](https://spec.matrix.org/v1.10/server-server-api/#get_matrixfederationv1make_knockroomiduserid) and [`/v1/send_knock/`](https://spec.matrix.org/v1.10/server-server-api/#put_matrixfederationv1send_knockroomideventid) federation endpoints as worker-compatible. ([\#17058](https://github.com/element-hq/synapse/issues/17058))
- Update User Admin API with note about prefixing OIDC external_id providers. ([\#17139](https://github.com/element-hq/synapse/issues/17139))
- Clarify the state of the created room when using the `autocreate_auto_join_room_preset` config option. ([\#17150](https://github.com/element-hq/synapse/issues/17150))
- Update the Admin FAQ with the current libjemalloc version for latest Debian stable. Additionally update the name of the "push_rules" stream in the Workers documentation. ([\#17171](https://github.com/element-hq/synapse/issues/17171))
From d43042864ac1efb86dfa65526fc310c762b72819 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 May 2024 10:56:07 +0100
Subject: [PATCH 104/503] Bump anyhow from 1.0.83 to 1.0.86 (#17220)
---
Cargo.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index d6f9000138..76b81e9c6c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.83"
+version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3"
+checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "arc-swap"
From f33266232e9cbc425e4f7ce5f3f24a35867e91f8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 May 2024 10:56:16 +0100
Subject: [PATCH 105/503] Bump serde from 1.0.201 to 1.0.202 (#17221)
---
Cargo.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 76b81e9c6c..5fbc511563 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
-version = "1.0.201"
+version = "1.0.202"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c"
+checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.201"
+version = "1.0.202"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865"
+checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838"
dependencies = [
"proc-macro2",
"quote",
From 5d040f20662146c4cc4e8e815dc8d7f500199a71 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 May 2024 10:56:24 +0100
Subject: [PATCH 106/503] Bump types-psycopg2 from 2.9.21.20240311 to
2.9.21.20240417 (#17222)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 8537f37529..64036cd65c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2853,13 +2853,13 @@ files = [
[[package]]
name = "types-psycopg2"
-version = "2.9.21.20240311"
+version = "2.9.21.20240417"
description = "Typing stubs for psycopg2"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-psycopg2-2.9.21.20240311.tar.gz", hash = "sha256:722945dffa6a729bebc660f14137f37edfcead5a2c15eb234212a7d017ee8072"},
- {file = "types_psycopg2-2.9.21.20240311-py3-none-any.whl", hash = "sha256:2e137ae2b516ee0dbaab6f555086b6cfb723ba4389d67f551b0336adf4efcf1b"},
+ {file = "types-psycopg2-2.9.21.20240417.tar.gz", hash = "sha256:05db256f4a459fb21a426b8e7fca0656c3539105ff0208eaf6bdaf406a387087"},
+ {file = "types_psycopg2-2.9.21.20240417-py3-none-any.whl", hash = "sha256:644d6644d64ebbe37203229b00771012fb3b3bddd507a129a2e136485990e4f8"},
]
[[package]]
From 439a095edcb2cddf10588f0327fd4c941dd1343a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 May 2024 10:56:40 +0100
Subject: [PATCH 107/503] Bump pyasn1 from 0.5.1 to 0.6.0 (#17223)
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 64036cd65c..675e55e062 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1736,13 +1736,13 @@ psycopg2 = "*"
[[package]]
name = "pyasn1"
-version = "0.5.1"
+version = "0.6.0"
description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
+python-versions = ">=3.8"
files = [
- {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"},
- {file = "pyasn1-0.5.1.tar.gz", hash = "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"},
+ {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"},
+ {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"},
]
[[package]]
From d464ee3602a8a0eddb699e164a5b6c978f1333c6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 May 2024 10:57:08 +0100
Subject: [PATCH 108/503] Bump bcrypt from 4.1.2 to 4.1.3 (#17224)
---
poetry.lock | 56 ++++++++++++++++++++++++++---------------------------
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 675e55e062..397f3ff8e3 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -67,38 +67,38 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
[[package]]
name = "bcrypt"
-version = "4.1.2"
+version = "4.1.3"
description = "Modern password hashing for your software and your servers"
optional = false
python-versions = ">=3.7"
files = [
- {file = "bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e"},
- {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1"},
- {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326"},
- {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c"},
- {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966"},
- {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2"},
- {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c"},
- {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5"},
- {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0"},
- {file = "bcrypt-4.1.2-cp37-abi3-win32.whl", hash = "sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369"},
- {file = "bcrypt-4.1.2-cp37-abi3-win_amd64.whl", hash = "sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551"},
- {file = "bcrypt-4.1.2-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63"},
- {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483"},
- {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc"},
- {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7"},
- {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb"},
- {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1"},
- {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4"},
- {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c"},
- {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a"},
- {file = "bcrypt-4.1.2-cp39-abi3-win32.whl", hash = "sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f"},
- {file = "bcrypt-4.1.2-cp39-abi3-win_amd64.whl", hash = "sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42"},
- {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946"},
- {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d"},
- {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab"},
- {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb"},
- {file = "bcrypt-4.1.2.tar.gz", hash = "sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258"},
+ {file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"},
+ {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"},
+ {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"},
+ {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"},
+ {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"},
+ {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"},
+ {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"},
+ {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"},
+ {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"},
+ {file = "bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"},
+ {file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"},
+ {file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"},
+ {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"},
+ {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"},
+ {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"},
+ {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"},
+ {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"},
+ {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"},
+ {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"},
+ {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"},
+ {file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"},
+ {file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"},
+ {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"},
+ {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"},
+ {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"},
+ {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"},
+ {file = "bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"},
]
[package.extras]
From 9956f35c6a74056c4bff10fa97e9fd4f451662b0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 May 2024 10:57:31 +0100
Subject: [PATCH 109/503] Bump twine from 5.0.0 to 5.1.0 (#17225)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 397f3ff8e3..d6dc27bfae 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2673,13 +2673,13 @@ docs = ["sphinx (<7.0.0)"]
[[package]]
name = "twine"
-version = "5.0.0"
+version = "5.1.0"
description = "Collection of utilities for publishing packages on PyPI"
optional = false
python-versions = ">=3.8"
files = [
- {file = "twine-5.0.0-py3-none-any.whl", hash = "sha256:a262933de0b484c53408f9edae2e7821c1c45a3314ff2df9bdd343aa7ab8edc0"},
- {file = "twine-5.0.0.tar.gz", hash = "sha256:89b0cc7d370a4b66421cc6102f269aa910fe0f1861c124f573cf2ddedbc10cf4"},
+ {file = "twine-5.1.0-py3-none-any.whl", hash = "sha256:fe1d814395bfe50cfbe27783cb74efe93abeac3f66deaeb6c8390e4e92bacb43"},
+ {file = "twine-5.1.0.tar.gz", hash = "sha256:4d74770c88c4fcaf8134d2a6a9d863e40f08255ff7d8e2acb3cbbd57d25f6e9d"},
]
[package.dependencies]
From e0d420fbd1deb00314bccf5ab1d9b2a605149d91 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 21 May 2024 10:57:58 +0100
Subject: [PATCH 110/503] Fixup changelog
---
CHANGES.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 5c27dceccf..2d2474108c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -20,7 +20,7 @@
### Internal Changes
-- Add note to reflect that [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) is closed but will support will remain for some time. ([\#17151](https://github.com/element-hq/synapse/issues/17151))
+- Add note to reflect that [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) is closed but will remain supported for some time. ([\#17151](https://github.com/element-hq/synapse/issues/17151))
- Update dependency PyO3 to 0.21. ([\#17162](https://github.com/element-hq/synapse/issues/17162))
- Fixes linter errors found in PR #17147. ([\#17166](https://github.com/element-hq/synapse/issues/17166))
- Bump black from 24.2.0 to 24.4.2. ([\#17170](https://github.com/element-hq/synapse/issues/17170))
From b5facbac0f2d5f6f0e83d7cac43f8de02ce6742f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 21 May 2024 16:48:20 +0100
Subject: [PATCH 111/503] Improve perf of sync device lists (#17216)
Re-introduces #17191, and includes #17197 and #17214
The basic idea is to stop calling `get_rooms_for_user` everywhere, and
instead use the table `device_lists_changes_in_room`.
Commits reviewable one-by-one.
---
changelog.d/17216.misc | 1 +
synapse/handlers/device.py | 22 +++++-
synapse/handlers/sync.py | 38 ++--------
synapse/replication/tcp/client.py | 15 ++--
synapse/storage/databases/main/devices.py | 89 +++++++++++++++++------
5 files changed, 103 insertions(+), 62 deletions(-)
create mode 100644 changelog.d/17216.misc
diff --git a/changelog.d/17216.misc b/changelog.d/17216.misc
new file mode 100644
index 0000000000..bd55eeaa33
--- /dev/null
+++ b/changelog.d/17216.misc
@@ -0,0 +1 @@
+Improve performance of calculating device lists changes in `/sync`.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 67953a3ed9..55842e7c7b 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -159,20 +159,32 @@ class DeviceWorkerHandler:
@cancellable
async def get_device_changes_in_shared_rooms(
- self, user_id: str, room_ids: StrCollection, from_token: StreamToken
+ self,
+ user_id: str,
+ room_ids: StrCollection,
+ from_token: StreamToken,
+ now_token: Optional[StreamToken] = None,
) -> Set[str]:
"""Get the set of users whose devices have changed who share a room with
the given user.
"""
+ now_device_lists_key = self.store.get_device_stream_token()
+ if now_token:
+ now_device_lists_key = now_token.device_list_key
+
changed_users = await self.store.get_device_list_changes_in_rooms(
- room_ids, from_token.device_list_key
+ room_ids,
+ from_token.device_list_key,
+ now_device_lists_key,
)
if changed_users is not None:
# We also check if the given user has changed their device. If
# they're in no rooms then the above query won't include them.
changed = await self.store.get_users_whose_devices_changed(
- from_token.device_list_key, [user_id]
+ from_token.device_list_key,
+ [user_id],
+ to_key=now_device_lists_key,
)
changed_users.update(changed)
return changed_users
@@ -190,7 +202,9 @@ class DeviceWorkerHandler:
tracked_users.add(user_id)
changed = await self.store.get_users_whose_devices_changed(
- from_token.device_list_key, tracked_users
+ from_token.device_list_key,
+ tracked_users,
+ to_key=now_device_lists_key,
)
return changed
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index d3d40e8682..b7917a99d6 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1886,38 +1886,14 @@ class SyncHandler:
# Step 1a, check for changes in devices of users we share a room
# with
- #
- # We do this in two different ways depending on what we have cached.
- # If we already have a list of all the user that have changed since
- # the last sync then it's likely more efficient to compare the rooms
- # they're in with the rooms the syncing user is in.
- #
- # If we don't have that info cached then we get all the users that
- # share a room with our user and check if those users have changed.
- cache_result = self.store.get_cached_device_list_changes(
- since_token.device_list_key
- )
- if cache_result.hit:
- changed_users = cache_result.entities
-
- result = await self.store.get_rooms_for_users(changed_users)
-
- for changed_user_id, entries in result.items():
- # Check if the changed user shares any rooms with the user,
- # or if the changed user is the syncing user (as we always
- # want to include device list updates of their own devices).
- if user_id == changed_user_id or any(
- rid in joined_room_ids for rid in entries
- ):
- users_that_have_changed.add(changed_user_id)
- else:
- users_that_have_changed = (
- await self._device_handler.get_device_changes_in_shared_rooms(
- user_id,
- sync_result_builder.joined_room_ids,
- from_token=since_token,
- )
+ users_that_have_changed = (
+ await self._device_handler.get_device_changes_in_shared_rooms(
+ user_id,
+ sync_result_builder.joined_room_ids,
+ from_token=since_token,
+ now_token=sync_result_builder.now_token,
)
+ )
# Step 1b, check for newly joined rooms
for room_id in newly_joined_rooms:
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 5e5387fdcb..2d6d49eed7 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -112,6 +112,15 @@ class ReplicationDataHandler:
token: stream token for this batch of rows
rows: a list of Stream.ROW_TYPE objects as returned by Stream.parse_row.
"""
+ all_room_ids: Set[str] = set()
+ if stream_name == DeviceListsStream.NAME:
+ if any(row.entity.startswith("@") and not row.is_signature for row in rows):
+ prev_token = self.store.get_device_stream_token()
+ all_room_ids = await self.store.get_all_device_list_changes(
+ prev_token, token
+ )
+ self.store.device_lists_in_rooms_have_changed(all_room_ids, token)
+
self.store.process_replication_rows(stream_name, instance_name, token, rows)
# NOTE: this must be called after process_replication_rows to ensure any
# cache invalidations are first handled before any stream ID advances.
@@ -146,12 +155,6 @@ class ReplicationDataHandler:
StreamKeyType.TO_DEVICE, token, users=entities
)
elif stream_name == DeviceListsStream.NAME:
- all_room_ids: Set[str] = set()
- for row in rows:
- if row.entity.startswith("@") and not row.is_signature:
- room_ids = await self.store.get_rooms_for_user(row.entity)
- all_room_ids.update(room_ids)
-
# `all_room_ids` can be large, so let's wake up those streams in batches
for batched_room_ids in batch_iter(all_room_ids, 100):
self.notifier.on_new_event(
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 8dbcb3f5a0..f4410b5c02 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -70,10 +70,7 @@ from synapse.types import (
from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
-from synapse.util.caches.stream_change_cache import (
- AllEntitiesChangedResult,
- StreamChangeCache,
-)
+from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import shortstr
@@ -132,6 +129,20 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
prefilled_cache=device_list_prefill,
)
+ device_list_room_prefill, min_device_list_room_id = self.db_pool.get_cache_dict(
+ db_conn,
+ "device_lists_changes_in_room",
+ entity_column="room_id",
+ stream_column="stream_id",
+ max_value=device_list_max,
+ limit=10000,
+ )
+ self._device_list_room_stream_cache = StreamChangeCache(
+ "DeviceListRoomStreamChangeCache",
+ min_device_list_room_id,
+ prefilled_cache=device_list_room_prefill,
+ )
+
(
user_signature_stream_prefill,
user_signature_stream_list_id,
@@ -209,6 +220,13 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
row.entity, token
)
+ def device_lists_in_rooms_have_changed(
+ self, room_ids: StrCollection, token: int
+ ) -> None:
+ "Record that device lists have changed in rooms"
+ for room_id in room_ids:
+ self._device_list_room_stream_cache.entity_has_changed(room_id, token)
+
def get_device_stream_token(self) -> int:
return self._device_list_id_gen.get_current_token()
@@ -832,16 +850,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
)
return {device[0]: db_to_json(device[1]) for device in devices}
- def get_cached_device_list_changes(
- self,
- from_key: int,
- ) -> AllEntitiesChangedResult:
- """Get set of users whose devices have changed since `from_key`, or None
- if that information is not in our cache.
- """
-
- return self._device_list_stream_cache.get_all_entities_changed(from_key)
-
@cancellable
async def get_all_devices_changed(
self,
@@ -1457,7 +1465,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
@cancellable
async def get_device_list_changes_in_rooms(
- self, room_ids: Collection[str], from_id: int
+ self, room_ids: Collection[str], from_id: int, to_id: int
) -> Optional[Set[str]]:
"""Return the set of users whose devices have changed in the given rooms
since the given stream ID.
@@ -1473,9 +1481,15 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
if min_stream_id > from_id:
return None
+ changed_room_ids = self._device_list_room_stream_cache.get_entities_changed(
+ room_ids, from_id
+ )
+ if not changed_room_ids:
+ return set()
+
sql = """
SELECT DISTINCT user_id FROM device_lists_changes_in_room
- WHERE {clause} AND stream_id >= ?
+ WHERE {clause} AND stream_id > ? AND stream_id <= ?
"""
def _get_device_list_changes_in_rooms_txn(
@@ -1487,11 +1501,12 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
return {user_id for user_id, in txn}
changes = set()
- for chunk in batch_iter(room_ids, 1000):
+ for chunk in batch_iter(changed_room_ids, 1000):
clause, args = make_in_list_sql_clause(
self.database_engine, "room_id", chunk
)
args.append(from_id)
+ args.append(to_id)
changes |= await self.db_pool.runInteraction(
"get_device_list_changes_in_rooms",
@@ -1502,6 +1517,34 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
return changes
+ async def get_all_device_list_changes(self, from_id: int, to_id: int) -> Set[str]:
+ """Return the set of rooms where devices have changed since the given
+ stream ID.
+
+ Will raise an exception if the given stream ID is too old.
+ """
+
+ min_stream_id = await self._get_min_device_lists_changes_in_room()
+
+ if min_stream_id > from_id:
+ raise Exception("stream ID is too old")
+
+ sql = """
+ SELECT DISTINCT room_id FROM device_lists_changes_in_room
+ WHERE stream_id > ? AND stream_id <= ?
+ """
+
+ def _get_all_device_list_changes_txn(
+ txn: LoggingTransaction,
+ ) -> Set[str]:
+ txn.execute(sql, (from_id, to_id))
+ return {room_id for room_id, in txn}
+
+ return await self.db_pool.runInteraction(
+ "get_all_device_list_changes",
+ _get_all_device_list_changes_txn,
+ )
+
async def get_device_list_changes_in_room(
self, room_id: str, min_stream_id: int
) -> Collection[Tuple[str, str]]:
@@ -1962,8 +2005,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
async def add_device_change_to_streams(
self,
user_id: str,
- device_ids: Collection[str],
- room_ids: Collection[str],
+ device_ids: StrCollection,
+ room_ids: StrCollection,
) -> Optional[int]:
"""Persist that a user's devices have been updated, and which hosts
(if any) should be poked.
@@ -2122,8 +2165,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
self,
txn: LoggingTransaction,
user_id: str,
- device_ids: Iterable[str],
- room_ids: Collection[str],
+ device_ids: StrCollection,
+ room_ids: StrCollection,
stream_ids: List[int],
context: Dict[str, str],
) -> None:
@@ -2161,6 +2204,10 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
],
)
+ txn.call_after(
+ self.device_lists_in_rooms_have_changed, room_ids, max(stream_ids)
+ )
+
async def get_uncoverted_outbound_room_pokes(
self, start_stream_id: int, start_room_id: str, limit: int = 10
) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]:
From 6a9a641fb86b04587840bcb6b76af9a0acef9b54 Mon Sep 17 00:00:00 2001
From: devonh
Date: Tue, 21 May 2024 20:09:17 +0000
Subject: [PATCH 112/503] Bring auto-accept invite logic into Synapse (#17147)
This PR ports the logic from the
[synapse_auto_accept_invite](https://github.com/matrix-org/synapse-auto-accept-invite)
module into synapse.
I went with the naive approach of injecting the "module" next to where
third party modules are currently loaded. If there is a better/preferred
way to handle this, I'm all ears. It wasn't obvious to me if there was a
better location to add this logic that would cleanly apply to all
incoming invite events.
Relies on https://github.com/element-hq/synapse/pull/17166 to fix linter
errors.
---
changelog.d/17147.feature | 1 +
.../configuration/config_documentation.md | 29 +
synapse/app/_base.py | 6 +
synapse/config/_base.pyi | 2 +
synapse/config/auto_accept_invites.py | 43 ++
synapse/config/homeserver.py | 2 +
synapse/events/auto_accept_invites.py | 196 ++++++
synapse/handlers/sso.py | 2 +-
tests/events/test_auto_accept_invites.py | 657 ++++++++++++++++++
tests/rest/client/utils.py | 2 +
tests/server.py | 6 +
11 files changed, 945 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17147.feature
create mode 100644 synapse/config/auto_accept_invites.py
create mode 100644 synapse/events/auto_accept_invites.py
create mode 100644 tests/events/test_auto_accept_invites.py
diff --git a/changelog.d/17147.feature b/changelog.d/17147.feature
new file mode 100644
index 0000000000..7c2cdb6bdf
--- /dev/null
+++ b/changelog.d/17147.feature
@@ -0,0 +1 @@
+Add the ability to auto-accept invites on the behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index e04fdfdfb0..2c917d1f8e 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -4595,3 +4595,32 @@ background_updates:
min_batch_size: 10
default_batch_size: 50
```
+---
+## Auto Accept Invites
+Configuration settings related to automatically accepting invites.
+
+---
+### `auto_accept_invites`
+
+Automatically accepting invites controls whether users are presented with an invite request or are instead
+automatically joined to a room when receiving an invite. Set the `enabled` sub-option to true to
+enable auto-accepting invites. Defaults to false.
+This setting has the following sub-options:
+* `enabled`: Whether to run the auto-accept invites logic. Defaults to false.
+* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
+ for direct messages. Defaults to false.
+* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
+* `worker_to_run_on`: Which worker to run this module on. This must match the `worker_name` of the worker.
+
+NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed.
+The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join
+events could be generated from a single invite.
+
+Example configuration:
+```yaml
+auto_accept_invites:
+ enabled: true
+ only_for_direct_messages: true
+ only_from_local_users: true
+ worker_to_run_on: "worker_1"
+```
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 3182608f73..67e0df1459 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -68,6 +68,7 @@ from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
from synapse.crypto import context_factory
+from synapse.events.auto_accept_invites import InviteAutoAccepter
from synapse.events.presence_router import load_legacy_presence_router
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseSite
@@ -582,6 +583,11 @@ async def start(hs: "HomeServer") -> None:
m = module(config, module_api)
logger.info("Loaded module %s", m)
+ if hs.config.auto_accept_invites.enabled:
+ # Start the local auto_accept_invites module.
+ m = InviteAutoAccepter(hs.config.auto_accept_invites, module_api)
+ logger.info("Loaded local module %s", m)
+
load_legacy_spam_checkers(hs)
load_legacy_third_party_event_rules(hs)
load_legacy_presence_router(hs)
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index fc51aed234..d9cb0da38b 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -23,6 +23,7 @@ from synapse.config import ( # noqa: F401
api,
appservice,
auth,
+ auto_accept_invites,
background_updates,
cache,
captcha,
@@ -120,6 +121,7 @@ class RootConfig:
federation: federation.FederationConfig
retention: retention.RetentionConfig
background_updates: background_updates.BackgroundUpdateConfig
+ auto_accept_invites: auto_accept_invites.AutoAcceptInvitesConfig
config_classes: List[Type["Config"]] = ...
config_files: List[str]
diff --git a/synapse/config/auto_accept_invites.py b/synapse/config/auto_accept_invites.py
new file mode 100644
index 0000000000..d90e13a510
--- /dev/null
+++ b/synapse/config/auto_accept_invites.py
@@ -0,0 +1,43 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+from typing import Any
+
+from synapse.types import JsonDict
+
+from ._base import Config
+
+
+class AutoAcceptInvitesConfig(Config):
+ section = "auto_accept_invites"
+
+ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
+ auto_accept_invites_config = config.get("auto_accept_invites") or {}
+
+ self.enabled = auto_accept_invites_config.get("enabled", False)
+
+ self.accept_invites_only_for_direct_messages = auto_accept_invites_config.get(
+ "only_for_direct_messages", False
+ )
+
+ self.accept_invites_only_from_local_users = auto_accept_invites_config.get(
+ "only_from_local_users", False
+ )
+
+ self.worker_to_run_on = auto_accept_invites_config.get("worker_to_run_on")
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 72e93ed04f..e36c0bd6ae 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -23,6 +23,7 @@ from .account_validity import AccountValidityConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
from .auth import AuthConfig
+from .auto_accept_invites import AutoAcceptInvitesConfig
from .background_updates import BackgroundUpdateConfig
from .cache import CacheConfig
from .captcha import CaptchaConfig
@@ -105,4 +106,5 @@ class HomeServerConfig(RootConfig):
RedisConfig,
ExperimentalConfig,
BackgroundUpdateConfig,
+ AutoAcceptInvitesConfig,
]
diff --git a/synapse/events/auto_accept_invites.py b/synapse/events/auto_accept_invites.py
new file mode 100644
index 0000000000..d88ec51d9d
--- /dev/null
+++ b/synapse/events/auto_accept_invites.py
@@ -0,0 +1,196 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright 2021 The Matrix.org Foundation C.I.C
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import logging
+from http import HTTPStatus
+from typing import Any, Dict, Tuple
+
+from synapse.api.constants import AccountDataTypes, EventTypes, Membership
+from synapse.api.errors import SynapseError
+from synapse.config.auto_accept_invites import AutoAcceptInvitesConfig
+from synapse.module_api import EventBase, ModuleApi, run_as_background_process
+
+logger = logging.getLogger(__name__)
+
+
+class InviteAutoAccepter:
+ def __init__(self, config: AutoAcceptInvitesConfig, api: ModuleApi):
+ # Keep a reference to the Module API.
+ self._api = api
+ self._config = config
+
+ if not self._config.enabled:
+ return
+
+ should_run_on_this_worker = config.worker_to_run_on == self._api.worker_name
+
+ if not should_run_on_this_worker:
+ logger.info(
+ "Not accepting invites on this worker (configured: %r, here: %r)",
+ config.worker_to_run_on,
+ self._api.worker_name,
+ )
+ return
+
+ logger.info(
+ "Accepting invites on this worker (here: %r)", self._api.worker_name
+ )
+
+ # Register the callback.
+ self._api.register_third_party_rules_callbacks(
+ on_new_event=self.on_new_event,
+ )
+
+ async def on_new_event(self, event: EventBase, *args: Any) -> None:
+ """Listens for new events, and if the event is an invite for a local user then
+ automatically accepts it.
+
+ Args:
+ event: The incoming event.
+ """
+ # Check if the event is an invite for a local user.
+ is_invite_for_local_user = (
+ event.type == EventTypes.Member
+ and event.is_state()
+ and event.membership == Membership.INVITE
+ and self._api.is_mine(event.state_key)
+ )
+
+ # Only accept invites for direct messages if the configuration mandates it.
+ is_direct_message = event.content.get("is_direct", False)
+ is_allowed_by_direct_message_rules = (
+ not self._config.accept_invites_only_for_direct_messages
+ or is_direct_message is True
+ )
+
+ # Only accept invites from remote users if the configuration mandates it.
+ is_from_local_user = self._api.is_mine(event.sender)
+ is_allowed_by_local_user_rules = (
+ not self._config.accept_invites_only_from_local_users
+ or is_from_local_user is True
+ )
+
+ if (
+ is_invite_for_local_user
+ and is_allowed_by_direct_message_rules
+ and is_allowed_by_local_user_rules
+ ):
+ # Make the user join the room. We run this as a background process to circumvent a race condition
+ # that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12)
+ run_as_background_process(
+ "retry_make_join",
+ self._retry_make_join,
+ event.state_key,
+ event.state_key,
+ event.room_id,
+ "join",
+ bg_start_span=False,
+ )
+
+ if is_direct_message:
+ # Mark this room as a direct message!
+ await self._mark_room_as_direct_message(
+ event.state_key, event.sender, event.room_id
+ )
+
+ async def _mark_room_as_direct_message(
+ self, user_id: str, dm_user_id: str, room_id: str
+ ) -> None:
+ """
+ Marks a room (`room_id`) as a direct message with the counterparty `dm_user_id`
+ from the perspective of the user `user_id`.
+
+ Args:
+ user_id: the user for whom the membership is changing
+ dm_user_id: the user performing the membership change
+ room_id: room id of the room the user is invited to
+ """
+
+ # This is a dict of User IDs to tuples of Room IDs
+ # (get_global will return a frozendict of tuples as it freezes the data,
+ # but we should accept either frozen or unfrozen variants.)
+ # Be careful: we convert the outer frozendict into a dict here,
+ # but the contents of the dict are still frozen (tuples in lieu of lists,
+ # etc.)
+ dm_map: Dict[str, Tuple[str, ...]] = dict(
+ await self._api.account_data_manager.get_global(
+ user_id, AccountDataTypes.DIRECT
+ )
+ or {}
+ )
+
+ if dm_user_id not in dm_map:
+ dm_map[dm_user_id] = (room_id,)
+ else:
+ dm_rooms_for_user = dm_map[dm_user_id]
+ assert isinstance(dm_rooms_for_user, (tuple, list))
+
+ dm_map[dm_user_id] = tuple(dm_rooms_for_user) + (room_id,)
+
+ await self._api.account_data_manager.put_global(
+ user_id, AccountDataTypes.DIRECT, dm_map
+ )
+
+ async def _retry_make_join(
+ self, sender: str, target: str, room_id: str, new_membership: str
+ ) -> None:
+ """
+ A function to retry sending the `make_join` request with an increasing backoff. This is
+ implemented to work around a race condition when receiving invites over federation.
+
+ Args:
+ sender: the user performing the membership change
+ target: the user for whom the membership is changing
+ room_id: room id of the room to join to
+ new_membership: the type of membership event (in this case will be "join")
+ """
+
+ sleep = 0
+ retries = 0
+ join_event = None
+
+ while retries < 5:
+ try:
+ await self._api.sleep(sleep)
+ join_event = await self._api.update_room_membership(
+ sender=sender,
+ target=target,
+ room_id=room_id,
+ new_membership=new_membership,
+ )
+ except SynapseError as e:
+ if e.code == HTTPStatus.FORBIDDEN:
+ logger.debug(
+ f"Update_room_membership was forbidden. This can sometimes be expected for remote invites. Exception: {e}"
+ )
+ else:
+ logger.warn(
+ f"Update_room_membership raised the following unexpected (SynapseError) exception: {e}"
+ )
+ except Exception as e:
+ logger.warn(
+ f"Update_room_membership raised the following unexpected exception: {e}"
+ )
+
+ sleep = 2**retries
+ retries += 1
+
+ if join_event is not None:
+ break
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index f275d4f35a..ee74289b6c 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -817,7 +817,7 @@ class SsoHandler:
server_name = profile["avatar_url"].split("/")[-2]
media_id = profile["avatar_url"].split("/")[-1]
if self._is_mine_server_name(server_name):
- media = await self._media_repo.store.get_local_media(media_id)
+ media = await self._media_repo.store.get_local_media(media_id) # type: ignore[has-type]
if media is not None and upload_name == media.upload_name:
logger.info("skipping saving the user avatar")
return True
diff --git a/tests/events/test_auto_accept_invites.py b/tests/events/test_auto_accept_invites.py
new file mode 100644
index 0000000000..7fb4d4fa90
--- /dev/null
+++ b/tests/events/test_auto_accept_invites.py
@@ -0,0 +1,657 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright 2021 The Matrix.org Foundation C.I.C
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import asyncio
+from asyncio import Future
+from http import HTTPStatus
+from typing import Any, Awaitable, Dict, List, Optional, Tuple, TypeVar, cast
+from unittest.mock import Mock
+
+import attr
+from parameterized import parameterized
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import EventTypes
+from synapse.api.errors import SynapseError
+from synapse.config.auto_accept_invites import AutoAcceptInvitesConfig
+from synapse.events.auto_accept_invites import InviteAutoAccepter
+from synapse.federation.federation_base import event_from_pdu_json
+from synapse.handlers.sync import JoinedSyncResult, SyncRequestKey, SyncVersion
+from synapse.module_api import ModuleApi
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.types import StreamToken, create_requester
+from synapse.util import Clock
+
+from tests.handlers.test_sync import generate_sync_config
+from tests.unittest import (
+ FederatingHomeserverTestCase,
+ HomeserverTestCase,
+ TestCase,
+ override_config,
+)
+
+
+class AutoAcceptInvitesTestCase(FederatingHomeserverTestCase):
+ """
+ Integration test cases for auto-accepting invites.
+ """
+
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ hs = self.setup_test_homeserver()
+ self.handler = hs.get_federation_handler()
+ self.store = hs.get_datastores().main
+ return hs
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.sync_handler = self.hs.get_sync_handler()
+ self.module_api = hs.get_module_api()
+
+ @parameterized.expand(
+ [
+ [False],
+ [True],
+ ]
+ )
+ @override_config(
+ {
+ "auto_accept_invites": {
+ "enabled": True,
+ },
+ }
+ )
+ def test_auto_accept_invites(self, direct_room: bool) -> None:
+ """Test that a user automatically joins a room when invited, if the
+ module is enabled.
+ """
+ # A local user who sends an invite
+ inviting_user_id = self.register_user("inviter", "pass")
+ inviting_user_tok = self.login("inviter", "pass")
+
+ # A local user who receives an invite
+ invited_user_id = self.register_user("invitee", "pass")
+ self.login("invitee", "pass")
+
+ # Create a room and send an invite to the other user
+ room_id = self.helper.create_room_as(
+ inviting_user_id,
+ is_public=False,
+ tok=inviting_user_tok,
+ )
+
+ self.helper.invite(
+ room_id,
+ inviting_user_id,
+ invited_user_id,
+ tok=inviting_user_tok,
+ extra_data={"is_direct": direct_room},
+ )
+
+ # Check that the invite receiving user has automatically joined the room when syncing
+ join_updates, _ = sync_join(self, invited_user_id)
+ self.assertEqual(len(join_updates), 1)
+
+ join_update: JoinedSyncResult = join_updates[0]
+ self.assertEqual(join_update.room_id, room_id)
+
+ @override_config(
+ {
+ "auto_accept_invites": {
+ "enabled": False,
+ },
+ }
+ )
+ def test_module_not_enabled(self) -> None:
+ """Test that a user does not automatically join a room when invited,
+ if the module is not enabled.
+ """
+ # A local user who sends an invite
+ inviting_user_id = self.register_user("inviter", "pass")
+ inviting_user_tok = self.login("inviter", "pass")
+
+ # A local user who receives an invite
+ invited_user_id = self.register_user("invitee", "pass")
+ self.login("invitee", "pass")
+
+ # Create a room and send an invite to the other user
+ room_id = self.helper.create_room_as(
+ inviting_user_id, is_public=False, tok=inviting_user_tok
+ )
+
+ self.helper.invite(
+ room_id,
+ inviting_user_id,
+ invited_user_id,
+ tok=inviting_user_tok,
+ )
+
+ # Check that the invite receiving user has not automatically joined the room when syncing
+ join_updates, _ = sync_join(self, invited_user_id)
+ self.assertEqual(len(join_updates), 0)
+
+ @override_config(
+ {
+ "auto_accept_invites": {
+ "enabled": True,
+ },
+ }
+ )
+ def test_invite_from_remote_user(self) -> None:
+ """Test that an invite from a remote user results in the invited user
+ automatically joining the room.
+ """
+ # A remote user who sends the invite
+ remote_server = "otherserver"
+ remote_user = "@otheruser:" + remote_server
+
+ # A local user who creates the room
+ creator_user_id = self.register_user("creator", "pass")
+ creator_user_tok = self.login("creator", "pass")
+
+ # A local user who receives an invite
+ invited_user_id = self.register_user("invitee", "pass")
+ self.login("invitee", "pass")
+
+ room_id = self.helper.create_room_as(
+ room_creator=creator_user_id, tok=creator_user_tok
+ )
+ room_version = self.get_success(self.store.get_room_version(room_id))
+
+ invite_event = event_from_pdu_json(
+ {
+ "type": EventTypes.Member,
+ "content": {"membership": "invite"},
+ "room_id": room_id,
+ "sender": remote_user,
+ "state_key": invited_user_id,
+ "depth": 32,
+ "prev_events": [],
+ "auth_events": [],
+ "origin_server_ts": self.clock.time_msec(),
+ },
+ room_version,
+ )
+ self.get_success(
+ self.handler.on_invite_request(
+ remote_server,
+ invite_event,
+ invite_event.room_version,
+ )
+ )
+
+ # Check that the invite receiving user has automatically joined the room when syncing
+ join_updates, _ = sync_join(self, invited_user_id)
+ self.assertEqual(len(join_updates), 1)
+
+ join_update: JoinedSyncResult = join_updates[0]
+ self.assertEqual(join_update.room_id, room_id)
+
+ @parameterized.expand(
+ [
+ [False, False],
+ [True, True],
+ ]
+ )
+ @override_config(
+ {
+ "auto_accept_invites": {
+ "enabled": True,
+ "only_for_direct_messages": True,
+ },
+ }
+ )
+ def test_accept_invite_direct_message(
+ self,
+ direct_room: bool,
+ expect_auto_join: bool,
+ ) -> None:
+ """Tests that, if the module is configured to only accept DM invites, invites to DM rooms are still
+ automatically accepted. Otherwise they are rejected.
+ """
+ # A local user who sends an invite
+ inviting_user_id = self.register_user("inviter", "pass")
+ inviting_user_tok = self.login("inviter", "pass")
+
+ # A local user who receives an invite
+ invited_user_id = self.register_user("invitee", "pass")
+ self.login("invitee", "pass")
+
+ # Create a room and send an invite to the other user
+ room_id = self.helper.create_room_as(
+ inviting_user_id,
+ is_public=False,
+ tok=inviting_user_tok,
+ )
+
+ self.helper.invite(
+ room_id,
+ inviting_user_id,
+ invited_user_id,
+ tok=inviting_user_tok,
+ extra_data={"is_direct": direct_room},
+ )
+
+ if expect_auto_join:
+ # Check that the invite receiving user has automatically joined the room when syncing
+ join_updates, _ = sync_join(self, invited_user_id)
+ self.assertEqual(len(join_updates), 1)
+
+ join_update: JoinedSyncResult = join_updates[0]
+ self.assertEqual(join_update.room_id, room_id)
+ else:
+ # Check that the invite receiving user has not automatically joined the room when syncing
+ join_updates, _ = sync_join(self, invited_user_id)
+ self.assertEqual(len(join_updates), 0)
+
+ @parameterized.expand(
+ [
+ [False, True],
+ [True, False],
+ ]
+ )
+ @override_config(
+ {
+ "auto_accept_invites": {
+ "enabled": True,
+ "only_from_local_users": True,
+ },
+ }
+ )
+ def test_accept_invite_local_user(
+ self, remote_inviter: bool, expect_auto_join: bool
+ ) -> None:
+ """Tests that, if the module is configured to only accept invites from local users, invites
+ from local users are still automatically accepted. Otherwise they are rejected.
+ """
+ # A local user who sends an invite
+ creator_user_id = self.register_user("inviter", "pass")
+ creator_user_tok = self.login("inviter", "pass")
+
+ # A local user who receives an invite
+ invited_user_id = self.register_user("invitee", "pass")
+ self.login("invitee", "pass")
+
+ # Create a room and send an invite to the other user
+ room_id = self.helper.create_room_as(
+ creator_user_id, is_public=False, tok=creator_user_tok
+ )
+
+ if remote_inviter:
+ room_version = self.get_success(self.store.get_room_version(room_id))
+
+ # A remote user who sends the invite
+ remote_server = "otherserver"
+ remote_user = "@otheruser:" + remote_server
+
+ invite_event = event_from_pdu_json(
+ {
+ "type": EventTypes.Member,
+ "content": {"membership": "invite"},
+ "room_id": room_id,
+ "sender": remote_user,
+ "state_key": invited_user_id,
+ "depth": 32,
+ "prev_events": [],
+ "auth_events": [],
+ "origin_server_ts": self.clock.time_msec(),
+ },
+ room_version,
+ )
+ self.get_success(
+ self.handler.on_invite_request(
+ remote_server,
+ invite_event,
+ invite_event.room_version,
+ )
+ )
+ else:
+ self.helper.invite(
+ room_id,
+ creator_user_id,
+ invited_user_id,
+ tok=creator_user_tok,
+ )
+
+ if expect_auto_join:
+ # Check that the invite receiving user has automatically joined the room when syncing
+ join_updates, _ = sync_join(self, invited_user_id)
+ self.assertEqual(len(join_updates), 1)
+
+ join_update: JoinedSyncResult = join_updates[0]
+ self.assertEqual(join_update.room_id, room_id)
+ else:
+ # Check that the invite receiving user has not automatically joined the room when syncing
+ join_updates, _ = sync_join(self, invited_user_id)
+ self.assertEqual(len(join_updates), 0)
+
+
+_request_key = 0
+
+
+def generate_request_key() -> SyncRequestKey:
+ global _request_key
+ _request_key += 1
+ return ("request_key", _request_key)
+
+
+def sync_join(
+ testcase: HomeserverTestCase,
+ user_id: str,
+ since_token: Optional[StreamToken] = None,
+) -> Tuple[List[JoinedSyncResult], StreamToken]:
+ """Perform a sync request for the given user and return the user join updates
+ they've received, as well as the next_batch token.
+
+ This method assumes testcase.sync_handler points to the homeserver's sync handler.
+
+ Args:
+ testcase: The testcase that is currently being run.
+ user_id: The ID of the user to generate a sync response for.
+ since_token: An optional token to indicate from at what point to sync from.
+
+ Returns:
+ A tuple containing a list of join updates, and the sync response's
+ next_batch token.
+ """
+ requester = create_requester(user_id)
+ sync_config = generate_sync_config(requester.user.to_string())
+ sync_result = testcase.get_success(
+ testcase.hs.get_sync_handler().wait_for_sync_for_user(
+ requester,
+ sync_config,
+ SyncVersion.SYNC_V2,
+ generate_request_key(),
+ since_token,
+ )
+ )
+
+ return sync_result.joined, sync_result.next_batch
+
+
+class InviteAutoAccepterInternalTestCase(TestCase):
+ """
+ Test cases which exercise the internals of the InviteAutoAccepter.
+ """
+
+ def setUp(self) -> None:
+ self.module = create_module()
+ self.user_id = "@peter:test"
+ self.invitee = "@lesley:test"
+ self.remote_invitee = "@thomas:remote"
+
+ # We know our module API is a mock, but mypy doesn't.
+ self.mocked_update_membership: Mock = self.module._api.update_room_membership # type: ignore[assignment]
+
+ async def test_accept_invite_with_failures(self) -> None:
+ """Tests that receiving an invite for a local user makes the module attempt to
+ make the invitee join the room. This test verifies that it works if the call to
+ update membership returns exceptions before successfully completing and returning an event.
+ """
+ invite = MockEvent(
+ sender="@inviter:test",
+ state_key="@invitee:test",
+ type="m.room.member",
+ content={"membership": "invite"},
+ )
+
+ join_event = MockEvent(
+ sender="someone",
+ state_key="someone",
+ type="m.room.member",
+ content={"membership": "join"},
+ )
+ # the first two calls raise an exception while the third call is successful
+ self.mocked_update_membership.side_effect = [
+ SynapseError(HTTPStatus.FORBIDDEN, "Forbidden"),
+ SynapseError(HTTPStatus.FORBIDDEN, "Forbidden"),
+ make_awaitable(join_event),
+ ]
+
+ # Stop mypy from complaining that we give on_new_event a MockEvent rather than an
+ # EventBase.
+ await self.module.on_new_event(event=invite) # type: ignore[arg-type]
+
+ await self.retry_assertions(
+ self.mocked_update_membership,
+ 3,
+ sender=invite.state_key,
+ target=invite.state_key,
+ room_id=invite.room_id,
+ new_membership="join",
+ )
+
+ async def test_accept_invite_failures(self) -> None:
+ """Tests that receiving an invite for a local user makes the module attempt to
+ make the invitee join the room. This test verifies that if the update_membership call
+ fails consistently, _retry_make_join will break the loop after the set number of retries and
+ execution will continue.
+ """
+ invite = MockEvent(
+ sender=self.user_id,
+ state_key=self.invitee,
+ type="m.room.member",
+ content={"membership": "invite"},
+ )
+ self.mocked_update_membership.side_effect = SynapseError(
+ HTTPStatus.FORBIDDEN, "Forbidden"
+ )
+
+ # Stop mypy from complaining that we give on_new_event a MockEvent rather than an
+ # EventBase.
+ await self.module.on_new_event(event=invite) # type: ignore[arg-type]
+
+ await self.retry_assertions(
+ self.mocked_update_membership,
+ 5,
+ sender=invite.state_key,
+ target=invite.state_key,
+ room_id=invite.room_id,
+ new_membership="join",
+ )
+
+ async def test_not_state(self) -> None:
+ """Tests that receiving an invite that's not a state event does nothing."""
+ invite = MockEvent(
+ sender=self.user_id, type="m.room.member", content={"membership": "invite"}
+ )
+
+ # Stop mypy from complaining that we give on_new_event a MockEvent rather than an
+ # EventBase.
+ await self.module.on_new_event(event=invite) # type: ignore[arg-type]
+
+ self.mocked_update_membership.assert_not_called()
+
+ async def test_not_invite(self) -> None:
+ """Tests that receiving a membership update that's not an invite does nothing."""
+ invite = MockEvent(
+ sender=self.user_id,
+ state_key=self.user_id,
+ type="m.room.member",
+ content={"membership": "join"},
+ )
+
+ # Stop mypy from complaining that we give on_new_event a MockEvent rather than an
+ # EventBase.
+ await self.module.on_new_event(event=invite) # type: ignore[arg-type]
+
+ self.mocked_update_membership.assert_not_called()
+
+ async def test_not_membership(self) -> None:
+ """Tests that receiving a state event that's not a membership update does
+ nothing.
+ """
+ invite = MockEvent(
+ sender=self.user_id,
+ state_key=self.user_id,
+ type="org.matrix.test",
+ content={"foo": "bar"},
+ )
+
+ # Stop mypy from complaining that we give on_new_event a MockEvent rather than an
+ # EventBase.
+ await self.module.on_new_event(event=invite) # type: ignore[arg-type]
+
+ self.mocked_update_membership.assert_not_called()
+
+ def test_config_parse(self) -> None:
+ """Tests that a correct configuration parses."""
+ config = {
+ "auto_accept_invites": {
+ "enabled": True,
+ "only_for_direct_messages": True,
+ "only_from_local_users": True,
+ }
+ }
+ parsed_config = AutoAcceptInvitesConfig()
+ parsed_config.read_config(config)
+
+ self.assertTrue(parsed_config.enabled)
+ self.assertTrue(parsed_config.accept_invites_only_for_direct_messages)
+ self.assertTrue(parsed_config.accept_invites_only_from_local_users)
+
+ def test_runs_on_only_one_worker(self) -> None:
+ """
+ Tests that the module only runs on the specified worker.
+ """
+ # By default, we run on the main process...
+ main_module = create_module(
+ config_override={"auto_accept_invites": {"enabled": True}}, worker_name=None
+ )
+ cast(
+ Mock, main_module._api.register_third_party_rules_callbacks
+ ).assert_called_once()
+
+ # ...and not on other workers (like synchrotrons)...
+ sync_module = create_module(worker_name="synchrotron42")
+ cast(
+ Mock, sync_module._api.register_third_party_rules_callbacks
+ ).assert_not_called()
+
+ # ...unless we configured them to be the designated worker.
+ specified_module = create_module(
+ config_override={
+ "auto_accept_invites": {
+ "enabled": True,
+ "worker_to_run_on": "account_data1",
+ }
+ },
+ worker_name="account_data1",
+ )
+ cast(
+ Mock, specified_module._api.register_third_party_rules_callbacks
+ ).assert_called_once()
+
+ async def retry_assertions(
+ self, mock: Mock, call_count: int, **kwargs: Any
+ ) -> None:
+ """
+ This is a hacky way to ensure that the assertions are not called before the other coroutine
+ has a chance to call `update_room_membership`. It catches the exception caused by a failure,
+ and sleeps the thread before retrying, up until 5 tries.
+
+ Args:
+ call_count: the number of times the mock should have been called
+ mock: the mocked function we want to assert on
+ kwargs: keyword arguments to assert that the mock was called with
+ """
+
+ i = 0
+ while i < 5:
+ try:
+ # Check that the mocked method is called the expected amount of times and with the right
+ # arguments to attempt to make the user join the room.
+ mock.assert_called_with(**kwargs)
+ self.assertEqual(call_count, mock.call_count)
+ break
+ except AssertionError as e:
+ i += 1
+ if i == 5:
+ # we've used up the tries, force the test to fail as we've already caught the exception
+ self.fail(e)
+ await asyncio.sleep(1)
+
+
+@attr.s(auto_attribs=True)
+class MockEvent:
+ """Mocks an event. Only exposes properties the module uses."""
+
+ sender: str
+ type: str
+ content: Dict[str, Any]
+ room_id: str = "!someroom"
+ state_key: Optional[str] = None
+
+ def is_state(self) -> bool:
+ """Checks if the event is a state event by checking if it has a state key."""
+ return self.state_key is not None
+
+ @property
+ def membership(self) -> str:
+ """Extracts the membership from the event. Should only be called on an event
+ that's a membership event, and will raise a KeyError otherwise.
+ """
+ membership: str = self.content["membership"]
+ return membership
+
+
+T = TypeVar("T")
+TV = TypeVar("TV")
+
+
+async def make_awaitable(value: T) -> T:
+ return value
+
+
+def make_multiple_awaitable(result: TV) -> Awaitable[TV]:
+ """
+ Makes an awaitable, suitable for mocking an `async` function.
+ This uses Futures as they can be awaited multiple times so can be returned
+ to multiple callers.
+ """
+ future: Future[TV] = Future()
+ future.set_result(result)
+ return future
+
+
+def create_module(
+ config_override: Optional[Dict[str, Any]] = None, worker_name: Optional[str] = None
+) -> InviteAutoAccepter:
+ # Create a mock based on the ModuleApi spec, but override some mocked functions
+ # because some capabilities are needed for running the tests.
+ module_api = Mock(spec=ModuleApi)
+ module_api.is_mine.side_effect = lambda a: a.split(":")[1] == "test"
+ module_api.worker_name = worker_name
+ module_api.sleep.return_value = make_multiple_awaitable(None)
+
+ if config_override is None:
+ config_override = {}
+
+ config = AutoAcceptInvitesConfig()
+ config.read_config(config_override)
+
+ return InviteAutoAccepter(config, module_api)
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index fe00afe198..7362bde7ab 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -170,6 +170,7 @@ class RestHelper:
targ: Optional[str] = None,
expect_code: int = HTTPStatus.OK,
tok: Optional[str] = None,
+ extra_data: Optional[dict] = None,
) -> JsonDict:
return self.change_membership(
room=room,
@@ -178,6 +179,7 @@ class RestHelper:
tok=tok,
membership=Membership.INVITE,
expect_code=expect_code,
+ extra_data=extra_data,
)
def join(
diff --git a/tests/server.py b/tests/server.py
index 434be3d22c..f3a917f835 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -85,6 +85,7 @@ from twisted.web.server import Request, Site
from synapse.config.database import DatabaseConnectionConfig
from synapse.config.homeserver import HomeServerConfig
+from synapse.events.auto_accept_invites import InviteAutoAccepter
from synapse.events.presence_router import load_legacy_presence_router
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseRequest
@@ -1156,6 +1157,11 @@ def setup_test_homeserver(
for module, module_config in hs.config.modules.loaded_modules:
module(config=module_config, api=module_api)
+ if hs.config.auto_accept_invites.enabled:
+ # Start the local auto_accept_invites module.
+ m = InviteAutoAccepter(hs.config.auto_accept_invites, module_api)
+ logger.info("Loaded local module %s", m)
+
load_legacy_spam_checkers(hs)
load_legacy_third_party_event_rules(hs)
load_legacy_presence_router(hs)
From a547b49773b504deddee4db4ec4fb07971cd2fea Mon Sep 17 00:00:00 2001
From: Yadd
Date: Wed, 22 May 2024 16:29:31 +0400
Subject: [PATCH 113/503] Update Lemonldap-NG OIDC config (#17204)
Update OIDC documentation: by default Synapse doesn't query the userinfo endpoint, so claims should be put in the id_token.
---
changelog.d/17204.doc | 1 +
docs/openid.md | 2 ++
2 files changed, 3 insertions(+)
create mode 100644 changelog.d/17204.doc
diff --git a/changelog.d/17204.doc b/changelog.d/17204.doc
new file mode 100644
index 0000000000..5a5a8f5107
--- /dev/null
+++ b/changelog.d/17204.doc
@@ -0,0 +1 @@
+Update OIDC documentation: by default Synapse doesn't query the userinfo endpoint, so claims should be put in the id_token.
diff --git a/docs/openid.md b/docs/openid.md
index 9773a7de52..7a10b1615b 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -525,6 +525,8 @@ oidc_providers:
(`Options > Security > ID Token signature algorithm` and `Options > Security >
Access Token signature algorithm`)
- Scopes: OpenID, Email and Profile
+- Force claims into `id_token`
+ (`Options > Advanced > Force claims to be returned in ID Token`)
- Allowed redirection addresses for login (`Options > Basic > Allowed
redirection addresses for login` ) :
`[synapse public baseurl]/_synapse/client/oidc/callback`
From b71d2774388c90a68d71dd8d805556c8f62c92a1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 22 May 2024 13:55:18 +0100
Subject: [PATCH 114/503] Reduce work of calculating outbound device pokes
(#17211)
---
changelog.d/17211.misc | 1 +
synapse/handlers/device.py | 7 +++++++
synapse/storage/databases/main/devices.py | 24 +++++++++++++++++++++++
3 files changed, 32 insertions(+)
create mode 100644 changelog.d/17211.misc
diff --git a/changelog.d/17211.misc b/changelog.d/17211.misc
new file mode 100644
index 0000000000..144db03a40
--- /dev/null
+++ b/changelog.d/17211.misc
@@ -0,0 +1 @@
+Reduce work of calculating outbound device list updates.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 55842e7c7b..0432d97109 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -906,6 +906,13 @@ class DeviceHandler(DeviceWorkerHandler):
context=opentracing_context,
)
+ await self.store.mark_redundant_device_lists_pokes(
+ user_id=user_id,
+ device_id=device_id,
+ room_id=room_id,
+ converted_upto_stream_id=stream_id,
+ )
+
# Notify replication that we've updated the device list stream.
self.notifier.notify_replication()
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index f4410b5c02..48384e238c 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -2161,6 +2161,30 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
},
)
+ async def mark_redundant_device_lists_pokes(
+ self,
+ user_id: str,
+ device_id: str,
+ room_id: str,
+ converted_upto_stream_id: int,
+ ) -> None:
+ """If we've calculated the outbound pokes for a given room/device list
+ update, mark any subsequent changes as already converted"""
+
+ sql = """
+ UPDATE device_lists_changes_in_room
+ SET converted_to_destinations = true
+ WHERE stream_id > ? AND user_id = ? AND device_id = ?
+ AND room_id = ? AND NOT converted_to_destinations
+ """
+
+ def mark_redundant_device_lists_pokes_txn(txn: LoggingTransaction) -> None:
+ txn.execute(sql, (converted_upto_stream_id, user_id, device_id, room_id))
+
+ return await self.db_pool.runInteraction(
+ "mark_redundant_device_lists_pokes", mark_redundant_device_lists_pokes_txn
+ )
+
def _add_device_outbound_room_poke_txn(
self,
txn: LoggingTransaction,
From 7ef00b76280c9d8f5ad60e42cc384b316569d15f Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Wed, 22 May 2024 14:12:58 +0100
Subject: [PATCH 115/503] Add logging to tasks managed by the task scheduler,
showing CPU and database usage. (#17219)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The log format is the same as the request log format, except:
- fields that are specific to HTTP requests have been removed
- the task's params are included at the end of the log line.
These log lines are emitted:
- when the task function finishes — both completion and failure (and I
suppose it is possible for a task to become schedulable again?)
- every 5 minutes whilst it is running
Closes #17217.
---------
Signed-off-by: Olivier 'reivilibre'
---
changelog.d/17219.feature | 1 +
synapse/util/task_scheduler.py | 69 +++++++++++++++++++++++++++++++++-
2 files changed, 68 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/17219.feature
diff --git a/changelog.d/17219.feature b/changelog.d/17219.feature
new file mode 100644
index 0000000000..f8277a89d8
--- /dev/null
+++ b/changelog.d/17219.feature
@@ -0,0 +1 @@
+Add logging to tasks managed by the task scheduler, showing CPU and database usage.
\ No newline at end of file
diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py
index 01d05c9ed6..448960b297 100644
--- a/synapse/util/task_scheduler.py
+++ b/synapse/util/task_scheduler.py
@@ -24,7 +24,12 @@ from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set
from twisted.python.failure import Failure
-from synapse.logging.context import nested_logging_context
+from synapse.logging.context import (
+ ContextResourceUsage,
+ LoggingContext,
+ nested_logging_context,
+ set_current_context,
+)
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import (
run_as_background_process,
@@ -81,6 +86,8 @@ class TaskScheduler:
MAX_CONCURRENT_RUNNING_TASKS = 5
# Time from the last task update after which we will log a warning
LAST_UPDATE_BEFORE_WARNING_MS = 24 * 60 * 60 * 1000 # 24hrs
+ # Report a running task's status and usage every so often.
+ OCCASIONAL_REPORT_INTERVAL_MS = 5 * 60 * 1000 # 5 minutes
def __init__(self, hs: "HomeServer"):
self._hs = hs
@@ -346,6 +353,33 @@ class TaskScheduler:
assert task.id not in self._running_tasks
await self._store.delete_scheduled_task(task.id)
+ @staticmethod
+ def _log_task_usage(
+ state: str, task: ScheduledTask, usage: ContextResourceUsage, active_time: float
+ ) -> None:
+ """
+ Log a line describing the state and usage of a task.
+ The log line is inspired by / a copy of the request log line format,
+ but with irrelevant fields removed.
+
+ active_time: Time that the task has been running for, in seconds.
+ """
+
+ logger.info(
+ "Task %s: %.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
+ " [%d dbevts] %r, %r",
+ state,
+ active_time,
+ usage.ru_utime,
+ usage.ru_stime,
+ usage.db_sched_duration_sec,
+ usage.db_txn_duration_sec,
+ int(usage.db_txn_count),
+ usage.evt_db_fetch_count,
+ task.resource_id,
+ task.params,
+ )
+
async def _launch_task(self, task: ScheduledTask) -> None:
"""Launch a scheduled task now.
@@ -360,8 +394,32 @@ class TaskScheduler:
)
function = self._actions[task.action]
+ def _occasional_report(
+ task_log_context: LoggingContext, start_time: float
+ ) -> None:
+ """
+ Helper to log a 'Task continuing' line every so often.
+ """
+
+ current_time = self._clock.time()
+ calling_context = set_current_context(task_log_context)
+ try:
+ usage = task_log_context.get_resource_usage()
+ TaskScheduler._log_task_usage(
+ "continuing", task, usage, current_time - start_time
+ )
+ finally:
+ set_current_context(calling_context)
+
async def wrapper() -> None:
- with nested_logging_context(task.id):
+ with nested_logging_context(task.id) as log_context:
+ start_time = self._clock.time()
+ occasional_status_call = self._clock.looping_call(
+ _occasional_report,
+ TaskScheduler.OCCASIONAL_REPORT_INTERVAL_MS,
+ log_context,
+ start_time,
+ )
try:
(status, result, error) = await function(task)
except Exception:
@@ -383,6 +441,13 @@ class TaskScheduler:
)
self._running_tasks.remove(task.id)
+ current_time = self._clock.time()
+ usage = log_context.get_resource_usage()
+ TaskScheduler._log_task_usage(
+ status.value, task, usage, current_time - start_time
+ )
+ occasional_status_call.stop()
+
# Try launch a new task since we've finished with this one.
self._clock.call_later(0.1, self._launch_scheduled_tasks)
From 7e2412265da43552b26dedfa72909afd704d1500 Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Wed, 22 May 2024 14:22:33 +0100
Subject: [PATCH 116/503] Log exceptions when failing to auto-join new user
according to the `auto_join_rooms` option. (#17176)
Would have been useful for tracking down #16878.
Signed-off-by: Olivier 'reivilibre'
---
changelog.d/17176.misc | 1 +
synapse/handlers/register.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17176.misc
diff --git a/changelog.d/17176.misc b/changelog.d/17176.misc
new file mode 100644
index 0000000000..cc9f2a5202
--- /dev/null
+++ b/changelog.d/17176.misc
@@ -0,0 +1 @@
+Log exceptions when failing to auto-join a new user according to the `auto_join_rooms` option.
\ No newline at end of file
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index e48e70db04..c200e29569 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -590,7 +590,7 @@ class RegistrationHandler:
# moving away from bare excepts is a good thing to do.
logger.error("Failed to join new user to %r: %r", r, e)
except Exception as e:
- logger.error("Failed to join new user to %r: %r", r, e)
+ logger.error("Failed to join new user to %r: %r", r, e, exc_info=True)
async def _auto_join_rooms(self, user_id: str) -> None:
"""Automatically joins users to auto join rooms - creating the room in the first place
From c97251d5ba53b905036b3181afaa9c792777d1ff Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 23 May 2024 12:06:16 -0500
Subject: [PATCH 117/503] Add Sliding Sync `/sync/e2ee` endpoint for To-Device
messages (#17167)
This is being introduced as part of Sliding Sync but doesn't have any sliding window component. It's just a way to get E2EE events without having to sit through a big initial sync (`/sync` v2). And we can avoid encryption events being backed up by the main sync response or vice-versa.
Part of some Sliding Sync simplification/experimentation. See [this discussion](https://github.com/element-hq/synapse/pull/17167#discussion_r1610495866) for why it may not be as useful as we thought.
Based on:
- https://github.com/matrix-org/matrix-spec-proposals/pull/3575
- https://github.com/matrix-org/matrix-spec-proposals/pull/3885
- https://github.com/matrix-org/matrix-spec-proposals/pull/3884
---
changelog.d/17167.feature | 1 +
synapse/config/experimental.py | 3 +
synapse/handlers/sync.py | 247 ++++++++++++++-
synapse/rest/client/sync.py | 171 +++++++++++
tests/rest/client/test_devices.py | 144 +--------
tests/rest/client/test_sendtodevice.py | 71 ++++-
tests/rest/client/test_sync.py | 399 ++++++++++++++++++++++++-
7 files changed, 861 insertions(+), 175 deletions(-)
create mode 100644 changelog.d/17167.feature
diff --git a/changelog.d/17167.feature b/changelog.d/17167.feature
new file mode 100644
index 0000000000..5ad31db974
--- /dev/null
+++ b/changelog.d/17167.feature
@@ -0,0 +1 @@
+Add experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for To-Device messages and device encryption info.
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 749452ce93..cda7afc5c4 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -332,6 +332,9 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False)
+ # MSC3575 (Sliding Sync API endpoints)
+ self.msc3575_enabled: bool = experimental.get("msc3575_enabled", False)
+
# MSC3773: Thread notifications
self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index b7917a99d6..ac5bddd52f 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -28,11 +28,14 @@ from typing import (
Dict,
FrozenSet,
List,
+ Literal,
Mapping,
Optional,
Sequence,
Set,
Tuple,
+ Union,
+ overload,
)
import attr
@@ -128,6 +131,8 @@ class SyncVersion(Enum):
# Traditional `/sync` endpoint
SYNC_V2 = "sync_v2"
+ # Part of MSC3575 Sliding Sync
+ E2EE_SYNC = "e2ee_sync"
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -280,6 +285,26 @@ class SyncResult:
)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class E2eeSyncResult:
+ """
+ Attributes:
+ next_batch: Token for the next sync
+ to_device: List of direct messages for the device.
+ device_lists: List of user_ids whose devices have changed
+ device_one_time_keys_count: Dict of algorithm to count for one time keys
+ for this device
+ device_unused_fallback_key_types: List of key types that have an unused fallback
+ key
+ """
+
+ next_batch: StreamToken
+ to_device: List[JsonDict]
+ device_lists: DeviceListUpdates
+ device_one_time_keys_count: JsonMapping
+ device_unused_fallback_key_types: List[str]
+
+
class SyncHandler:
def __init__(self, hs: "HomeServer"):
self.hs_config = hs.config
@@ -322,6 +347,31 @@ class SyncHandler:
self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
+ @overload
+ async def wait_for_sync_for_user(
+ self,
+ requester: Requester,
+ sync_config: SyncConfig,
+ sync_version: Literal[SyncVersion.SYNC_V2],
+ request_key: SyncRequestKey,
+ since_token: Optional[StreamToken] = None,
+ timeout: int = 0,
+ full_state: bool = False,
+ ) -> SyncResult: ...
+
+ @overload
+ async def wait_for_sync_for_user(
+ self,
+ requester: Requester,
+ sync_config: SyncConfig,
+ sync_version: Literal[SyncVersion.E2EE_SYNC],
+ request_key: SyncRequestKey,
+ since_token: Optional[StreamToken] = None,
+ timeout: int = 0,
+ full_state: bool = False,
+ ) -> E2eeSyncResult: ...
+
+ @overload
async def wait_for_sync_for_user(
self,
requester: Requester,
@@ -331,7 +381,18 @@ class SyncHandler:
since_token: Optional[StreamToken] = None,
timeout: int = 0,
full_state: bool = False,
- ) -> SyncResult:
+ ) -> Union[SyncResult, E2eeSyncResult]: ...
+
+ async def wait_for_sync_for_user(
+ self,
+ requester: Requester,
+ sync_config: SyncConfig,
+ sync_version: SyncVersion,
+ request_key: SyncRequestKey,
+ since_token: Optional[StreamToken] = None,
+ timeout: int = 0,
+ full_state: bool = False,
+ ) -> Union[SyncResult, E2eeSyncResult]:
"""Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then
return an empty sync result.
@@ -344,8 +405,10 @@ class SyncHandler:
since_token: The point in the stream to sync from.
timeout: How long to wait for new data to arrive before giving up.
full_state: Whether to return the full state for each room.
+
Returns:
When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
+ When `SyncVersion.E2EE_SYNC`, returns a `E2eeSyncResult`.
"""
# If the user is not part of the mau group, then check that limits have
# not been exceeded (if not part of the group by this point, almost certain
@@ -366,6 +429,29 @@ class SyncHandler:
logger.debug("Returning sync response for %s", user_id)
return res
+ @overload
+ async def _wait_for_sync_for_user(
+ self,
+ sync_config: SyncConfig,
+ sync_version: Literal[SyncVersion.SYNC_V2],
+ since_token: Optional[StreamToken],
+ timeout: int,
+ full_state: bool,
+ cache_context: ResponseCacheContext[SyncRequestKey],
+ ) -> SyncResult: ...
+
+ @overload
+ async def _wait_for_sync_for_user(
+ self,
+ sync_config: SyncConfig,
+ sync_version: Literal[SyncVersion.E2EE_SYNC],
+ since_token: Optional[StreamToken],
+ timeout: int,
+ full_state: bool,
+ cache_context: ResponseCacheContext[SyncRequestKey],
+ ) -> E2eeSyncResult: ...
+
+ @overload
async def _wait_for_sync_for_user(
self,
sync_config: SyncConfig,
@@ -374,7 +460,17 @@ class SyncHandler:
timeout: int,
full_state: bool,
cache_context: ResponseCacheContext[SyncRequestKey],
- ) -> SyncResult:
+ ) -> Union[SyncResult, E2eeSyncResult]: ...
+
+ async def _wait_for_sync_for_user(
+ self,
+ sync_config: SyncConfig,
+ sync_version: SyncVersion,
+ since_token: Optional[StreamToken],
+ timeout: int,
+ full_state: bool,
+ cache_context: ResponseCacheContext[SyncRequestKey],
+ ) -> Union[SyncResult, E2eeSyncResult]:
"""The start of the machinery that produces a /sync response.
See https://spec.matrix.org/v1.1/client-server-api/#syncing for full details.
@@ -417,14 +513,16 @@ class SyncHandler:
if timeout == 0 or since_token is None or full_state:
# we are going to return immediately, so don't bother calling
# notifier.wait_for_events.
- result: SyncResult = await self.current_sync_for_user(
- sync_config, sync_version, since_token, full_state=full_state
+ result: Union[SyncResult, E2eeSyncResult] = (
+ await self.current_sync_for_user(
+ sync_config, sync_version, since_token, full_state=full_state
+ )
)
else:
# Otherwise, we wait for something to happen and report it to the user.
async def current_sync_callback(
before_token: StreamToken, after_token: StreamToken
- ) -> SyncResult:
+ ) -> Union[SyncResult, E2eeSyncResult]:
return await self.current_sync_for_user(
sync_config, sync_version, since_token
)
@@ -456,14 +554,43 @@ class SyncHandler:
return result
+ @overload
+ async def current_sync_for_user(
+ self,
+ sync_config: SyncConfig,
+ sync_version: Literal[SyncVersion.SYNC_V2],
+ since_token: Optional[StreamToken] = None,
+ full_state: bool = False,
+ ) -> SyncResult: ...
+
+ @overload
+ async def current_sync_for_user(
+ self,
+ sync_config: SyncConfig,
+ sync_version: Literal[SyncVersion.E2EE_SYNC],
+ since_token: Optional[StreamToken] = None,
+ full_state: bool = False,
+ ) -> E2eeSyncResult: ...
+
+ @overload
async def current_sync_for_user(
self,
sync_config: SyncConfig,
sync_version: SyncVersion,
since_token: Optional[StreamToken] = None,
full_state: bool = False,
- ) -> SyncResult:
- """Generates the response body of a sync result, represented as a SyncResult.
+ ) -> Union[SyncResult, E2eeSyncResult]: ...
+
+ async def current_sync_for_user(
+ self,
+ sync_config: SyncConfig,
+ sync_version: SyncVersion,
+ since_token: Optional[StreamToken] = None,
+ full_state: bool = False,
+ ) -> Union[SyncResult, E2eeSyncResult]:
+ """
+ Generates the response body of a sync result, represented as a
+ `SyncResult`/`E2eeSyncResult`.
This is a wrapper around `generate_sync_result` which starts an open tracing
span to track the sync. See `generate_sync_result` for the next part of your
@@ -474,15 +601,25 @@ class SyncHandler:
sync_version: Determines what kind of sync response to generate.
since_token: The point in the stream to sync from.p.
full_state: Whether to return the full state for each room.
+
Returns:
When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
+ When `SyncVersion.E2EE_SYNC`, returns a `E2eeSyncResult`.
"""
with start_active_span("sync.current_sync_for_user"):
log_kv({"since_token": since_token})
+
# Go through the `/sync` v2 path
if sync_version == SyncVersion.SYNC_V2:
- sync_result: SyncResult = await self.generate_sync_result(
- sync_config, since_token, full_state
+ sync_result: Union[SyncResult, E2eeSyncResult] = (
+ await self.generate_sync_result(
+ sync_config, since_token, full_state
+ )
+ )
+ # Go through the MSC3575 Sliding Sync `/sync/e2ee` path
+ elif sync_version == SyncVersion.E2EE_SYNC:
+ sync_result = await self.generate_e2ee_sync_result(
+ sync_config, since_token
)
else:
raise Exception(
@@ -1691,6 +1828,96 @@ class SyncHandler:
next_batch=sync_result_builder.now_token,
)
+ async def generate_e2ee_sync_result(
+ self,
+ sync_config: SyncConfig,
+ since_token: Optional[StreamToken] = None,
+ ) -> E2eeSyncResult:
+ """
+ Generates the response body of a MSC3575 Sliding Sync `/sync/e2ee` result.
+
+        This is represented by an `E2eeSyncResult` struct, which is built from small
+ pieces using a `SyncResultBuilder`. The `sync_result_builder` is passed as a
+ mutable ("inout") parameter to various helper functions. These retrieve and
+ process the data which forms the sync body, often writing to the
+ `sync_result_builder` to store their output.
+
+ At the end, we transfer data from the `sync_result_builder` to a new `E2eeSyncResult`
+ instance to signify that the sync calculation is complete.
+ """
+ user_id = sync_config.user.to_string()
+ app_service = self.store.get_app_service_by_user_id(user_id)
+ if app_service:
+ # We no longer support AS users using /sync directly.
+ # See https://github.com/matrix-org/matrix-doc/issues/1144
+ raise NotImplementedError()
+
+ sync_result_builder = await self.get_sync_result_builder(
+ sync_config,
+ since_token,
+ full_state=False,
+ )
+
+ # 1. Calculate `to_device` events
+ await self._generate_sync_entry_for_to_device(sync_result_builder)
+
+ # 2. Calculate `device_lists`
+ # Device list updates are sent if a since token is provided.
+ device_lists = DeviceListUpdates()
+ include_device_list_updates = bool(since_token and since_token.device_list_key)
+ if include_device_list_updates:
+ # Note that _generate_sync_entry_for_rooms sets sync_result_builder.joined, which
+ # is used in calculate_user_changes below.
+ #
+ # TODO: Running `_generate_sync_entry_for_rooms()` is a lot of work just to
+ # figure out the membership changes/derived info needed for
+ # `_generate_sync_entry_for_device_list()`. In the future, we should try to
+ # refactor this away.
+ (
+ newly_joined_rooms,
+ newly_left_rooms,
+ ) = await self._generate_sync_entry_for_rooms(sync_result_builder)
+
+ # This uses the sync_result_builder.joined which is set in
+ # `_generate_sync_entry_for_rooms`, if that didn't find any joined
+ # rooms for some reason it is a no-op.
+ (
+ newly_joined_or_invited_or_knocked_users,
+ newly_left_users,
+ ) = sync_result_builder.calculate_user_changes()
+
+ device_lists = await self._generate_sync_entry_for_device_list(
+ sync_result_builder,
+ newly_joined_rooms=newly_joined_rooms,
+ newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
+ newly_left_rooms=newly_left_rooms,
+ newly_left_users=newly_left_users,
+ )
+
+ # 3. Calculate `device_one_time_keys_count` and `device_unused_fallback_key_types`
+ device_id = sync_config.device_id
+ one_time_keys_count: JsonMapping = {}
+ unused_fallback_key_types: List[str] = []
+ if device_id:
+ # TODO: We should have a way to let clients differentiate between the states of:
+ # * no change in OTK count since the provided since token
+ # * the server has zero OTKs left for this device
+ # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
+ one_time_keys_count = await self.store.count_e2e_one_time_keys(
+ user_id, device_id
+ )
+ unused_fallback_key_types = list(
+ await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
+ )
+
+ return E2eeSyncResult(
+ to_device=sync_result_builder.to_device,
+ device_lists=device_lists,
+ device_one_time_keys_count=one_time_keys_count,
+ device_unused_fallback_key_types=unused_fallback_key_types,
+ next_batch=sync_result_builder.now_token,
+ )
+
async def get_sync_result_builder(
self,
sync_config: SyncConfig,
@@ -1889,7 +2116,7 @@ class SyncHandler:
users_that_have_changed = (
await self._device_handler.get_device_changes_in_shared_rooms(
user_id,
- sync_result_builder.joined_room_ids,
+ joined_room_ids,
from_token=since_token,
now_token=sync_result_builder.now_token,
)
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 4a57eaf930..27ea943e31 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -567,5 +567,176 @@ class SyncRestServlet(RestServlet):
return result
+class SlidingSyncE2eeRestServlet(RestServlet):
+ """
+ API endpoint for MSC3575 Sliding Sync `/sync/e2ee`. This is being introduced as part
+ of Sliding Sync but doesn't have any sliding window component. It's just a way to
+ get E2EE events without having to sit through a big initial sync (`/sync` v2). And
+ we can avoid encryption events being backed up by the main sync response.
+
+ Having To-Device messages split out to this sync endpoint also helps when clients
+    need to have 2 or more sync streams open at a time, e.g. a push notification process
+ and a main process. This can cause the two processes to race to fetch the To-Device
+ events, resulting in the need for complex synchronisation rules to ensure the token
+ is correctly and atomically exchanged between processes.
+
+ GET parameters::
+ timeout(int): How long to wait for new events in milliseconds.
+ since(batch_token): Batch token when asking for incremental deltas.
+
+ Response JSON::
+ {
+ "next_batch": // batch token for the next /sync
+ "to_device": {
+ // list of to-device events
+ "events": [
+ {
+                    "content": { "algorithm": "m.olm.v1.curve25519-aes-sha2", "ciphertext": { ... }, "org.matrix.msgid": "abcd", "session_id": "abcd" },
+ "type": "m.room.encrypted",
+ "sender": "@alice:example.com",
+ }
+ // ...
+ ]
+ },
+ "device_lists": {
+ "changed": ["@alice:example.com"],
+ "left": ["@bob:example.com"]
+ },
+ "device_one_time_keys_count": {
+ "signed_curve25519": 50
+ },
+ "device_unused_fallback_key_types": [
+ "signed_curve25519"
+ ]
+ }
+ """
+
+ PATTERNS = client_patterns(
+ "/org.matrix.msc3575/sync/e2ee$", releases=[], v1=False, unstable=True
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastores().main
+ self.sync_handler = hs.get_sync_handler()
+
+ # Filtering only matters for the `device_lists` because it requires a bunch of
+ # derived information from rooms (see how `_generate_sync_entry_for_rooms()`
+ # prepares a bunch of data for `_generate_sync_entry_for_device_list()`).
+ self.only_member_events_filter_collection = FilterCollection(
+ self.hs,
+ {
+ "room": {
+ # We only care about membership events for the `device_lists`.
+ # Membership will tell us whether a user has joined/left a room and
+ # if there are new devices to encrypt for.
+ "timeline": {
+ "types": ["m.room.member"],
+ },
+ "state": {
+ "types": ["m.room.member"],
+ },
+ # We don't want any extra account_data generated because it's not
+ # returned by this endpoint. This helps us avoid work in
+ # `_generate_sync_entry_for_rooms()`
+ "account_data": {
+ "not_types": ["*"],
+ },
+ # We don't want any extra ephemeral data generated because it's not
+ # returned by this endpoint. This helps us avoid work in
+ # `_generate_sync_entry_for_rooms()`
+ "ephemeral": {
+ "not_types": ["*"],
+ },
+ },
+ # We don't want any extra account_data generated because it's not
+ # returned by this endpoint. (This is just here for good measure)
+ "account_data": {
+ "not_types": ["*"],
+ },
+ # We don't want any extra presence data generated because it's not
+ # returned by this endpoint. (This is just here for good measure)
+ "presence": {
+ "not_types": ["*"],
+ },
+ },
+ )
+
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
+ user = requester.user
+ device_id = requester.device_id
+
+ timeout = parse_integer(request, "timeout", default=0)
+ since = parse_string(request, "since")
+
+ sync_config = SyncConfig(
+ user=user,
+ filter_collection=self.only_member_events_filter_collection,
+ is_guest=requester.is_guest,
+ device_id=device_id,
+ )
+
+ since_token = None
+ if since is not None:
+ since_token = await StreamToken.from_string(self.store, since)
+
+ # Request cache key
+ request_key = (
+ SyncVersion.E2EE_SYNC,
+ user,
+ timeout,
+ since,
+ )
+
+ # Gather data for the response
+ sync_result = await self.sync_handler.wait_for_sync_for_user(
+ requester,
+ sync_config,
+ SyncVersion.E2EE_SYNC,
+ request_key,
+ since_token=since_token,
+ timeout=timeout,
+ full_state=False,
+ )
+
+ # The client may have disconnected by now; don't bother to serialize the
+ # response if so.
+ if request._disconnected:
+ logger.info("Client has disconnected; not serializing response.")
+ return 200, {}
+
+ response: JsonDict = defaultdict(dict)
+ response["next_batch"] = await sync_result.next_batch.to_string(self.store)
+
+ if sync_result.to_device:
+ response["to_device"] = {"events": sync_result.to_device}
+
+ if sync_result.device_lists.changed:
+ response["device_lists"]["changed"] = list(sync_result.device_lists.changed)
+ if sync_result.device_lists.left:
+ response["device_lists"]["left"] = list(sync_result.device_lists.left)
+
+ # We always include this because https://github.com/vector-im/element-android/issues/3725
+ # The spec isn't terribly clear on when this can be omitted and how a client would tell
+ # the difference between "no keys present" and "nothing changed" in terms of whole field
+ # absent / individual key type entry absent
+ # Corresponding synapse issue: https://github.com/matrix-org/synapse/issues/10456
+ response["device_one_time_keys_count"] = sync_result.device_one_time_keys_count
+
+ # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
+ # states that this field should always be included, as long as the server supports the feature.
+ response["device_unused_fallback_key_types"] = (
+ sync_result.device_unused_fallback_key_types
+ )
+
+ return 200, response
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
SyncRestServlet(hs).register(http_server)
+
+ if hs.config.experimental.msc3575_enabled:
+ SlidingSyncE2eeRestServlet(hs).register(http_server)
diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py
index 2b360732ac..a3ed12a38f 100644
--- a/tests/rest/client/test_devices.py
+++ b/tests/rest/client/test_devices.py
@@ -24,8 +24,8 @@ from twisted.internet.defer import ensureDeferred
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import NotFoundError
-from synapse.rest import admin, devices, room, sync
-from synapse.rest.client import account, keys, login, register
+from synapse.rest import admin, devices, sync
+from synapse.rest.client import keys, login, register
from synapse.server import HomeServer
from synapse.types import JsonDict, UserID, create_requester
from synapse.util import Clock
@@ -33,146 +33,6 @@ from synapse.util import Clock
from tests import unittest
-class DeviceListsTestCase(unittest.HomeserverTestCase):
- """Tests regarding device list changes."""
-
- servlets = [
- admin.register_servlets_for_client_rest_resource,
- login.register_servlets,
- register.register_servlets,
- account.register_servlets,
- room.register_servlets,
- sync.register_servlets,
- devices.register_servlets,
- ]
-
- def test_receiving_local_device_list_changes(self) -> None:
- """Tests that a local users that share a room receive each other's device list
- changes.
- """
- # Register two users
- test_device_id = "TESTDEVICE"
- alice_user_id = self.register_user("alice", "correcthorse")
- alice_access_token = self.login(
- alice_user_id, "correcthorse", device_id=test_device_id
- )
-
- bob_user_id = self.register_user("bob", "ponyponypony")
- bob_access_token = self.login(bob_user_id, "ponyponypony")
-
- # Create a room for them to coexist peacefully in
- new_room_id = self.helper.create_room_as(
- alice_user_id, is_public=True, tok=alice_access_token
- )
- self.assertIsNotNone(new_room_id)
-
- # Have Bob join the room
- self.helper.invite(
- new_room_id, alice_user_id, bob_user_id, tok=alice_access_token
- )
- self.helper.join(new_room_id, bob_user_id, tok=bob_access_token)
-
- # Now have Bob initiate an initial sync (in order to get a since token)
- channel = self.make_request(
- "GET",
- "/sync",
- access_token=bob_access_token,
- )
- self.assertEqual(channel.code, 200, channel.json_body)
- next_batch_token = channel.json_body["next_batch"]
-
- # ...and then an incremental sync. This should block until the sync stream is woken up,
- # which we hope will happen as a result of Alice updating their device list.
- bob_sync_channel = self.make_request(
- "GET",
- f"/sync?since={next_batch_token}&timeout=30000",
- access_token=bob_access_token,
- # Start the request, then continue on.
- await_result=False,
- )
-
- # Have alice update their device list
- channel = self.make_request(
- "PUT",
- f"/devices/{test_device_id}",
- {
- "display_name": "New Device Name",
- },
- access_token=alice_access_token,
- )
- self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
-
- # Check that bob's incremental sync contains the updated device list.
- # If not, the client would only receive the device list update on the
- # *next* sync.
- bob_sync_channel.await_result()
- self.assertEqual(bob_sync_channel.code, 200, bob_sync_channel.json_body)
-
- changed_device_lists = bob_sync_channel.json_body.get("device_lists", {}).get(
- "changed", []
- )
- self.assertIn(alice_user_id, changed_device_lists, bob_sync_channel.json_body)
-
- def test_not_receiving_local_device_list_changes(self) -> None:
- """Tests a local users DO NOT receive device updates from each other if they do not
- share a room.
- """
- # Register two users
- test_device_id = "TESTDEVICE"
- alice_user_id = self.register_user("alice", "correcthorse")
- alice_access_token = self.login(
- alice_user_id, "correcthorse", device_id=test_device_id
- )
-
- bob_user_id = self.register_user("bob", "ponyponypony")
- bob_access_token = self.login(bob_user_id, "ponyponypony")
-
- # These users do not share a room. They are lonely.
-
- # Have Bob initiate an initial sync (in order to get a since token)
- channel = self.make_request(
- "GET",
- "/sync",
- access_token=bob_access_token,
- )
- self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
- next_batch_token = channel.json_body["next_batch"]
-
- # ...and then an incremental sync. This should block until the sync stream is woken up,
- # which we hope will happen as a result of Alice updating their device list.
- bob_sync_channel = self.make_request(
- "GET",
- f"/sync?since={next_batch_token}&timeout=1000",
- access_token=bob_access_token,
- # Start the request, then continue on.
- await_result=False,
- )
-
- # Have alice update their device list
- channel = self.make_request(
- "PUT",
- f"/devices/{test_device_id}",
- {
- "display_name": "New Device Name",
- },
- access_token=alice_access_token,
- )
- self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
-
- # Check that bob's incremental sync does not contain the updated device list.
- bob_sync_channel.await_result()
- self.assertEqual(
- bob_sync_channel.code, HTTPStatus.OK, bob_sync_channel.json_body
- )
-
- changed_device_lists = bob_sync_channel.json_body.get("device_lists", {}).get(
- "changed", []
- )
- self.assertNotIn(
- alice_user_id, changed_device_lists, bob_sync_channel.json_body
- )
-
-
class DevicesTestCase(unittest.HomeserverTestCase):
servlets = [
admin.register_servlets,
diff --git a/tests/rest/client/test_sendtodevice.py b/tests/rest/client/test_sendtodevice.py
index 2f994ad553..5ef501c6d5 100644
--- a/tests/rest/client/test_sendtodevice.py
+++ b/tests/rest/client/test_sendtodevice.py
@@ -18,15 +18,39 @@
# [This file includes modifications made by New Vector Limited]
#
#
+from parameterized import parameterized_class
from synapse.api.constants import EduTypes
from synapse.rest import admin
from synapse.rest.client import login, sendtodevice, sync
+from synapse.types import JsonDict
from tests.unittest import HomeserverTestCase, override_config
+@parameterized_class(
+ ("sync_endpoint", "experimental_features"),
+ [
+ ("/sync", {}),
+ (
+ "/_matrix/client/unstable/org.matrix.msc3575/sync/e2ee",
+ # Enable sliding sync
+ {"msc3575_enabled": True},
+ ),
+ ],
+)
class SendToDeviceTestCase(HomeserverTestCase):
+ """
+ Test `/sendToDevice` will deliver messages across to people receiving them over `/sync`.
+
+ Attributes:
+ sync_endpoint: The endpoint under test to use for syncing.
+ experimental_features: The experimental features homeserver config to use.
+ """
+
+ sync_endpoint: str
+ experimental_features: JsonDict
+
servlets = [
admin.register_servlets,
login.register_servlets,
@@ -34,6 +58,11 @@ class SendToDeviceTestCase(HomeserverTestCase):
sync.register_servlets,
]
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ config["experimental_features"] = self.experimental_features
+ return config
+
def test_user_to_user(self) -> None:
"""A to-device message from one user to another should get delivered"""
@@ -54,7 +83,7 @@ class SendToDeviceTestCase(HomeserverTestCase):
self.assertEqual(chan.code, 200, chan.result)
# check it appears
- channel = self.make_request("GET", "/sync", access_token=user2_tok)
+ channel = self.make_request("GET", self.sync_endpoint, access_token=user2_tok)
self.assertEqual(channel.code, 200, channel.result)
expected_result = {
"events": [
@@ -67,15 +96,19 @@ class SendToDeviceTestCase(HomeserverTestCase):
}
self.assertEqual(channel.json_body["to_device"], expected_result)
- # it should re-appear if we do another sync
- channel = self.make_request("GET", "/sync", access_token=user2_tok)
+ # it should re-appear if we do another sync because the to-device message is not
+ # deleted until we acknowledge it by sending a `?since=...` parameter in the
+ # next sync request corresponding to the `next_batch` value from the response.
+ channel = self.make_request("GET", self.sync_endpoint, access_token=user2_tok)
self.assertEqual(channel.code, 200, channel.result)
self.assertEqual(channel.json_body["to_device"], expected_result)
# it should *not* appear if we do an incremental sync
sync_token = channel.json_body["next_batch"]
channel = self.make_request(
- "GET", f"/sync?since={sync_token}", access_token=user2_tok
+ "GET",
+ f"{self.sync_endpoint}?since={sync_token}",
+ access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.result)
self.assertEqual(channel.json_body.get("to_device", {}).get("events", []), [])
@@ -99,15 +132,19 @@ class SendToDeviceTestCase(HomeserverTestCase):
)
self.assertEqual(chan.code, 200, chan.result)
- # now sync: we should get two of the three
- channel = self.make_request("GET", "/sync", access_token=user2_tok)
+ # now sync: we should get two of the three (because burst_count=2)
+ channel = self.make_request("GET", self.sync_endpoint, access_token=user2_tok)
self.assertEqual(channel.code, 200, channel.result)
msgs = channel.json_body["to_device"]["events"]
self.assertEqual(len(msgs), 2)
for i in range(2):
self.assertEqual(
msgs[i],
- {"sender": user1, "type": "m.room_key_request", "content": {"idx": i}},
+ {
+ "sender": user1,
+ "type": "m.room_key_request",
+ "content": {"idx": i},
+ },
)
sync_token = channel.json_body["next_batch"]
@@ -125,7 +162,9 @@ class SendToDeviceTestCase(HomeserverTestCase):
# ... which should arrive
channel = self.make_request(
- "GET", f"/sync?since={sync_token}", access_token=user2_tok
+ "GET",
+ f"{self.sync_endpoint}?since={sync_token}",
+ access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.result)
msgs = channel.json_body["to_device"]["events"]
@@ -159,7 +198,7 @@ class SendToDeviceTestCase(HomeserverTestCase):
)
# now sync: we should get two of the three
- channel = self.make_request("GET", "/sync", access_token=user2_tok)
+ channel = self.make_request("GET", self.sync_endpoint, access_token=user2_tok)
self.assertEqual(channel.code, 200, channel.result)
msgs = channel.json_body["to_device"]["events"]
self.assertEqual(len(msgs), 2)
@@ -193,7 +232,9 @@ class SendToDeviceTestCase(HomeserverTestCase):
# ... which should arrive
channel = self.make_request(
- "GET", f"/sync?since={sync_token}", access_token=user2_tok
+ "GET",
+ f"{self.sync_endpoint}?since={sync_token}",
+ access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.result)
msgs = channel.json_body["to_device"]["events"]
@@ -217,7 +258,7 @@ class SendToDeviceTestCase(HomeserverTestCase):
user2_tok = self.login("u2", "pass", "d2")
# Do an initial sync
- channel = self.make_request("GET", "/sync", access_token=user2_tok)
+ channel = self.make_request("GET", self.sync_endpoint, access_token=user2_tok)
self.assertEqual(channel.code, 200, channel.result)
sync_token = channel.json_body["next_batch"]
@@ -233,7 +274,9 @@ class SendToDeviceTestCase(HomeserverTestCase):
self.assertEqual(chan.code, 200, chan.result)
channel = self.make_request(
- "GET", f"/sync?since={sync_token}&timeout=300000", access_token=user2_tok
+ "GET",
+ f"{self.sync_endpoint}?since={sync_token}&timeout=300000",
+ access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.result)
messages = channel.json_body.get("to_device", {}).get("events", [])
@@ -241,7 +284,9 @@ class SendToDeviceTestCase(HomeserverTestCase):
sync_token = channel.json_body["next_batch"]
channel = self.make_request(
- "GET", f"/sync?since={sync_token}&timeout=300000", access_token=user2_tok
+ "GET",
+ f"{self.sync_endpoint}?since={sync_token}&timeout=300000",
+ access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.result)
messages = channel.json_body.get("to_device", {}).get("events", [])
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 417a87feb2..daeb1d3ddd 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -21,7 +21,7 @@
import json
from typing import List
-from parameterized import parameterized
+from parameterized import parameterized, parameterized_class
from twisted.test.proto_helpers import MemoryReactor
@@ -688,24 +688,180 @@ class SyncCacheTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, 200, channel.json_body)
+@parameterized_class(
+ ("sync_endpoint", "experimental_features"),
+ [
+ ("/sync", {}),
+ (
+ "/_matrix/client/unstable/org.matrix.msc3575/sync/e2ee",
+ # Enable sliding sync
+ {"msc3575_enabled": True},
+ ),
+ ],
+)
class DeviceListSyncTestCase(unittest.HomeserverTestCase):
+ """
+ Tests regarding device list (`device_lists`) changes.
+
+ Attributes:
+ sync_endpoint: The endpoint under test to use for syncing.
+ experimental_features: The experimental features homeserver config to use.
+ """
+
+ sync_endpoint: str
+ experimental_features: JsonDict
+
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
+ room.register_servlets,
sync.register_servlets,
devices.register_servlets,
]
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ config["experimental_features"] = self.experimental_features
+ return config
+
+ def test_receiving_local_device_list_changes(self) -> None:
+        """Tests that local users who share a room receive each other's device list
+ changes.
+ """
+ # Register two users
+ test_device_id = "TESTDEVICE"
+ alice_user_id = self.register_user("alice", "correcthorse")
+ alice_access_token = self.login(
+ alice_user_id, "correcthorse", device_id=test_device_id
+ )
+
+ bob_user_id = self.register_user("bob", "ponyponypony")
+ bob_access_token = self.login(bob_user_id, "ponyponypony")
+
+ # Create a room for them to coexist peacefully in
+ new_room_id = self.helper.create_room_as(
+ alice_user_id, is_public=True, tok=alice_access_token
+ )
+ self.assertIsNotNone(new_room_id)
+
+ # Have Bob join the room
+ self.helper.invite(
+ new_room_id, alice_user_id, bob_user_id, tok=alice_access_token
+ )
+ self.helper.join(new_room_id, bob_user_id, tok=bob_access_token)
+
+ # Now have Bob initiate an initial sync (in order to get a since token)
+ channel = self.make_request(
+ "GET",
+ self.sync_endpoint,
+ access_token=bob_access_token,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+ next_batch_token = channel.json_body["next_batch"]
+
+ # ...and then an incremental sync. This should block until the sync stream is woken up,
+ # which we hope will happen as a result of Alice updating their device list.
+ bob_sync_channel = self.make_request(
+ "GET",
+ f"{self.sync_endpoint}?since={next_batch_token}&timeout=30000",
+ access_token=bob_access_token,
+ # Start the request, then continue on.
+ await_result=False,
+ )
+
+ # Have alice update their device list
+ channel = self.make_request(
+ "PUT",
+ f"/devices/{test_device_id}",
+ {
+ "display_name": "New Device Name",
+ },
+ access_token=alice_access_token,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Check that bob's incremental sync contains the updated device list.
+ # If not, the client would only receive the device list update on the
+ # *next* sync.
+ bob_sync_channel.await_result()
+ self.assertEqual(bob_sync_channel.code, 200, bob_sync_channel.json_body)
+
+ changed_device_lists = bob_sync_channel.json_body.get("device_lists", {}).get(
+ "changed", []
+ )
+ self.assertIn(alice_user_id, changed_device_lists, bob_sync_channel.json_body)
+
+ def test_not_receiving_local_device_list_changes(self) -> None:
+        """Tests that local users DO NOT receive device updates from each other if they do not
+ share a room.
+ """
+ # Register two users
+ test_device_id = "TESTDEVICE"
+ alice_user_id = self.register_user("alice", "correcthorse")
+ alice_access_token = self.login(
+ alice_user_id, "correcthorse", device_id=test_device_id
+ )
+
+ bob_user_id = self.register_user("bob", "ponyponypony")
+ bob_access_token = self.login(bob_user_id, "ponyponypony")
+
+ # These users do not share a room. They are lonely.
+
+ # Have Bob initiate an initial sync (in order to get a since token)
+ channel = self.make_request(
+ "GET",
+ self.sync_endpoint,
+ access_token=bob_access_token,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+ next_batch_token = channel.json_body["next_batch"]
+
+ # ...and then an incremental sync. This should block until the sync stream is woken up,
+ # which we hope will happen as a result of Alice updating their device list.
+ bob_sync_channel = self.make_request(
+ "GET",
+ f"{self.sync_endpoint}?since={next_batch_token}&timeout=1000",
+ access_token=bob_access_token,
+ # Start the request, then continue on.
+ await_result=False,
+ )
+
+ # Have alice update their device list
+ channel = self.make_request(
+ "PUT",
+ f"/devices/{test_device_id}",
+ {
+ "display_name": "New Device Name",
+ },
+ access_token=alice_access_token,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Check that bob's incremental sync does not contain the updated device list.
+ bob_sync_channel.await_result()
+ self.assertEqual(bob_sync_channel.code, 200, bob_sync_channel.json_body)
+
+ changed_device_lists = bob_sync_channel.json_body.get("device_lists", {}).get(
+ "changed", []
+ )
+ self.assertNotIn(
+ alice_user_id, changed_device_lists, bob_sync_channel.json_body
+ )
+
def test_user_with_no_rooms_receives_self_device_list_updates(self) -> None:
"""Tests that a user with no rooms still receives their own device list updates"""
- device_id = "TESTDEVICE"
+ test_device_id = "TESTDEVICE"
# Register a user and login, creating a device
- self.user_id = self.register_user("kermit", "monkey")
- self.tok = self.login("kermit", "monkey", device_id=device_id)
+ alice_user_id = self.register_user("alice", "correcthorse")
+ alice_access_token = self.login(
+ alice_user_id, "correcthorse", device_id=test_device_id
+ )
# Request an initial sync
- channel = self.make_request("GET", "/sync", access_token=self.tok)
+ channel = self.make_request(
+ "GET", self.sync_endpoint, access_token=alice_access_token
+ )
self.assertEqual(channel.code, 200, channel.json_body)
next_batch = channel.json_body["next_batch"]
@@ -713,19 +869,19 @@ class DeviceListSyncTestCase(unittest.HomeserverTestCase):
# It won't return until something has happened
incremental_sync_channel = self.make_request(
"GET",
- f"/sync?since={next_batch}&timeout=30000",
- access_token=self.tok,
+ f"{self.sync_endpoint}?since={next_batch}&timeout=30000",
+ access_token=alice_access_token,
await_result=False,
)
# Change our device's display name
channel = self.make_request(
"PUT",
- f"devices/{device_id}",
+ f"devices/{test_device_id}",
{
"display_name": "freeze ray",
},
- access_token=self.tok,
+ access_token=alice_access_token,
)
self.assertEqual(channel.code, 200, channel.json_body)
@@ -739,7 +895,230 @@ class DeviceListSyncTestCase(unittest.HomeserverTestCase):
).get("changed", [])
self.assertIn(
- self.user_id, device_list_changes, incremental_sync_channel.json_body
+ alice_user_id, device_list_changes, incremental_sync_channel.json_body
+ )
+
+
+@parameterized_class(
+ ("sync_endpoint", "experimental_features"),
+ [
+ ("/sync", {}),
+ (
+ "/_matrix/client/unstable/org.matrix.msc3575/sync/e2ee",
+ # Enable sliding sync
+ {"msc3575_enabled": True},
+ ),
+ ],
+)
+class DeviceOneTimeKeysSyncTestCase(unittest.HomeserverTestCase):
+ """
+ Tests regarding device one time keys (`device_one_time_keys_count`) changes.
+
+ Attributes:
+ sync_endpoint: The endpoint under test to use for syncing.
+ experimental_features: The experimental features homeserver config to use.
+ """
+
+ sync_endpoint: str
+ experimental_features: JsonDict
+
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ sync.register_servlets,
+ devices.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ config["experimental_features"] = self.experimental_features
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.e2e_keys_handler = hs.get_e2e_keys_handler()
+
+ def test_no_device_one_time_keys(self) -> None:
+ """
+        Tests that when no one time keys are set, the default `signed_curve25519`
+        algorithm still appears in `device_one_time_keys_count`.
+ """
+ test_device_id = "TESTDEVICE"
+
+ alice_user_id = self.register_user("alice", "correcthorse")
+ alice_access_token = self.login(
+ alice_user_id, "correcthorse", device_id=test_device_id
+ )
+
+ # Request an initial sync
+ channel = self.make_request(
+ "GET", self.sync_endpoint, access_token=alice_access_token
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Check for those one time key counts
+ self.assertDictEqual(
+ channel.json_body["device_one_time_keys_count"],
+ # Note that "signed_curve25519" is always returned in key count responses
+ # regardless of whether we uploaded any keys for it. This is necessary until
+ # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+ {"signed_curve25519": 0},
+ channel.json_body["device_one_time_keys_count"],
+ )
+
+ def test_returns_device_one_time_keys(self) -> None:
+ """
+ Tests that one time keys for the device/user are counted correctly in the `/sync`
+ response
+ """
+ test_device_id = "TESTDEVICE"
+
+ alice_user_id = self.register_user("alice", "correcthorse")
+ alice_access_token = self.login(
+ alice_user_id, "correcthorse", device_id=test_device_id
+ )
+
+ # Upload one time keys for the user/device
+ keys: JsonDict = {
+ "alg1:k1": "key1",
+ "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}},
+ "alg2:k3": {"key": "key3"},
+ }
+ res = self.get_success(
+ self.e2e_keys_handler.upload_keys_for_user(
+ alice_user_id, test_device_id, {"one_time_keys": keys}
+ )
+ )
+ # Note that "signed_curve25519" is always returned in key count responses
+ # regardless of whether we uploaded any keys for it. This is necessary until
+ # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+ self.assertDictEqual(
+ res,
+ {"one_time_key_counts": {"alg1": 1, "alg2": 2, "signed_curve25519": 0}},
+ )
+
+ # Request an initial sync
+ channel = self.make_request(
+ "GET", self.sync_endpoint, access_token=alice_access_token
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Check for those one time key counts
+ self.assertDictEqual(
+ channel.json_body["device_one_time_keys_count"],
+ {"alg1": 1, "alg2": 2, "signed_curve25519": 0},
+ channel.json_body["device_one_time_keys_count"],
+ )
+
+
+@parameterized_class(
+ ("sync_endpoint", "experimental_features"),
+ [
+ ("/sync", {}),
+ (
+ "/_matrix/client/unstable/org.matrix.msc3575/sync/e2ee",
+ # Enable sliding sync
+ {"msc3575_enabled": True},
+ ),
+ ],
+)
+class DeviceUnusedFallbackKeySyncTestCase(unittest.HomeserverTestCase):
+ """
+    Tests regarding device fallback keys (`device_unused_fallback_key_types`) changes.
+
+ Attributes:
+ sync_endpoint: The endpoint under test to use for syncing.
+ experimental_features: The experimental features homeserver config to use.
+ """
+
+ sync_endpoint: str
+ experimental_features: JsonDict
+
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ sync.register_servlets,
+ devices.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ config["experimental_features"] = self.experimental_features
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = self.hs.get_datastores().main
+ self.e2e_keys_handler = hs.get_e2e_keys_handler()
+
+ def test_no_device_unused_fallback_key(self) -> None:
+ """
+        Test that when no unused fallback key is set, an empty list is returned. The MSC
+ says "The device_unused_fallback_key_types parameter must be present if the
+ server supports fallback keys.",
+ https://github.com/matrix-org/matrix-spec-proposals/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
+ """
+ test_device_id = "TESTDEVICE"
+
+ alice_user_id = self.register_user("alice", "correcthorse")
+ alice_access_token = self.login(
+ alice_user_id, "correcthorse", device_id=test_device_id
+ )
+
+ # Request an initial sync
+ channel = self.make_request(
+ "GET", self.sync_endpoint, access_token=alice_access_token
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Check that the list of unused fallback key types is empty
+ self.assertListEqual(
+ channel.json_body["device_unused_fallback_key_types"],
+ [],
+ channel.json_body["device_unused_fallback_key_types"],
+ )
+
+    def test_returns_device_unused_fallback_key_types(self) -> None:
+        """
+        Tests that unused device fallback key types are returned correctly in the `/sync` response.
+        """
+ test_device_id = "TESTDEVICE"
+
+ alice_user_id = self.register_user("alice", "correcthorse")
+ alice_access_token = self.login(
+ alice_user_id, "correcthorse", device_id=test_device_id
+ )
+
+ # We shouldn't have any unused fallback keys yet
+ res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(alice_user_id, test_device_id)
+ )
+ self.assertEqual(res, [])
+
+ # Upload a fallback key for the user/device
+ fallback_key = {"alg1:k1": "fallback_key1"}
+ self.get_success(
+ self.e2e_keys_handler.upload_keys_for_user(
+ alice_user_id,
+ test_device_id,
+ {"fallback_keys": fallback_key},
+ )
+ )
+ # We should now have an unused alg1 key
+ fallback_res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(alice_user_id, test_device_id)
+ )
+ self.assertEqual(fallback_res, ["alg1"], fallback_res)
+
+ # Request an initial sync
+ channel = self.make_request(
+ "GET", self.sync_endpoint, access_token=alice_access_token
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Check for the unused fallback key types
+ self.assertListEqual(
+ channel.json_body["device_unused_fallback_key_types"],
+ ["alg1"],
+ channel.json_body["device_unused_fallback_key_types"],
)
From 9edb725ebcd41c0ca1ee8cbb833dcb28df47a402 Mon Sep 17 00:00:00 2001
From: Shay
Date: Fri, 24 May 2024 01:47:37 -0700
Subject: [PATCH 118/503] Support MSC3916 by adding unstable media endpoints to
`_matrix/client` (#17213)
[MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
adds new media endpoints under `_matrix/client`. This PR adds the
`/preview_url`, `/config`, and `/thumbnail` endpoints. `/download` will
be added in a follow-up PR once the work for the federation `/download`
endpoint is complete (see
https://github.com/element-hq/synapse/pull/17172).
Should be reviewable commit-by-commit.
---
changelog.d/17213.feature | 1 +
synapse/config/experimental.py | 4 +
synapse/media/thumbnailer.py | 486 ++++++-
synapse/rest/client/media.py | 205 +++
synapse/rest/media/thumbnail_resource.py | 476 +------
tests/media/test_media_storage.py | 161 ++-
tests/rest/client/test_media.py | 1609 ++++++++++++++++++++++
7 files changed, 2393 insertions(+), 549 deletions(-)
create mode 100644 changelog.d/17213.feature
create mode 100644 synapse/rest/client/media.py
create mode 100644 tests/rest/client/test_media.py
diff --git a/changelog.d/17213.feature b/changelog.d/17213.feature
new file mode 100644
index 0000000000..ca60afa8f3
--- /dev/null
+++ b/changelog.d/17213.feature
@@ -0,0 +1 @@
+Support MSC3916 by adding unstable media endpoints to `_matrix/client` (#17213).
\ No newline at end of file
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index cda7afc5c4..75fe6d7b24 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -439,3 +439,7 @@ class ExperimentalConfig(Config):
self.msc4115_membership_on_events = experimental.get(
"msc4115_membership_on_events", False
)
+
+ self.msc3916_authenticated_media_enabled = experimental.get(
+ "msc3916_authenticated_media_enabled", False
+ )
diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py
index 5538020bec..cc3acf51e1 100644
--- a/synapse/media/thumbnailer.py
+++ b/synapse/media/thumbnailer.py
@@ -22,11 +22,27 @@
import logging
from io import BytesIO
from types import TracebackType
-from typing import Optional, Tuple, Type
+from typing import TYPE_CHECKING, List, Optional, Tuple, Type
from PIL import Image
+from synapse.api.errors import Codes, SynapseError, cs_error
+from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP
+from synapse.http.server import respond_with_json
+from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace
+from synapse.media._base import (
+ FileInfo,
+ ThumbnailInfo,
+ respond_404,
+ respond_with_file,
+ respond_with_responder,
+)
+from synapse.media.media_storage import MediaStorage
+
+if TYPE_CHECKING:
+ from synapse.media.media_repository import MediaRepository
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -231,3 +247,471 @@ class Thumbnailer:
def __del__(self) -> None:
# Make sure we actually do close the image, rather than leak data.
self.close()
+
+
+class ThumbnailProvider:
+ def __init__(
+ self,
+ hs: "HomeServer",
+ media_repo: "MediaRepository",
+ media_storage: MediaStorage,
+ ):
+ self.hs = hs
+ self.media_repo = media_repo
+ self.media_storage = media_storage
+ self.store = hs.get_datastores().main
+ self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
+
+ async def respond_local_thumbnail(
+ self,
+ request: SynapseRequest,
+ media_id: str,
+ width: int,
+ height: int,
+ method: str,
+ m_type: str,
+ max_timeout_ms: int,
+ ) -> None:
+ media_info = await self.media_repo.get_local_media_info(
+ request, media_id, max_timeout_ms
+ )
+ if not media_info:
+ return
+
+ thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
+ await self._select_and_respond_with_thumbnail(
+ request,
+ width,
+ height,
+ method,
+ m_type,
+ thumbnail_infos,
+ media_id,
+ media_id,
+ url_cache=bool(media_info.url_cache),
+ server_name=None,
+ )
+
+ async def select_or_generate_local_thumbnail(
+ self,
+ request: SynapseRequest,
+ media_id: str,
+ desired_width: int,
+ desired_height: int,
+ desired_method: str,
+ desired_type: str,
+ max_timeout_ms: int,
+ ) -> None:
+ media_info = await self.media_repo.get_local_media_info(
+ request, media_id, max_timeout_ms
+ )
+
+ if not media_info:
+ return
+
+ thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
+ for info in thumbnail_infos:
+ t_w = info.width == desired_width
+ t_h = info.height == desired_height
+ t_method = info.method == desired_method
+ t_type = info.type == desired_type
+
+ if t_w and t_h and t_method and t_type:
+ file_info = FileInfo(
+ server_name=None,
+ file_id=media_id,
+ url_cache=bool(media_info.url_cache),
+ thumbnail=info,
+ )
+
+ responder = await self.media_storage.fetch_media(file_info)
+ if responder:
+ await respond_with_responder(
+ request, responder, info.type, info.length
+ )
+ return
+
+ logger.debug("We don't have a thumbnail of that size. Generating")
+
+ # Okay, so we generate one.
+ file_path = await self.media_repo.generate_local_exact_thumbnail(
+ media_id,
+ desired_width,
+ desired_height,
+ desired_method,
+ desired_type,
+ url_cache=bool(media_info.url_cache),
+ )
+
+ if file_path:
+ await respond_with_file(request, desired_type, file_path)
+ else:
+ logger.warning("Failed to generate thumbnail")
+ raise SynapseError(400, "Failed to generate thumbnail.")
+
+ async def select_or_generate_remote_thumbnail(
+ self,
+ request: SynapseRequest,
+ server_name: str,
+ media_id: str,
+ desired_width: int,
+ desired_height: int,
+ desired_method: str,
+ desired_type: str,
+ max_timeout_ms: int,
+ ) -> None:
+ media_info = await self.media_repo.get_remote_media_info(
+ server_name, media_id, max_timeout_ms
+ )
+ if not media_info:
+ respond_404(request)
+ return
+
+ thumbnail_infos = await self.store.get_remote_media_thumbnails(
+ server_name, media_id
+ )
+
+ file_id = media_info.filesystem_id
+
+ for info in thumbnail_infos:
+ t_w = info.width == desired_width
+ t_h = info.height == desired_height
+ t_method = info.method == desired_method
+ t_type = info.type == desired_type
+
+ if t_w and t_h and t_method and t_type:
+ file_info = FileInfo(
+ server_name=server_name,
+ file_id=file_id,
+ thumbnail=info,
+ )
+
+ responder = await self.media_storage.fetch_media(file_info)
+ if responder:
+ await respond_with_responder(
+ request, responder, info.type, info.length
+ )
+ return
+
+ logger.debug("We don't have a thumbnail of that size. Generating")
+
+ # Okay, so we generate one.
+ file_path = await self.media_repo.generate_remote_exact_thumbnail(
+ server_name,
+ file_id,
+ media_id,
+ desired_width,
+ desired_height,
+ desired_method,
+ desired_type,
+ )
+
+ if file_path:
+ await respond_with_file(request, desired_type, file_path)
+ else:
+ logger.warning("Failed to generate thumbnail")
+ raise SynapseError(400, "Failed to generate thumbnail.")
+
+ async def respond_remote_thumbnail(
+ self,
+ request: SynapseRequest,
+ server_name: str,
+ media_id: str,
+ width: int,
+ height: int,
+ method: str,
+ m_type: str,
+ max_timeout_ms: int,
+ ) -> None:
+ # TODO: Don't download the whole remote file
+ # We should proxy the thumbnail from the remote server instead of
+ # downloading the remote file and generating our own thumbnails.
+ media_info = await self.media_repo.get_remote_media_info(
+ server_name, media_id, max_timeout_ms
+ )
+ if not media_info:
+ return
+
+ thumbnail_infos = await self.store.get_remote_media_thumbnails(
+ server_name, media_id
+ )
+ await self._select_and_respond_with_thumbnail(
+ request,
+ width,
+ height,
+ method,
+ m_type,
+ thumbnail_infos,
+ media_id,
+ media_info.filesystem_id,
+ url_cache=False,
+ server_name=server_name,
+ )
+
+ async def _select_and_respond_with_thumbnail(
+ self,
+ request: SynapseRequest,
+ desired_width: int,
+ desired_height: int,
+ desired_method: str,
+ desired_type: str,
+ thumbnail_infos: List[ThumbnailInfo],
+ media_id: str,
+ file_id: str,
+ url_cache: bool,
+ server_name: Optional[str] = None,
+ ) -> None:
+ """
+ Respond to a request with an appropriate thumbnail from the previously generated thumbnails.
+
+ Args:
+ request: The incoming request.
+ desired_width: The desired width, the returned thumbnail may be larger than this.
+ desired_height: The desired height, the returned thumbnail may be larger than this.
+ desired_method: The desired method used to generate the thumbnail.
+ desired_type: The desired content-type of the thumbnail.
+ thumbnail_infos: A list of thumbnail info of candidate thumbnails.
+ file_id: The ID of the media that a thumbnail is being requested for.
+ url_cache: True if this is from a URL cache.
+ server_name: The server name, if this is a remote thumbnail.
+ """
+ logger.debug(
+ "_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s",
+ media_id,
+ desired_width,
+ desired_height,
+ desired_method,
+ thumbnail_infos,
+ )
+
+ # If `dynamic_thumbnails` is enabled, we expect Synapse to go down a
+ # different code path to handle it.
+ assert not self.dynamic_thumbnails
+
+ if thumbnail_infos:
+ file_info = self._select_thumbnail(
+ desired_width,
+ desired_height,
+ desired_method,
+ desired_type,
+ thumbnail_infos,
+ file_id,
+ url_cache,
+ server_name,
+ )
+ if not file_info:
+ logger.info("Couldn't find a thumbnail matching the desired inputs")
+ respond_404(request)
+ return
+
+ # The thumbnail property must exist.
+ assert file_info.thumbnail is not None
+
+ responder = await self.media_storage.fetch_media(file_info)
+ if responder:
+ await respond_with_responder(
+ request,
+ responder,
+ file_info.thumbnail.type,
+ file_info.thumbnail.length,
+ )
+ return
+
+ # If we can't find the thumbnail we regenerate it. This can happen
+ # if e.g. we've deleted the thumbnails but still have the original
+ # image somewhere.
+ #
+ # Since we have an entry for the thumbnail in the DB we a) know we
+ # have have successfully generated the thumbnail in the past (so we
+ # don't need to worry about repeatedly failing to generate
+ # thumbnails), and b) have already calculated that appropriate
+ # width/height/method so we can just call the "generate exact"
+ # methods.
+
+ # First let's check that we do actually have the original image
+ # still. This will throw a 404 if we don't.
+ # TODO: We should refetch the thumbnails for remote media.
+ await self.media_storage.ensure_media_is_in_local_cache(
+ FileInfo(server_name, file_id, url_cache=url_cache)
+ )
+
+ if server_name:
+ await self.media_repo.generate_remote_exact_thumbnail(
+ server_name,
+ file_id=file_id,
+ media_id=media_id,
+ t_width=file_info.thumbnail.width,
+ t_height=file_info.thumbnail.height,
+ t_method=file_info.thumbnail.method,
+ t_type=file_info.thumbnail.type,
+ )
+ else:
+ await self.media_repo.generate_local_exact_thumbnail(
+ media_id=media_id,
+ t_width=file_info.thumbnail.width,
+ t_height=file_info.thumbnail.height,
+ t_method=file_info.thumbnail.method,
+ t_type=file_info.thumbnail.type,
+ url_cache=url_cache,
+ )
+
+ responder = await self.media_storage.fetch_media(file_info)
+ await respond_with_responder(
+ request,
+ responder,
+ file_info.thumbnail.type,
+ file_info.thumbnail.length,
+ )
+ else:
+ # This might be because:
+ # 1. We can't create thumbnails for the given media (corrupted or
+ # unsupported file type), or
+ # 2. The thumbnailing process never ran or errored out initially
+ # when the media was first uploaded (these bugs should be
+ # reported and fixed).
+ # Note that we don't attempt to generate a thumbnail now because
+ # `dynamic_thumbnails` is disabled.
+ logger.info("Failed to find any generated thumbnails")
+
+ assert request.path is not None
+ respond_with_json(
+ request,
+ 400,
+ cs_error(
+ "Cannot find any thumbnails for the requested media ('%s'). This might mean the media is not a supported_media_format=(%s) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)"
+ % (
+ request.path.decode(),
+ ", ".join(THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.keys()),
+ ),
+ code=Codes.UNKNOWN,
+ ),
+ send_cors=True,
+ )
+
+ def _select_thumbnail(
+ self,
+ desired_width: int,
+ desired_height: int,
+ desired_method: str,
+ desired_type: str,
+ thumbnail_infos: List[ThumbnailInfo],
+ file_id: str,
+ url_cache: bool,
+ server_name: Optional[str],
+ ) -> Optional[FileInfo]:
+ """
+ Choose an appropriate thumbnail from the previously generated thumbnails.
+
+ Args:
+ desired_width: The desired width, the returned thumbnail may be larger than this.
+ desired_height: The desired height, the returned thumbnail may be larger than this.
+ desired_method: The desired method used to generate the thumbnail.
+ desired_type: The desired content-type of the thumbnail.
+ thumbnail_infos: A list of thumbnail infos of candidate thumbnails.
+ file_id: The ID of the media that a thumbnail is being requested for.
+ url_cache: True if this is from a URL cache.
+ server_name: The server name, if this is a remote thumbnail.
+
+ Returns:
+ The thumbnail which best matches the desired parameters.
+ """
+ desired_method = desired_method.lower()
+
+ # The chosen thumbnail.
+ thumbnail_info = None
+
+ d_w = desired_width
+ d_h = desired_height
+
+ if desired_method == "crop":
+ # Thumbnails that match equal or larger sizes of desired width/height.
+ crop_info_list: List[
+ Tuple[int, int, int, bool, Optional[int], ThumbnailInfo]
+ ] = []
+ # Other thumbnails.
+ crop_info_list2: List[
+ Tuple[int, int, int, bool, Optional[int], ThumbnailInfo]
+ ] = []
+ for info in thumbnail_infos:
+ # Skip thumbnails generated with different methods.
+ if info.method != "crop":
+ continue
+
+ t_w = info.width
+ t_h = info.height
+ aspect_quality = abs(d_w * t_h - d_h * t_w)
+ min_quality = 0 if d_w <= t_w and d_h <= t_h else 1
+ size_quality = abs((d_w - t_w) * (d_h - t_h))
+ type_quality = desired_type != info.type
+ length_quality = info.length
+ if t_w >= d_w or t_h >= d_h:
+ crop_info_list.append(
+ (
+ aspect_quality,
+ min_quality,
+ size_quality,
+ type_quality,
+ length_quality,
+ info,
+ )
+ )
+ else:
+ crop_info_list2.append(
+ (
+ aspect_quality,
+ min_quality,
+ size_quality,
+ type_quality,
+ length_quality,
+ info,
+ )
+ )
+ # Pick the most appropriate thumbnail. Some values of `desired_width` and
+ # `desired_height` may result in a tie, in which case we avoid comparing on
+ # the thumbnail info and pick the thumbnail that appears earlier
+ # in the list of candidates.
+ if crop_info_list:
+ thumbnail_info = min(crop_info_list, key=lambda t: t[:-1])[-1]
+ elif crop_info_list2:
+ thumbnail_info = min(crop_info_list2, key=lambda t: t[:-1])[-1]
+ elif desired_method == "scale":
+ # Thumbnails that match equal or larger sizes of desired width/height.
+ info_list: List[Tuple[int, bool, int, ThumbnailInfo]] = []
+ # Other thumbnails.
+ info_list2: List[Tuple[int, bool, int, ThumbnailInfo]] = []
+
+ for info in thumbnail_infos:
+ # Skip thumbnails generated with different methods.
+ if info.method != "scale":
+ continue
+
+ t_w = info.width
+ t_h = info.height
+ size_quality = abs((d_w - t_w) * (d_h - t_h))
+ type_quality = desired_type != info.type
+ length_quality = info.length
+ if t_w >= d_w or t_h >= d_h:
+ info_list.append((size_quality, type_quality, length_quality, info))
+ else:
+ info_list2.append(
+ (size_quality, type_quality, length_quality, info)
+ )
+ # Pick the most appropriate thumbnail. Some values of `desired_width` and
+ # `desired_height` may result in a tie, in which case we avoid comparing on
+ # the thumbnail info and pick the thumbnail that appears earlier
+ # in the list of candidates.
+ if info_list:
+ thumbnail_info = min(info_list, key=lambda t: t[:-1])[-1]
+ elif info_list2:
+ thumbnail_info = min(info_list2, key=lambda t: t[:-1])[-1]
+
+ if thumbnail_info:
+ return FileInfo(
+ file_id=file_id,
+ url_cache=url_cache,
+ server_name=server_name,
+ thumbnail=thumbnail_info,
+ )
+
+ # No matching thumbnail was found.
+ return None
diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py
new file mode 100644
index 0000000000..172d240783
--- /dev/null
+++ b/synapse/rest/client/media.py
@@ -0,0 +1,205 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+
+import logging
+import re
+
+from synapse.http.server import (
+ HttpServer,
+ respond_with_json,
+ respond_with_json_bytes,
+ set_corp_headers,
+ set_cors_headers,
+)
+from synapse.http.servlet import RestServlet, parse_integer, parse_string
+from synapse.http.site import SynapseRequest
+from synapse.media._base import (
+ DEFAULT_MAX_TIMEOUT_MS,
+ MAXIMUM_ALLOWED_MAX_TIMEOUT_MS,
+ respond_404,
+)
+from synapse.media.media_repository import MediaRepository
+from synapse.media.media_storage import MediaStorage
+from synapse.media.thumbnailer import ThumbnailProvider
+from synapse.server import HomeServer
+from synapse.util.stringutils import parse_and_validate_server_name
+
+logger = logging.getLogger(__name__)
+
+
+class UnstablePreviewURLServlet(RestServlet):
+ """
+ Same as `GET /_matrix/media/r0/preview_url`, this endpoint provides a generic preview API
+ for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix
+ specific additions).
+
+ This does have trade-offs compared to other designs:
+
+ * Pros:
+ * Simple and flexible; can be used by any clients at any point
+ * Cons:
+ * If each homeserver provides one of these independently, all the homeservers in a
+ room may needlessly DoS the target URI
+ * The URL metadata must be stored somewhere, rather than just using Matrix
+ itself to store the media.
+ * Matrix cannot be used to distribute the metadata between homeservers.
+ """
+
+ PATTERNS = [
+ re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/preview_url$")
+ ]
+
+ def __init__(
+ self,
+ hs: "HomeServer",
+ media_repo: "MediaRepository",
+ media_storage: MediaStorage,
+ ):
+ super().__init__()
+
+ self.auth = hs.get_auth()
+ self.clock = hs.get_clock()
+ self.media_repo = media_repo
+ self.media_storage = media_storage
+ assert self.media_repo.url_previewer is not None
+ self.url_previewer = self.media_repo.url_previewer
+
+ async def on_GET(self, request: SynapseRequest) -> None:
+ requester = await self.auth.get_user_by_req(request)
+ url = parse_string(request, "url", required=True)
+ ts = parse_integer(request, "ts")
+ if ts is None:
+ ts = self.clock.time_msec()
+
+ og = await self.url_previewer.preview(url, requester.user, ts)
+ respond_with_json_bytes(request, 200, og, send_cors=True)
+
+
+class UnstableMediaConfigResource(RestServlet):
+ PATTERNS = [
+ re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/config$")
+ ]
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ config = hs.config
+ self.clock = hs.get_clock()
+ self.auth = hs.get_auth()
+ self.limits_dict = {"m.upload.size": config.media.max_upload_size}
+
+ async def on_GET(self, request: SynapseRequest) -> None:
+ await self.auth.get_user_by_req(request)
+ respond_with_json(request, 200, self.limits_dict, send_cors=True)
+
+
+class UnstableThumbnailResource(RestServlet):
+ PATTERNS = [
+ re.compile(
+ "/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/(?P[^/]*)/(?P[^/]*)$"
+ )
+ ]
+
+ def __init__(
+ self,
+ hs: "HomeServer",
+ media_repo: "MediaRepository",
+ media_storage: MediaStorage,
+ ):
+ super().__init__()
+
+ self.store = hs.get_datastores().main
+ self.media_repo = media_repo
+ self.media_storage = media_storage
+ self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
+ self._is_mine_server_name = hs.is_mine_server_name
+ self._server_name = hs.hostname
+ self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from
+ self.thumbnailer = ThumbnailProvider(hs, media_repo, media_storage)
+ self.auth = hs.get_auth()
+
+ async def on_GET(
+ self, request: SynapseRequest, server_name: str, media_id: str
+ ) -> None:
+ # Validate the server name, raising if invalid
+ parse_and_validate_server_name(server_name)
+ await self.auth.get_user_by_req(request)
+
+ set_cors_headers(request)
+ set_corp_headers(request)
+ width = parse_integer(request, "width", required=True)
+ height = parse_integer(request, "height", required=True)
+ method = parse_string(request, "method", "scale")
+        # TODO Parse the Accept header to get a prioritised list of thumbnail types.
+ m_type = "image/png"
+ max_timeout_ms = parse_integer(
+ request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
+ )
+ max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
+
+ if self._is_mine_server_name(server_name):
+ if self.dynamic_thumbnails:
+ await self.thumbnailer.select_or_generate_local_thumbnail(
+ request, media_id, width, height, method, m_type, max_timeout_ms
+ )
+ else:
+ await self.thumbnailer.respond_local_thumbnail(
+ request, media_id, width, height, method, m_type, max_timeout_ms
+ )
+ self.media_repo.mark_recently_accessed(None, media_id)
+ else:
+ # Don't let users download media from configured domains, even if it
+ # is already downloaded. This is Trust & Safety tooling to make some
+ # media inaccessible to local users.
+ # See `prevent_media_downloads_from` config docs for more info.
+ if server_name in self.prevent_media_downloads_from:
+ respond_404(request)
+ return
+
+ remote_resp_function = (
+ self.thumbnailer.select_or_generate_remote_thumbnail
+ if self.dynamic_thumbnails
+ else self.thumbnailer.respond_remote_thumbnail
+ )
+ await remote_resp_function(
+ request,
+ server_name,
+ media_id,
+ width,
+ height,
+ method,
+ m_type,
+ max_timeout_ms,
+ )
+ self.media_repo.mark_recently_accessed(server_name, media_id)
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ if hs.config.experimental.msc3916_authenticated_media_enabled:
+ media_repo = hs.get_media_repository()
+ if hs.config.media.url_preview_enabled:
+ UnstablePreviewURLServlet(
+ hs, media_repo, media_repo.media_storage
+ ).register(http_server)
+ UnstableMediaConfigResource(hs).register(http_server)
+ UnstableThumbnailResource(hs, media_repo, media_repo.media_storage).register(
+ http_server
+ )
diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py
index 7cb335c7c3..fe8fbb06e4 100644
--- a/synapse/rest/media/thumbnail_resource.py
+++ b/synapse/rest/media/thumbnail_resource.py
@@ -22,23 +22,18 @@
import logging
import re
-from typing import TYPE_CHECKING, List, Optional, Tuple
+from typing import TYPE_CHECKING
-from synapse.api.errors import Codes, SynapseError, cs_error
-from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP
-from synapse.http.server import respond_with_json, set_corp_headers, set_cors_headers
+from synapse.http.server import set_corp_headers, set_cors_headers
from synapse.http.servlet import RestServlet, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.media._base import (
DEFAULT_MAX_TIMEOUT_MS,
MAXIMUM_ALLOWED_MAX_TIMEOUT_MS,
- FileInfo,
- ThumbnailInfo,
respond_404,
- respond_with_file,
- respond_with_responder,
)
from synapse.media.media_storage import MediaStorage
+from synapse.media.thumbnailer import ThumbnailProvider
from synapse.util.stringutils import parse_and_validate_server_name
if TYPE_CHECKING:
@@ -66,10 +61,11 @@ class ThumbnailResource(RestServlet):
self.store = hs.get_datastores().main
self.media_repo = media_repo
self.media_storage = media_storage
- self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
self._is_mine_server_name = hs.is_mine_server_name
self._server_name = hs.hostname
self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from
+ self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
+ self.thumbnail_provider = ThumbnailProvider(hs, media_repo, media_storage)
async def on_GET(
self, request: SynapseRequest, server_name: str, media_id: str
@@ -91,11 +87,11 @@ class ThumbnailResource(RestServlet):
if self._is_mine_server_name(server_name):
if self.dynamic_thumbnails:
- await self._select_or_generate_local_thumbnail(
+ await self.thumbnail_provider.select_or_generate_local_thumbnail(
request, media_id, width, height, method, m_type, max_timeout_ms
)
else:
- await self._respond_local_thumbnail(
+ await self.thumbnail_provider.respond_local_thumbnail(
request, media_id, width, height, method, m_type, max_timeout_ms
)
self.media_repo.mark_recently_accessed(None, media_id)
@@ -109,9 +105,9 @@ class ThumbnailResource(RestServlet):
return
remote_resp_function = (
- self._select_or_generate_remote_thumbnail
+ self.thumbnail_provider.select_or_generate_remote_thumbnail
if self.dynamic_thumbnails
- else self._respond_remote_thumbnail
+ else self.thumbnail_provider.respond_remote_thumbnail
)
await remote_resp_function(
request,
@@ -124,457 +120,3 @@ class ThumbnailResource(RestServlet):
max_timeout_ms,
)
self.media_repo.mark_recently_accessed(server_name, media_id)
-
- async def _respond_local_thumbnail(
- self,
- request: SynapseRequest,
- media_id: str,
- width: int,
- height: int,
- method: str,
- m_type: str,
- max_timeout_ms: int,
- ) -> None:
- media_info = await self.media_repo.get_local_media_info(
- request, media_id, max_timeout_ms
- )
- if not media_info:
- return
-
- thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
- await self._select_and_respond_with_thumbnail(
- request,
- width,
- height,
- method,
- m_type,
- thumbnail_infos,
- media_id,
- media_id,
- url_cache=bool(media_info.url_cache),
- server_name=None,
- )
-
- async def _select_or_generate_local_thumbnail(
- self,
- request: SynapseRequest,
- media_id: str,
- desired_width: int,
- desired_height: int,
- desired_method: str,
- desired_type: str,
- max_timeout_ms: int,
- ) -> None:
- media_info = await self.media_repo.get_local_media_info(
- request, media_id, max_timeout_ms
- )
-
- if not media_info:
- return
-
- thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
- for info in thumbnail_infos:
- t_w = info.width == desired_width
- t_h = info.height == desired_height
- t_method = info.method == desired_method
- t_type = info.type == desired_type
-
- if t_w and t_h and t_method and t_type:
- file_info = FileInfo(
- server_name=None,
- file_id=media_id,
- url_cache=bool(media_info.url_cache),
- thumbnail=info,
- )
-
- responder = await self.media_storage.fetch_media(file_info)
- if responder:
- await respond_with_responder(
- request, responder, info.type, info.length
- )
- return
-
- logger.debug("We don't have a thumbnail of that size. Generating")
-
- # Okay, so we generate one.
- file_path = await self.media_repo.generate_local_exact_thumbnail(
- media_id,
- desired_width,
- desired_height,
- desired_method,
- desired_type,
- url_cache=bool(media_info.url_cache),
- )
-
- if file_path:
- await respond_with_file(request, desired_type, file_path)
- else:
- logger.warning("Failed to generate thumbnail")
- raise SynapseError(400, "Failed to generate thumbnail.")
-
- async def _select_or_generate_remote_thumbnail(
- self,
- request: SynapseRequest,
- server_name: str,
- media_id: str,
- desired_width: int,
- desired_height: int,
- desired_method: str,
- desired_type: str,
- max_timeout_ms: int,
- ) -> None:
- media_info = await self.media_repo.get_remote_media_info(
- server_name, media_id, max_timeout_ms
- )
- if not media_info:
- respond_404(request)
- return
-
- thumbnail_infos = await self.store.get_remote_media_thumbnails(
- server_name, media_id
- )
-
- file_id = media_info.filesystem_id
-
- for info in thumbnail_infos:
- t_w = info.width == desired_width
- t_h = info.height == desired_height
- t_method = info.method == desired_method
- t_type = info.type == desired_type
-
- if t_w and t_h and t_method and t_type:
- file_info = FileInfo(
- server_name=server_name,
- file_id=file_id,
- thumbnail=info,
- )
-
- responder = await self.media_storage.fetch_media(file_info)
- if responder:
- await respond_with_responder(
- request, responder, info.type, info.length
- )
- return
-
- logger.debug("We don't have a thumbnail of that size. Generating")
-
- # Okay, so we generate one.
- file_path = await self.media_repo.generate_remote_exact_thumbnail(
- server_name,
- file_id,
- media_id,
- desired_width,
- desired_height,
- desired_method,
- desired_type,
- )
-
- if file_path:
- await respond_with_file(request, desired_type, file_path)
- else:
- logger.warning("Failed to generate thumbnail")
- raise SynapseError(400, "Failed to generate thumbnail.")
-
- async def _respond_remote_thumbnail(
- self,
- request: SynapseRequest,
- server_name: str,
- media_id: str,
- width: int,
- height: int,
- method: str,
- m_type: str,
- max_timeout_ms: int,
- ) -> None:
- # TODO: Don't download the whole remote file
- # We should proxy the thumbnail from the remote server instead of
- # downloading the remote file and generating our own thumbnails.
- media_info = await self.media_repo.get_remote_media_info(
- server_name, media_id, max_timeout_ms
- )
- if not media_info:
- return
-
- thumbnail_infos = await self.store.get_remote_media_thumbnails(
- server_name, media_id
- )
- await self._select_and_respond_with_thumbnail(
- request,
- width,
- height,
- method,
- m_type,
- thumbnail_infos,
- media_id,
- media_info.filesystem_id,
- url_cache=False,
- server_name=server_name,
- )
-
- async def _select_and_respond_with_thumbnail(
- self,
- request: SynapseRequest,
- desired_width: int,
- desired_height: int,
- desired_method: str,
- desired_type: str,
- thumbnail_infos: List[ThumbnailInfo],
- media_id: str,
- file_id: str,
- url_cache: bool,
- server_name: Optional[str] = None,
- ) -> None:
- """
- Respond to a request with an appropriate thumbnail from the previously generated thumbnails.
-
- Args:
- request: The incoming request.
- desired_width: The desired width, the returned thumbnail may be larger than this.
- desired_height: The desired height, the returned thumbnail may be larger than this.
- desired_method: The desired method used to generate the thumbnail.
- desired_type: The desired content-type of the thumbnail.
- thumbnail_infos: A list of thumbnail info of candidate thumbnails.
- file_id: The ID of the media that a thumbnail is being requested for.
- url_cache: True if this is from a URL cache.
- server_name: The server name, if this is a remote thumbnail.
- """
- logger.debug(
- "_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s",
- media_id,
- desired_width,
- desired_height,
- desired_method,
- thumbnail_infos,
- )
-
- # If `dynamic_thumbnails` is enabled, we expect Synapse to go down a
- # different code path to handle it.
- assert not self.dynamic_thumbnails
-
- if thumbnail_infos:
- file_info = self._select_thumbnail(
- desired_width,
- desired_height,
- desired_method,
- desired_type,
- thumbnail_infos,
- file_id,
- url_cache,
- server_name,
- )
- if not file_info:
- logger.info("Couldn't find a thumbnail matching the desired inputs")
- respond_404(request)
- return
-
- # The thumbnail property must exist.
- assert file_info.thumbnail is not None
-
- responder = await self.media_storage.fetch_media(file_info)
- if responder:
- await respond_with_responder(
- request,
- responder,
- file_info.thumbnail.type,
- file_info.thumbnail.length,
- )
- return
-
- # If we can't find the thumbnail we regenerate it. This can happen
- # if e.g. we've deleted the thumbnails but still have the original
- # image somewhere.
- #
- # Since we have an entry for the thumbnail in the DB we a) know we
- # have have successfully generated the thumbnail in the past (so we
- # don't need to worry about repeatedly failing to generate
- # thumbnails), and b) have already calculated that appropriate
- # width/height/method so we can just call the "generate exact"
- # methods.
-
- # First let's check that we do actually have the original image
- # still. This will throw a 404 if we don't.
- # TODO: We should refetch the thumbnails for remote media.
- await self.media_storage.ensure_media_is_in_local_cache(
- FileInfo(server_name, file_id, url_cache=url_cache)
- )
-
- if server_name:
- await self.media_repo.generate_remote_exact_thumbnail(
- server_name,
- file_id=file_id,
- media_id=media_id,
- t_width=file_info.thumbnail.width,
- t_height=file_info.thumbnail.height,
- t_method=file_info.thumbnail.method,
- t_type=file_info.thumbnail.type,
- )
- else:
- await self.media_repo.generate_local_exact_thumbnail(
- media_id=media_id,
- t_width=file_info.thumbnail.width,
- t_height=file_info.thumbnail.height,
- t_method=file_info.thumbnail.method,
- t_type=file_info.thumbnail.type,
- url_cache=url_cache,
- )
-
- responder = await self.media_storage.fetch_media(file_info)
- await respond_with_responder(
- request,
- responder,
- file_info.thumbnail.type,
- file_info.thumbnail.length,
- )
- else:
- # This might be because:
- # 1. We can't create thumbnails for the given media (corrupted or
- # unsupported file type), or
- # 2. The thumbnailing process never ran or errored out initially
- # when the media was first uploaded (these bugs should be
- # reported and fixed).
- # Note that we don't attempt to generate a thumbnail now because
- # `dynamic_thumbnails` is disabled.
- logger.info("Failed to find any generated thumbnails")
-
- assert request.path is not None
- respond_with_json(
- request,
- 400,
- cs_error(
- "Cannot find any thumbnails for the requested media ('%s'). This might mean the media is not a supported_media_format=(%s) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)"
- % (
- request.path.decode(),
- ", ".join(THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.keys()),
- ),
- code=Codes.UNKNOWN,
- ),
- send_cors=True,
- )
-
- def _select_thumbnail(
- self,
- desired_width: int,
- desired_height: int,
- desired_method: str,
- desired_type: str,
- thumbnail_infos: List[ThumbnailInfo],
- file_id: str,
- url_cache: bool,
- server_name: Optional[str],
- ) -> Optional[FileInfo]:
- """
- Choose an appropriate thumbnail from the previously generated thumbnails.
-
- Args:
- desired_width: The desired width, the returned thumbnail may be larger than this.
- desired_height: The desired height, the returned thumbnail may be larger than this.
- desired_method: The desired method used to generate the thumbnail.
- desired_type: The desired content-type of the thumbnail.
- thumbnail_infos: A list of thumbnail infos of candidate thumbnails.
- file_id: The ID of the media that a thumbnail is being requested for.
- url_cache: True if this is from a URL cache.
- server_name: The server name, if this is a remote thumbnail.
-
- Returns:
- The thumbnail which best matches the desired parameters.
- """
- desired_method = desired_method.lower()
-
- # The chosen thumbnail.
- thumbnail_info = None
-
- d_w = desired_width
- d_h = desired_height
-
- if desired_method == "crop":
- # Thumbnails that match equal or larger sizes of desired width/height.
- crop_info_list: List[
- Tuple[int, int, int, bool, Optional[int], ThumbnailInfo]
- ] = []
- # Other thumbnails.
- crop_info_list2: List[
- Tuple[int, int, int, bool, Optional[int], ThumbnailInfo]
- ] = []
- for info in thumbnail_infos:
- # Skip thumbnails generated with different methods.
- if info.method != "crop":
- continue
-
- t_w = info.width
- t_h = info.height
- aspect_quality = abs(d_w * t_h - d_h * t_w)
- min_quality = 0 if d_w <= t_w and d_h <= t_h else 1
- size_quality = abs((d_w - t_w) * (d_h - t_h))
- type_quality = desired_type != info.type
- length_quality = info.length
- if t_w >= d_w or t_h >= d_h:
- crop_info_list.append(
- (
- aspect_quality,
- min_quality,
- size_quality,
- type_quality,
- length_quality,
- info,
- )
- )
- else:
- crop_info_list2.append(
- (
- aspect_quality,
- min_quality,
- size_quality,
- type_quality,
- length_quality,
- info,
- )
- )
- # Pick the most appropriate thumbnail. Some values of `desired_width` and
- # `desired_height` may result in a tie, in which case we avoid comparing on
- # the thumbnail info and pick the thumbnail that appears earlier
- # in the list of candidates.
- if crop_info_list:
- thumbnail_info = min(crop_info_list, key=lambda t: t[:-1])[-1]
- elif crop_info_list2:
- thumbnail_info = min(crop_info_list2, key=lambda t: t[:-1])[-1]
- elif desired_method == "scale":
- # Thumbnails that match equal or larger sizes of desired width/height.
- info_list: List[Tuple[int, bool, int, ThumbnailInfo]] = []
- # Other thumbnails.
- info_list2: List[Tuple[int, bool, int, ThumbnailInfo]] = []
-
- for info in thumbnail_infos:
- # Skip thumbnails generated with different methods.
- if info.method != "scale":
- continue
-
- t_w = info.width
- t_h = info.height
- size_quality = abs((d_w - t_w) * (d_h - t_h))
- type_quality = desired_type != info.type
- length_quality = info.length
- if t_w >= d_w or t_h >= d_h:
- info_list.append((size_quality, type_quality, length_quality, info))
- else:
- info_list2.append(
- (size_quality, type_quality, length_quality, info)
- )
- # Pick the most appropriate thumbnail. Some values of `desired_width` and
- # `desired_height` may result in a tie, in which case we avoid comparing on
- # the thumbnail info and pick the thumbnail that appears earlier
- # in the list of candidates.
- if info_list:
- thumbnail_info = min(info_list, key=lambda t: t[:-1])[-1]
- elif info_list2:
- thumbnail_info = min(info_list2, key=lambda t: t[:-1])[-1]
-
- if thumbnail_info:
- return FileInfo(
- file_id=file_id,
- url_cache=url_cache,
- server_name=server_name,
- thumbnail=thumbnail_info,
- )
-
- # No matching thumbnail was found.
- return None
diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
index cae67e11c8..1bd51ceba2 100644
--- a/tests/media/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -18,6 +18,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
+import itertools
import os
import shutil
import tempfile
@@ -46,11 +47,11 @@ from synapse.media._base import FileInfo, ThumbnailInfo
from synapse.media.filepath import MediaFilePaths
from synapse.media.media_storage import MediaStorage, ReadableFileWrapper
from synapse.media.storage_provider import FileStorageProviderBackend
+from synapse.media.thumbnailer import ThumbnailProvider
from synapse.module_api import ModuleApi
from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
from synapse.rest import admin
-from synapse.rest.client import login
-from synapse.rest.media.thumbnail_resource import ThumbnailResource
+from synapse.rest.client import login, media
from synapse.server import HomeServer
from synapse.types import JsonDict, RoomAlias
from synapse.util import Clock
@@ -153,68 +154,54 @@ class _TestImage:
is_inline: bool = True
-@parameterized_class(
- ("test_image",),
- [
- # small png
- (
- _TestImage(
- SMALL_PNG,
- b"image/png",
- b".png",
- unhexlify(
- b"89504e470d0a1a0a0000000d4948445200000020000000200806"
- b"000000737a7af40000001a49444154789cedc101010000008220"
- b"ffaf6e484001000000ef0610200001194334ee0000000049454e"
- b"44ae426082"
- ),
- unhexlify(
- b"89504e470d0a1a0a0000000d4948445200000001000000010806"
- b"0000001f15c4890000000d49444154789c636060606000000005"
- b"0001a5f645400000000049454e44ae426082"
- ),
- ),
- ),
- # small png with transparency.
- (
- _TestImage(
- unhexlify(
- b"89504e470d0a1a0a0000000d49484452000000010000000101000"
- b"00000376ef9240000000274524e5300010194fdae0000000a4944"
- b"4154789c636800000082008177cd72b60000000049454e44ae426"
- b"082"
- ),
- b"image/png",
- b".png",
- # Note that we don't check the output since it varies across
- # different versions of Pillow.
- ),
- ),
- # small lossless webp
- (
- _TestImage(
- unhexlify(
- b"524946461a000000574542505650384c0d0000002f0000001007"
- b"1011118888fe0700"
- ),
- b"image/webp",
- b".webp",
- ),
- ),
- # an empty file
- (
- _TestImage(
- b"",
- b"image/gif",
- b".gif",
- expected_found=False,
- unable_to_thumbnail=True,
- ),
- ),
- # An SVG.
- (
- _TestImage(
- b"""
+small_png = _TestImage(
+ SMALL_PNG,
+ b"image/png",
+ b".png",
+ unhexlify(
+ b"89504e470d0a1a0a0000000d4948445200000020000000200806"
+ b"000000737a7af40000001a49444154789cedc101010000008220"
+ b"ffaf6e484001000000ef0610200001194334ee0000000049454e"
+ b"44ae426082"
+ ),
+ unhexlify(
+ b"89504e470d0a1a0a0000000d4948445200000001000000010806"
+ b"0000001f15c4890000000d49444154789c636060606000000005"
+ b"0001a5f645400000000049454e44ae426082"
+ ),
+)
+
+small_png_with_transparency = _TestImage(
+ unhexlify(
+ b"89504e470d0a1a0a0000000d49484452000000010000000101000"
+ b"00000376ef9240000000274524e5300010194fdae0000000a4944"
+ b"4154789c636800000082008177cd72b60000000049454e44ae426"
+ b"082"
+ ),
+ b"image/png",
+ b".png",
+ # Note that we don't check the output since it varies across
+ # different versions of Pillow.
+)
+
+small_lossless_webp = _TestImage(
+ unhexlify(
+ b"524946461a000000574542505650384c0d0000002f0000001007" b"1011118888fe0700"
+ ),
+ b"image/webp",
+ b".webp",
+)
+
+empty_file = _TestImage(
+ b"",
+ b"image/gif",
+ b".gif",
+ expected_found=False,
+ unable_to_thumbnail=True,
+)
+
+SVG = _TestImage(
+ b"""
@@ -223,19 +210,32 @@ class _TestImage:
""",
- b"image/svg",
- b".svg",
- expected_found=False,
- unable_to_thumbnail=True,
- is_inline=False,
- ),
- ),
- ],
+ b"image/svg",
+ b".svg",
+ expected_found=False,
+ unable_to_thumbnail=True,
+ is_inline=False,
)
+test_images = [
+ small_png,
+ small_png_with_transparency,
+ small_lossless_webp,
+ empty_file,
+ SVG,
+]
+urls = [
+ "_matrix/media/r0/thumbnail",
+ "_matrix/client/unstable/org.matrix.msc3916/media/thumbnail",
+]
+
+
+@parameterized_class(("test_image", "url"), itertools.product(test_images, urls))
class MediaRepoTests(unittest.HomeserverTestCase):
+ servlets = [media.register_servlets]
test_image: ClassVar[_TestImage]
hijack_auth = True
user_id = "@test:user"
+ url: ClassVar[str]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.fetches: List[
@@ -298,6 +298,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
"config": {"directory": self.storage_path},
}
config["media_storage_providers"] = [provider_config]
+ config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
hs = self.setup_test_homeserver(config=config, federation_http_client=client)
@@ -502,7 +503,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
params = "?width=32&height=32&method=scale"
channel = self.make_request(
"GET",
- f"/_matrix/media/v3/thumbnail/{self.media_id}{params}",
+ f"/{self.url}/{self.media_id}{params}",
shorthand=False,
await_result=False,
)
@@ -530,7 +531,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
channel = self.make_request(
"GET",
- f"/_matrix/media/v3/thumbnail/{self.media_id}{params}",
+ f"/{self.url}/{self.media_id}{params}",
shorthand=False,
await_result=False,
)
@@ -566,12 +567,11 @@ class MediaRepoTests(unittest.HomeserverTestCase):
params = "?width=32&height=32&method=" + method
channel = self.make_request(
"GET",
- f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
+ f"/{self.url}/{self.media_id}{params}",
shorthand=False,
await_result=False,
)
self.pump()
-
headers = {
b"Content-Length": [b"%d" % (len(self.test_image.data))],
b"Content-Type": [self.test_image.content_type],
@@ -580,7 +580,6 @@ class MediaRepoTests(unittest.HomeserverTestCase):
(self.test_image.data, (len(self.test_image.data), headers))
)
self.pump()
-
if expected_found:
self.assertEqual(channel.code, 200)
@@ -603,7 +602,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
channel.json_body,
{
"errcode": "M_UNKNOWN",
- "error": "Cannot find any thumbnails for the requested media ('/_matrix/media/r0/thumbnail/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
+ "error": f"Cannot find any thumbnails for the requested media ('/{self.url}/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
},
)
else:
@@ -613,7 +612,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
channel.json_body,
{
"errcode": "M_NOT_FOUND",
- "error": "Not found '/_matrix/media/r0/thumbnail/example.com/12345'",
+ "error": f"Not found '/{self.url}/example.com/12345'",
},
)
@@ -625,12 +624,12 @@ class MediaRepoTests(unittest.HomeserverTestCase):
content_type = self.test_image.content_type.decode()
media_repo = self.hs.get_media_repository()
- thumbnail_resouce = ThumbnailResource(
+ thumbnail_provider = ThumbnailProvider(
self.hs, media_repo, media_repo.media_storage
)
self.assertIsNotNone(
- thumbnail_resouce._select_thumbnail(
+ thumbnail_provider._select_thumbnail(
desired_width=desired_size,
desired_height=desired_size,
desired_method=method,
diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
new file mode 100644
index 0000000000..600cbf8963
--- /dev/null
+++ b/tests/rest/client/test_media.py
@@ -0,0 +1,1609 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import base64
+import json
+import os
+import re
+from typing import Any, Dict, Optional, Sequence, Tuple, Type
+from urllib.parse import quote, urlencode
+
+from twisted.internet._resolver import HostResolution
+from twisted.internet.address import IPv4Address, IPv6Address
+from twisted.internet.error import DNSLookupError
+from twisted.internet.interfaces import IAddress, IResolutionReceiver
+from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor
+from twisted.web.resource import Resource
+
+from synapse.config.oembed import OEmbedEndpointConfig
+from synapse.media._base import FileInfo
+from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS
+from synapse.rest import admin
+from synapse.rest.client import login, media
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
+from synapse.util.stringutils import parse_and_validate_mxc_uri
+
+from tests import unittest
+from tests.server import FakeTransport, ThreadedMemoryReactorClock
+from tests.test_utils import SMALL_PNG
+from tests.unittest import override_config
+
+try:
+ import lxml
+except ImportError:
+ lxml = None # type: ignore[assignment]
+
+
+class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
+ remote_media_id = "doesnotmatter"
+ remote_server_name = "evil.com"
+ servlets = [
+ media.register_servlets,
+ admin.register_servlets,
+ login.register_servlets,
+ ]
+
+ def make_homeserver(
+ self, reactor: ThreadedMemoryReactorClock, clock: Clock
+ ) -> HomeServer:
+ config = self.default_config()
+
+ self.storage_path = self.mktemp()
+ self.media_store_path = self.mktemp()
+ os.mkdir(self.storage_path)
+ os.mkdir(self.media_store_path)
+ config["media_store_path"] = self.media_store_path
+
+ provider_config = {
+ "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+ "store_local": True,
+ "store_synchronous": False,
+ "store_remote": True,
+ "config": {"directory": self.storage_path},
+ }
+
+ config["media_storage_providers"] = [provider_config]
+
+ return self.setup_test_homeserver(config=config)
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+
+ # Inject a piece of media. We'll use this to ensure we're returning a sane
+ # response when we're not supposed to block it, distinguishing a media block
+ # from a regular 404.
+ file_id = "abcdefg12345"
+ file_info = FileInfo(server_name=self.remote_server_name, file_id=file_id)
+ with hs.get_media_repository().media_storage.store_into_file(file_info) as (
+ f,
+ fname,
+ finish,
+ ):
+ f.write(SMALL_PNG)
+ self.get_success(finish())
+
+ self.get_success(
+ self.store.store_cached_remote_media(
+ origin=self.remote_server_name,
+ media_id=self.remote_media_id,
+ media_type="image/png",
+ media_length=1,
+ time_now_ms=clock.time_msec(),
+ upload_name="test.png",
+ filesystem_id=file_id,
+ )
+ )
+ self.register_user("user", "password")
+ self.tok = self.login("user", "password")
+
+ @override_config(
+ {
+ # Disable downloads from the domain we'll be trying to download from.
+ # Should result in a 404.
+ "prevent_media_downloads_from": ["evil.com"],
+ "dynamic_thumbnails": True,
+ "experimental_features": {"msc3916_authenticated_media_enabled": True},
+ }
+ )
+ def test_cannot_download_blocked_media_thumbnail(self) -> None:
+ """
+ Same test as test_cannot_download_blocked_media but for thumbnails.
+ """
+ response = self.make_request(
+ "GET",
+ f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
+ shorthand=False,
+ content={"width": 100, "height": 100},
+ access_token=self.tok,
+ )
+ self.assertEqual(response.code, 404)
+
+ @override_config(
+ {
+ # Disable downloads from a domain we won't be requesting downloads from.
+ # This proves we haven't broken anything.
+ "prevent_media_downloads_from": ["not-listed.com"],
+ "dynamic_thumbnails": True,
+ "experimental_features": {"msc3916_authenticated_media_enabled": True},
+ }
+ )
+ def test_remote_media_thumbnail_normally_unblocked(self) -> None:
+ """
+ Same test as test_remote_media_normally_unblocked but for thumbnails.
+ """
+ response = self.make_request(
+ "GET",
+ f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ self.assertEqual(response.code, 200)
+
+
+class UnstableURLPreviewTests(unittest.HomeserverTestCase):
+ if not lxml:
+ skip = "url preview feature requires lxml"
+
+ servlets = [media.register_servlets]
+ hijack_auth = True
+ user_id = "@test:user"
+ end_content = (
+ b""
+ b' '
+ b' '
+ b""
+ )
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ config = self.default_config()
+ config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
+ config["url_preview_enabled"] = True
+ config["max_spider_size"] = 9999999
+ config["url_preview_ip_range_blacklist"] = (
+ "192.168.1.1",
+ "1.0.0.0/8",
+ "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
+ "2001:800::/21",
+ )
+ config["url_preview_ip_range_whitelist"] = ("1.1.1.1",)
+ config["url_preview_accept_language"] = [
+ "en-UK",
+ "en-US;q=0.9",
+ "fr;q=0.8",
+ "*;q=0.7",
+ ]
+
+ self.storage_path = self.mktemp()
+ self.media_store_path = self.mktemp()
+ os.mkdir(self.storage_path)
+ os.mkdir(self.media_store_path)
+ config["media_store_path"] = self.media_store_path
+
+ provider_config = {
+ "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+ "store_local": True,
+ "store_synchronous": False,
+ "store_remote": True,
+ "config": {"directory": self.storage_path},
+ }
+
+ config["media_storage_providers"] = [provider_config]
+
+ hs = self.setup_test_homeserver(config=config)
+
+ # After the hs is created, modify the parsed oEmbed config (to avoid
+ # messing with files).
+ #
+ # Note that HTTP URLs are used to avoid having to deal with TLS in tests.
+ hs.config.oembed.oembed_patterns = [
+ OEmbedEndpointConfig(
+ api_endpoint="http://publish.twitter.com/oembed",
+ url_patterns=[
+ re.compile(r"http://twitter\.com/.+/status/.+"),
+ ],
+ formats=None,
+ ),
+ OEmbedEndpointConfig(
+ api_endpoint="http://www.hulu.com/api/oembed.{format}",
+ url_patterns=[
+ re.compile(r"http://www\.hulu\.com/watch/.+"),
+ ],
+ formats=["json"],
+ ),
+ ]
+
+ return hs
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.media_repo = hs.get_media_repository()
+ assert self.media_repo.url_previewer is not None
+ self.url_previewer = self.media_repo.url_previewer
+
+ self.lookups: Dict[str, Any] = {}
+
+ class Resolver:
+ def resolveHostName(
+ _self,
+ resolutionReceiver: IResolutionReceiver,
+ hostName: str,
+ portNumber: int = 0,
+ addressTypes: Optional[Sequence[Type[IAddress]]] = None,
+ transportSemantics: str = "TCP",
+ ) -> IResolutionReceiver:
+ resolution = HostResolution(hostName)
+ resolutionReceiver.resolutionBegan(resolution)
+ if hostName not in self.lookups:
+ raise DNSLookupError("OH NO")
+
+ for i in self.lookups[hostName]:
+ resolutionReceiver.addressResolved(i[0]("TCP", i[1], portNumber))
+ resolutionReceiver.resolutionComplete()
+ return resolutionReceiver
+
+ self.reactor.nameResolver = Resolver() # type: ignore[assignment]
+
+ def create_resource_dict(self) -> Dict[str, Resource]:
+ """Create a resource tree for the test server
+
+ A resource tree is a mapping from path to twisted.web.resource.
+
+ The default implementation creates a JsonResource and calls each function in
+ `servlets` to register servlets against it.
+ """
+ resources = super().create_resource_dict()
+ resources["/_matrix/media"] = self.hs.get_media_repository_resource()
+ return resources
+
+ def _assert_small_png(self, json_body: JsonDict) -> None:
+ """Assert properties from the SMALL_PNG test image."""
+ self.assertTrue(json_body["og:image"].startswith("mxc://"))
+ self.assertEqual(json_body["og:image:height"], 1)
+ self.assertEqual(json_body["og:image:width"], 1)
+ self.assertEqual(json_body["og:image:type"], "image/png")
+ self.assertEqual(json_body["matrix:image:size"], 67)
+
+ def test_cache_returns_correct_type(self) -> None:
+ self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n"
+ % (len(self.end_content),)
+ + self.end_content
+ )
+
+ self.pump()
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+ )
+
+ # Check the cache returns the correct response
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+ shorthand=False,
+ )
+
+ # Check the cache response has the same content
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+ )
+
+ # Clear the in-memory cache
+ self.assertIn("http://matrix.org", self.url_previewer._cache)
+ self.url_previewer._cache.pop("http://matrix.org")
+ self.assertNotIn("http://matrix.org", self.url_previewer._cache)
+
+ # Check the database cache returns the correct response
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+ shorthand=False,
+ )
+
+ # Check the cache response has the same content
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+ )
+
    def test_non_ascii_preview_httpequiv(self) -> None:
        """A non-UTF-8 page is decoded correctly; the title is asserted to come
        back as the Cyrillic string "дка".

        NOTE(review): the markup inside these byte literals appears to have been
        stripped by extraction — confirm against the upstream test file.
        """
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]

        end_content = (
            b""
            b' '
            b' '
            b' '
            b""
        )

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Play the remote web server's part over a fake in-memory transport.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
            )
            % (len(end_content),)
            + end_content
        )

        self.pump()
        self.assertEqual(channel.code, 200)
        self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430")
+
    def test_video_rejected(self) -> None:
        """A remote resource served with a video content type is rejected:
        the preview request fails with a 502 and an M_UNKNOWN error."""
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]

        end_content = b"anything"

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Respond to Synapse's outbound fetch with a video/mp4 body.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b"Content-Type: video/mp4\r\n\r\n"
            )
            % (len(end_content))
            + end_content
        )

        self.pump()
        self.assertEqual(channel.code, 502)
        self.assertEqual(
            channel.json_body,
            {
                "errcode": "M_UNKNOWN",
                "error": "Requested file's content type not allowed for this operation: video/mp4",
            },
        )
+
    def test_audio_rejected(self) -> None:
        """A remote resource served with an audio content type is rejected:
        the preview request fails with a 502 and an M_UNKNOWN error."""
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]

        end_content = b"anything"

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Respond to Synapse's outbound fetch with an audio/aac body.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b"Content-Type: audio/aac\r\n\r\n"
            )
            % (len(end_content))
            + end_content
        )

        self.pump()
        self.assertEqual(channel.code, 502)
        self.assertEqual(
            channel.json_body,
            {
                "errcode": "M_UNKNOWN",
                "error": "Requested file's content type not allowed for this operation: audio/aac",
            },
        )
+
    def test_non_ascii_preview_content_type(self) -> None:
        """A page whose charset comes from the Content-Type header
        (windows-1251) is decoded correctly; title asserted as "дка".

        NOTE(review): the markup inside these byte literals appears to have been
        stripped by extraction — confirm against the upstream test file.
        """
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]

        end_content = (
            b""
            b' '
            b' '
            b""
        )

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Play the remote web server's part over a fake in-memory transport.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="windows-1251"\r\n\r\n'
            )
            % (len(end_content),)
            + end_content
        )

        self.pump()
        self.assertEqual(channel.code, 200)
        self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430")
+
    def test_overlong_title(self) -> None:
        """A 2000-character title is stripped from the preview; only
        og:description survives."""
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]

        end_content = (
            b""
            b"" + b"x" * 2000 + b""
            b' '
            b""
        )

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Play the remote web server's part over a fake in-memory transport.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="windows-1251"\r\n\r\n'
            )
            % (len(end_content),)
            + end_content
        )

        self.pump()
        self.assertEqual(channel.code, 200)
        res = channel.json_body
        # We should only see the `og:description` field, as `title` is too long and should be stripped out
        self.assertCountEqual(["og:description"], res.keys())
+
    def test_ipaddr(self) -> None:
        """
        IP addresses can be previewed directly.
        """
        self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")]

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Feed the canned HTML back over a fake transport.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n"
            % (len(self.end_content),)
            + self.end_content
        )

        self.pump()
        self.assertEqual(channel.code, 200)
        self.assertEqual(
            channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
        )
+
    def test_blocked_ip_specific(self) -> None:
        """
        Blocked IP addresses, found via DNS, are not spidered.
        """
        self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")]

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
            shorthand=False,
        )

        # No requests made.
        self.assertEqual(len(self.reactor.tcpClients), 0)
        # The DNS-level block surfaces as a 502 "resolution failure".
        self.assertEqual(channel.code, 502)
        self.assertEqual(
            channel.json_body,
            {
                "errcode": "M_UNKNOWN",
                "error": "DNS resolution failure during URL preview generation",
            },
        )
+
    def test_blocked_ip_range(self) -> None:
        """
        Blocked IP ranges, IPs found over DNS, are not spidered.
        """
        self.lookups["example.com"] = [(IPv4Address, "1.1.1.2")]

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
            shorthand=False,
        )

        # The DNS-level block surfaces as a 502 "resolution failure".
        self.assertEqual(channel.code, 502)
        self.assertEqual(
            channel.json_body,
            {
                "errcode": "M_UNKNOWN",
                "error": "DNS resolution failure during URL preview generation",
            },
        )
+
    def test_blocked_ip_specific_direct(self) -> None:
        """
        Blocked IP addresses, accessed directly, are not spidered.
        """
        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://192.168.1.1",
            shorthand=False,
        )

        # No requests made.
        self.assertEqual(len(self.reactor.tcpClients), 0)
        # A literal blocked IP is rejected up front with 403, unlike the
        # DNS-resolved case which reports a 502 resolution failure.
        self.assertEqual(
            channel.json_body,
            {"errcode": "M_UNKNOWN", "error": "IP address blocked"},
        )
        self.assertEqual(channel.code, 403)
+
    def test_blocked_ip_range_direct(self) -> None:
        """
        Blocked IP ranges, accessed directly, are not spidered.
        """
        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://1.1.1.2",
            shorthand=False,
        )

        # Rejected up front with 403 (no outbound connection is attempted).
        self.assertEqual(channel.code, 403)
        self.assertEqual(
            channel.json_body,
            {"errcode": "M_UNKNOWN", "error": "IP address blocked"},
        )
+
    def test_blocked_ip_range_whitelisted_ip(self) -> None:
        """
        Blocked but then subsequently whitelisted IP addresses can be
        spidered.
        """
        self.lookups["example.com"] = [(IPv4Address, "1.1.1.1")]

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # The fetch proceeds; answer it over a fake transport.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)

        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))

        client.dataReceived(
            b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n"
            % (len(self.end_content),)
            + self.end_content
        )

        self.pump()
        self.assertEqual(channel.code, 200)
        self.assertEqual(
            channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
        )
+
    def test_blocked_ip_with_external_ip(self) -> None:
        """
        If a hostname resolves a blocked IP, even if there's a non-blocked one,
        it will be rejected.
        """
        # Hardcode the URL resolving to the IP we want.
        self.lookups["example.com"] = [
            (IPv4Address, "1.1.1.2"),
            (IPv4Address, "10.1.2.3"),
        ]

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
            shorthand=False,
        )
        # One blocked address in the answer set poisons the whole lookup.
        self.assertEqual(channel.code, 502)
        self.assertEqual(
            channel.json_body,
            {
                "errcode": "M_UNKNOWN",
                "error": "DNS resolution failure during URL preview generation",
            },
        )
+
    def test_blocked_ipv6_specific(self) -> None:
        """
        Blocked IPv6 addresses, found via DNS, are not spidered.
        """
        self.lookups["example.com"] = [
            (IPv6Address, "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
        ]

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
            shorthand=False,
        )

        # No requests made.
        self.assertEqual(len(self.reactor.tcpClients), 0)
        self.assertEqual(channel.code, 502)
        self.assertEqual(
            channel.json_body,
            {
                "errcode": "M_UNKNOWN",
                "error": "DNS resolution failure during URL preview generation",
            },
        )
+
    def test_blocked_ipv6_range(self) -> None:
        """
        Blocked IPv6 ranges, IPs found over DNS, are not spidered.
        """
        self.lookups["example.com"] = [(IPv6Address, "2001:800::1")]

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
            shorthand=False,
        )

        self.assertEqual(channel.code, 502)
        self.assertEqual(
            channel.json_body,
            {
                "errcode": "M_UNKNOWN",
                "error": "DNS resolution failure during URL preview generation",
            },
        )
+
    def test_OPTIONS(self) -> None:
        """
        An OPTIONS request to the preview endpoint returns 204 No Content.
        """
        channel = self.make_request(
            "OPTIONS",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
            shorthand=False,
        )
        self.assertEqual(channel.code, 204)
+
    def test_accept_language_config_option(self) -> None:
        """
        Accept-Language header is sent to the remote server
        (presumably from the url_preview_accept_language config option —
        verify against the test case's config).
        """
        self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")]

        # Build and make a request to the server
        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Extract Synapse's tcp client
        client = self.reactor.tcpClients[0][2].buildProtocol(None)

        # Build a fake remote server to reply with
        server = AccumulatingProtocol()

        # Connect the two together
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))

        # Tell Synapse that it has received some data from the remote server
        client.dataReceived(
            b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n"
            % (len(self.end_content),)
            + self.end_content
        )

        # Move the reactor along until we get a response on our original channel
        self.pump()
        self.assertEqual(channel.code, 200)
        self.assertEqual(
            channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
        )

        # Check that the server received the Accept-Language header as part
        # of the request from Synapse
        self.assertIn(
            (
                b"Accept-Language: en-UK\r\n"
                b"Accept-Language: en-US;q=0.9\r\n"
                b"Accept-Language: fr;q=0.8\r\n"
                b"Accept-Language: *;q=0.7"
            ),
            server.data,
        )
+
    def test_image(self) -> None:
        """An image should be precached if mentioned in the HTML.

        NOTE(review): the markup inside `result` appears to have been stripped
        by extraction — confirm against the upstream test file.
        """
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
        self.lookups["cdn.matrix.org"] = [(IPv4Address, "10.1.2.4")]

        result = (
            b""""""
        )

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Respond with the HTML.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
            )
            % (len(result),)
            + result
        )
        self.pump()

        # Respond with the photo.
        client = self.reactor.tcpClients[1][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b"Content-Type: image/png\r\n\r\n"
            )
            % (len(SMALL_PNG),)
            + SMALL_PNG
        )
        self.pump()

        # The image should be in the result.
        self.assertEqual(channel.code, 200)
        self._assert_small_png(channel.json_body)
+
    def test_nonexistent_image(self) -> None:
        """If the preview image doesn't exist, ensure some data is returned.

        NOTE(review): the markup inside `result` appears to have been stripped
        by extraction — confirm against the upstream test file.
        """
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]

        result = (
            b""""""
        )

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Respond with the HTML.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
            )
            % (len(result),)
            + result
        )

        self.pump()

        # There should not be a second connection.
        self.assertEqual(len(self.reactor.tcpClients), 1)

        # The image should not be in the result.
        self.assertEqual(channel.code, 200)
        self.assertNotIn("og:image", channel.json_body)
+
    @unittest.override_config(
        {"url_preview_url_blacklist": [{"netloc": "cdn.matrix.org"}]}
    )
    def test_image_blocked(self) -> None:
        """If the preview image's host is blocked, the image is skipped but
        other preview data is still returned."""
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
        self.lookups["cdn.matrix.org"] = [(IPv4Address, "10.1.2.4")]

        result = (
            b""""""
        )

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Respond with the HTML.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
            )
            % (len(result),)
            + result
        )
        self.pump()

        # There should not be a second connection.
        self.assertEqual(len(self.reactor.tcpClients), 1)

        # The image should not be in the result.
        self.assertEqual(channel.code, 200)
        self.assertNotIn("og:image", channel.json_body)
+
    def test_oembed_failure(self) -> None:
        """If the autodiscovered oEmbed URL fails, ensure some data is returned."""
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]

        # NOTE(review): the HTML markup around this title appears to have been
        # stripped by extraction — confirm against the upstream test file.
        result = b"""
        oEmbed Autodiscovery Fail
        
        """

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Respond with the HTML.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
            )
            % (len(result),)
            + result
        )

        self.pump()
        self.assertEqual(channel.code, 200)

        # The image should not be in the result.
        self.assertEqual(channel.json_body["og:title"], "oEmbed Autodiscovery Fail")
+
    def test_data_url(self) -> None:
        """
        Requesting to preview a data URL is not supported (500 error).
        """
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]

        data = base64.b64encode(SMALL_PNG).decode()

        # NOTE(review): the f-string's data-URL content appears to have been
        # stripped by extraction (`data` is otherwise unused) — confirm
        # against the upstream test file.
        query_params = urlencode(
            {
                "url": f' '
            }
        )

        channel = self.make_request(
            "GET",
            f"/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?{query_params}",
            shorthand=False,
        )
        self.pump()

        self.assertEqual(channel.code, 500)
+
    def test_inline_data_url(self) -> None:
        """
        An inline image (as a data URL) should be parsed properly.
        """
        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]

        data = base64.b64encode(SMALL_PNG)

        # NOTE(review): the markup (including the %s placeholder that
        # `% (data,)` interpolates into) appears to have been stripped by
        # extraction — confirm against the upstream test file.
        end_content = (
            b"" b' ' b""
        ) % (data,)

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Play the remote web server's part over a fake in-memory transport.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
            )
            % (len(end_content),)
            + end_content
        )

        self.pump()
        self.assertEqual(channel.code, 200)
        self._assert_small_png(channel.json_body)
+
    def test_oembed_photo(self) -> None:
        """Test an oEmbed endpoint which returns a 'photo' type which redirects the preview to a new URL."""
        self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
        self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")]

        result = {
            "version": "1.0",
            "type": "photo",
            "url": "http://cdn.twitter.com/matrixdotorg",
        }
        oembed_content = json.dumps(result).encode("utf-8")

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # First connection: serve the oEmbed JSON document.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: application/json; charset="utf8"\r\n\r\n'
            )
            % (len(oembed_content),)
            + oembed_content
        )

        self.pump()

        # Ensure a second request is made to the photo URL.
        client = self.reactor.tcpClients[1][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b"Content-Type: image/png\r\n\r\n"
            )
            % (len(SMALL_PNG),)
            + SMALL_PNG
        )

        self.pump()

        # Ensure the URL is what was requested.
        self.assertIn(b"/matrixdotorg", server.data)

        self.assertEqual(channel.code, 200)
        body = channel.json_body
        self.assertEqual(body["og:url"], "http://twitter.com/matrixdotorg/status/12345")
        self._assert_small_png(body)
+
+ def test_oembed_rich(self) -> None:
+ """Test an oEmbed endpoint which returns HTML content via the 'rich' type."""
+ self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+
+ result = {
+ "version": "1.0",
+ "type": "rich",
+ # Note that this provides the author, not the title.
+ "author_name": "Alice",
+ "html": "Content Preview
",
+ }
+ end_content = json.dumps(result).encode("utf-8")
+
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: application/json; charset="utf8"\r\n\r\n'
+ )
+ % (len(end_content),)
+ + end_content
+ )
+
+ self.pump()
+
+ # Double check that the proper host is being connected to. (Note that
+ # twitter.com can't be resolved so this is already implicitly checked.)
+ self.assertIn(b"\r\nHost: publish.twitter.com\r\n", server.data)
+
+ self.assertEqual(channel.code, 200)
+ body = channel.json_body
+ self.assertEqual(
+ body,
+ {
+ "og:url": "http://twitter.com/matrixdotorg/status/12345",
+ "og:title": "Alice",
+ "og:description": "Content Preview",
+ },
+ )
+
+ def test_oembed_format(self) -> None:
+ """Test an oEmbed endpoint which requires the format in the URL."""
+ self.lookups["www.hulu.com"] = [(IPv4Address, "10.1.2.3")]
+
+ result = {
+ "version": "1.0",
+ "type": "rich",
+ "html": "Content Preview
",
+ }
+ end_content = json.dumps(result).encode("utf-8")
+
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.hulu.com/watch/12345",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: application/json; charset="utf8"\r\n\r\n'
+ )
+ % (len(end_content),)
+ + end_content
+ )
+
+ self.pump()
+
+ # The {format} should have been turned into json.
+ self.assertIn(b"/api/oembed.json", server.data)
+ # A URL parameter of format=json should be provided.
+ self.assertIn(b"format=json", server.data)
+
+ self.assertEqual(channel.code, 200)
+ body = channel.json_body
+ self.assertEqual(
+ body,
+ {
+ "og:url": "http://www.hulu.com/watch/12345",
+ "og:description": "Content Preview",
+ },
+ )
+
    @unittest.override_config(
        {"url_preview_url_blacklist": [{"netloc": "publish.twitter.com"}]}
    )
    def test_oembed_blocked(self) -> None:
        """The oEmbed URL should not be downloaded if the oEmbed URL is blocked."""
        self.lookups["twitter.com"] = [(IPv4Address, "10.1.2.3")]

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
            shorthand=False,
            await_result=False,
        )
        self.pump()
        # The blocked oEmbed endpoint causes the whole preview to be refused.
        self.assertEqual(channel.code, 403, channel.result)
+
    def test_oembed_autodiscovery(self) -> None:
        """
        Autodiscovery works by finding the link in the HTML response and then requesting an oEmbed URL.
        1. Request a preview of a URL which is not known to the oEmbed code.
        2. It returns HTML including a link to an oEmbed preview.
        3. The oEmbed preview is requested and returns a URL for an image.
        4. The image is requested for thumbnailing.
        """
        # This is a little cheesy in that we use the www subdomain (which isn't the
        # list of oEmbed patterns) to get "raw" HTML response.
        self.lookups["www.twitter.com"] = [(IPv4Address, "10.1.2.3")]
        self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
        self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")]

        # NOTE(review): the autodiscovery <link> markup appears to have been
        # stripped from this literal by extraction — confirm against the
        # upstream test file.
        result = b"""
        
        """

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # First connection: serve the raw HTML page.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
            )
            % (len(result),)
            + result
        )
        self.pump()

        # The oEmbed response.
        result2 = {
            "version": "1.0",
            "type": "photo",
            "url": "http://cdn.twitter.com/matrixdotorg",
        }
        oembed_content = json.dumps(result2).encode("utf-8")

        # Ensure a second request is made to the oEmbed URL.
        client = self.reactor.tcpClients[1][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: application/json; charset="utf8"\r\n\r\n'
            )
            % (len(oembed_content),)
            + oembed_content
        )
        self.pump()

        # Ensure the URL is what was requested.
        self.assertIn(b"/oembed?", server.data)

        # Ensure a third request is made to the photo URL.
        client = self.reactor.tcpClients[2][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b"Content-Type: image/png\r\n\r\n"
            )
            % (len(SMALL_PNG),)
            + SMALL_PNG
        )
        self.pump()

        # Ensure the URL is what was requested.
        self.assertIn(b"/matrixdotorg", server.data)

        self.assertEqual(channel.code, 200)
        body = channel.json_body
        self.assertEqual(
            body["og:url"], "http://www.twitter.com/matrixdotorg/status/12345"
        )
        self._assert_small_png(body)
+
    @unittest.override_config(
        {"url_preview_url_blacklist": [{"netloc": "publish.twitter.com"}]}
    )
    def test_oembed_autodiscovery_blocked(self) -> None:
        """
        If the discovered oEmbed URL is blocked, it should be discarded.
        """
        # This is a little cheesy in that we use the www subdomain (which isn't the
        # list of oEmbed patterns) to get "raw" HTML response.
        self.lookups["www.twitter.com"] = [(IPv4Address, "10.1.2.3")]
        self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.4")]

        # NOTE(review): the title/<link> markup appears to have been stripped
        # from this literal by extraction — confirm against upstream.
        result = b"""
        Test
        
        """

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Serve the raw HTML page over a fake transport.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            (
                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
            )
            % (len(result),)
            + result
        )

        self.pump()

        # Ensure there's no additional connections.
        self.assertEqual(len(self.reactor.tcpClients), 1)

        # Ensure the URL is what was requested.
        self.assertIn(b"\r\nHost: www.twitter.com\r\n", server.data)

        self.assertEqual(channel.code, 200)
        body = channel.json_body
        self.assertEqual(body["og:title"], "Test")
        self.assertNotIn("og:image", body)
+
    def _download_image(self) -> Tuple[str, str]:
        """Downloads an image into the URL cache.

        Returns:
            A (host, media_id) tuple representing the MXC URI of the image.
        """
        self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")]

        channel = self.make_request(
            "GET",
            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://cdn.twitter.com/matrixdotorg",
            shorthand=False,
            await_result=False,
        )
        self.pump()

        # Serve the PNG bytes over a fake transport.
        client = self.reactor.tcpClients[0][2].buildProtocol(None)
        server = AccumulatingProtocol()
        server.makeConnection(FakeTransport(client, self.reactor))
        client.makeConnection(FakeTransport(server, self.reactor))
        client.dataReceived(
            b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: image/png\r\n\r\n"
            % (len(SMALL_PNG),)
            + SMALL_PNG
        )

        self.pump()
        self.assertEqual(channel.code, 200)
        body = channel.json_body
        mxc_uri = body["og:image"]
        # The preview of a bare image exposes it as an MXC URI; split that
        # back into host/media_id for the caller.
        host, _port, media_id = parse_and_validate_mxc_uri(mxc_uri)
        self.assertIsNone(_port)
        return host, media_id
+
    def test_storage_providers_exclude_files(self) -> None:
        """Test that files are not stored in or fetched from storage providers."""
        host, media_id = self._download_image()

        rel_file_path = self.media_repo.filepaths.url_cache_filepath_rel(media_id)
        media_store_path = os.path.join(self.media_store_path, rel_file_path)
        storage_provider_path = os.path.join(self.storage_path, rel_file_path)

        # Check storage
        self.assertTrue(os.path.isfile(media_store_path))
        self.assertFalse(
            os.path.isfile(storage_provider_path),
            "URL cache file was unexpectedly stored in a storage provider",
        )

        # Check fetching
        channel = self.make_request(
            "GET",
            f"/_matrix/media/v3/download/{host}/{media_id}",
            shorthand=False,
            await_result=False,
        )
        self.pump()
        self.assertEqual(channel.code, 200)

        # Move cached file into the storage provider
        os.makedirs(os.path.dirname(storage_provider_path), exist_ok=True)
        os.rename(media_store_path, storage_provider_path)

        # With the file only present in the provider, the download must 404.
        channel = self.make_request(
            "GET",
            f"/_matrix/media/v3/download/{host}/{media_id}",
            shorthand=False,
            await_result=False,
        )
        self.pump()
        self.assertEqual(
            channel.code,
            404,
            "URL cache file was unexpectedly retrieved from a storage provider",
        )
+
    def test_storage_providers_exclude_thumbnails(self) -> None:
        """Test that thumbnails are not stored in or fetched from storage providers."""
        host, media_id = self._download_image()

        rel_thumbnail_path = (
            self.media_repo.filepaths.url_cache_thumbnail_directory_rel(media_id)
        )
        media_store_thumbnail_path = os.path.join(
            self.media_store_path, rel_thumbnail_path
        )
        storage_provider_thumbnail_path = os.path.join(
            self.storage_path, rel_thumbnail_path
        )

        # Check storage
        self.assertTrue(os.path.isdir(media_store_thumbnail_path))
        self.assertFalse(
            os.path.isdir(storage_provider_thumbnail_path),
            "URL cache thumbnails were unexpectedly stored in a storage provider",
        )

        # Check fetching
        channel = self.make_request(
            "GET",
            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
            shorthand=False,
            await_result=False,
        )
        self.pump()
        self.assertEqual(channel.code, 200)

        # Remove the original, otherwise thumbnails will regenerate
        rel_file_path = self.media_repo.filepaths.url_cache_filepath_rel(media_id)
        media_store_path = os.path.join(self.media_store_path, rel_file_path)
        os.remove(media_store_path)

        # Move cached thumbnails into the storage provider
        os.makedirs(os.path.dirname(storage_provider_thumbnail_path), exist_ok=True)
        os.rename(media_store_thumbnail_path, storage_provider_thumbnail_path)

        # With thumbnails only in the provider (and the original gone), 404.
        channel = self.make_request(
            "GET",
            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
            shorthand=False,
            await_result=False,
        )
        self.pump()
        self.assertEqual(
            channel.code,
            404,
            "URL cache thumbnail was unexpectedly retrieved from a storage provider",
        )
+
    def test_cache_expiry(self) -> None:
        """Test that URL cache files and thumbnails are cleaned up properly on expiry."""
        _host, media_id = self._download_image()

        file_path = self.media_repo.filepaths.url_cache_filepath(media_id)
        file_dirs = self.media_repo.filepaths.url_cache_filepath_dirs_to_delete(
            media_id
        )
        thumbnail_dir = self.media_repo.filepaths.url_cache_thumbnail_directory(
            media_id
        )
        thumbnail_dirs = self.media_repo.filepaths.url_cache_thumbnail_dirs_to_delete(
            media_id
        )

        self.assertTrue(os.path.isfile(file_path))
        self.assertTrue(os.path.isdir(thumbnail_dir))

        # Advance past the expiry window, then run the expiry job directly.
        self.reactor.advance(IMAGE_CACHE_EXPIRY_MS * 1000 + 1)
        self.get_success(self.url_previewer._expire_url_cache_data())

        # Every cached file, thumbnail, and now-empty parent dir must be gone.
        for path in [file_path] + file_dirs + [thumbnail_dir] + thumbnail_dirs:
            self.assertFalse(
                os.path.exists(path),
                f"{os.path.relpath(path, self.media_store_path)} was not deleted",
            )
+
+ @unittest.override_config({"url_preview_url_blacklist": [{"port": "*"}]})
+ def test_blocked_port(self) -> None:
+ """Tests that blocking URLs with a port makes previewing such URLs
+ fail with a 403 error and doesn't impact other previews.
+ """
+ self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+
+ bad_url = quote("http://matrix.org:8888/foo")
+ good_url = quote("http://matrix.org/foo")
+
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url="
+ + bad_url,
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+ self.assertEqual(channel.code, 403, channel.result)
+
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url="
+ + good_url,
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n"
+ % (len(self.end_content),)
+ + self.end_content
+ )
+
+ self.pump()
+ self.assertEqual(channel.code, 200)
+
+ @unittest.override_config(
+ {"url_preview_url_blacklist": [{"netloc": "example.com"}]}
+ )
+ def test_blocked_url(self) -> None:
+ """Tests that blocking URLs with a host makes previewing such URLs
+ fail with a 403 error.
+ """
+ self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")]
+
+ bad_url = quote("http://example.com/foo")
+
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url="
+ + bad_url,
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+ self.assertEqual(channel.code, 403, channel.result)
+
+
+class UnstableMediaConfigTest(unittest.HomeserverTestCase):
+ servlets = [
+ media.register_servlets,
+ admin.register_servlets,
+ login.register_servlets,
+ ]
+
+ def make_homeserver(
+ self, reactor: ThreadedMemoryReactorClock, clock: Clock
+ ) -> HomeServer:
+ config = self.default_config()
+ config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
+
+ self.storage_path = self.mktemp()
+ self.media_store_path = self.mktemp()
+ os.mkdir(self.storage_path)
+ os.mkdir(self.media_store_path)
+ config["media_store_path"] = self.media_store_path
+
+ provider_config = {
+ "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+ "store_local": True,
+ "store_synchronous": False,
+ "store_remote": True,
+ "config": {"directory": self.storage_path},
+ }
+
+ config["media_storage_providers"] = [provider_config]
+
+ return self.setup_test_homeserver(config=config)
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.register_user("user", "password")
+ self.tok = self.login("user", "password")
+
+ def test_media_config(self) -> None:
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/unstable/org.matrix.msc3916/media/config",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body["m.upload.size"], self.hs.config.media.max_upload_size
+ )
From 887f7734726f3af127c89375766cb112405d7587 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 28 May 2024 11:27:51 +0100
Subject: [PATCH 119/503] Bump serde from 1.0.202 to 1.0.203 (#17232)
---
Cargo.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 5fbc511563..e3e63fc205 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
-version = "1.0.202"
+version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395"
+checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.202"
+version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838"
+checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
dependencies = [
"proc-macro2",
"quote",
From 0893ee9af877b76df10e3a55cba910eac947a4e4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 28 May 2024 11:28:16 +0100
Subject: [PATCH 120/503] Bump prometheus-client from 0.19.0 to 0.20.0 (#17233)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index d6dc27bfae..2d0d89498d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1673,13 +1673,13 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes
[[package]]
name = "prometheus-client"
-version = "0.19.0"
+version = "0.20.0"
description = "Python client for the Prometheus monitoring system."
optional = false
python-versions = ">=3.8"
files = [
- {file = "prometheus_client-0.19.0-py3-none-any.whl", hash = "sha256:c88b1e6ecf6b41cd8fb5731c7ae919bf66df6ec6fafa555cd6c0e16ca169ae92"},
- {file = "prometheus_client-0.19.0.tar.gz", hash = "sha256:4585b0d1223148c27a225b10dbec5ae9bc4c81a99a3fa80774fa6209935324e1"},
+ {file = "prometheus_client-0.20.0-py3-none-any.whl", hash = "sha256:cde524a85bce83ca359cc837f28b8c0db5cac7aa653a588fd7e84ba061c329e7"},
+ {file = "prometheus_client-0.20.0.tar.gz", hash = "sha256:287629d00b147a32dcb2be0b9df905da599b2d82f80377083ec8463309a4bb89"},
]
[package.extras]
From 86a2a0258f9f431cf3636044140a5fa736fa2fc7 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 28 May 2024 11:28:32 +0100
Subject: [PATCH 121/503] Bump pyopenssl from 24.0.0 to 24.1.0 (#17234)
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 2d0d89498d..55a3f4951f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1997,13 +1997,13 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
-version = "24.0.0"
+version = "24.1.0"
description = "Python wrapper module around the OpenSSL library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "pyOpenSSL-24.0.0-py3-none-any.whl", hash = "sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3"},
- {file = "pyOpenSSL-24.0.0.tar.gz", hash = "sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf"},
+ {file = "pyOpenSSL-24.1.0-py3-none-any.whl", hash = "sha256:17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad"},
+ {file = "pyOpenSSL-24.1.0.tar.gz", hash = "sha256:cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f"},
]
[package.dependencies]
@@ -2011,7 +2011,7 @@ cryptography = ">=41.0.5,<43"
[package.extras]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"]
-test = ["flaky", "pretend", "pytest (>=3.0.1)"]
+test = ["pretend", "pytest (>=3.0.1)", "pytest-rerunfailures"]
[[package]]
name = "pysaml2"
From f2616edb7375f2386b4d1fc9df0988d171f325e2 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 28 May 2024 11:28:58 +0100
Subject: [PATCH 122/503] Bump pyicu from 2.13 to 2.13.1 (#17236)
---
poetry.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 55a3f4951f..a669c27595 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1915,12 +1915,12 @@ plugins = ["importlib-metadata"]
[[package]]
name = "pyicu"
-version = "2.13"
+version = "2.13.1"
description = "Python extension wrapping the ICU C++ API"
optional = true
python-versions = "*"
files = [
- {file = "PyICU-2.13.tar.gz", hash = "sha256:d481be888975df3097c2790241bbe8518f65c9676a74957cdbe790e559c828f6"},
+ {file = "PyICU-2.13.1.tar.gz", hash = "sha256:d4919085eaa07da12bade8ee721e7bbf7ade0151ca0f82946a26c8f4b98cdceb"},
]
[[package]]
From f35bc08d3933416d2c7a0c566f895214e9ffd88e Mon Sep 17 00:00:00 2001
From: Olivier 'reivilibre
Date: Tue, 28 May 2024 11:54:28 +0100
Subject: [PATCH 123/503] 1.108.0
---
CHANGES.md | 7 +++++++
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
3 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 2d2474108c..d517fc4eff 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,10 @@
+# Synapse 1.108.0 (2024-05-28)
+
+No significant changes since 1.108.0rc1.
+
+
+
+
# Synapse 1.108.0rc1 (2024-05-21)
### Features
diff --git a/debian/changelog b/debian/changelog
index a9a5011f76..8491b587e8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.108.0) stable; urgency=medium
+
+ * New Synapse release 1.108.0.
+
+ -- Synapse Packaging team Tue, 28 May 2024 11:54:22 +0100
+
matrix-synapse-py3 (1.108.0~rc1) stable; urgency=medium
* New Synapse release 1.108.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index 00366ebb6b..ea14b98199 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.108.0rc1"
+version = "1.108.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From 5147ce294a5653166e6238b2e304b81c13d343a1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 28 May 2024 13:26:37 +0100
Subject: [PATCH 124/503] Bump phonenumbers from 8.13.35 to 8.13.37 (#17235)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index a669c27595..73814e49d0 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1536,13 +1536,13 @@ files = [
[[package]]
name = "phonenumbers"
-version = "8.13.35"
+version = "8.13.37"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
files = [
- {file = "phonenumbers-8.13.35-py2.py3-none-any.whl", hash = "sha256:58286a8e617bd75f541e04313b28c36398be6d4443a778c85e9617a93c391310"},
- {file = "phonenumbers-8.13.35.tar.gz", hash = "sha256:64f061a967dcdae11e1c59f3688649e697b897110a33bb74d5a69c3e35321245"},
+ {file = "phonenumbers-8.13.37-py2.py3-none-any.whl", hash = "sha256:4ea00ef5012422c08c7955c21131e7ae5baa9a3ef52cf2d561e963f023006b80"},
+ {file = "phonenumbers-8.13.37.tar.gz", hash = "sha256:bd315fed159aea0516f7c367231810fe8344d5bec26156b88fa18374c11d1cf2"},
]
[[package]]
From bb5a692946e69c7f3686f1cb3fc0833b736f066a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 May 2024 11:14:42 +0100
Subject: [PATCH 125/503] Fix slipped logging context when media rejected
(#17239)
When a module rejects a piece of media we end up trying to close the
same logging context twice.
Instead of fixing the existing code we refactor to use an async context
manager, which is easier to write correctly.
---
changelog.d/17239.misc | 1 +
synapse/media/media_repository.py | 11 +--
synapse/media/media_storage.py | 104 +++++++++--------------
synapse/media/url_previewer.py | 4 +-
tests/rest/client/test_media.py | 14 +--
tests/rest/media/test_domain_blocking.py | 14 +--
6 files changed, 56 insertions(+), 92 deletions(-)
create mode 100644 changelog.d/17239.misc
diff --git a/changelog.d/17239.misc b/changelog.d/17239.misc
new file mode 100644
index 0000000000..9fca36bb29
--- /dev/null
+++ b/changelog.d/17239.misc
@@ -0,0 +1 @@
+Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module.
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 0e875132f6..9da8495950 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -650,7 +650,7 @@ class MediaRepository:
file_info = FileInfo(server_name=server_name, file_id=file_id)
- with self.media_storage.store_into_file(file_info) as (f, fname, finish):
+ async with self.media_storage.store_into_file(file_info) as (f, fname):
try:
length, headers = await self.client.download_media(
server_name,
@@ -693,8 +693,6 @@ class MediaRepository:
)
raise SynapseError(502, "Failed to fetch remote media")
- await finish()
-
if b"Content-Type" in headers:
media_type = headers[b"Content-Type"][0].decode("ascii")
else:
@@ -1045,14 +1043,9 @@ class MediaRepository:
),
)
- with self.media_storage.store_into_file(file_info) as (
- f,
- fname,
- finish,
- ):
+ async with self.media_storage.store_into_file(file_info) as (f, fname):
try:
await self.media_storage.write_to_file(t_byte_source, f)
- await finish()
finally:
t_byte_source.close()
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
index b45b319f5c..9979c48eac 100644
--- a/synapse/media/media_storage.py
+++ b/synapse/media/media_storage.py
@@ -27,10 +27,9 @@ from typing import (
IO,
TYPE_CHECKING,
Any,
- Awaitable,
+ AsyncIterator,
BinaryIO,
Callable,
- Generator,
Optional,
Sequence,
Tuple,
@@ -97,11 +96,9 @@ class MediaStorage:
the file path written to in the primary media store
"""
- with self.store_into_file(file_info) as (f, fname, finish_cb):
+ async with self.store_into_file(file_info) as (f, fname):
# Write to the main media repository
await self.write_to_file(source, f)
- # Write to the other storage providers
- await finish_cb()
return fname
@@ -111,32 +108,27 @@ class MediaStorage:
await defer_to_thread(self.reactor, _write_file_synchronously, source, output)
@trace_with_opname("MediaStorage.store_into_file")
- @contextlib.contextmanager
- def store_into_file(
+ @contextlib.asynccontextmanager
+ async def store_into_file(
self, file_info: FileInfo
- ) -> Generator[Tuple[BinaryIO, str, Callable[[], Awaitable[None]]], None, None]:
- """Context manager used to get a file like object to write into, as
+ ) -> AsyncIterator[Tuple[BinaryIO, str]]:
+ """Async Context manager used to get a file like object to write into, as
described by file_info.
- Actually yields a 3-tuple (file, fname, finish_cb), where file is a file
- like object that can be written to, fname is the absolute path of file
- on disk, and finish_cb is a function that returns an awaitable.
+ Actually yields a 2-tuple (file, fname,), where file is a file
+ like object that can be written to and fname is the absolute path of file
+ on disk.
fname can be used to read the contents from after upload, e.g. to
generate thumbnails.
- finish_cb must be called and waited on after the file has been successfully been
- written to. Should not be called if there was an error. Checks for spam and
- stores the file into the configured storage providers.
-
Args:
file_info: Info about the file to store
Example:
- with media_storage.store_into_file(info) as (f, fname, finish_cb):
+ async with media_storage.store_into_file(info) as (f, fname,):
# .. write into f ...
- await finish_cb()
"""
path = self._file_info_to_path(file_info)
@@ -145,62 +137,42 @@ class MediaStorage:
dirname = os.path.dirname(fname)
os.makedirs(dirname, exist_ok=True)
- finished_called = [False]
-
main_media_repo_write_trace_scope = start_active_span(
"writing to main media repo"
)
main_media_repo_write_trace_scope.__enter__()
- try:
- with open(fname, "wb") as f:
-
- async def finish() -> None:
- # When someone calls finish, we assume they are done writing to the main media repo
- main_media_repo_write_trace_scope.__exit__(None, None, None)
-
- with start_active_span("writing to other storage providers"):
- # Ensure that all writes have been flushed and close the
- # file.
- f.flush()
- f.close()
-
- spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam(
- ReadableFileWrapper(self.clock, fname), file_info
- )
- if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
- logger.info("Blocking media due to spam checker")
- # Note that we'll delete the stored media, due to the
- # try/except below. The media also won't be stored in
- # the DB.
- # We currently ignore any additional field returned by
- # the spam-check API.
- raise SpamMediaException(errcode=spam_check[0])
-
- for provider in self.storage_providers:
- with start_active_span(str(provider)):
- await provider.store_file(path, file_info)
-
- finished_called[0] = True
-
- yield f, fname, finish
- except Exception as e:
+ with main_media_repo_write_trace_scope:
try:
- main_media_repo_write_trace_scope.__exit__(
- type(e), None, e.__traceback__
+ with open(fname, "wb") as f:
+ yield f, fname
+
+ except Exception as e:
+ try:
+ os.remove(fname)
+ except Exception:
+ pass
+
+ raise e from None
+
+ with start_active_span("writing to other storage providers"):
+ spam_check = (
+ await self._spam_checker_module_callbacks.check_media_file_for_spam(
+ ReadableFileWrapper(self.clock, fname), file_info
)
- os.remove(fname)
- except Exception:
- pass
-
- raise e from None
-
- if not finished_called:
- exc = Exception("Finished callback not called")
- main_media_repo_write_trace_scope.__exit__(
- type(exc), None, exc.__traceback__
)
- raise exc
+ if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
+ logger.info("Blocking media due to spam checker")
+ # Note that we'll delete the stored media, due to the
+ # try/except below. The media also won't be stored in
+ # the DB.
+ # We currently ignore any additional field returned by
+ # the spam-check API.
+ raise SpamMediaException(errcode=spam_check[0])
+
+ for provider in self.storage_providers:
+ with start_active_span(str(provider)):
+ await provider.store_file(path, file_info)
async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
"""Attempts to fetch media described by file_info from the local cache
diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py
index 3897823b35..2e65a04789 100644
--- a/synapse/media/url_previewer.py
+++ b/synapse/media/url_previewer.py
@@ -592,7 +592,7 @@ class UrlPreviewer:
file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)
- with self.media_storage.store_into_file(file_info) as (f, fname, finish):
+ async with self.media_storage.store_into_file(file_info) as (f, fname):
if url.startswith("data:"):
if not allow_data_urls:
raise SynapseError(
@@ -603,8 +603,6 @@ class UrlPreviewer:
else:
download_result = await self._download_url(url, f)
- await finish()
-
try:
time_now_ms = self.clock.time_msec()
diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
index 600cbf8963..be4a289ec1 100644
--- a/tests/rest/client/test_media.py
+++ b/tests/rest/client/test_media.py
@@ -93,13 +93,13 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
# from a regular 404.
file_id = "abcdefg12345"
file_info = FileInfo(server_name=self.remote_server_name, file_id=file_id)
- with hs.get_media_repository().media_storage.store_into_file(file_info) as (
- f,
- fname,
- finish,
- ):
- f.write(SMALL_PNG)
- self.get_success(finish())
+
+ media_storage = hs.get_media_repository().media_storage
+
+ ctx = media_storage.store_into_file(file_info)
+ (f, fname) = self.get_success(ctx.__aenter__())
+ f.write(SMALL_PNG)
+ self.get_success(ctx.__aexit__(None, None, None))
self.get_success(
self.store.store_cached_remote_media(
diff --git a/tests/rest/media/test_domain_blocking.py b/tests/rest/media/test_domain_blocking.py
index 88988f3a22..72205c6bb3 100644
--- a/tests/rest/media/test_domain_blocking.py
+++ b/tests/rest/media/test_domain_blocking.py
@@ -44,13 +44,13 @@ class MediaDomainBlockingTests(unittest.HomeserverTestCase):
# from a regular 404.
file_id = "abcdefg12345"
file_info = FileInfo(server_name=self.remote_server_name, file_id=file_id)
- with hs.get_media_repository().media_storage.store_into_file(file_info) as (
- f,
- fname,
- finish,
- ):
- f.write(SMALL_PNG)
- self.get_success(finish())
+
+ media_storage = hs.get_media_repository().media_storage
+
+ ctx = media_storage.store_into_file(file_info)
+ (f, fname) = self.get_success(ctx.__aenter__())
+ f.write(SMALL_PNG)
+ self.get_success(ctx.__aexit__(None, None, None))
self.get_success(
self.store.store_cached_remote_media(
From 94ef2f4f5d60f6e73fdfb96c8816f3af83f65eb8 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 May 2024 11:16:00 +0100
Subject: [PATCH 126/503] Handle duplicate OTK uploads racing (#17241)
Currently this causes one of then to 500.
---
changelog.d/17241.bugfix | 1 +
synapse/handlers/e2e_keys.py | 78 +++++++++++++++++++++---------------
2 files changed, 46 insertions(+), 33 deletions(-)
create mode 100644 changelog.d/17241.bugfix
diff --git a/changelog.d/17241.bugfix b/changelog.d/17241.bugfix
new file mode 100644
index 0000000000..1b7f0bca94
--- /dev/null
+++ b/changelog.d/17241.bugfix
@@ -0,0 +1 @@
+Fix handling of duplicate concurrent uploading of device one-time-keys.
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 1ece54ccfc..4f40e9ffd6 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -53,6 +53,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock"
+
+
class E2eKeysHandler:
def __init__(self, hs: "HomeServer"):
self.config = hs.config
@@ -62,6 +65,7 @@ class E2eKeysHandler:
self._appservice_handler = hs.get_application_service_handler()
self.is_mine = hs.is_mine
self.clock = hs.get_clock()
+ self._worker_lock_handler = hs.get_worker_locks_handler()
federation_registry = hs.get_federation_registry()
@@ -855,45 +859,53 @@ class E2eKeysHandler:
async def _upload_one_time_keys_for_user(
self, user_id: str, device_id: str, time_now: int, one_time_keys: JsonDict
) -> None:
- logger.info(
- "Adding one_time_keys %r for device %r for user %r at %d",
- one_time_keys.keys(),
- device_id,
- user_id,
- time_now,
- )
+ # We take out a lock so that we don't have to worry about a client
+ # sending duplicate requests.
+ lock_key = f"{user_id}_{device_id}"
+ async with self._worker_lock_handler.acquire_lock(
+ ONE_TIME_KEY_UPLOAD, lock_key
+ ):
+ logger.info(
+ "Adding one_time_keys %r for device %r for user %r at %d",
+ one_time_keys.keys(),
+ device_id,
+ user_id,
+ time_now,
+ )
- # make a list of (alg, id, key) tuples
- key_list = []
- for key_id, key_obj in one_time_keys.items():
- algorithm, key_id = key_id.split(":")
- key_list.append((algorithm, key_id, key_obj))
+ # make a list of (alg, id, key) tuples
+ key_list = []
+ for key_id, key_obj in one_time_keys.items():
+ algorithm, key_id = key_id.split(":")
+ key_list.append((algorithm, key_id, key_obj))
- # First we check if we have already persisted any of the keys.
- existing_key_map = await self.store.get_e2e_one_time_keys(
- user_id, device_id, [k_id for _, k_id, _ in key_list]
- )
+ # First we check if we have already persisted any of the keys.
+ existing_key_map = await self.store.get_e2e_one_time_keys(
+ user_id, device_id, [k_id for _, k_id, _ in key_list]
+ )
- new_keys = [] # Keys that we need to insert. (alg, id, json) tuples.
- for algorithm, key_id, key in key_list:
- ex_json = existing_key_map.get((algorithm, key_id), None)
- if ex_json:
- if not _one_time_keys_match(ex_json, key):
- raise SynapseError(
- 400,
- (
- "One time key %s:%s already exists. "
- "Old key: %s; new key: %r"
+ new_keys = [] # Keys that we need to insert. (alg, id, json) tuples.
+ for algorithm, key_id, key in key_list:
+ ex_json = existing_key_map.get((algorithm, key_id), None)
+ if ex_json:
+ if not _one_time_keys_match(ex_json, key):
+ raise SynapseError(
+ 400,
+ (
+ "One time key %s:%s already exists. "
+ "Old key: %s; new key: %r"
+ )
+ % (algorithm, key_id, ex_json, key),
)
- % (algorithm, key_id, ex_json, key),
+ else:
+ new_keys.append(
+ (algorithm, key_id, encode_canonical_json(key).decode("ascii"))
)
- else:
- new_keys.append(
- (algorithm, key_id, encode_canonical_json(key).decode("ascii"))
- )
- log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
- await self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
+ log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
+ await self.store.add_e2e_one_time_keys(
+ user_id, device_id, time_now, new_keys
+ )
async def upload_signing_keys_for_user(
self, user_id: str, keys: JsonDict
From d7198dfb950ad4b2b1c65ff1b22026782d231f3c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 May 2024 11:52:48 +0100
Subject: [PATCH 127/503] Ignore attempts to send to-device messages to bad
users (#17240)
Currently sending a to-device message to a user ID with a dodgy
destination is accepted, but then ends up spamming the logs when we try
and send to the destination.
An alternative would be to reject the request, but I'm slightly nervous
that could break things.
---
changelog.d/17240.bugfix | 1 +
synapse/handlers/devicemessage.py | 7 +++++++
2 files changed, 8 insertions(+)
create mode 100644 changelog.d/17240.bugfix
diff --git a/changelog.d/17240.bugfix b/changelog.d/17240.bugfix
new file mode 100644
index 0000000000..c596d270ce
--- /dev/null
+++ b/changelog.d/17240.bugfix
@@ -0,0 +1 @@
+Ignore attempts to send to-device messages to bad users, to avoid log spam when we try to connect to the bad server.
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index 79be7c97c8..e56bdb4072 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -236,6 +236,13 @@ class DeviceMessageHandler:
local_messages = {}
remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
for user_id, by_device in messages.items():
+ if not UserID.is_valid(user_id):
+ logger.warning(
+ "Ignoring attempt to send device message to invalid user: %r",
+ user_id,
+ )
+ continue
+
# add an opentracing log entry for each message
for device_id, message_content in by_device.items():
log_kv(
From 967b6948b0d738bc685d433d44e82631fd2ad232 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 May 2024 12:04:13 +0100
Subject: [PATCH 128/503] Change allow_unsafe_locale to also apply on new
databases (#17238)
We relax this as there are use cases where this is safe, though it is
still highly recommended that people avoid using it.
---
changelog.d/17238.misc | 1 +
docs/postgres.md | 11 +++++------
synapse/storage/engines/postgres.py | 8 +++++++-
3 files changed, 13 insertions(+), 7 deletions(-)
create mode 100644 changelog.d/17238.misc
diff --git a/changelog.d/17238.misc b/changelog.d/17238.misc
new file mode 100644
index 0000000000..261467e55c
--- /dev/null
+++ b/changelog.d/17238.misc
@@ -0,0 +1 @@
+Change the `allow_unsafe_locale` config option to also apply when setting up new databases.
diff --git a/docs/postgres.md b/docs/postgres.md
index ae34f7689b..4b2ba38275 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -242,12 +242,11 @@ host all all ::1/128 ident
### Fixing incorrect `COLLATE` or `CTYPE`
-Synapse will refuse to set up a new database if it has the wrong values of
-`COLLATE` and `CTYPE` set. Synapse will also refuse to start an existing database with incorrect values
-of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
-`database` section of the config, is set to true. Using different locales can cause issues if the locale library is updated from
-underneath the database, or if a different version of the locale is used on any
-replicas.
+Synapse will refuse to start when using a database with incorrect values of
+`COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
+`database` section of the config, is set to true. Using different locales can
+cause issues if the locale library is updated from underneath the database, or
+if a different version of the locale is used on any replicas.
If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with
the correct locale parameter (as shown above). It is also possible to change the
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index b9168ee074..90641d5a18 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -142,6 +142,10 @@ class PostgresEngine(
apply stricter checks on new databases versus existing database.
"""
+ allow_unsafe_locale = self.config.get("allow_unsafe_locale", False)
+ if allow_unsafe_locale:
+ return
+
collation, ctype = self.get_db_locale(txn)
errors = []
@@ -155,7 +159,9 @@ class PostgresEngine(
if errors:
raise IncorrectDatabaseSetup(
"Database is incorrectly configured:\n\n%s\n\n"
- "See docs/postgres.md for more information." % ("\n".join(errors))
+ "See docs/postgres.md for more information. You can override this check by"
+ "setting 'allow_unsafe_locale' to true in the database config.",
+ "\n".join(errors),
)
def convert_param_style(self, sql: str) -> str:
From 726006cdf2dfea3bcac9f6e0e912646b1751bdb7 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 May 2024 12:57:10 +0100
Subject: [PATCH 129/503] Don't invalidate all `get_relations_for_event` on
history purge (#17083)
This is a tree cache already, so may as well move the room ID to the
front and use that
---
changelog.d/17083.misc | 1 +
synapse/handlers/relations.py | 2 +-
synapse/storage/databases/main/cache.py | 18 +++++++++++---
synapse/storage/databases/main/events.py | 7 +++++-
.../databases/main/events_bg_updates.py | 24 +++++++++++++------
synapse/storage/databases/main/relations.py | 2 +-
6 files changed, 41 insertions(+), 13 deletions(-)
create mode 100644 changelog.d/17083.misc
diff --git a/changelog.d/17083.misc b/changelog.d/17083.misc
new file mode 100644
index 0000000000..7c7cebea4e
--- /dev/null
+++ b/changelog.d/17083.misc
@@ -0,0 +1 @@
+Improve DB usage when fetching related events.
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index c5cee8860b..de092f8623 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -393,9 +393,9 @@ class RelationsHandler:
# Attempt to find another event to use as the latest event.
potential_events, _ = await self._main_store.get_relations_for_event(
+ room_id,
event_id,
event,
- room_id,
RelationTypes.THREAD,
direction=Direction.FORWARDS,
)
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index bfd492d95d..c6787faea0 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -318,7 +318,13 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._invalidate_local_get_event_cache(redacts) # type: ignore[attr-defined]
# Caches which might leak edits must be invalidated for the event being
# redacted.
- self._attempt_to_invalidate_cache("get_relations_for_event", (redacts,))
+ self._attempt_to_invalidate_cache(
+ "get_relations_for_event",
+ (
+ room_id,
+ redacts,
+ ),
+ )
self._attempt_to_invalidate_cache("get_applicable_edit", (redacts,))
self._attempt_to_invalidate_cache("get_thread_id", (redacts,))
self._attempt_to_invalidate_cache("get_thread_id_for_receipts", (redacts,))
@@ -345,7 +351,13 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
)
if relates_to:
- self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,))
+ self._attempt_to_invalidate_cache(
+ "get_relations_for_event",
+ (
+ room_id,
+ relates_to,
+ ),
+ )
self._attempt_to_invalidate_cache("get_references_for_event", (relates_to,))
self._attempt_to_invalidate_cache("get_applicable_edit", (relates_to,))
self._attempt_to_invalidate_cache("get_thread_summary", (relates_to,))
@@ -380,9 +392,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache(
"get_unread_event_push_actions_by_room_for_user", (room_id,)
)
+ self._attempt_to_invalidate_cache("get_relations_for_event", (room_id,))
self._attempt_to_invalidate_cache("_get_membership_from_event_id", None)
- self._attempt_to_invalidate_cache("get_relations_for_event", None)
self._attempt_to_invalidate_cache("get_applicable_edit", None)
self._attempt_to_invalidate_cache("get_thread_id", None)
self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None)
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 990698aa5c..fd7167904d 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1923,7 +1923,12 @@ class PersistEventsStore:
# Any relation information for the related event must be cleared.
self.store._invalidate_cache_and_stream(
- txn, self.store.get_relations_for_event, (redacted_relates_to,)
+ txn,
+ self.store.get_relations_for_event,
+ (
+ room_id,
+ redacted_relates_to,
+ ),
)
if rel_type == RelationTypes.REFERENCE:
self.store._invalidate_cache_and_stream(
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 6c979f9f2c..64d303e330 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -1181,7 +1181,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
results = list(txn)
# (event_id, parent_id, rel_type) for each relation
- relations_to_insert: List[Tuple[str, str, str]] = []
+ relations_to_insert: List[Tuple[str, str, str, str]] = []
for event_id, event_json_raw in results:
try:
event_json = db_to_json(event_json_raw)
@@ -1214,7 +1214,8 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
if not isinstance(parent_id, str):
continue
- relations_to_insert.append((event_id, parent_id, rel_type))
+ room_id = event_json["room_id"]
+ relations_to_insert.append((room_id, event_id, parent_id, rel_type))
# Insert the missing data, note that we upsert here in case the event
# has already been processed.
@@ -1223,18 +1224,27 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
txn=txn,
table="event_relations",
key_names=("event_id",),
- key_values=[(r[0],) for r in relations_to_insert],
+ key_values=[(r[1],) for r in relations_to_insert],
value_names=("relates_to_id", "relation_type"),
- value_values=[r[1:] for r in relations_to_insert],
+ value_values=[r[2:] for r in relations_to_insert],
)
# Iterate the parent IDs and invalidate caches.
- cache_tuples = {(r[1],) for r in relations_to_insert}
self._invalidate_cache_and_stream_bulk( # type: ignore[attr-defined]
- txn, self.get_relations_for_event, cache_tuples # type: ignore[attr-defined]
+ txn,
+ self.get_relations_for_event, # type: ignore[attr-defined]
+ {
+ (
+ r[0], # room_id
+ r[2], # parent_id
+ )
+ for r in relations_to_insert
+ },
)
self._invalidate_cache_and_stream_bulk( # type: ignore[attr-defined]
- txn, self.get_thread_summary, cache_tuples # type: ignore[attr-defined]
+ txn,
+ self.get_thread_summary, # type: ignore[attr-defined]
+ {(r[1],) for r in relations_to_insert},
)
if results:
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 77f3641525..29a001ff92 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -169,9 +169,9 @@ class RelationsWorkerStore(SQLBaseStore):
@cached(uncached_args=("event",), tree=True)
async def get_relations_for_event(
self,
+ room_id: str,
event_id: str,
event: EventBase,
- room_id: str,
relation_type: Optional[str] = None,
event_type: Optional[str] = None,
limit: int = 5,
From 466f344547fc6bea2c43257dd65286380fbb512d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 May 2024 13:19:10 +0100
Subject: [PATCH 130/503] Move towards using `MultiWriterIdGenerator`
everywhere (#17226)
There is a problem with `StreamIdGenerator` where it can go backwards
over restarts when a stream ID is requested but then not inserted into
the DB. This is problematic if we want to land #17215, and is generally
a potential cause for all sorts of nastiness.
Instead of trying to fix `StreamIdGenerator`, we may as well move to
`MultiWriterIdGenerator` that does not suffer from this problem (the
latest positions are stored in `stream_positions` table). This involves
adding SQLite support to the class.
This only changes id generators that were already using
`MultiWriterIdGenerator` under postgres, a separate PR will move the
rest of the uses of `StreamIdGenerator` over.
---
changelog.d/17226.misc | 1 +
synapse/storage/database.py | 21 +-
.../storage/databases/main/account_data.py | 47 +--
synapse/storage/databases/main/deviceinbox.py | 46 +--
.../storage/databases/main/events_worker.py | 101 ++---
synapse/storage/databases/main/presence.py | 27 +-
synapse/storage/databases/main/receipts.py | 43 +-
synapse/storage/databases/main/room.py | 34 +-
synapse/storage/util/id_generators.py | 49 ++-
tests/storage/test_id_generators.py | 367 ++++++++++--------
10 files changed, 349 insertions(+), 387 deletions(-)
create mode 100644 changelog.d/17226.misc
diff --git a/changelog.d/17226.misc b/changelog.d/17226.misc
new file mode 100644
index 0000000000..7c023a5759
--- /dev/null
+++ b/changelog.d/17226.misc
@@ -0,0 +1 @@
+Move towards using `MultiWriterIdGenerator` everywhere.
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index d9c85e411e..569f618193 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -2461,7 +2461,11 @@ class DatabasePool:
def make_in_list_sql_clause(
- database_engine: BaseDatabaseEngine, column: str, iterable: Collection[Any]
+ database_engine: BaseDatabaseEngine,
+ column: str,
+ iterable: Collection[Any],
+ *,
+ negative: bool = False,
) -> Tuple[str, list]:
"""Returns an SQL clause that checks the given column is in the iterable.
@@ -2474,6 +2478,7 @@ def make_in_list_sql_clause(
database_engine
column: Name of the column
iterable: The values to check the column against.
+ negative: Whether we should check for inequality, i.e. `NOT IN`
Returns:
A tuple of SQL query and the args
@@ -2482,9 +2487,19 @@ def make_in_list_sql_clause(
if database_engine.supports_using_any_list:
# This should hopefully be faster, but also makes postgres query
# stats easier to understand.
- return "%s = ANY(?)" % (column,), [list(iterable)]
+ if not negative:
+ clause = f"{column} = ANY(?)"
+ else:
+ clause = f"{column} != ALL(?)"
+
+ return clause, [list(iterable)]
else:
- return "%s IN (%s)" % (column, ",".join("?" for _ in iterable)), list(iterable)
+ params = ",".join("?" for _ in iterable)
+ if not negative:
+ clause = f"{column} IN ({params})"
+ else:
+ clause = f"{column} NOT IN ({params})"
+ return clause, list(iterable)
# These overloads ensure that `columns` and `iterable` values have the same length.
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index 563450a97e..9611a84932 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -43,11 +43,9 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
-from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
MultiWriterIdGenerator,
- StreamIdGenerator,
)
from synapse.types import JsonDict, JsonMapping
from synapse.util import json_encoder
@@ -75,37 +73,20 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
self._account_data_id_gen: AbstractStreamIdGenerator
- if isinstance(database.engine, PostgresEngine):
- self._account_data_id_gen = MultiWriterIdGenerator(
- db_conn=db_conn,
- db=database,
- notifier=hs.get_replication_notifier(),
- stream_name="account_data",
- instance_name=self._instance_name,
- tables=[
- ("room_account_data", "instance_name", "stream_id"),
- ("room_tags_revisions", "instance_name", "stream_id"),
- ("account_data", "instance_name", "stream_id"),
- ],
- sequence_name="account_data_sequence",
- writers=hs.config.worker.writers.account_data,
- )
- else:
- # Multiple writers are not supported for SQLite.
- #
- # We shouldn't be running in worker mode with SQLite, but its useful
- # to support it for unit tests.
- self._account_data_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "room_account_data",
- "stream_id",
- extra_tables=[
- ("account_data", "stream_id"),
- ("room_tags_revisions", "stream_id"),
- ],
- is_writer=self._instance_name in hs.config.worker.writers.account_data,
- )
+ self._account_data_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="account_data",
+ instance_name=self._instance_name,
+ tables=[
+ ("room_account_data", "instance_name", "stream_id"),
+ ("room_tags_revisions", "instance_name", "stream_id"),
+ ("account_data", "instance_name", "stream_id"),
+ ],
+ sequence_name="account_data_sequence",
+ writers=hs.config.worker.writers.account_data,
+ )
account_max = self.get_max_account_data_stream_id()
self._account_data_stream_cache = StreamChangeCache(
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index e17821ff6e..25023b5e7a 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -50,11 +50,9 @@ from synapse.storage.database import (
LoggingTransaction,
make_in_list_sql_clause,
)
-from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
MultiWriterIdGenerator,
- StreamIdGenerator,
)
from synapse.types import JsonDict
from synapse.util import json_encoder
@@ -89,35 +87,23 @@ class DeviceInboxWorkerStore(SQLBaseStore):
expiry_ms=30 * 60 * 1000,
)
- if isinstance(database.engine, PostgresEngine):
- self._can_write_to_device = (
- self._instance_name in hs.config.worker.writers.to_device
- )
+ self._can_write_to_device = (
+ self._instance_name in hs.config.worker.writers.to_device
+ )
- self._to_device_msg_id_gen: AbstractStreamIdGenerator = (
- MultiWriterIdGenerator(
- db_conn=db_conn,
- db=database,
- notifier=hs.get_replication_notifier(),
- stream_name="to_device",
- instance_name=self._instance_name,
- tables=[
- ("device_inbox", "instance_name", "stream_id"),
- ("device_federation_outbox", "instance_name", "stream_id"),
- ],
- sequence_name="device_inbox_sequence",
- writers=hs.config.worker.writers.to_device,
- )
- )
- else:
- self._can_write_to_device = True
- self._to_device_msg_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "device_inbox",
- "stream_id",
- extra_tables=[("device_federation_outbox", "stream_id")],
- )
+ self._to_device_msg_id_gen: AbstractStreamIdGenerator = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="to_device",
+ instance_name=self._instance_name,
+ tables=[
+ ("device_inbox", "instance_name", "stream_id"),
+ ("device_federation_outbox", "instance_name", "stream_id"),
+ ],
+ sequence_name="device_inbox_sequence",
+ writers=hs.config.worker.writers.to_device,
+ )
max_device_inbox_id = self._to_device_msg_id_gen.get_current_token()
device_inbox_prefill, min_device_inbox_id = self.db_pool.get_cache_dict(
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index e39d4b9624..426df2a9d2 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -75,12 +75,10 @@ from synapse.storage.database import (
LoggingDatabaseConnection,
LoggingTransaction,
)
-from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
MultiWriterIdGenerator,
- StreamIdGenerator,
)
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id
@@ -195,51 +193,28 @@ class EventsWorkerStore(SQLBaseStore):
self._stream_id_gen: AbstractStreamIdGenerator
self._backfill_id_gen: AbstractStreamIdGenerator
- if isinstance(database.engine, PostgresEngine):
- # If we're using Postgres than we can use `MultiWriterIdGenerator`
- # regardless of whether this process writes to the streams or not.
- self._stream_id_gen = MultiWriterIdGenerator(
- db_conn=db_conn,
- db=database,
- notifier=hs.get_replication_notifier(),
- stream_name="events",
- instance_name=hs.get_instance_name(),
- tables=[("events", "instance_name", "stream_ordering")],
- sequence_name="events_stream_seq",
- writers=hs.config.worker.writers.events,
- )
- self._backfill_id_gen = MultiWriterIdGenerator(
- db_conn=db_conn,
- db=database,
- notifier=hs.get_replication_notifier(),
- stream_name="backfill",
- instance_name=hs.get_instance_name(),
- tables=[("events", "instance_name", "stream_ordering")],
- sequence_name="events_backfill_stream_seq",
- positive=False,
- writers=hs.config.worker.writers.events,
- )
- else:
- # Multiple writers are not supported for SQLite.
- #
- # We shouldn't be running in worker mode with SQLite, but its useful
- # to support it for unit tests.
- self._stream_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "events",
- "stream_ordering",
- is_writer=hs.get_instance_name() in hs.config.worker.writers.events,
- )
- self._backfill_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "events",
- "stream_ordering",
- step=-1,
- extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
- is_writer=hs.get_instance_name() in hs.config.worker.writers.events,
- )
+
+ self._stream_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="events",
+ instance_name=hs.get_instance_name(),
+ tables=[("events", "instance_name", "stream_ordering")],
+ sequence_name="events_stream_seq",
+ writers=hs.config.worker.writers.events,
+ )
+ self._backfill_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="backfill",
+ instance_name=hs.get_instance_name(),
+ tables=[("events", "instance_name", "stream_ordering")],
+ sequence_name="events_backfill_stream_seq",
+ positive=False,
+ writers=hs.config.worker.writers.events,
+ )
events_max = self._stream_id_gen.get_current_token()
curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
@@ -309,27 +284,17 @@ class EventsWorkerStore(SQLBaseStore):
self._un_partial_stated_events_stream_id_gen: AbstractStreamIdGenerator
- if isinstance(database.engine, PostgresEngine):
- self._un_partial_stated_events_stream_id_gen = MultiWriterIdGenerator(
- db_conn=db_conn,
- db=database,
- notifier=hs.get_replication_notifier(),
- stream_name="un_partial_stated_event_stream",
- instance_name=hs.get_instance_name(),
- tables=[
- ("un_partial_stated_event_stream", "instance_name", "stream_id")
- ],
- sequence_name="un_partial_stated_event_stream_sequence",
- # TODO(faster_joins, multiple writers) Support multiple writers.
- writers=["master"],
- )
- else:
- self._un_partial_stated_events_stream_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "un_partial_stated_event_stream",
- "stream_id",
- )
+ self._un_partial_stated_events_stream_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="un_partial_stated_event_stream",
+ instance_name=hs.get_instance_name(),
+ tables=[("un_partial_stated_event_stream", "instance_name", "stream_id")],
+ sequence_name="un_partial_stated_event_stream_sequence",
+ # TODO(faster_joins, multiple writers) Support multiple writers.
+ writers=["master"],
+ )
def get_un_partial_stated_events_token(self, instance_name: str) -> int:
return (
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index 567c2d30bd..923e764491 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -40,13 +40,11 @@ from synapse.storage.database import (
LoggingTransaction,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
-from synapse.storage.engines import PostgresEngine
from synapse.storage.engines._base import IsolationLevel
from synapse.storage.types import Connection
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
MultiWriterIdGenerator,
- StreamIdGenerator,
)
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.stream_change_cache import StreamChangeCache
@@ -91,21 +89,16 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
self._instance_name in hs.config.worker.writers.presence
)
- if isinstance(database.engine, PostgresEngine):
- self._presence_id_gen = MultiWriterIdGenerator(
- db_conn=db_conn,
- db=database,
- notifier=hs.get_replication_notifier(),
- stream_name="presence_stream",
- instance_name=self._instance_name,
- tables=[("presence_stream", "instance_name", "stream_id")],
- sequence_name="presence_stream_sequence",
- writers=hs.config.worker.writers.presence,
- )
- else:
- self._presence_id_gen = StreamIdGenerator(
- db_conn, hs.get_replication_notifier(), "presence_stream", "stream_id"
- )
+ self._presence_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="presence_stream",
+ instance_name=self._instance_name,
+ tables=[("presence_stream", "instance_name", "stream_id")],
+ sequence_name="presence_stream_sequence",
+ writers=hs.config.worker.writers.presence,
+ )
self.hs = hs
self._presence_on_startup = self._get_active_presence(db_conn)
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 13387a3839..8432560a89 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -44,12 +44,10 @@ from synapse.storage.database import (
LoggingDatabaseConnection,
LoggingTransaction,
)
-from synapse.storage.engines import PostgresEngine
from synapse.storage.engines._base import IsolationLevel
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
MultiWriterIdGenerator,
- StreamIdGenerator,
)
from synapse.types import (
JsonDict,
@@ -80,35 +78,20 @@ class ReceiptsWorkerStore(SQLBaseStore):
# class below that is used on the main process.
self._receipts_id_gen: AbstractStreamIdGenerator
- if isinstance(database.engine, PostgresEngine):
- self._can_write_to_receipts = (
- self._instance_name in hs.config.worker.writers.receipts
- )
+ self._can_write_to_receipts = (
+ self._instance_name in hs.config.worker.writers.receipts
+ )
- self._receipts_id_gen = MultiWriterIdGenerator(
- db_conn=db_conn,
- db=database,
- notifier=hs.get_replication_notifier(),
- stream_name="receipts",
- instance_name=self._instance_name,
- tables=[("receipts_linearized", "instance_name", "stream_id")],
- sequence_name="receipts_sequence",
- writers=hs.config.worker.writers.receipts,
- )
- else:
- self._can_write_to_receipts = True
-
- # Multiple writers are not supported for SQLite.
- #
- # We shouldn't be running in worker mode with SQLite, but its useful
- # to support it for unit tests.
- self._receipts_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "receipts_linearized",
- "stream_id",
- is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts,
- )
+ self._receipts_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="receipts",
+ instance_name=self._instance_name,
+ tables=[("receipts_linearized", "instance_name", "stream_id")],
+ sequence_name="receipts_sequence",
+ writers=hs.config.worker.writers.receipts,
+ )
super().__init__(database, db_conn, hs)
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 8205109548..616c941687 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -58,13 +58,11 @@ from synapse.storage.database import (
LoggingTransaction,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
-from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
IdGenerator,
MultiWriterIdGenerator,
- StreamIdGenerator,
)
from synapse.types import JsonDict, RetentionPolicy, StrCollection, ThirdPartyInstanceID
from synapse.util import json_encoder
@@ -155,27 +153,17 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
self._un_partial_stated_rooms_stream_id_gen: AbstractStreamIdGenerator
- if isinstance(database.engine, PostgresEngine):
- self._un_partial_stated_rooms_stream_id_gen = MultiWriterIdGenerator(
- db_conn=db_conn,
- db=database,
- notifier=hs.get_replication_notifier(),
- stream_name="un_partial_stated_room_stream",
- instance_name=self._instance_name,
- tables=[
- ("un_partial_stated_room_stream", "instance_name", "stream_id")
- ],
- sequence_name="un_partial_stated_room_stream_sequence",
- # TODO(faster_joins, multiple writers) Support multiple writers.
- writers=["master"],
- )
- else:
- self._un_partial_stated_rooms_stream_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "un_partial_stated_room_stream",
- "stream_id",
- )
+ self._un_partial_stated_rooms_stream_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="un_partial_stated_room_stream",
+ instance_name=self._instance_name,
+ tables=[("un_partial_stated_room_stream", "instance_name", "stream_id")],
+ sequence_name="un_partial_stated_room_stream_sequence",
+ # TODO(faster_joins, multiple writers) Support multiple writers.
+ writers=["master"],
+ )
def process_replication_position(
self, stream_name: str, instance_name: str, token: int
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index fadc75cc80..0cf5851ad7 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -53,9 +53,11 @@ from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
+ make_in_list_sql_clause,
)
+from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor
-from synapse.storage.util.sequence import PostgresSequenceGenerator
+from synapse.storage.util.sequence import build_sequence_generator
if TYPE_CHECKING:
from synapse.notifier import ReplicationNotifier
@@ -432,7 +434,22 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
# no active writes in progress.
self._max_position_of_local_instance = self._max_seen_allocated_stream_id
- self._sequence_gen = PostgresSequenceGenerator(sequence_name)
+ # This goes and fills out the above state from the database.
+ self._load_current_ids(db_conn, tables)
+
+ self._sequence_gen = build_sequence_generator(
+ db_conn=db_conn,
+ database_engine=db.engine,
+ get_first_callback=lambda _: self._persisted_upto_position,
+ sequence_name=sequence_name,
+ # We only need to set the below if we want it to call
+ # `check_consistency`, but we do that ourselves below so we can
+ # leave them blank.
+ table=None,
+ id_column=None,
+ stream_name=None,
+ positive=positive,
+ )
# We check that the table and sequence haven't diverged.
for table, _, id_column in tables:
@@ -444,9 +461,6 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
positive=positive,
)
- # This goes and fills out the above state from the database.
- self._load_current_ids(db_conn, tables)
-
self._max_seen_allocated_stream_id = max(
self._current_positions.values(), default=1
)
@@ -480,13 +494,17 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
# important if we add back a writer after a long time; we want to
# consider that a "new" writer, rather than using the old stale
# entry here.
- sql = """
+ clause, args = make_in_list_sql_clause(
+ self._db.engine, "instance_name", self._writers, negative=True
+ )
+
+ sql = f"""
DELETE FROM stream_positions
WHERE
stream_name = ?
- AND instance_name != ALL(?)
+ AND {clause}
"""
- cur.execute(sql, (self._stream_name, self._writers))
+ cur.execute(sql, [self._stream_name] + args)
sql = """
SELECT instance_name, stream_id FROM stream_positions
@@ -508,12 +526,16 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
# We add a GREATEST here to ensure that the result is always
# positive. (This can be a problem for e.g. backfill streams where
# the server has never backfilled).
+ greatest_func = (
+ "GREATEST" if isinstance(self._db.engine, PostgresEngine) else "MAX"
+ )
max_stream_id = 1
for table, _, id_column in tables:
sql = """
- SELECT GREATEST(COALESCE(%(agg)s(%(id)s), 1), 1)
+ SELECT %(greatest_func)s(COALESCE(%(agg)s(%(id)s), 1), 1)
FROM %(table)s
""" % {
+ "greatest_func": greatest_func,
"id": id_column,
"table": table,
"agg": "MAX" if self._positive else "-MIN",
@@ -913,6 +935,11 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
# We upsert the value, ensuring on conflict that we always increase the
# value (or decrease if stream goes backwards).
+ if isinstance(self._db.engine, PostgresEngine):
+ agg = "GREATEST" if self._positive else "LEAST"
+ else:
+ agg = "MAX" if self._positive else "MIN"
+
sql = """
INSERT INTO stream_positions (stream_name, instance_name, stream_id)
VALUES (?, ?, ?)
@@ -920,10 +947,10 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
DO UPDATE SET
stream_id = %(agg)s(stream_positions.stream_id, EXCLUDED.stream_id)
""" % {
- "agg": "GREATEST" if self._positive else "LEAST",
+ "agg": agg,
}
- pos = (self.get_current_token_for_writer(self._instance_name),)
+ pos = self.get_current_token_for_writer(self._instance_name)
txn.execute(sql, (self._stream_name, self._instance_name, pos))
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index 409d856ab9..fad9511cea 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -31,6 +31,11 @@ from synapse.storage.database import (
from synapse.storage.engines import IncorrectDatabaseSetup
from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
+from synapse.storage.util.sequence import (
+ LocalSequenceGenerator,
+ PostgresSequenceGenerator,
+ SequenceGenerator,
+)
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -175,18 +180,22 @@ class StreamIdGeneratorTestCase(HomeserverTestCase):
self.get_success(test_gen_next())
-class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
- if not USE_POSTGRES_FOR_TESTS:
- skip = "Requires Postgres"
-
+class MultiWriterIdGeneratorBase(HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.db_pool: DatabasePool = self.store.db_pool
self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
+ if USE_POSTGRES_FOR_TESTS:
+ self.seq_gen: SequenceGenerator = PostgresSequenceGenerator("foobar_seq")
+ else:
+ self.seq_gen = LocalSequenceGenerator(lambda _: 0)
+
def _setup_db(self, txn: LoggingTransaction) -> None:
- txn.execute("CREATE SEQUENCE foobar_seq")
+ if USE_POSTGRES_FOR_TESTS:
+ txn.execute("CREATE SEQUENCE foobar_seq")
+
txn.execute(
"""
CREATE TABLE foobar (
@@ -221,44 +230,27 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
def _insert(txn: LoggingTransaction) -> None:
for _ in range(number):
+ next_val = self.seq_gen.get_next_id_txn(txn)
txn.execute(
- "INSERT INTO foobar VALUES (nextval('foobar_seq'), ?)",
- (instance_name,),
+ "INSERT INTO foobar (stream_id, instance_name) VALUES (?, ?)",
+ (
+ next_val,
+ instance_name,
+ ),
)
+
txn.execute(
"""
- INSERT INTO stream_positions VALUES ('test_stream', ?, lastval())
- ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = lastval()
+ INSERT INTO stream_positions VALUES ('test_stream', ?, ?)
+ ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ?
""",
- (instance_name,),
+ (instance_name, next_val, next_val),
)
self.get_success(self.db_pool.runInteraction("_insert_rows", _insert))
- def _insert_row_with_id(self, instance_name: str, stream_id: int) -> None:
- """Insert one row as the given instance with given stream_id, updating
- the postgres sequence position to match.
- """
-
- def _insert(txn: LoggingTransaction) -> None:
- txn.execute(
- "INSERT INTO foobar VALUES (?, ?)",
- (
- stream_id,
- instance_name,
- ),
- )
- txn.execute("SELECT setval('foobar_seq', ?)", (stream_id,))
- txn.execute(
- """
- INSERT INTO stream_positions VALUES ('test_stream', ?, ?)
- ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ?
- """,
- (instance_name, stream_id, stream_id),
- )
-
- self.get_success(self.db_pool.runInteraction("_insert_row_with_id", _insert))
+class MultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
def test_empty(self) -> None:
"""Test an ID generator against an empty database gives sensible
current positions.
@@ -347,6 +339,176 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(id_gen.get_positions(), {"master": 11})
self.assertEqual(id_gen.get_current_token_for_writer("master"), 11)
+ def test_get_next_txn(self) -> None:
+ """Test that the `get_next_txn` function works correctly."""
+
+ # Prefill table with 7 rows written by 'master'
+ self._insert_rows("master", 7)
+
+ id_gen = self._create_id_generator()
+
+ self.assertEqual(id_gen.get_positions(), {"master": 7})
+ self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
+
+ # Try allocating a new ID gen and check that we only see position
+ # advanced after we leave the context manager.
+
+ def _get_next_txn(txn: LoggingTransaction) -> None:
+ stream_id = id_gen.get_next_txn(txn)
+ self.assertEqual(stream_id, 8)
+
+ self.assertEqual(id_gen.get_positions(), {"master": 7})
+ self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
+
+ self.get_success(self.db_pool.runInteraction("test", _get_next_txn))
+
+ self.assertEqual(id_gen.get_positions(), {"master": 8})
+ self.assertEqual(id_gen.get_current_token_for_writer("master"), 8)
+
+ def test_restart_during_out_of_order_persistence(self) -> None:
+ """Test that restarting a process while another process is writing out
+ of order updates are handled correctly.
+ """
+
+ # Prefill table with 7 rows written by 'master'
+ self._insert_rows("master", 7)
+
+ id_gen = self._create_id_generator()
+
+ self.assertEqual(id_gen.get_positions(), {"master": 7})
+ self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
+
+ # Persist two rows at once
+ ctx1 = id_gen.get_next()
+ ctx2 = id_gen.get_next()
+
+ s1 = self.get_success(ctx1.__aenter__())
+ s2 = self.get_success(ctx2.__aenter__())
+
+ self.assertEqual(s1, 8)
+ self.assertEqual(s2, 9)
+
+ self.assertEqual(id_gen.get_positions(), {"master": 7})
+ self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
+
+ # We finish persisting the second row before restart
+ self.get_success(ctx2.__aexit__(None, None, None))
+
+ # We simulate a restart of another worker by just creating a new ID gen.
+ id_gen_worker = self._create_id_generator("worker")
+
+ # Restarted worker should not see the second persisted row
+ self.assertEqual(id_gen_worker.get_positions(), {"master": 7})
+ self.assertEqual(id_gen_worker.get_current_token_for_writer("master"), 7)
+
+ # Now if we persist the first row then both instances should jump ahead
+ # correctly.
+ self.get_success(ctx1.__aexit__(None, None, None))
+
+ self.assertEqual(id_gen.get_positions(), {"master": 9})
+ id_gen_worker.advance("master", 9)
+ self.assertEqual(id_gen_worker.get_positions(), {"master": 9})
+
+
+class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
+ if not USE_POSTGRES_FOR_TESTS:
+ skip = "Requires Postgres"
+
+ def _insert_row_with_id(self, instance_name: str, stream_id: int) -> None:
+ """Insert one row as the given instance with given stream_id, updating
+ the postgres sequence position to match.
+ """
+
+ def _insert(txn: LoggingTransaction) -> None:
+ txn.execute(
+ "INSERT INTO foobar (stream_id, instance_name) VALUES (?, ?)",
+ (
+ stream_id,
+ instance_name,
+ ),
+ )
+
+ txn.execute("SELECT setval('foobar_seq', ?)", (stream_id,))
+
+ txn.execute(
+ """
+ INSERT INTO stream_positions VALUES ('test_stream', ?, ?)
+ ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ?
+ """,
+ (instance_name, stream_id, stream_id),
+ )
+
+ self.get_success(self.db_pool.runInteraction("_insert_row_with_id", _insert))
+
+ def test_get_persisted_upto_position(self) -> None:
+ """Test that `get_persisted_upto_position` correctly tracks updates to
+ positions.
+ """
+
+ # The following tests are a bit cheeky in that we notify about new
+ # positions via `advance` without *actually* advancing the postgres
+ # sequence.
+
+ self._insert_row_with_id("first", 3)
+ self._insert_row_with_id("second", 5)
+
+ id_gen = self._create_id_generator("worker", writers=["first", "second"])
+
+ self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5})
+
+ # Min is 3 and there is a gap between 5, so we expect it to be 3.
+ self.assertEqual(id_gen.get_persisted_upto_position(), 3)
+
+ # We advance "first" straight to 6. Min is now 5 but there is no gap so
+ # we expect it to be 6
+ id_gen.advance("first", 6)
+ self.assertEqual(id_gen.get_persisted_upto_position(), 6)
+
+ # No gap, so we expect 7.
+ id_gen.advance("second", 7)
+ self.assertEqual(id_gen.get_persisted_upto_position(), 7)
+
+ # We haven't seen 8 yet, so we expect 7 still.
+ id_gen.advance("second", 9)
+ self.assertEqual(id_gen.get_persisted_upto_position(), 7)
+
+ # Now that we've seen 7, 8 and 9 we can got straight to 9.
+ id_gen.advance("first", 8)
+ self.assertEqual(id_gen.get_persisted_upto_position(), 9)
+
+ # Jump forward with gaps. The minimum is 11, even though we haven't seen
+ # 10 we know that everything before 11 must be persisted.
+ id_gen.advance("first", 11)
+ id_gen.advance("second", 15)
+ self.assertEqual(id_gen.get_persisted_upto_position(), 11)
+
+ def test_get_persisted_upto_position_get_next(self) -> None:
+ """Test that `get_persisted_upto_position` correctly tracks updates to
+ positions when `get_next` is called.
+ """
+
+ self._insert_row_with_id("first", 3)
+ self._insert_row_with_id("second", 5)
+
+ id_gen = self._create_id_generator("first", writers=["first", "second"])
+
+ self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5})
+
+ self.assertEqual(id_gen.get_persisted_upto_position(), 5)
+
+ async def _get_next_async() -> None:
+ async with id_gen.get_next() as stream_id:
+ self.assertEqual(stream_id, 6)
+ self.assertEqual(id_gen.get_persisted_upto_position(), 5)
+
+ self.get_success(_get_next_async())
+
+ self.assertEqual(id_gen.get_persisted_upto_position(), 6)
+
+ # We assume that so long as `get_next` does correctly advance the
+ # `persisted_upto_position` in this case, then it will be correct in the
+ # other cases that are tested above (since they'll hit the same code).
+
def test_multi_instance(self) -> None:
"""Test that reads and writes from multiple processes are handled
correctly.
@@ -453,145 +615,6 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
third_id_gen.get_positions(), {"first": 3, "second": 7, "third": 8}
)
- def test_get_next_txn(self) -> None:
- """Test that the `get_next_txn` function works correctly."""
-
- # Prefill table with 7 rows written by 'master'
- self._insert_rows("master", 7)
-
- id_gen = self._create_id_generator()
-
- self.assertEqual(id_gen.get_positions(), {"master": 7})
- self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
-
- # Try allocating a new ID gen and check that we only see position
- # advanced after we leave the context manager.
-
- def _get_next_txn(txn: LoggingTransaction) -> None:
- stream_id = id_gen.get_next_txn(txn)
- self.assertEqual(stream_id, 8)
-
- self.assertEqual(id_gen.get_positions(), {"master": 7})
- self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
-
- self.get_success(self.db_pool.runInteraction("test", _get_next_txn))
-
- self.assertEqual(id_gen.get_positions(), {"master": 8})
- self.assertEqual(id_gen.get_current_token_for_writer("master"), 8)
-
- def test_get_persisted_upto_position(self) -> None:
- """Test that `get_persisted_upto_position` correctly tracks updates to
- positions.
- """
-
- # The following tests are a bit cheeky in that we notify about new
- # positions via `advance` without *actually* advancing the postgres
- # sequence.
-
- self._insert_row_with_id("first", 3)
- self._insert_row_with_id("second", 5)
-
- id_gen = self._create_id_generator("worker", writers=["first", "second"])
-
- self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5})
-
- # Min is 3 and there is a gap between 5, so we expect it to be 3.
- self.assertEqual(id_gen.get_persisted_upto_position(), 3)
-
- # We advance "first" straight to 6. Min is now 5 but there is no gap so
- # we expect it to be 6
- id_gen.advance("first", 6)
- self.assertEqual(id_gen.get_persisted_upto_position(), 6)
-
- # No gap, so we expect 7.
- id_gen.advance("second", 7)
- self.assertEqual(id_gen.get_persisted_upto_position(), 7)
-
- # We haven't seen 8 yet, so we expect 7 still.
- id_gen.advance("second", 9)
- self.assertEqual(id_gen.get_persisted_upto_position(), 7)
-
- # Now that we've seen 7, 8 and 9 we can got straight to 9.
- id_gen.advance("first", 8)
- self.assertEqual(id_gen.get_persisted_upto_position(), 9)
-
- # Jump forward with gaps. The minimum is 11, even though we haven't seen
- # 10 we know that everything before 11 must be persisted.
- id_gen.advance("first", 11)
- id_gen.advance("second", 15)
- self.assertEqual(id_gen.get_persisted_upto_position(), 11)
-
- def test_get_persisted_upto_position_get_next(self) -> None:
- """Test that `get_persisted_upto_position` correctly tracks updates to
- positions when `get_next` is called.
- """
-
- self._insert_row_with_id("first", 3)
- self._insert_row_with_id("second", 5)
-
- id_gen = self._create_id_generator("first", writers=["first", "second"])
-
- self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5})
-
- self.assertEqual(id_gen.get_persisted_upto_position(), 5)
-
- async def _get_next_async() -> None:
- async with id_gen.get_next() as stream_id:
- self.assertEqual(stream_id, 6)
- self.assertEqual(id_gen.get_persisted_upto_position(), 5)
-
- self.get_success(_get_next_async())
-
- self.assertEqual(id_gen.get_persisted_upto_position(), 6)
-
- # We assume that so long as `get_next` does correctly advance the
- # `persisted_upto_position` in this case, then it will be correct in the
- # other cases that are tested above (since they'll hit the same code).
-
- def test_restart_during_out_of_order_persistence(self) -> None:
- """Test that restarting a process while another process is writing out
- of order updates are handled correctly.
- """
-
- # Prefill table with 7 rows written by 'master'
- self._insert_rows("master", 7)
-
- id_gen = self._create_id_generator()
-
- self.assertEqual(id_gen.get_positions(), {"master": 7})
- self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
-
- # Persist two rows at once
- ctx1 = id_gen.get_next()
- ctx2 = id_gen.get_next()
-
- s1 = self.get_success(ctx1.__aenter__())
- s2 = self.get_success(ctx2.__aenter__())
-
- self.assertEqual(s1, 8)
- self.assertEqual(s2, 9)
-
- self.assertEqual(id_gen.get_positions(), {"master": 7})
- self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
-
- # We finish persisting the second row before restart
- self.get_success(ctx2.__aexit__(None, None, None))
-
- # We simulate a restart of another worker by just creating a new ID gen.
- id_gen_worker = self._create_id_generator("worker")
-
- # Restarted worker should not see the second persisted row
- self.assertEqual(id_gen_worker.get_positions(), {"master": 7})
- self.assertEqual(id_gen_worker.get_current_token_for_writer("master"), 7)
-
- # Now if we persist the first row then both instances should jump ahead
- # correctly.
- self.get_success(ctx1.__aexit__(None, None, None))
-
- self.assertEqual(id_gen.get_positions(), {"master": 9})
- id_gen_worker.advance("master", 9)
- self.assertEqual(id_gen_worker.get_positions(), {"master": 9})
-
def test_writer_config_change(self) -> None:
"""Test that changing the writer config correctly works."""
From 8bd9ff0783c26d9ce4d08b396e5620c57eef2e67 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 30 May 2024 11:22:19 +0100
Subject: [PATCH 131/503] Ensure we delete media if we reject due to spam check
(#17246)
Fixes up #17239
We need to keep the spam check within the `try/except` block. Also makes
it so that we don't enter the top span twice.
It also ensures that we get the right thumbnail length.
---
changelog.d/17246.misc | 1 +
synapse/media/media_repository.py | 5 +++
synapse/media/media_storage.py | 57 ++++++++++++++-----------------
3 files changed, 32 insertions(+), 31 deletions(-)
create mode 100644 changelog.d/17246.misc
diff --git a/changelog.d/17246.misc b/changelog.d/17246.misc
new file mode 100644
index 0000000000..9fca36bb29
--- /dev/null
+++ b/changelog.d/17246.misc
@@ -0,0 +1 @@
+Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module.
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 9da8495950..9c29e09653 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -1049,6 +1049,11 @@ class MediaRepository:
finally:
t_byte_source.close()
+ # We flush and close the file to ensure that the bytes have
+ # been written before getting the size.
+ f.flush()
+ f.close()
+
t_len = os.path.getsize(fname)
# Write to database
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
index 9979c48eac..b3cd3fd8f4 100644
--- a/synapse/media/media_storage.py
+++ b/synapse/media/media_storage.py
@@ -137,42 +137,37 @@ class MediaStorage:
dirname = os.path.dirname(fname)
os.makedirs(dirname, exist_ok=True)
- main_media_repo_write_trace_scope = start_active_span(
- "writing to main media repo"
- )
- main_media_repo_write_trace_scope.__enter__()
-
- with main_media_repo_write_trace_scope:
- try:
+ try:
+ with start_active_span("writing to main media repo"):
with open(fname, "wb") as f:
yield f, fname
- except Exception as e:
- try:
- os.remove(fname)
- except Exception:
- pass
-
- raise e from None
-
- with start_active_span("writing to other storage providers"):
- spam_check = (
- await self._spam_checker_module_callbacks.check_media_file_for_spam(
- ReadableFileWrapper(self.clock, fname), file_info
+ with start_active_span("writing to other storage providers"):
+ spam_check = (
+ await self._spam_checker_module_callbacks.check_media_file_for_spam(
+ ReadableFileWrapper(self.clock, fname), file_info
+ )
)
- )
- if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
- logger.info("Blocking media due to spam checker")
- # Note that we'll delete the stored media, due to the
- # try/except below. The media also won't be stored in
- # the DB.
- # We currently ignore any additional field returned by
- # the spam-check API.
- raise SpamMediaException(errcode=spam_check[0])
+ if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
+ logger.info("Blocking media due to spam checker")
+ # Note that we'll delete the stored media, due to the
+ # try/except below. The media also won't be stored in
+ # the DB.
+ # We currently ignore any additional field returned by
+ # the spam-check API.
+ raise SpamMediaException(errcode=spam_check[0])
- for provider in self.storage_providers:
- with start_active_span(str(provider)):
- await provider.store_file(path, file_info)
+ for provider in self.storage_providers:
+ with start_active_span(str(provider)):
+ await provider.store_file(path, file_info)
+
+ except Exception as e:
+ try:
+ os.remove(fname)
+ except Exception:
+ pass
+
+ raise e from None
async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
"""Attempts to fetch media described by file_info from the local cache
From 225f378ffa4893fdba8eeb4a22bff7daade180bd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 30 May 2024 11:25:24 +0100
Subject: [PATCH 132/503] Clean out invalid destinations from outbox (#17242)
We started ensuring we only insert valid destinations:
https://github.com/element-hq/synapse/pull/17240
---
changelog.d/17242.misc | 1 +
synapse/storage/databases/main/deviceinbox.py | 76 +++++++++++++++++++
.../04_cleanup_device_federation_outbox.sql | 15 ++++
3 files changed, 92 insertions(+)
create mode 100644 changelog.d/17242.misc
create mode 100644 synapse/storage/schema/main/delta/85/04_cleanup_device_federation_outbox.sql
diff --git a/changelog.d/17242.misc b/changelog.d/17242.misc
new file mode 100644
index 0000000000..5bd627da57
--- /dev/null
+++ b/changelog.d/17242.misc
@@ -0,0 +1 @@
+Clean out invalid destinations from `device_federation_outbox` table.
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 25023b5e7a..07333efff8 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -58,6 +58,7 @@ from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.stringutils import parse_and_validate_server_name
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -964,6 +965,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
REMOVE_DEAD_DEVICES_FROM_INBOX = "remove_dead_devices_from_device_inbox"
+ CLEANUP_DEVICE_FEDERATION_OUTBOX = "cleanup_device_federation_outbox"
def __init__(
self,
@@ -989,6 +991,11 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
self._remove_dead_devices_from_device_inbox,
)
+ self.db_pool.updates.register_background_update_handler(
+ self.CLEANUP_DEVICE_FEDERATION_OUTBOX,
+ self._cleanup_device_federation_outbox,
+ )
+
async def _background_drop_index_device_inbox(
self, progress: JsonDict, batch_size: int
) -> int:
@@ -1080,6 +1087,75 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
return batch_size
+ async def _cleanup_device_federation_outbox(
+ self,
+ progress: JsonDict,
+ batch_size: int,
+ ) -> int:
+ def _cleanup_device_federation_outbox_txn(
+ txn: LoggingTransaction,
+ ) -> bool:
+ if "max_stream_id" in progress:
+ max_stream_id = progress["max_stream_id"]
+ else:
+ txn.execute("SELECT max(stream_id) FROM device_federation_outbox")
+ res = cast(Tuple[Optional[int]], txn.fetchone())
+ if res[0] is None:
+ # this can only happen if the `device_inbox` table is empty, in which
+ # case we have no work to do.
+ return True
+ else:
+ max_stream_id = res[0]
+
+ start = progress.get("stream_id", 0)
+ stop = start + batch_size
+
+ sql = """
+ SELECT destination FROM device_federation_outbox
+ WHERE ? < stream_id AND stream_id <= ?
+ """
+
+ txn.execute(sql, (start, stop))
+
+ destinations = {d for d, in txn}
+ to_remove = set()
+ for d in destinations:
+ try:
+ parse_and_validate_server_name(d)
+ except ValueError:
+ to_remove.add(d)
+
+ self.db_pool.simple_delete_many_txn(
+ txn,
+ table="device_federation_outbox",
+ column="destination",
+ values=to_remove,
+ keyvalues={},
+ )
+
+ self.db_pool.updates._background_update_progress_txn(
+ txn,
+ self.CLEANUP_DEVICE_FEDERATION_OUTBOX,
+ {
+ "stream_id": stop,
+ "max_stream_id": max_stream_id,
+ },
+ )
+
+ return stop >= max_stream_id
+
+ finished = await self.db_pool.runInteraction(
+ "_cleanup_device_federation_outbox",
+ _cleanup_device_federation_outbox_txn,
+ )
+
+ if finished:
+ await self.db_pool.updates._end_background_update(
+ self.CLEANUP_DEVICE_FEDERATION_OUTBOX,
+ )
+
+ return batch_size
+
class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore):
pass
diff --git a/synapse/storage/schema/main/delta/85/04_cleanup_device_federation_outbox.sql b/synapse/storage/schema/main/delta/85/04_cleanup_device_federation_outbox.sql
new file mode 100644
index 0000000000..041b17b0ee
--- /dev/null
+++ b/synapse/storage/schema/main/delta/85/04_cleanup_device_federation_outbox.sql
@@ -0,0 +1,15 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- .
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (8504, 'cleanup_device_federation_outbox', '{}');
From d16910ca021320f0fa09c6cf82a802ee97e22a0c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 30 May 2024 12:07:32 +0100
Subject: [PATCH 133/503] Replaces all usages of `StreamIdGenerator` with
`MultiWriterIdGenerator` (#17229)
Replaces all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`, which is safer.
---
changelog.d/17229.misc | 1 +
synapse/_scripts/synapse_port_db.py | 71 ++++++--
synapse/storage/databases/main/devices.py | 54 +++---
.../storage/databases/main/end_to_end_keys.py | 19 ++-
synapse/storage/databases/main/push_rule.py | 24 +--
synapse/storage/databases/main/pusher.py | 42 +++--
.../main/delta/85/02_add_instance_names.sql | 27 +++
.../delta/85/03_new_sequences.sql.postgres | 54 ++++++
synapse/storage/util/id_generators.py | 158 ------------------
tests/storage/test_id_generators.py | 140 +---------------
10 files changed, 227 insertions(+), 363 deletions(-)
create mode 100644 changelog.d/17229.misc
create mode 100644 synapse/storage/schema/main/delta/85/02_add_instance_names.sql
create mode 100644 synapse/storage/schema/main/delta/85/03_new_sequences.sql.postgres
diff --git a/changelog.d/17229.misc b/changelog.d/17229.misc
new file mode 100644
index 0000000000..d411550786
--- /dev/null
+++ b/changelog.d/17229.misc
@@ -0,0 +1 @@
+Replaces all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`.
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 1e56f46911..3bb4a34938 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -777,22 +777,74 @@ class Porter:
await self._setup_events_stream_seqs()
await self._setup_sequence(
"un_partial_stated_event_stream_sequence",
- ("un_partial_stated_event_stream",),
+ [("un_partial_stated_event_stream", "stream_id")],
)
await self._setup_sequence(
- "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
+ "device_inbox_sequence",
+ [
+ ("device_inbox", "stream_id"),
+ ("device_federation_outbox", "stream_id"),
+ ],
)
await self._setup_sequence(
"account_data_sequence",
- ("room_account_data", "room_tags_revisions", "account_data"),
+ [
+ ("room_account_data", "stream_id"),
+ ("room_tags_revisions", "stream_id"),
+ ("account_data", "stream_id"),
+ ],
+ )
+ await self._setup_sequence(
+ "receipts_sequence",
+ [
+ ("receipts_linearized", "stream_id"),
+ ],
+ )
+ await self._setup_sequence(
+ "presence_stream_sequence",
+ [
+ ("presence_stream", "stream_id"),
+ ],
)
- await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
- await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
await self._setup_auth_chain_sequence()
await self._setup_sequence(
"application_services_txn_id_seq",
- ("application_services_txns",),
- "txn_id",
+ [
+ (
+ "application_services_txns",
+ "txn_id",
+ )
+ ],
+ )
+ await self._setup_sequence(
+ "device_lists_sequence",
+ [
+ ("device_lists_stream", "stream_id"),
+ ("user_signature_stream", "stream_id"),
+ ("device_lists_outbound_pokes", "stream_id"),
+ ("device_lists_changes_in_room", "stream_id"),
+ ("device_lists_remote_pending", "stream_id"),
+ ("device_lists_changes_converted_stream_position", "stream_id"),
+ ],
+ )
+ await self._setup_sequence(
+ "e2e_cross_signing_keys_sequence",
+ [
+ ("e2e_cross_signing_keys", "stream_id"),
+ ],
+ )
+ await self._setup_sequence(
+ "push_rules_stream_sequence",
+ [
+ ("push_rules_stream", "stream_id"),
+ ],
+ )
+ await self._setup_sequence(
+ "pushers_sequence",
+ [
+ ("pushers", "id"),
+ ("deleted_pushers", "stream_id"),
+ ],
)
# Step 3. Get tables.
@@ -1101,12 +1153,11 @@ class Porter:
async def _setup_sequence(
self,
sequence_name: str,
- stream_id_tables: Iterable[str],
- column_name: str = "stream_id",
+ stream_id_tables: Iterable[Tuple[str, str]],
) -> None:
"""Set a sequence to the correct value."""
current_stream_ids = []
- for stream_id_table in stream_id_tables:
+ for stream_id_table, column_name in stream_id_tables:
max_stream_id = cast(
int,
await self.sqlite_store.db_pool.simple_select_one_onecol(
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 48384e238c..1c771e48f7 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -57,10 +57,7 @@ from synapse.storage.database import (
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyWorkerStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.types import Cursor
-from synapse.storage.util.id_generators import (
- AbstractStreamIdGenerator,
- StreamIdGenerator,
-)
+from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import (
JsonDict,
JsonMapping,
@@ -99,19 +96,21 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
# In the worker store this is an ID tracker which we overwrite in the non-worker
# class below that is used on the main process.
- self._device_list_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "device_lists_stream",
- "stream_id",
- extra_tables=[
- ("user_signature_stream", "stream_id"),
- ("device_lists_outbound_pokes", "stream_id"),
- ("device_lists_changes_in_room", "stream_id"),
- ("device_lists_remote_pending", "stream_id"),
- ("device_lists_changes_converted_stream_position", "stream_id"),
+ self._device_list_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="device_lists_stream",
+ instance_name=self._instance_name,
+ tables=[
+ ("device_lists_stream", "instance_name", "stream_id"),
+ ("user_signature_stream", "instance_name", "stream_id"),
+ ("device_lists_outbound_pokes", "instance_name", "stream_id"),
+ ("device_lists_changes_in_room", "instance_name", "stream_id"),
+ ("device_lists_remote_pending", "instance_name", "stream_id"),
],
- is_writer=hs.config.worker.worker_app is None,
+ sequence_name="device_lists_sequence",
+ writers=["master"],
)
device_list_max = self._device_list_id_gen.get_current_token()
@@ -762,6 +761,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
"stream_id": stream_id,
"from_user_id": from_user_id,
"user_ids": json_encoder.encode(user_ids),
+ "instance_name": self._instance_name,
},
)
@@ -1582,6 +1582,8 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
):
super().__init__(database, db_conn, hs)
+ self._instance_name = hs.get_instance_name()
+
self.db_pool.updates.register_background_index_update(
"device_lists_stream_idx",
index_name="device_lists_stream_user_id",
@@ -1694,6 +1696,7 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
"device_lists_outbound_pokes",
{
"stream_id": stream_id,
+ "instance_name": self._instance_name,
"destination": destination,
"user_id": user_id,
"device_id": device_id,
@@ -1730,10 +1733,6 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
- # Because we have write access, this will be a StreamIdGenerator
- # (see DeviceWorkerStore.__init__)
- _device_list_id_gen: AbstractStreamIdGenerator
-
def __init__(
self,
database: DatabasePool,
@@ -2092,9 +2091,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
self.db_pool.simple_insert_many_txn(
txn,
table="device_lists_stream",
- keys=("stream_id", "user_id", "device_id"),
+ keys=("instance_name", "stream_id", "user_id", "device_id"),
values=[
- (stream_id, user_id, device_id)
+ (self._instance_name, stream_id, user_id, device_id)
for stream_id, device_id in zip(stream_ids, device_ids)
],
)
@@ -2124,6 +2123,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
values = [
(
destination,
+ self._instance_name,
next(stream_id_iterator),
user_id,
device_id,
@@ -2139,6 +2139,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
table="device_lists_outbound_pokes",
keys=(
"destination",
+ "instance_name",
"stream_id",
"user_id",
"device_id",
@@ -2157,7 +2158,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
device_id,
{
stream_id: destination
- for (destination, stream_id, _, _, _, _, _) in values
+ for (destination, _, stream_id, _, _, _, _, _) in values
},
)
@@ -2210,6 +2211,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
"device_id",
"room_id",
"stream_id",
+ "instance_name",
"converted_to_destinations",
"opentracing_context",
),
@@ -2219,6 +2221,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
device_id,
room_id,
stream_id,
+ self._instance_name,
# We only need to calculate outbound pokes for local users
not self.hs.is_mine_id(user_id),
encoded_context,
@@ -2338,7 +2341,10 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
"user_id": user_id,
"device_id": device_id,
},
- values={"stream_id": stream_id},
+ values={
+ "stream_id": stream_id,
+ "instance_name": self._instance_name,
+ },
desc="add_remote_device_list_to_pending",
)
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index b219ea70ee..38d8785faa 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -58,7 +58,7 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.engines import PostgresEngine
-from synapse.storage.util.id_generators import StreamIdGenerator
+from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import JsonDict, JsonMapping
from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
@@ -1448,11 +1448,17 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
):
super().__init__(database, db_conn, hs)
- self._cross_signing_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "e2e_cross_signing_keys",
- "stream_id",
+ self._cross_signing_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="e2e_cross_signing_keys",
+ instance_name=self._instance_name,
+ tables=[
+ ("e2e_cross_signing_keys", "instance_name", "stream_id"),
+ ],
+ sequence_name="e2e_cross_signing_keys_sequence",
+ writers=["master"],
)
async def set_e2e_device_keys(
@@ -1627,6 +1633,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
"keytype": key_type,
"keydata": json_encoder.encode(key),
"stream_id": stream_id,
+ "instance_name": self._instance_name,
},
)
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index 660c834518..2a39dc9f90 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -53,7 +53,7 @@ from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
-from synapse.storage.util.id_generators import IdGenerator, StreamIdGenerator
+from synapse.storage.util.id_generators import IdGenerator, MultiWriterIdGenerator
from synapse.synapse_rust.push import FilteredPushRules, PushRule, PushRules
from synapse.types import JsonDict
from synapse.util import json_encoder, unwrapFirstError
@@ -126,7 +126,7 @@ class PushRulesWorkerStore(
`get_max_push_rules_stream_id` which can be called in the initializer.
"""
- _push_rules_stream_id_gen: StreamIdGenerator
+ _push_rules_stream_id_gen: MultiWriterIdGenerator
def __init__(
self,
@@ -140,14 +140,17 @@ class PushRulesWorkerStore(
hs.get_instance_name() in hs.config.worker.writers.push_rules
)
- # In the worker store this is an ID tracker which we overwrite in the non-worker
- # class below that is used on the main process.
- self._push_rules_stream_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "push_rules_stream",
- "stream_id",
- is_writer=self._is_push_writer,
+ self._push_rules_stream_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="push_rules_stream",
+ instance_name=self._instance_name,
+ tables=[
+ ("push_rules_stream", "instance_name", "stream_id"),
+ ],
+ sequence_name="push_rules_stream_sequence",
+ writers=hs.config.worker.writers.push_rules,
)
push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict(
@@ -880,6 +883,7 @@ class PushRulesWorkerStore(
raise Exception("Not a push writer")
values = {
+ "instance_name": self._instance_name,
"stream_id": stream_id,
"event_stream_ordering": event_stream_ordering,
"user_id": user_id,
diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py
index 39e22d3b43..a8a37b6c85 100644
--- a/synapse/storage/databases/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -40,10 +40,7 @@ from synapse.storage.database import (
LoggingDatabaseConnection,
LoggingTransaction,
)
-from synapse.storage.util.id_generators import (
- AbstractStreamIdGenerator,
- StreamIdGenerator,
-)
+from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
@@ -84,15 +81,20 @@ class PusherWorkerStore(SQLBaseStore):
):
super().__init__(database, db_conn, hs)
- # In the worker store this is an ID tracker which we overwrite in the non-worker
- # class below that is used on the main process.
- self._pushers_id_gen = StreamIdGenerator(
- db_conn,
- hs.get_replication_notifier(),
- "pushers",
- "id",
- extra_tables=[("deleted_pushers", "stream_id")],
- is_writer=hs.config.worker.worker_app is None,
+ self._instance_name = hs.get_instance_name()
+
+ self._pushers_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ notifier=hs.get_replication_notifier(),
+ stream_name="pushers",
+ instance_name=self._instance_name,
+ tables=[
+ ("pushers", "instance_name", "id"),
+ ("deleted_pushers", "instance_name", "stream_id"),
+ ],
+ sequence_name="pushers_sequence",
+ writers=["master"],
)
self.db_pool.updates.register_background_update_handler(
@@ -655,7 +657,7 @@ class PusherBackgroundUpdatesStore(SQLBaseStore):
class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore):
# Because we have write access, this will be a StreamIdGenerator
# (see PusherWorkerStore.__init__)
- _pushers_id_gen: AbstractStreamIdGenerator
+ _pushers_id_gen: MultiWriterIdGenerator
async def add_pusher(
self,
@@ -688,6 +690,7 @@ class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore):
"last_stream_ordering": last_stream_ordering,
"profile_tag": profile_tag,
"id": stream_id,
+ "instance_name": self._instance_name,
"enabled": enabled,
"device_id": device_id,
# XXX(quenting): We're only really persisting the access token ID
@@ -735,6 +738,7 @@ class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore):
table="deleted_pushers",
values={
"stream_id": stream_id,
+ "instance_name": self._instance_name,
"app_id": app_id,
"pushkey": pushkey,
"user_id": user_id,
@@ -773,9 +777,15 @@ class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore):
self.db_pool.simple_insert_many_txn(
txn,
table="deleted_pushers",
- keys=("stream_id", "app_id", "pushkey", "user_id"),
+ keys=("stream_id", "instance_name", "app_id", "pushkey", "user_id"),
values=[
- (stream_id, pusher.app_id, pusher.pushkey, user_id)
+ (
+ stream_id,
+ self._instance_name,
+ pusher.app_id,
+ pusher.pushkey,
+ user_id,
+ )
for stream_id, pusher in zip(stream_ids, pushers)
],
)
diff --git a/synapse/storage/schema/main/delta/85/02_add_instance_names.sql b/synapse/storage/schema/main/delta/85/02_add_instance_names.sql
new file mode 100644
index 0000000000..d604595f73
--- /dev/null
+++ b/synapse/storage/schema/main/delta/85/02_add_instance_names.sql
@@ -0,0 +1,27 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- .
+
+-- Add `instance_name` columns to stream tables to allow them to be used with
+-- `MultiWriterIdGenerator`
+ALTER TABLE device_lists_stream ADD COLUMN instance_name TEXT;
+ALTER TABLE user_signature_stream ADD COLUMN instance_name TEXT;
+ALTER TABLE device_lists_outbound_pokes ADD COLUMN instance_name TEXT;
+ALTER TABLE device_lists_changes_in_room ADD COLUMN instance_name TEXT;
+ALTER TABLE device_lists_remote_pending ADD COLUMN instance_name TEXT;
+
+ALTER TABLE e2e_cross_signing_keys ADD COLUMN instance_name TEXT;
+
+ALTER TABLE push_rules_stream ADD COLUMN instance_name TEXT;
+
+ALTER TABLE pushers ADD COLUMN instance_name TEXT;
+ALTER TABLE deleted_pushers ADD COLUMN instance_name TEXT;
diff --git a/synapse/storage/schema/main/delta/85/03_new_sequences.sql.postgres b/synapse/storage/schema/main/delta/85/03_new_sequences.sql.postgres
new file mode 100644
index 0000000000..9d34066bf5
--- /dev/null
+++ b/synapse/storage/schema/main/delta/85/03_new_sequences.sql.postgres
@@ -0,0 +1,54 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- .
+
+-- Add sequences for stream tables to allow them to be used with
+-- `MultiWriterIdGenerator`
+CREATE SEQUENCE IF NOT EXISTS device_lists_sequence;
+
+-- We need to take the max across all the device lists tables as they share the
+-- ID generator
+SELECT setval('device_lists_sequence', (
+ SELECT GREATEST(
+ (SELECT COALESCE(MAX(stream_id), 1) FROM device_lists_stream),
+ (SELECT COALESCE(MAX(stream_id), 1) FROM user_signature_stream),
+ (SELECT COALESCE(MAX(stream_id), 1) FROM device_lists_outbound_pokes),
+ (SELECT COALESCE(MAX(stream_id), 1) FROM device_lists_changes_in_room),
+ (SELECT COALESCE(MAX(stream_id), 1) FROM device_lists_remote_pending),
+ (SELECT COALESCE(MAX(stream_id), 1) FROM device_lists_changes_converted_stream_position)
+ )
+));
+
+CREATE SEQUENCE IF NOT EXISTS e2e_cross_signing_keys_sequence;
+
+SELECT setval('e2e_cross_signing_keys_sequence', (
+ SELECT COALESCE(MAX(stream_id), 1) FROM e2e_cross_signing_keys
+));
+
+
+CREATE SEQUENCE IF NOT EXISTS push_rules_stream_sequence;
+
+SELECT setval('push_rules_stream_sequence', (
+ SELECT COALESCE(MAX(stream_id), 1) FROM push_rules_stream
+));
+
+
+CREATE SEQUENCE IF NOT EXISTS pushers_sequence;
+
+-- We need to take the max across all the pusher tables as they share the
+-- ID generator
+SELECT setval('pushers_sequence', (
+ SELECT GREATEST(
+ (SELECT COALESCE(MAX(id), 1) FROM pushers),
+ (SELECT COALESCE(MAX(stream_id), 1) FROM deleted_pushers)
+ )
+));
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 0cf5851ad7..59c8e05c39 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -23,15 +23,12 @@ import abc
import heapq
import logging
import threading
-from collections import OrderedDict
-from contextlib import contextmanager
from types import TracebackType
from typing import (
TYPE_CHECKING,
AsyncContextManager,
ContextManager,
Dict,
- Generator,
Generic,
Iterable,
List,
@@ -179,161 +176,6 @@ class AbstractStreamIdGenerator(metaclass=abc.ABCMeta):
raise NotImplementedError()
-class StreamIdGenerator(AbstractStreamIdGenerator):
- """Generates and tracks stream IDs for a stream with a single writer.
-
- This class must only be used when the current Synapse process is the sole
- writer for a stream.
-
- Args:
- db_conn(connection): A database connection to use to fetch the
- initial value of the generator from.
- table(str): A database table to read the initial value of the id
- generator from.
- column(str): The column of the database table to read the initial
- value from the id generator from.
- extra_tables(list): List of pairs of database tables and columns to
- use to source the initial value of the generator from. The value
- with the largest magnitude is used.
- step(int): which direction the stream ids grow in. +1 to grow
- upwards, -1 to grow downwards.
-
- Usage:
- async with stream_id_gen.get_next() as stream_id:
- # ... persist event ...
- """
-
- def __init__(
- self,
- db_conn: LoggingDatabaseConnection,
- notifier: "ReplicationNotifier",
- table: str,
- column: str,
- extra_tables: Iterable[Tuple[str, str]] = (),
- step: int = 1,
- is_writer: bool = True,
- ) -> None:
- assert step != 0
- self._lock = threading.Lock()
- self._step: int = step
- self._current: int = _load_current_id(db_conn, table, column, step)
- self._is_writer = is_writer
- for table, column in extra_tables:
- self._current = (max if step > 0 else min)(
- self._current, _load_current_id(db_conn, table, column, step)
- )
-
- # We use this as an ordered set, as we want to efficiently append items,
- # remove items and get the first item. Since we insert IDs in order, the
- # insertion ordering will ensure its in the correct ordering.
- #
- # The key and values are the same, but we never look at the values.
- self._unfinished_ids: OrderedDict[int, int] = OrderedDict()
-
- self._notifier = notifier
-
- def advance(self, instance_name: str, new_id: int) -> None:
- # Advance should never be called on a writer instance, only over replication
- if self._is_writer:
- raise Exception("Replication is not supported by writer StreamIdGenerator")
-
- self._current = (max if self._step > 0 else min)(self._current, new_id)
-
- def get_next(self) -> AsyncContextManager[int]:
- with self._lock:
- self._current += self._step
- next_id = self._current
-
- self._unfinished_ids[next_id] = next_id
-
- @contextmanager
- def manager() -> Generator[int, None, None]:
- try:
- yield next_id
- finally:
- with self._lock:
- self._unfinished_ids.pop(next_id)
-
- self._notifier.notify_replication()
-
- return _AsyncCtxManagerWrapper(manager())
-
- def get_next_mult(self, n: int) -> AsyncContextManager[Sequence[int]]:
- with self._lock:
- next_ids = range(
- self._current + self._step,
- self._current + self._step * (n + 1),
- self._step,
- )
- self._current += n * self._step
-
- for next_id in next_ids:
- self._unfinished_ids[next_id] = next_id
-
- @contextmanager
- def manager() -> Generator[Sequence[int], None, None]:
- try:
- yield next_ids
- finally:
- with self._lock:
- for next_id in next_ids:
- self._unfinished_ids.pop(next_id)
-
- self._notifier.notify_replication()
-
- return _AsyncCtxManagerWrapper(manager())
-
- def get_next_txn(self, txn: LoggingTransaction) -> int:
- """
- Retrieve the next stream ID from within a database transaction.
-
- Clean-up functions will be called when the transaction finishes.
-
- Args:
- txn: The database transaction object.
-
- Returns:
- The next stream ID.
- """
- if not self._is_writer:
- raise Exception("Tried to allocate stream ID on non-writer")
-
- # Get the next stream ID.
- with self._lock:
- self._current += self._step
- next_id = self._current
-
- self._unfinished_ids[next_id] = next_id
-
- def clear_unfinished_id(id_to_clear: int) -> None:
- """A function to mark processing this ID as finished"""
- with self._lock:
- self._unfinished_ids.pop(id_to_clear)
-
- # Mark this ID as finished once the database transaction itself finishes.
- txn.call_after(clear_unfinished_id, next_id)
- txn.call_on_exception(clear_unfinished_id, next_id)
-
- # Return the new ID.
- return next_id
-
- def get_current_token(self) -> int:
- if not self._is_writer:
- return self._current
-
- with self._lock:
- if self._unfinished_ids:
- return next(iter(self._unfinished_ids)) - self._step
-
- return self._current
-
- def get_current_token_for_writer(self, instance_name: str) -> int:
- return self.get_current_token()
-
- def get_minimal_local_current_token(self) -> int:
- return self.get_current_token()
-
-
class MultiWriterIdGenerator(AbstractStreamIdGenerator):
"""Generates and tracks stream IDs for a stream with multiple writers.
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index fad9511cea..f0307252f3 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -30,7 +30,7 @@ from synapse.storage.database import (
)
from synapse.storage.engines import IncorrectDatabaseSetup
from synapse.storage.types import Cursor
-from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
+from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.storage.util.sequence import (
LocalSequenceGenerator,
PostgresSequenceGenerator,
@@ -42,144 +42,6 @@ from tests.unittest import HomeserverTestCase
from tests.utils import USE_POSTGRES_FOR_TESTS
-class StreamIdGeneratorTestCase(HomeserverTestCase):
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- self.store = hs.get_datastores().main
- self.db_pool: DatabasePool = self.store.db_pool
-
- self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
-
- def _setup_db(self, txn: LoggingTransaction) -> None:
- txn.execute(
- """
- CREATE TABLE foobar (
- stream_id BIGINT NOT NULL,
- data TEXT
- );
- """
- )
- txn.execute("INSERT INTO foobar VALUES (123, 'hello world');")
-
- def _create_id_generator(self) -> StreamIdGenerator:
- def _create(conn: LoggingDatabaseConnection) -> StreamIdGenerator:
- return StreamIdGenerator(
- db_conn=conn,
- notifier=self.hs.get_replication_notifier(),
- table="foobar",
- column="stream_id",
- )
-
- return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
-
- def test_initial_value(self) -> None:
- """Check that we read the current token from the DB."""
- id_gen = self._create_id_generator()
- self.assertEqual(id_gen.get_current_token(), 123)
-
- def test_single_gen_next(self) -> None:
- """Check that we correctly increment the current token from the DB."""
- id_gen = self._create_id_generator()
-
- async def test_gen_next() -> None:
- async with id_gen.get_next() as next_id:
- # We haven't persisted `next_id` yet; current token is still 123
- self.assertEqual(id_gen.get_current_token(), 123)
- # But we did learn what the next value is
- self.assertEqual(next_id, 124)
-
- # Once the context manager closes we assume that the `next_id` has been
- # written to the DB.
- self.assertEqual(id_gen.get_current_token(), 124)
-
- self.get_success(test_gen_next())
-
- def test_multiple_gen_nexts(self) -> None:
- """Check that we handle overlapping calls to gen_next sensibly."""
- id_gen = self._create_id_generator()
-
- async def test_gen_next() -> None:
- ctx1 = id_gen.get_next()
- ctx2 = id_gen.get_next()
- ctx3 = id_gen.get_next()
-
- # Request three new stream IDs.
- self.assertEqual(await ctx1.__aenter__(), 124)
- self.assertEqual(await ctx2.__aenter__(), 125)
- self.assertEqual(await ctx3.__aenter__(), 126)
-
- # None are persisted: current token unchanged.
- self.assertEqual(id_gen.get_current_token(), 123)
-
- # Persist each in turn.
- await ctx1.__aexit__(None, None, None)
- self.assertEqual(id_gen.get_current_token(), 124)
- await ctx2.__aexit__(None, None, None)
- self.assertEqual(id_gen.get_current_token(), 125)
- await ctx3.__aexit__(None, None, None)
- self.assertEqual(id_gen.get_current_token(), 126)
-
- self.get_success(test_gen_next())
-
- def test_multiple_gen_nexts_closed_in_different_order(self) -> None:
- """Check that we handle overlapping calls to gen_next, even when their IDs
- created and persisted in different orders."""
- id_gen = self._create_id_generator()
-
- async def test_gen_next() -> None:
- ctx1 = id_gen.get_next()
- ctx2 = id_gen.get_next()
- ctx3 = id_gen.get_next()
-
- # Request three new stream IDs.
- self.assertEqual(await ctx1.__aenter__(), 124)
- self.assertEqual(await ctx2.__aenter__(), 125)
- self.assertEqual(await ctx3.__aenter__(), 126)
-
- # None are persisted: current token unchanged.
- self.assertEqual(id_gen.get_current_token(), 123)
-
- # Persist them in a different order, starting with 126 from ctx3.
- await ctx3.__aexit__(None, None, None)
- # We haven't persisted 124 from ctx1 yet---current token is still 123.
- self.assertEqual(id_gen.get_current_token(), 123)
-
- # Now persist 124 from ctx1.
- await ctx1.__aexit__(None, None, None)
- # Current token is then 124, waiting for 125 to be persisted.
- self.assertEqual(id_gen.get_current_token(), 124)
-
- # Finally persist 125 from ctx2.
- await ctx2.__aexit__(None, None, None)
- # Current token is then 126 (skipping over 125).
- self.assertEqual(id_gen.get_current_token(), 126)
-
- self.get_success(test_gen_next())
-
- def test_gen_next_while_still_waiting_for_persistence(self) -> None:
- """Check that we handle overlapping calls to gen_next."""
- id_gen = self._create_id_generator()
-
- async def test_gen_next() -> None:
- ctx1 = id_gen.get_next()
- ctx2 = id_gen.get_next()
- ctx3 = id_gen.get_next()
-
- # Request two new stream IDs.
- self.assertEqual(await ctx1.__aenter__(), 124)
- self.assertEqual(await ctx2.__aenter__(), 125)
-
- # Persist ctx2 first.
- await ctx2.__aexit__(None, None, None)
- # Still waiting on ctx1's ID to be persisted.
- self.assertEqual(id_gen.get_current_token(), 123)
-
- # Now request a third stream ID. It should be 126 (the smallest ID that
- # we've not yet handed out.)
- self.assertEqual(await ctx3.__aenter__(), 126)
-
- self.get_success(test_gen_next())
-
-
class MultiWriterIdGeneratorBase(HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
From 4e3868dc46df08e56efbad11b9a583ed4ec699ff Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 30 May 2024 12:33:48 +0100
Subject: [PATCH 134/503] Fix deduplicating of membership events to not create
unused state groups. (#17164)
We try and deduplicate in two places: 1) really early on, and 2) just
before we persist the event. The first case was broken due to it
occurring before the profile information was added, and so it thought the
event contents were different.
The second case did catch it and handle it correctly, however doing so
creates a redundant state group leading to bloat.
Fixes #3791
---
changelog.d/17164.bugfix | 1 +
synapse/handlers/message.py | 32 ---------------------------
synapse/handlers/room_member.py | 35 +++++++++++++++++++++++++++---
tests/handlers/test_room_member.py | 21 ++++++++++++++++++
4 files changed, 54 insertions(+), 35 deletions(-)
create mode 100644 changelog.d/17164.bugfix
diff --git a/changelog.d/17164.bugfix b/changelog.d/17164.bugfix
new file mode 100644
index 0000000000..597e2f14b0
--- /dev/null
+++ b/changelog.d/17164.bugfix
@@ -0,0 +1 @@
+Fix deduplicating of membership events to not create unused state groups.
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index ccaa5508ff..de5bd44a5f 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -496,13 +496,6 @@ class EventCreationHandler:
self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state
- self.membership_types_to_include_profile_data_in = {
- Membership.JOIN,
- Membership.KNOCK,
- }
- if self.hs.config.server.include_profile_data_on_invite:
- self.membership_types_to_include_profile_data_in.add(Membership.INVITE)
-
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
self.send_events = ReplicationSendEventsRestServlet.make_client(hs)
@@ -594,8 +587,6 @@ class EventCreationHandler:
Creates an FrozenEvent object, filling out auth_events, prev_events,
etc.
- Adds display names to Join membership events.
-
Args:
requester
event_dict: An entire event
@@ -672,29 +663,6 @@ class EventCreationHandler:
self.validator.validate_builder(builder)
- if builder.type == EventTypes.Member:
- membership = builder.content.get("membership", None)
- target = UserID.from_string(builder.state_key)
-
- if membership in self.membership_types_to_include_profile_data_in:
- # If event doesn't include a display name, add one.
- profile = self.profile_handler
- content = builder.content
-
- try:
- if "displayname" not in content:
- displayname = await profile.get_displayname(target)
- if displayname is not None:
- content["displayname"] = displayname
- if "avatar_url" not in content:
- avatar_url = await profile.get_avatar_url(target)
- if avatar_url is not None:
- content["avatar_url"] = avatar_url
- except Exception as e:
- logger.info(
- "Failed to get profile information for %r: %s", target, e
- )
-
is_exempt = await self._is_exempt_from_privacy_policy(builder, requester)
if require_consent and not is_exempt:
await self.assert_accepted_privacy_policy(requester)
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 655c78e150..51b9772329 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -106,6 +106,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.event_auth_handler = hs.get_event_auth_handler()
self._worker_lock_handler = hs.get_worker_locks_handler()
+ self._membership_types_to_include_profile_data_in = {
+ Membership.JOIN,
+ Membership.KNOCK,
+ }
+ if self.hs.config.server.include_profile_data_on_invite:
+ self._membership_types_to_include_profile_data_in.add(Membership.INVITE)
+
self.member_linearizer: Linearizer = Linearizer(name="member")
self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")
@@ -785,9 +792,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if (
not self.allow_per_room_profiles and not is_requester_server_notices_user
) or requester.shadow_banned:
- # Strip profile data, knowing that new profile data will be added to the
- # event's content in event_creation_handler.create_event() using the target's
- # global profile.
+ # Strip profile data, knowing that new profile data will be added to
+ # the event's content below using the target's global profile.
content.pop("displayname", None)
content.pop("avatar_url", None)
@@ -823,6 +829,29 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if action in ["kick", "unban"]:
effective_membership_state = "leave"
+ if effective_membership_state not in Membership.LIST:
+ raise SynapseError(400, "Invalid membership key")
+
+ # Add profile data for joins etc, if no per-room profile.
+ if (
+ effective_membership_state
+ in self._membership_types_to_include_profile_data_in
+ ):
+ # If event doesn't include a display name, add one.
+ profile = self.profile_handler
+
+ try:
+ if "displayname" not in content:
+ displayname = await profile.get_displayname(target)
+ if displayname is not None:
+ content["displayname"] = displayname
+ if "avatar_url" not in content:
+ avatar_url = await profile.get_avatar_url(target)
+ if avatar_url is not None:
+ content["avatar_url"] = avatar_url
+ except Exception as e:
+ logger.info("Failed to get profile information for %r: %s", target, e)
+
# if this is a join with a 3pid signature, we may need to turn a 3pid
# invite into a normal invite before we can handle the join.
if third_party_signed is not None:
diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py
index df43ce581c..213a66ed1a 100644
--- a/tests/handlers/test_room_member.py
+++ b/tests/handlers/test_room_member.py
@@ -407,3 +407,24 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase):
self.assertFalse(
self.get_success(self.store.did_forget(self.alice, self.room_id))
)
+
+ def test_deduplicate_joins(self) -> None:
+ """
+ Test that calling /join multiple times does not store a new state group.
+ """
+
+ self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+
+ sql = "SELECT COUNT(*) FROM state_groups WHERE room_id = ?"
+ rows = self.get_success(
+ self.store.db_pool.execute("test_deduplicate_joins", sql, self.room_id)
+ )
+ initial_count = rows[0][0]
+
+ self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+ rows = self.get_success(
+ self.store.db_pool.execute("test_deduplicate_joins", sql, self.room_id)
+ )
+ new_count = rows[0][0]
+
+ self.assertEqual(initial_count, new_count)
From 5624c8b961ed6a8310a2c6723ae13e854721756b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 30 May 2024 14:03:49 +0100
Subject: [PATCH 135/503] In sync wait for worker to catch up since token
(#17215)
Otherwise things will get confused.
An alternative would be to make sure that for lagging streams we don't
return anything (and make sure the returned next_batch token doesn't go
backwards). But that is a faff.
---
changelog.d/17215.bugfix | 1 +
pyproject.toml | 6 +-
synapse/handlers/sync.py | 35 +++++++++++
synapse/notifier.py | 23 ++++++++
synapse/storage/databases/main/events.py | 7 +++
.../storage/databases/main/events_worker.py | 11 +++-
synapse/types/__init__.py | 58 ++++++++++++++++++-
7 files changed, 134 insertions(+), 7 deletions(-)
create mode 100644 changelog.d/17215.bugfix
diff --git a/changelog.d/17215.bugfix b/changelog.d/17215.bugfix
new file mode 100644
index 0000000000..10981b798e
--- /dev/null
+++ b/changelog.d/17215.bugfix
@@ -0,0 +1 @@
+Fix bug where duplicate events could be sent down sync when using workers that are overloaded.
diff --git a/pyproject.toml b/pyproject.toml
index ea14b98199..9a3348be49 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -200,10 +200,8 @@ netaddr = ">=0.7.18"
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
-# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
-# Additionally we need https://github.com/python/typing/pull/817 to allow types to be
-# generic over ParamSpecs.
-typing-extensions = ">=3.10.0.1"
+# We use `Self`, which was added in `typing-extensions` 4.0.
+typing-extensions = ">=4.0"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index ac5bddd52f..1d7d9dfdd0 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -284,6 +284,23 @@ class SyncResult:
or self.device_lists
)
+ @staticmethod
+ def empty(next_batch: StreamToken) -> "SyncResult":
+ "Return a new empty result"
+ return SyncResult(
+ next_batch=next_batch,
+ presence=[],
+ account_data=[],
+ joined=[],
+ invited=[],
+ knocked=[],
+ archived=[],
+ to_device=[],
+ device_lists=DeviceListUpdates(),
+ device_one_time_keys_count={},
+ device_unused_fallback_key_types=[],
+ )
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class E2eeSyncResult:
@@ -497,6 +514,24 @@ class SyncHandler:
if context:
context.tag = sync_label
+ if since_token is not None:
+ # We need to make sure this worker has caught up with the token. If
+ # this returns false it means we timed out waiting, and we should
+ # just return an empty response.
+ start = self.clock.time_msec()
+ if not await self.notifier.wait_for_stream_token(since_token):
+ logger.warning(
+ "Timed out waiting for worker to catch up. Returning empty response"
+ )
+ return SyncResult.empty(since_token)
+
+ # If we've spent significant time waiting to catch up, take it off
+ # the timeout.
+ now = self.clock.time_msec()
+ if now - start > 1_000:
+ timeout -= now - start
+ timeout = max(timeout, 0)
+
# if we have a since token, delete any to-device messages before that token
# (since we now know that the device has received them)
if since_token is not None:
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 7c1cd3b5f2..ced9e9ad66 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -763,6 +763,29 @@ class Notifier:
return result
+ async def wait_for_stream_token(self, stream_token: StreamToken) -> bool:
+ """Wait for this worker to catch up with the given stream token."""
+
+ start = self.clock.time_msec()
+ while True:
+ current_token = self.event_sources.get_current_token()
+ if stream_token.is_before_or_eq(current_token):
+ return True
+
+ now = self.clock.time_msec()
+
+ if now - start > 10_000:
+ return False
+
+ logger.info(
+ "Waiting for current token to reach %s; currently at %s",
+ stream_token,
+ current_token,
+ )
+
+ # TODO: be better
+ await self.clock.sleep(0.5)
+
async def _get_room_ids(
self, user: UserID, explicit_room_id: Optional[str]
) -> Tuple[StrCollection, bool]:
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index fd7167904d..f1bd85aa27 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -95,6 +95,10 @@ class DeltaState:
to_insert: StateMap[str]
no_longer_in_room: bool = False
+ def is_noop(self) -> bool:
+ """Whether this state delta is actually empty"""
+ return not self.to_delete and not self.to_insert and not self.no_longer_in_room
+
class PersistEventsStore:
"""Contains all the functions for writing events to the database.
@@ -1017,6 +1021,9 @@ class PersistEventsStore:
) -> None:
"""Update the current state stored in the datatabase for the given room"""
+ if state_delta.is_noop():
+ return
+
async with self._stream_id_gen.get_next() as stream_ordering:
await self.db_pool.runInteraction(
"update_current_state",
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 426df2a9d2..c06c44deb1 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -200,7 +200,11 @@ class EventsWorkerStore(SQLBaseStore):
notifier=hs.get_replication_notifier(),
stream_name="events",
instance_name=hs.get_instance_name(),
- tables=[("events", "instance_name", "stream_ordering")],
+ tables=[
+ ("events", "instance_name", "stream_ordering"),
+ ("current_state_delta_stream", "instance_name", "stream_id"),
+ ("ex_outlier_stream", "instance_name", "event_stream_ordering"),
+ ],
sequence_name="events_stream_seq",
writers=hs.config.worker.writers.events,
)
@@ -210,7 +214,10 @@ class EventsWorkerStore(SQLBaseStore):
notifier=hs.get_replication_notifier(),
stream_name="backfill",
instance_name=hs.get_instance_name(),
- tables=[("events", "instance_name", "stream_ordering")],
+ tables=[
+ ("events", "instance_name", "stream_ordering"),
+ ("ex_outlier_stream", "instance_name", "event_stream_ordering"),
+ ],
sequence_name="events_backfill_stream_seq",
positive=False,
writers=hs.config.worker.writers.events,
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index 509a2d3a0f..151658df53 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -48,7 +48,7 @@ import attr
from immutabledict import immutabledict
from signedjson.key import decode_verify_key_bytes
from signedjson.types import VerifyKey
-from typing_extensions import TypedDict
+from typing_extensions import Self, TypedDict
from unpaddedbase64 import decode_base64
from zope.interface import Interface
@@ -515,6 +515,27 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
# at `self.stream`.
return self.instance_map.get(instance_name, self.stream)
+ def is_before_or_eq(self, other_token: Self) -> bool:
+ """Wether this token is before the other token, i.e. every constituent
+ part is before the other.
+
+ Essentially it is `self <= other`.
+
+ Note: if `self.is_before_or_eq(other_token) is False` then that does not
+ imply that the reverse is True.
+ """
+ if self.stream > other_token.stream:
+ return False
+
+ instances = self.instance_map.keys() | other_token.instance_map.keys()
+ for instance in instances:
+ if self.instance_map.get(
+ instance, self.stream
+ ) > other_token.instance_map.get(instance, other_token.stream):
+ return False
+
+ return True
+
@attr.s(frozen=True, slots=True, order=False)
class RoomStreamToken(AbstractMultiWriterStreamToken):
@@ -1008,6 +1029,41 @@ class StreamToken:
"""Returns the stream ID for the given key."""
return getattr(self, key.value)
+ def is_before_or_eq(self, other_token: "StreamToken") -> bool:
+ """Wether this token is before the other token, i.e. every constituent
+ part is before the other.
+
+ Essentially it is `self <= other`.
+
+ Note: if `self.is_before_or_eq(other_token) is False` then that does not
+ imply that the reverse is True.
+ """
+
+ for _, key in StreamKeyType.__members__.items():
+ if key == StreamKeyType.TYPING:
+ # Typing stream is allowed to "reset", and so comparisons don't
+ # really make sense as is.
+ # TODO: Figure out a better way of tracking resets.
+ continue
+
+ self_value = self.get_field(key)
+ other_value = other_token.get_field(key)
+
+ if isinstance(self_value, RoomStreamToken):
+ assert isinstance(other_value, RoomStreamToken)
+ if not self_value.is_before_or_eq(other_value):
+ return False
+ elif isinstance(self_value, MultiWriterStreamToken):
+ assert isinstance(other_value, MultiWriterStreamToken)
+ if not self_value.is_before_or_eq(other_value):
+ return False
+ else:
+ assert isinstance(other_value, int)
+ if self_value > other_value:
+ return False
+
+ return True
+
StreamToken.START = StreamToken(
RoomStreamToken(stream=0), 0, 0, MultiWriterStreamToken(stream=0), 0, 0, 0, 0, 0, 0
From 7dd14fadb12c33841df30d4668ff9b24f5f23631 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 31 May 2024 11:27:47 +0100
Subject: [PATCH 136/503] Fix sentry default tags (#17251)
This was broken by the sentry 2.0 upgrade
Broke in v1.108.0
---
changelog.d/17251.bugfix | 1 +
synapse/app/_base.py | 20 ++++++++++----------
2 files changed, 11 insertions(+), 10 deletions(-)
create mode 100644 changelog.d/17251.bugfix
diff --git a/changelog.d/17251.bugfix b/changelog.d/17251.bugfix
new file mode 100644
index 0000000000..f573e01e87
--- /dev/null
+++ b/changelog.d/17251.bugfix
@@ -0,0 +1 @@
+Fix reporting of default tags to Sentry, such as worker name. Broke in v1.108.0.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 67e0df1459..4cc260d551 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -681,17 +681,17 @@ def setup_sentry(hs: "HomeServer") -> None:
)
# We set some default tags that give some context to this instance
- with sentry_sdk.configure_scope() as scope:
- scope.set_tag("matrix_server_name", hs.config.server.server_name)
+ global_scope = sentry_sdk.Scope.get_global_scope()
+ global_scope.set_tag("matrix_server_name", hs.config.server.server_name)
- app = (
- hs.config.worker.worker_app
- if hs.config.worker.worker_app
- else "synapse.app.homeserver"
- )
- name = hs.get_instance_name()
- scope.set_tag("worker_app", app)
- scope.set_tag("worker_name", name)
+ app = (
+ hs.config.worker.worker_app
+ if hs.config.worker.worker_app
+ else "synapse.app.homeserver"
+ )
+ name = hs.get_instance_name()
+ global_scope.set_tag("worker_app", app)
+ global_scope.set_tag("worker_name", name)
def setup_sdnotify(hs: "HomeServer") -> None:
From 64f5a4a3534672778cc995cee8b4805db26a9e0c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 31 May 2024 11:27:56 +0100
Subject: [PATCH 137/503] Fix logging errors when receiving invalid User ID for
key querys (#17250)
---
changelog.d/17250.misc | 1 +
synapse/handlers/e2e_keys.py | 5 +++++
2 files changed, 6 insertions(+)
create mode 100644 changelog.d/17250.misc
diff --git a/changelog.d/17250.misc b/changelog.d/17250.misc
new file mode 100644
index 0000000000..49834e83ba
--- /dev/null
+++ b/changelog.d/17250.misc
@@ -0,0 +1 @@
+Stop logging errors when receiving invalid User IDs in key querys requests.
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 4f40e9ffd6..560530a7b3 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -149,6 +149,11 @@ class E2eKeysHandler:
remote_queries = {}
for user_id, device_ids in device_keys_query.items():
+ if not UserID.is_valid(user_id):
+ # Ignore invalid user IDs, which is the same behaviour as if
+ # the user existed but had no keys.
+ continue
+
# we use UserID.from_string to catch invalid user ids
if self.is_mine(UserID.from_string(user_id)):
local_query[user_id] = device_ids
From 5c2a837e3cb3eb307f080a7991f464598f43f283 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 31 May 2024 16:07:05 +0100
Subject: [PATCH 138/503] Fix bug where typing replication breaks (#17252)
This can happen on restarts of the service, due to old rooms being
pruned.
---
changelog.d/17252.bugfix | 1 +
synapse/handlers/typing.py | 6 ++--
tests/handlers/test_typing.py | 53 ++++++++++++++++++++++++++++++++++-
3 files changed, 56 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/17252.bugfix
diff --git a/changelog.d/17252.bugfix b/changelog.d/17252.bugfix
new file mode 100644
index 0000000000..f3289d1568
--- /dev/null
+++ b/changelog.d/17252.bugfix
@@ -0,0 +1 @@
+Fix bug where typing updates would not be sent when using workers after a restart.
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 7619d91c98..4c87718337 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -477,9 +477,9 @@ class TypingWriterHandler(FollowerTypingHandler):
rows = []
for room_id in changed_rooms:
- serial = self._room_serials[room_id]
- if last_id < serial <= current_id:
- typing = self._room_typing[room_id]
+ serial = self._room_serials.get(room_id)
+ if serial and last_id < serial <= current_id:
+ typing = self._room_typing.get(room_id, set())
rows.append((serial, [room_id, list(typing)]))
rows.sort()
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index c754083967..9d8960315f 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -32,7 +32,7 @@ from twisted.web.resource import Resource
from synapse.api.constants import EduTypes
from synapse.api.errors import AuthError
from synapse.federation.transport.server import TransportLayerServer
-from synapse.handlers.typing import TypingWriterHandler
+from synapse.handlers.typing import FORGET_TIMEOUT, TypingWriterHandler
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.server import HomeServer
from synapse.types import JsonDict, Requester, StreamKeyType, UserID, create_requester
@@ -501,3 +501,54 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
}
],
)
+
+ def test_prune_typing_replication(self) -> None:
+ """Regression test for `get_all_typing_updates` breaking when we prune
+ old updates
+ """
+ self.room_members = [U_APPLE, U_BANANA]
+
+ instance_name = self.hs.get_instance_name()
+
+ self.get_success(
+ self.handler.started_typing(
+ target_user=U_APPLE,
+ requester=create_requester(U_APPLE),
+ room_id=ROOM_ID,
+ timeout=10000,
+ )
+ )
+
+ rows, _, _ = self.get_success(
+ self.handler.get_all_typing_updates(
+ instance_name=instance_name,
+ last_id=0,
+ current_id=self.handler.get_current_token(),
+ limit=100,
+ )
+ )
+ self.assertEqual(rows, [(1, [ROOM_ID, [U_APPLE.to_string()]])])
+
+ self.reactor.advance(20000)
+
+ rows, _, _ = self.get_success(
+ self.handler.get_all_typing_updates(
+ instance_name=instance_name,
+ last_id=1,
+ current_id=self.handler.get_current_token(),
+ limit=100,
+ )
+ )
+ self.assertEqual(rows, [(2, [ROOM_ID, []])])
+
+ self.reactor.advance(FORGET_TIMEOUT)
+
+ rows, _, _ = self.get_success(
+ self.handler.get_all_typing_updates(
+ instance_name=instance_name,
+ last_id=1,
+ current_id=self.handler.get_current_token(),
+ limit=100,
+ )
+ )
+ self.assertEqual(rows, [])
From 6b709c512de03543120fcaf5ef7d3c1243788a33 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Jun 2024 14:21:20 +0100
Subject: [PATCH 139/503] Bump lxml from 5.2.1 to 5.2.2 (#17261)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 300 +++++++++++++++++++++++++---------------------------
1 file changed, 144 insertions(+), 156 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 73814e49d0..1c387165fd 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1005,165 +1005,153 @@ pyasn1 = ">=0.4.6"
[[package]]
name = "lxml"
-version = "5.2.1"
+version = "5.2.2"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
optional = true
python-versions = ">=3.6"
files = [
- {file = "lxml-5.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1f7785f4f789fdb522729ae465adcaa099e2a3441519df750ebdccc481d961a1"},
- {file = "lxml-5.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6cc6ee342fb7fa2471bd9b6d6fdfc78925a697bf5c2bcd0a302e98b0d35bfad3"},
- {file = "lxml-5.2.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:794f04eec78f1d0e35d9e0c36cbbb22e42d370dda1609fb03bcd7aeb458c6377"},
- {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817d420c60a5183953c783b0547d9eb43b7b344a2c46f69513d5952a78cddf3"},
- {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2213afee476546a7f37c7a9b4ad4d74b1e112a6fafffc9185d6d21f043128c81"},
- {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b070bbe8d3f0f6147689bed981d19bbb33070225373338df755a46893528104a"},
- {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e02c5175f63effbd7c5e590399c118d5db6183bbfe8e0d118bdb5c2d1b48d937"},
- {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:3dc773b2861b37b41a6136e0b72a1a44689a9c4c101e0cddb6b854016acc0aa8"},
- {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:d7520db34088c96cc0e0a3ad51a4fd5b401f279ee112aa2b7f8f976d8582606d"},
- {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:bcbf4af004f98793a95355980764b3d80d47117678118a44a80b721c9913436a"},
- {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2b44bec7adf3e9305ce6cbfa47a4395667e744097faed97abb4728748ba7d47"},
- {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1c5bb205e9212d0ebddf946bc07e73fa245c864a5f90f341d11ce7b0b854475d"},
- {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2c9d147f754b1b0e723e6afb7ba1566ecb162fe4ea657f53d2139bbf894d050a"},
- {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:3545039fa4779be2df51d6395e91a810f57122290864918b172d5dc7ca5bb433"},
- {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a91481dbcddf1736c98a80b122afa0f7296eeb80b72344d7f45dc9f781551f56"},
- {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2ddfe41ddc81f29a4c44c8ce239eda5ade4e7fc305fb7311759dd6229a080052"},
- {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a7baf9ffc238e4bf401299f50e971a45bfcc10a785522541a6e3179c83eabf0a"},
- {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:31e9a882013c2f6bd2f2c974241bf4ba68c85eba943648ce88936d23209a2e01"},
- {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0a15438253b34e6362b2dc41475e7f80de76320f335e70c5528b7148cac253a1"},
- {file = "lxml-5.2.1-cp310-cp310-win32.whl", hash = "sha256:6992030d43b916407c9aa52e9673612ff39a575523c5f4cf72cdef75365709a5"},
- {file = "lxml-5.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:da052e7962ea2d5e5ef5bc0355d55007407087392cf465b7ad84ce5f3e25fe0f"},
- {file = "lxml-5.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:70ac664a48aa64e5e635ae5566f5227f2ab7f66a3990d67566d9907edcbbf867"},
- {file = "lxml-5.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1ae67b4e737cddc96c99461d2f75d218bdf7a0c3d3ad5604d1f5e7464a2f9ffe"},
- {file = "lxml-5.2.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f18a5a84e16886898e51ab4b1d43acb3083c39b14c8caeb3589aabff0ee0b270"},
- {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6f2c8372b98208ce609c9e1d707f6918cc118fea4e2c754c9f0812c04ca116d"},
- {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:394ed3924d7a01b5bd9a0d9d946136e1c2f7b3dc337196d99e61740ed4bc6fe1"},
- {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d077bc40a1fe984e1a9931e801e42959a1e6598edc8a3223b061d30fbd26bbc"},
- {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:764b521b75701f60683500d8621841bec41a65eb739b8466000c6fdbc256c240"},
- {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:3a6b45da02336895da82b9d472cd274b22dc27a5cea1d4b793874eead23dd14f"},
- {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:5ea7b6766ac2dfe4bcac8b8595107665a18ef01f8c8343f00710b85096d1b53a"},
- {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:e196a4ff48310ba62e53a8e0f97ca2bca83cdd2fe2934d8b5cb0df0a841b193a"},
- {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:200e63525948e325d6a13a76ba2911f927ad399ef64f57898cf7c74e69b71095"},
- {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dae0ed02f6b075426accbf6b2863c3d0a7eacc1b41fb40f2251d931e50188dad"},
- {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:ab31a88a651039a07a3ae327d68ebdd8bc589b16938c09ef3f32a4b809dc96ef"},
- {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:df2e6f546c4df14bc81f9498bbc007fbb87669f1bb707c6138878c46b06f6510"},
- {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5dd1537e7cc06efd81371f5d1a992bd5ab156b2b4f88834ca852de4a8ea523fa"},
- {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9b9ec9c9978b708d488bec36b9e4c94d88fd12ccac3e62134a9d17ddba910ea9"},
- {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8e77c69d5892cb5ba71703c4057091e31ccf534bd7f129307a4d084d90d014b8"},
- {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a8d5c70e04aac1eda5c829a26d1f75c6e5286c74743133d9f742cda8e53b9c2f"},
- {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c94e75445b00319c1fad60f3c98b09cd63fe1134a8a953dcd48989ef42318534"},
- {file = "lxml-5.2.1-cp311-cp311-win32.whl", hash = "sha256:4951e4f7a5680a2db62f7f4ab2f84617674d36d2d76a729b9a8be4b59b3659be"},
- {file = "lxml-5.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:5c670c0406bdc845b474b680b9a5456c561c65cf366f8db5a60154088c92d102"},
- {file = "lxml-5.2.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:abc25c3cab9ec7fcd299b9bcb3b8d4a1231877e425c650fa1c7576c5107ab851"},
- {file = "lxml-5.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6935bbf153f9a965f1e07c2649c0849d29832487c52bb4a5c5066031d8b44fd5"},
- {file = "lxml-5.2.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d793bebb202a6000390a5390078e945bbb49855c29c7e4d56a85901326c3b5d9"},
- {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afd5562927cdef7c4f5550374acbc117fd4ecc05b5007bdfa57cc5355864e0a4"},
- {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0e7259016bc4345a31af861fdce942b77c99049d6c2107ca07dc2bba2435c1d9"},
- {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:530e7c04f72002d2f334d5257c8a51bf409db0316feee7c87e4385043be136af"},
- {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59689a75ba8d7ffca577aefd017d08d659d86ad4585ccc73e43edbfc7476781a"},
- {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f9737bf36262046213a28e789cc82d82c6ef19c85a0cf05e75c670a33342ac2c"},
- {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:3a74c4f27167cb95c1d4af1c0b59e88b7f3e0182138db2501c353555f7ec57f4"},
- {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:68a2610dbe138fa8c5826b3f6d98a7cfc29707b850ddcc3e21910a6fe51f6ca0"},
- {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f0a1bc63a465b6d72569a9bba9f2ef0334c4e03958e043da1920299100bc7c08"},
- {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c2d35a1d047efd68027817b32ab1586c1169e60ca02c65d428ae815b593e65d4"},
- {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:79bd05260359170f78b181b59ce871673ed01ba048deef4bf49a36ab3e72e80b"},
- {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:865bad62df277c04beed9478fe665b9ef63eb28fe026d5dedcb89b537d2e2ea6"},
- {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:44f6c7caff88d988db017b9b0e4ab04934f11e3e72d478031efc7edcac6c622f"},
- {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:71e97313406ccf55d32cc98a533ee05c61e15d11b99215b237346171c179c0b0"},
- {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:057cdc6b86ab732cf361f8b4d8af87cf195a1f6dc5b0ff3de2dced242c2015e0"},
- {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f3bbbc998d42f8e561f347e798b85513ba4da324c2b3f9b7969e9c45b10f6169"},
- {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:491755202eb21a5e350dae00c6d9a17247769c64dcf62d8c788b5c135e179dc4"},
- {file = "lxml-5.2.1-cp312-cp312-win32.whl", hash = "sha256:8de8f9d6caa7f25b204fc861718815d41cbcf27ee8f028c89c882a0cf4ae4134"},
- {file = "lxml-5.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:f2a9efc53d5b714b8df2b4b3e992accf8ce5bbdfe544d74d5c6766c9e1146a3a"},
- {file = "lxml-5.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:70a9768e1b9d79edca17890175ba915654ee1725975d69ab64813dd785a2bd5c"},
- {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c38d7b9a690b090de999835f0443d8aa93ce5f2064035dfc48f27f02b4afc3d0"},
- {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5670fb70a828663cc37552a2a85bf2ac38475572b0e9b91283dc09efb52c41d1"},
- {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:958244ad566c3ffc385f47dddde4145088a0ab893504b54b52c041987a8c1863"},
- {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6241d4eee5f89453307c2f2bfa03b50362052ca0af1efecf9fef9a41a22bb4f"},
- {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:2a66bf12fbd4666dd023b6f51223aed3d9f3b40fef06ce404cb75bafd3d89536"},
- {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:9123716666e25b7b71c4e1789ec829ed18663152008b58544d95b008ed9e21e9"},
- {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:0c3f67e2aeda739d1cc0b1102c9a9129f7dc83901226cc24dd72ba275ced4218"},
- {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:5d5792e9b3fb8d16a19f46aa8208987cfeafe082363ee2745ea8b643d9cc5b45"},
- {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:88e22fc0a6684337d25c994381ed8a1580a6f5ebebd5ad41f89f663ff4ec2885"},
- {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:21c2e6b09565ba5b45ae161b438e033a86ad1736b8c838c766146eff8ceffff9"},
- {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_s390x.whl", hash = "sha256:afbbdb120d1e78d2ba8064a68058001b871154cc57787031b645c9142b937a62"},
- {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:627402ad8dea044dde2eccde4370560a2b750ef894c9578e1d4f8ffd54000461"},
- {file = "lxml-5.2.1-cp36-cp36m-win32.whl", hash = "sha256:e89580a581bf478d8dcb97d9cd011d567768e8bc4095f8557b21c4d4c5fea7d0"},
- {file = "lxml-5.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:59565f10607c244bc4c05c0c5fa0c190c990996e0c719d05deec7030c2aa8289"},
- {file = "lxml-5.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:857500f88b17a6479202ff5fe5f580fc3404922cd02ab3716197adf1ef628029"},
- {file = "lxml-5.2.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56c22432809085b3f3ae04e6e7bdd36883d7258fcd90e53ba7b2e463efc7a6af"},
- {file = "lxml-5.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a55ee573116ba208932e2d1a037cc4b10d2c1cb264ced2184d00b18ce585b2c0"},
- {file = "lxml-5.2.1-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:6cf58416653c5901e12624e4013708b6e11142956e7f35e7a83f1ab02f3fe456"},
- {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:64c2baa7774bc22dd4474248ba16fe1a7f611c13ac6123408694d4cc93d66dbd"},
- {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:74b28c6334cca4dd704e8004cba1955af0b778cf449142e581e404bd211fb619"},
- {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:7221d49259aa1e5a8f00d3d28b1e0b76031655ca74bb287123ef56c3db92f213"},
- {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3dbe858ee582cbb2c6294dc85f55b5f19c918c2597855e950f34b660f1a5ede6"},
- {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:04ab5415bf6c86e0518d57240a96c4d1fcfc3cb370bb2ac2a732b67f579e5a04"},
- {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:6ab833e4735a7e5533711a6ea2df26459b96f9eec36d23f74cafe03631647c41"},
- {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f443cdef978430887ed55112b491f670bba6462cea7a7742ff8f14b7abb98d75"},
- {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:9e2addd2d1866fe112bc6f80117bcc6bc25191c5ed1bfbcf9f1386a884252ae8"},
- {file = "lxml-5.2.1-cp37-cp37m-win32.whl", hash = "sha256:f51969bac61441fd31f028d7b3b45962f3ecebf691a510495e5d2cd8c8092dbd"},
- {file = "lxml-5.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:b0b58fbfa1bf7367dde8a557994e3b1637294be6cf2169810375caf8571a085c"},
- {file = "lxml-5.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:804f74efe22b6a227306dd890eecc4f8c59ff25ca35f1f14e7482bbce96ef10b"},
- {file = "lxml-5.2.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08802f0c56ed150cc6885ae0788a321b73505d2263ee56dad84d200cab11c07a"},
- {file = "lxml-5.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f8c09ed18ecb4ebf23e02b8e7a22a05d6411911e6fabef3a36e4f371f4f2585"},
- {file = "lxml-5.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3d30321949861404323c50aebeb1943461a67cd51d4200ab02babc58bd06a86"},
- {file = "lxml-5.2.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:b560e3aa4b1d49e0e6c847d72665384db35b2f5d45f8e6a5c0072e0283430533"},
- {file = "lxml-5.2.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:058a1308914f20784c9f4674036527e7c04f7be6fb60f5d61353545aa7fcb739"},
- {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:adfb84ca6b87e06bc6b146dc7da7623395db1e31621c4785ad0658c5028b37d7"},
- {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:417d14450f06d51f363e41cace6488519038f940676ce9664b34ebf5653433a5"},
- {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a2dfe7e2473f9b59496247aad6e23b405ddf2e12ef0765677b0081c02d6c2c0b"},
- {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bf2e2458345d9bffb0d9ec16557d8858c9c88d2d11fed53998512504cd9df49b"},
- {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:58278b29cb89f3e43ff3e0c756abbd1518f3ee6adad9e35b51fb101c1c1daaec"},
- {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:64641a6068a16201366476731301441ce93457eb8452056f570133a6ceb15fca"},
- {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:78bfa756eab503673991bdcf464917ef7845a964903d3302c5f68417ecdc948c"},
- {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:11a04306fcba10cd9637e669fd73aa274c1c09ca64af79c041aa820ea992b637"},
- {file = "lxml-5.2.1-cp38-cp38-win32.whl", hash = "sha256:66bc5eb8a323ed9894f8fa0ee6cb3e3fb2403d99aee635078fd19a8bc7a5a5da"},
- {file = "lxml-5.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:9676bfc686fa6a3fa10cd4ae6b76cae8be26eb5ec6811d2a325636c460da1806"},
- {file = "lxml-5.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cf22b41fdae514ee2f1691b6c3cdeae666d8b7fa9434de445f12bbeee0cf48dd"},
- {file = "lxml-5.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ec42088248c596dbd61d4ae8a5b004f97a4d91a9fd286f632e42e60b706718d7"},
- {file = "lxml-5.2.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd53553ddad4a9c2f1f022756ae64abe16da1feb497edf4d9f87f99ec7cf86bd"},
- {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feaa45c0eae424d3e90d78823f3828e7dc42a42f21ed420db98da2c4ecf0a2cb"},
- {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddc678fb4c7e30cf830a2b5a8d869538bc55b28d6c68544d09c7d0d8f17694dc"},
- {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:853e074d4931dbcba7480d4dcab23d5c56bd9607f92825ab80ee2bd916edea53"},
- {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc4691d60512798304acb9207987e7b2b7c44627ea88b9d77489bbe3e6cc3bd4"},
- {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:beb72935a941965c52990f3a32d7f07ce869fe21c6af8b34bf6a277b33a345d3"},
- {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:6588c459c5627fefa30139be4d2e28a2c2a1d0d1c265aad2ba1935a7863a4913"},
- {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:588008b8497667f1ddca7c99f2f85ce8511f8f7871b4a06ceede68ab62dff64b"},
- {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6787b643356111dfd4032b5bffe26d2f8331556ecb79e15dacb9275da02866e"},
- {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7c17b64b0a6ef4e5affae6a3724010a7a66bda48a62cfe0674dabd46642e8b54"},
- {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:27aa20d45c2e0b8cd05da6d4759649170e8dfc4f4e5ef33a34d06f2d79075d57"},
- {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d4f2cc7060dc3646632d7f15fe68e2fa98f58e35dd5666cd525f3b35d3fed7f8"},
- {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff46d772d5f6f73564979cd77a4fffe55c916a05f3cb70e7c9c0590059fb29ef"},
- {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:96323338e6c14e958d775700ec8a88346014a85e5de73ac7967db0367582049b"},
- {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:52421b41ac99e9d91934e4d0d0fe7da9f02bfa7536bb4431b4c05c906c8c6919"},
- {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:7a7efd5b6d3e30d81ec68ab8a88252d7c7c6f13aaa875009fe3097eb4e30b84c"},
- {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ed777c1e8c99b63037b91f9d73a6aad20fd035d77ac84afcc205225f8f41188"},
- {file = "lxml-5.2.1-cp39-cp39-win32.whl", hash = "sha256:644df54d729ef810dcd0f7732e50e5ad1bd0a135278ed8d6bcb06f33b6b6f708"},
- {file = "lxml-5.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:9ca66b8e90daca431b7ca1408cae085d025326570e57749695d6a01454790e95"},
- {file = "lxml-5.2.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9b0ff53900566bc6325ecde9181d89afadc59c5ffa39bddf084aaedfe3b06a11"},
- {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd6037392f2d57793ab98d9e26798f44b8b4da2f2464388588f48ac52c489ea1"},
- {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9c07e7a45bb64e21df4b6aa623cb8ba214dfb47d2027d90eac197329bb5e94"},
- {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3249cc2989d9090eeac5467e50e9ec2d40704fea9ab72f36b034ea34ee65ca98"},
- {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f42038016852ae51b4088b2862126535cc4fc85802bfe30dea3500fdfaf1864e"},
- {file = "lxml-5.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:533658f8fbf056b70e434dff7e7aa611bcacb33e01f75de7f821810e48d1bb66"},
- {file = "lxml-5.2.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:622020d4521e22fb371e15f580d153134bfb68d6a429d1342a25f051ec72df1c"},
- {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efa7b51824aa0ee957ccd5a741c73e6851de55f40d807f08069eb4c5a26b2baa"},
- {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c6ad0fbf105f6bcc9300c00010a2ffa44ea6f555df1a2ad95c88f5656104817"},
- {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e233db59c8f76630c512ab4a4daf5a5986da5c3d5b44b8e9fc742f2a24dbd460"},
- {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a014510830df1475176466b6087fc0c08b47a36714823e58d8b8d7709132a96"},
- {file = "lxml-5.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:d38c8f50ecf57f0463399569aa388b232cf1a2ffb8f0a9a5412d0db57e054860"},
- {file = "lxml-5.2.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5aea8212fb823e006b995c4dda533edcf98a893d941f173f6c9506126188860d"},
- {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff097ae562e637409b429a7ac958a20aab237a0378c42dabaa1e3abf2f896e5f"},
- {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f5d65c39f16717a47c36c756af0fb36144069c4718824b7533f803ecdf91138"},
- {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3d0c3dd24bb4605439bf91068598d00c6370684f8de4a67c2992683f6c309d6b"},
- {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e32be23d538753a8adb6c85bd539f5fd3b15cb987404327c569dfc5fd8366e85"},
- {file = "lxml-5.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cc518cea79fd1e2f6c90baafa28906d4309d24f3a63e801d855e7424c5b34144"},
- {file = "lxml-5.2.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a0af35bd8ebf84888373630f73f24e86bf016642fb8576fba49d3d6b560b7cbc"},
- {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8aca2e3a72f37bfc7b14ba96d4056244001ddcc18382bd0daa087fd2e68a354"},
- {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ca1e8188b26a819387b29c3895c47a5e618708fe6f787f3b1a471de2c4a94d9"},
- {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c8ba129e6d3b0136a0f50345b2cb3db53f6bda5dd8c7f5d83fbccba97fb5dcb5"},
- {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e998e304036198b4f6914e6a1e2b6f925208a20e2042563d9734881150c6c246"},
- {file = "lxml-5.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d3be9b2076112e51b323bdf6d5a7f8a798de55fb8d95fcb64bd179460cdc0704"},
- {file = "lxml-5.2.1.tar.gz", hash = "sha256:3f7765e69bbce0906a7c74d5fe46d2c7a7596147318dbc08e4a2431f3060e306"},
+ {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:364d03207f3e603922d0d3932ef363d55bbf48e3647395765f9bfcbdf6d23632"},
+ {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50127c186f191b8917ea2fb8b206fbebe87fd414a6084d15568c27d0a21d60db"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74e4f025ef3db1c6da4460dd27c118d8cd136d0391da4e387a15e48e5c975147"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:981a06a3076997adf7c743dcd0d7a0415582661e2517c7d961493572e909aa1d"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aef5474d913d3b05e613906ba4090433c515e13ea49c837aca18bde190853dff"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e275ea572389e41e8b039ac076a46cb87ee6b8542df3fff26f5baab43713bca"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5b65529bb2f21ac7861a0e94fdbf5dc0daab41497d18223b46ee8515e5ad297"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bcc98f911f10278d1daf14b87d65325851a1d29153caaf146877ec37031d5f36"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:b47633251727c8fe279f34025844b3b3a3e40cd1b198356d003aa146258d13a2"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:fbc9d316552f9ef7bba39f4edfad4a734d3d6f93341232a9dddadec4f15d425f"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:13e69be35391ce72712184f69000cda04fc89689429179bc4c0ae5f0b7a8c21b"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b6a30a9ab040b3f545b697cb3adbf3696c05a3a68aad172e3fd7ca73ab3c835"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a233bb68625a85126ac9f1fc66d24337d6e8a0f9207b688eec2e7c880f012ec0"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:dfa7c241073d8f2b8e8dbc7803c434f57dbb83ae2a3d7892dd068d99e96efe2c"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1a7aca7964ac4bb07680d5c9d63b9d7028cace3e2d43175cb50bba8c5ad33316"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ae4073a60ab98529ab8a72ebf429f2a8cc612619a8c04e08bed27450d52103c0"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ffb2be176fed4457e445fe540617f0252a72a8bc56208fd65a690fdb1f57660b"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e290d79a4107d7d794634ce3e985b9ae4f920380a813717adf61804904dc4393"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:96e85aa09274955bb6bd483eaf5b12abadade01010478154b0ec70284c1b1526"},
+ {file = "lxml-5.2.2-cp310-cp310-win32.whl", hash = "sha256:f956196ef61369f1685d14dad80611488d8dc1ef00be57c0c5a03064005b0f30"},
+ {file = "lxml-5.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:875a3f90d7eb5c5d77e529080d95140eacb3c6d13ad5b616ee8095447b1d22e7"},
+ {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45f9494613160d0405682f9eee781c7e6d1bf45f819654eb249f8f46a2c22545"},
+ {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0b3f2df149efb242cee2ffdeb6674b7f30d23c9a7af26595099afaf46ef4e88"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d28cb356f119a437cc58a13f8135ab8a4c8ece18159eb9194b0d269ec4e28083"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:657a972f46bbefdbba2d4f14413c0d079f9ae243bd68193cb5061b9732fa54c1"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b9ea10063efb77a965a8d5f4182806fbf59ed068b3c3fd6f30d2ac7bee734"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07542787f86112d46d07d4f3c4e7c760282011b354d012dc4141cc12a68cef5f"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:303f540ad2dddd35b92415b74b900c749ec2010e703ab3bfd6660979d01fd4ed"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2eb2227ce1ff998faf0cd7fe85bbf086aa41dfc5af3b1d80867ecfe75fb68df3"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:1d8a701774dfc42a2f0b8ccdfe7dbc140500d1049e0632a611985d943fcf12df"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:56793b7a1a091a7c286b5f4aa1fe4ae5d1446fe742d00cdf2ffb1077865db10d"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eb00b549b13bd6d884c863554566095bf6fa9c3cecb2e7b399c4bc7904cb33b5"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a2569a1f15ae6c8c64108a2cd2b4a858fc1e13d25846be0666fc144715e32ab"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:8cf85a6e40ff1f37fe0f25719aadf443686b1ac7652593dc53c7ef9b8492b115"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:d237ba6664b8e60fd90b8549a149a74fcc675272e0e95539a00522e4ca688b04"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0b3f5016e00ae7630a4b83d0868fca1e3d494c78a75b1c7252606a3a1c5fc2ad"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23441e2b5339bc54dc949e9e675fa35efe858108404ef9aa92f0456929ef6fe8"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2fb0ba3e8566548d6c8e7dd82a8229ff47bd8fb8c2da237607ac8e5a1b8312e5"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:79d1fb9252e7e2cfe4de6e9a6610c7cbb99b9708e2c3e29057f487de5a9eaefa"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6dcc3d17eac1df7859ae01202e9bb11ffa8c98949dcbeb1069c8b9a75917e01b"},
+ {file = "lxml-5.2.2-cp311-cp311-win32.whl", hash = "sha256:4c30a2f83677876465f44c018830f608fa3c6a8a466eb223535035fbc16f3438"},
+ {file = "lxml-5.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:49095a38eb333aaf44c06052fd2ec3b8f23e19747ca7ec6f6c954ffea6dbf7be"},
+ {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7429e7faa1a60cad26ae4227f4dd0459efde239e494c7312624ce228e04f6391"},
+ {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:50ccb5d355961c0f12f6cf24b7187dbabd5433f29e15147a67995474f27d1776"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc911208b18842a3a57266d8e51fc3cfaccee90a5351b92079beed912a7914c2"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33ce9e786753743159799fdf8e92a5da351158c4bfb6f2db0bf31e7892a1feb5"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec87c44f619380878bd49ca109669c9f221d9ae6883a5bcb3616785fa8f94c97"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08ea0f606808354eb8f2dfaac095963cb25d9d28e27edcc375d7b30ab01abbf6"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75a9632f1d4f698b2e6e2e1ada40e71f369b15d69baddb8968dcc8e683839b18"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74da9f97daec6928567b48c90ea2c82a106b2d500f397eeb8941e47d30b1ca85"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:0969e92af09c5687d769731e3f39ed62427cc72176cebb54b7a9d52cc4fa3b73"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:9164361769b6ca7769079f4d426a41df6164879f7f3568be9086e15baca61466"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d26a618ae1766279f2660aca0081b2220aca6bd1aa06b2cf73f07383faf48927"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab67ed772c584b7ef2379797bf14b82df9aa5f7438c5b9a09624dd834c1c1aaf"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3d1e35572a56941b32c239774d7e9ad724074d37f90c7a7d499ab98761bd80cf"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8268cbcd48c5375f46e000adb1390572c98879eb4f77910c6053d25cc3ac2c67"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e282aedd63c639c07c3857097fc0e236f984ceb4089a8b284da1c526491e3f3d"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfdc2bfe69e9adf0df4915949c22a25b39d175d599bf98e7ddf620a13678585"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4aefd911793b5d2d7a921233a54c90329bf3d4a6817dc465f12ffdfe4fc7b8fe"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8b8df03a9e995b6211dafa63b32f9d405881518ff1ddd775db4e7b98fb545e1c"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f11ae142f3a322d44513de1018b50f474f8f736bc3cd91d969f464b5bfef8836"},
+ {file = "lxml-5.2.2-cp312-cp312-win32.whl", hash = "sha256:16a8326e51fcdffc886294c1e70b11ddccec836516a343f9ed0f82aac043c24a"},
+ {file = "lxml-5.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:bbc4b80af581e18568ff07f6395c02114d05f4865c2812a1f02f2eaecf0bfd48"},
+ {file = "lxml-5.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e3d9d13603410b72787579769469af730c38f2f25505573a5888a94b62b920f8"},
+ {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38b67afb0a06b8575948641c1d6d68e41b83a3abeae2ca9eed2ac59892b36706"},
+ {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c689d0d5381f56de7bd6966a4541bff6e08bf8d3871bbd89a0c6ab18aa699573"},
+ {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:cf2a978c795b54c539f47964ec05e35c05bd045db5ca1e8366988c7f2fe6b3ce"},
+ {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:739e36ef7412b2bd940f75b278749106e6d025e40027c0b94a17ef7968d55d56"},
+ {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d8bbcd21769594dbba9c37d3c819e2d5847656ca99c747ddb31ac1701d0c0ed9"},
+ {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:2304d3c93f2258ccf2cf7a6ba8c761d76ef84948d87bf9664e14d203da2cd264"},
+ {file = "lxml-5.2.2-cp36-cp36m-win32.whl", hash = "sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3"},
+ {file = "lxml-5.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196"},
+ {file = "lxml-5.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905"},
+ {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184"},
+ {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6"},
+ {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f"},
+ {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61"},
+ {file = "lxml-5.2.2-cp37-cp37m-win32.whl", hash = "sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f"},
+ {file = "lxml-5.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40"},
+ {file = "lxml-5.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7ed07b3062b055d7a7f9d6557a251cc655eed0b3152b76de619516621c56f5d3"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f60fdd125d85bf9c279ffb8e94c78c51b3b6a37711464e1f5f31078b45002421"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a7e24cb69ee5f32e003f50e016d5fde438010c1022c96738b04fc2423e61706"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23cfafd56887eaed93d07bc4547abd5e09d837a002b791e9767765492a75883f"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19b4e485cd07b7d83e3fe3b72132e7df70bfac22b14fe4bf7a23822c3a35bff5"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7ce7ad8abebe737ad6143d9d3bf94b88b93365ea30a5b81f6877ec9c0dee0a48"},
+ {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e49b052b768bb74f58c7dda4e0bdf7b79d43a9204ca584ffe1fb48a6f3c84c66"},
+ {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d14a0d029a4e176795cef99c056d58067c06195e0c7e2dbb293bf95c08f772a3"},
+ {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:be49ad33819d7dcc28a309b86d4ed98e1a65f3075c6acd3cd4fe32103235222b"},
+ {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a6d17e0370d2516d5bb9062c7b4cb731cff921fc875644c3d751ad857ba9c5b1"},
+ {file = "lxml-5.2.2-cp38-cp38-win32.whl", hash = "sha256:5b8c041b6265e08eac8a724b74b655404070b636a8dd6d7a13c3adc07882ef30"},
+ {file = "lxml-5.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:f61efaf4bed1cc0860e567d2ecb2363974d414f7f1f124b1df368bbf183453a6"},
+ {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb91819461b1b56d06fa4bcf86617fac795f6a99d12239fb0c68dbeba41a0a30"},
+ {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d4ed0c7cbecde7194cd3228c044e86bf73e30a23505af852857c09c24e77ec5d"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54401c77a63cc7d6dc4b4e173bb484f28a5607f3df71484709fe037c92d4f0ed"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:625e3ef310e7fa3a761d48ca7ea1f9d8718a32b1542e727d584d82f4453d5eeb"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:519895c99c815a1a24a926d5b60627ce5ea48e9f639a5cd328bda0515ea0f10c"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c7079d5eb1c1315a858bbf180000757db8ad904a89476653232db835c3114001"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:343ab62e9ca78094f2306aefed67dcfad61c4683f87eee48ff2fd74902447726"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:cd9e78285da6c9ba2d5c769628f43ef66d96ac3085e59b10ad4f3707980710d3"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:546cf886f6242dff9ec206331209db9c8e1643ae642dea5fdbecae2453cb50fd"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:339ee4a4704bc724757cd5dd9dc8cf4d00980f5d3e6e06d5847c1b594ace68ab"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0a028b61a2e357ace98b1615fc03f76eb517cc028993964fe08ad514b1e8892d"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f90e552ecbad426eab352e7b2933091f2be77115bb16f09f78404861c8322981"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d83e2d94b69bf31ead2fa45f0acdef0757fa0458a129734f59f67f3d2eb7ef32"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a02d3c48f9bb1e10c7788d92c0c7db6f2002d024ab6e74d6f45ae33e3d0288a3"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6d68ce8e7b2075390e8ac1e1d3a99e8b6372c694bbe612632606d1d546794207"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:453d037e09a5176d92ec0fd282e934ed26d806331a8b70ab431a81e2fbabf56d"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3b019d4ee84b683342af793b56bb35034bd749e4cbdd3d33f7d1107790f8c472"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb3942960f0beb9f46e2a71a3aca220d1ca32feb5a398656be934320804c0df9"},
+ {file = "lxml-5.2.2-cp39-cp39-win32.whl", hash = "sha256:ac6540c9fff6e3813d29d0403ee7a81897f1d8ecc09a8ff84d2eea70ede1cdbf"},
+ {file = "lxml-5.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:610b5c77428a50269f38a534057444c249976433f40f53e3b47e68349cca1425"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b537bd04d7ccd7c6350cdaaaad911f6312cbd61e6e6045542f781c7f8b2e99d2"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4820c02195d6dfb7b8508ff276752f6b2ff8b64ae5d13ebe02e7667e035000b9"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a09f6184f17a80897172863a655467da2b11151ec98ba8d7af89f17bf63dae"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76acba4c66c47d27c8365e7c10b3d8016a7da83d3191d053a58382311a8bf4e1"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b128092c927eaf485928cec0c28f6b8bead277e28acf56800e972aa2c2abd7a2"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ae791f6bd43305aade8c0e22f816b34f3b72b6c820477aab4d18473a37e8090b"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a2f6a1bc2460e643785a2cde17293bd7a8f990884b822f7bca47bee0a82fc66b"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e8d351ff44c1638cb6e980623d517abd9f580d2e53bfcd18d8941c052a5a009"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec4bd9133420c5c52d562469c754f27c5c9e36ee06abc169612c959bd7dbb07"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:55ce6b6d803890bd3cc89975fca9de1dff39729b43b73cb15ddd933b8bc20484"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ab6a358d1286498d80fe67bd3d69fcbc7d1359b45b41e74c4a26964ca99c3f8"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:06668e39e1f3c065349c51ac27ae430719d7806c026fec462e5693b08b95696b"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9cd5323344d8ebb9fb5e96da5de5ad4ebab993bbf51674259dbe9d7a18049525"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89feb82ca055af0fe797a2323ec9043b26bc371365847dbe83c7fd2e2f181c34"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e481bba1e11ba585fb06db666bfc23dbe181dbafc7b25776156120bf12e0d5a6"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d6c6ea6a11ca0ff9cd0390b885984ed31157c168565702959c25e2191674a14"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3d98de734abee23e61f6b8c2e08a88453ada7d6486dc7cdc82922a03968928db"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:69ab77a1373f1e7563e0fb5a29a8440367dec051da6c7405333699d07444f511"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34e17913c431f5ae01d8658dbf792fdc457073dcdfbb31dc0cc6ab256e664a8d"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a520b4f9974b0a0a6ed73c2154de57cdfd0c8800f4f15ab2b73238ffed0b36e"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5e097646944b66207023bc3c634827de858aebc226d5d4d6d16f0b77566ea182"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b5e4ef22ff25bfd4ede5f8fb30f7b24446345f3e79d9b7455aef2836437bc38a"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff69a9a0b4b17d78170c73abe2ab12084bdf1691550c5629ad1fe7849433f324"},
+ {file = "lxml-5.2.2.tar.gz", hash = "sha256:bb2dc4898180bea79863d5487e5f9c7c34297414bad54bcd0f0852aee9cfdb87"},
]
[package.extras]
@@ -3184,4 +3172,4 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8.0"
-content-hash = "987f8eccaa222367b1a2e15b0d496586ca50d46ca1277e69694922d31c93ce5b"
+content-hash = "107c8fb5c67360340854fbdba3c085fc5f9c7be24bcb592596a914eea621faea"
From f458dff16ddc6462789d807697f40880146a80fb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Jun 2024 14:23:29 +0100
Subject: [PATCH 140/503] Bump mypy-zope from 1.0.3 to 1.0.4 (#17262)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 1c387165fd..6049e7027a 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1442,17 +1442,17 @@ files = [
[[package]]
name = "mypy-zope"
-version = "1.0.3"
+version = "1.0.4"
description = "Plugin for mypy to support zope interfaces"
optional = false
python-versions = "*"
files = [
- {file = "mypy-zope-1.0.3.tar.gz", hash = "sha256:149081bd2754d947747baefac569bb1c2bc127b4a2cc1fa505492336946bb3b4"},
- {file = "mypy_zope-1.0.3-py3-none-any.whl", hash = "sha256:7a30ce1a2589173f0be66662c9a9179f75737afc40e4104df4c76fb5a8421c14"},
+ {file = "mypy-zope-1.0.4.tar.gz", hash = "sha256:a9569e73ae85a65247787d98590fa6d4290e76f26aabe035d1c3e94a0b9ab6ee"},
+ {file = "mypy_zope-1.0.4-py3-none-any.whl", hash = "sha256:c7298f93963a84f2b145c2b5cc98709fc2a5be4adf54bfe23fa7fdd8fd19c975"},
]
[package.dependencies]
-mypy = ">=1.0.0,<1.9.0"
+mypy = ">=1.0.0,<1.10.0"
"zope.interface" = "*"
"zope.schema" = "*"
From 8a3270075b48905d8c8d682892a6411ed13fd328 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Jun 2024 14:26:41 +0100
Subject: [PATCH 141/503] Bump types-pyopenssl from 24.0.0.20240311 to
24.1.0.20240425 (#17260)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 6049e7027a..80924ccbb1 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2770,6 +2770,20 @@ files = [
[package.dependencies]
types-html5lib = "*"
+[[package]]
+name = "types-cffi"
+version = "1.16.0.20240331"
+description = "Typing stubs for cffi"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "types-cffi-1.16.0.20240331.tar.gz", hash = "sha256:b8b20d23a2b89cfed5f8c5bc53b0cb8677c3aac6d970dbc771e28b9c698f5dee"},
+ {file = "types_cffi-1.16.0.20240331-py3-none-any.whl", hash = "sha256:a363e5ea54a4eb6a4a105d800685fde596bc318089b025b27dee09849fe41ff0"},
+]
+
+[package.dependencies]
+types-setuptools = "*"
+
[[package]]
name = "types-commonmark"
version = "0.9.2.20240106"
@@ -2852,17 +2866,18 @@ files = [
[[package]]
name = "types-pyopenssl"
-version = "24.0.0.20240311"
+version = "24.1.0.20240425"
description = "Typing stubs for pyOpenSSL"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-pyOpenSSL-24.0.0.20240311.tar.gz", hash = "sha256:7bca00cfc4e7ef9c5d2663c6a1c068c35798e59670595439f6296e7ba3d58083"},
- {file = "types_pyOpenSSL-24.0.0.20240311-py3-none-any.whl", hash = "sha256:6e8e8bfad34924067333232c93f7fc4b369856d8bea0d5c9d1808cb290ab1972"},
+ {file = "types-pyOpenSSL-24.1.0.20240425.tar.gz", hash = "sha256:0a7e82626c1983dc8dc59292bf20654a51c3c3881bcbb9b337c1da6e32f0204e"},
+ {file = "types_pyOpenSSL-24.1.0.20240425-py3-none-any.whl", hash = "sha256:f51a156835555dd2a1f025621e8c4fbe7493470331afeef96884d1d29bf3a473"},
]
[package.dependencies]
cryptography = ">=35.0.0"
+types-cffi = "*"
[[package]]
name = "types-pyyaml"
From 18c11968937c8313e363d2dc1cae38435af137a6 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Tue, 4 Jun 2024 09:46:09 +0100
Subject: [PATCH 142/503] 1.109.0rc1
---
CHANGES.md | 54 +++++++++++++++++++++++++++++++++++++++
changelog.d/17083.misc | 1 -
changelog.d/17147.feature | 1 -
changelog.d/17164.bugfix | 1 -
changelog.d/17167.feature | 1 -
changelog.d/17176.misc | 1 -
changelog.d/17204.doc | 1 -
changelog.d/17211.misc | 1 -
changelog.d/17213.feature | 1 -
changelog.d/17215.bugfix | 1 -
changelog.d/17216.misc | 1 -
changelog.d/17219.feature | 1 -
changelog.d/17226.misc | 1 -
changelog.d/17229.misc | 1 -
changelog.d/17238.misc | 1 -
changelog.d/17239.misc | 1 -
changelog.d/17240.bugfix | 1 -
changelog.d/17241.bugfix | 1 -
changelog.d/17242.misc | 1 -
changelog.d/17246.misc | 1 -
changelog.d/17250.misc | 1 -
changelog.d/17251.bugfix | 1 -
changelog.d/17252.bugfix | 1 -
debian/changelog | 6 +++++
pyproject.toml | 2 +-
25 files changed, 61 insertions(+), 23 deletions(-)
delete mode 100644 changelog.d/17083.misc
delete mode 100644 changelog.d/17147.feature
delete mode 100644 changelog.d/17164.bugfix
delete mode 100644 changelog.d/17167.feature
delete mode 100644 changelog.d/17176.misc
delete mode 100644 changelog.d/17204.doc
delete mode 100644 changelog.d/17211.misc
delete mode 100644 changelog.d/17213.feature
delete mode 100644 changelog.d/17215.bugfix
delete mode 100644 changelog.d/17216.misc
delete mode 100644 changelog.d/17219.feature
delete mode 100644 changelog.d/17226.misc
delete mode 100644 changelog.d/17229.misc
delete mode 100644 changelog.d/17238.misc
delete mode 100644 changelog.d/17239.misc
delete mode 100644 changelog.d/17240.bugfix
delete mode 100644 changelog.d/17241.bugfix
delete mode 100644 changelog.d/17242.misc
delete mode 100644 changelog.d/17246.misc
delete mode 100644 changelog.d/17250.misc
delete mode 100644 changelog.d/17251.bugfix
delete mode 100644 changelog.d/17252.bugfix
diff --git a/CHANGES.md b/CHANGES.md
index d517fc4eff..0c61183d6b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,57 @@
+# Synapse 1.109.0rc1 (2024-06-04)
+
+### Features
+
+- Add the ability to auto-accept invites on behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details. ([\#17147](https://github.com/element-hq/synapse/issues/17147))
+- Add experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for to-device messages and device encryption info. ([\#17167](https://github.com/element-hq/synapse/issues/17167))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/issues/3916) by adding unstable media endpoints to `/_matrix/client` (#17213). ([\#17213](https://github.com/element-hq/synapse/issues/17213))
+- Add logging to tasks managed by the task scheduler, showing CPU and database usage. ([\#17219](https://github.com/element-hq/synapse/issues/17219))
+
+### Bugfixes
+
+- Fix deduplicating of membership events to not create unused state groups. ([\#17164](https://github.com/element-hq/synapse/issues/17164))
+- Fix bug where duplicate events could be sent down sync when using workers that are overloaded. ([\#17215](https://github.com/element-hq/synapse/issues/17215))
+- Ignore attempts to send to-device messages to bad users, to avoid log spam when we try to connect to the bad server. ([\#17240](https://github.com/element-hq/synapse/issues/17240))
+- Fix handling of duplicate concurrent uploading of device one-time-keys. ([\#17241](https://github.com/element-hq/synapse/issues/17241))
+- Fix reporting of default tags to Sentry, such as worker name. Broke in v1.108.0. ([\#17251](https://github.com/element-hq/synapse/issues/17251))
+- Fix bug where typing updates would not be sent when using workers after a restart. ([\#17252](https://github.com/element-hq/synapse/issues/17252))
+
+### Improved Documentation
+
+- Update the LemonLDAP documentation to say that claims should be explicitly included in the returned `id_token`, as Synapse won't request them. ([\#17204](https://github.com/element-hq/synapse/issues/17204))
+
+### Internal Changes
+
+- Improve DB usage when fetching related events. ([\#17083](https://github.com/element-hq/synapse/issues/17083))
+- Log exceptions when failing to auto-join new user according to the `auto_join_rooms` option. ([\#17176](https://github.com/element-hq/synapse/issues/17176))
+- Reduce work of calculating outbound device lists updates. ([\#17211](https://github.com/element-hq/synapse/issues/17211))
+- Improve performance of calculating device lists changes in `/sync`. ([\#17216](https://github.com/element-hq/synapse/issues/17216))
+- Move towards using `MultiWriterIdGenerator` everywhere. ([\#17226](https://github.com/element-hq/synapse/issues/17226))
+- Replaces all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`. ([\#17229](https://github.com/element-hq/synapse/issues/17229))
+- Change the `allow_unsafe_locale` config option to also apply when setting up new databases. ([\#17238](https://github.com/element-hq/synapse/issues/17238))
+- Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module. ([\#17239](https://github.com/element-hq/synapse/issues/17239), [\#17246](https://github.com/element-hq/synapse/issues/17246))
+- Clean out invalid destinations from `device_federation_outbox` table. ([\#17242](https://github.com/element-hq/synapse/issues/17242))
+- Stop logging errors when receiving invalid User IDs in key query requests. ([\#17250](https://github.com/element-hq/synapse/issues/17250))
+
+
+
+### Updates to locked dependencies
+
+* Bump anyhow from 1.0.83 to 1.0.86. ([\#17220](https://github.com/element-hq/synapse/issues/17220))
+* Bump bcrypt from 4.1.2 to 4.1.3. ([\#17224](https://github.com/element-hq/synapse/issues/17224))
+* Bump lxml from 5.2.1 to 5.2.2. ([\#17261](https://github.com/element-hq/synapse/issues/17261))
+* Bump mypy-zope from 1.0.3 to 1.0.4. ([\#17262](https://github.com/element-hq/synapse/issues/17262))
+* Bump phonenumbers from 8.13.35 to 8.13.37. ([\#17235](https://github.com/element-hq/synapse/issues/17235))
+* Bump prometheus-client from 0.19.0 to 0.20.0. ([\#17233](https://github.com/element-hq/synapse/issues/17233))
+* Bump pyasn1 from 0.5.1 to 0.6.0. ([\#17223](https://github.com/element-hq/synapse/issues/17223))
+* Bump pyicu from 2.13 to 2.13.1. ([\#17236](https://github.com/element-hq/synapse/issues/17236))
+* Bump pyopenssl from 24.0.0 to 24.1.0. ([\#17234](https://github.com/element-hq/synapse/issues/17234))
+* Bump serde from 1.0.201 to 1.0.202. ([\#17221](https://github.com/element-hq/synapse/issues/17221))
+* Bump serde from 1.0.202 to 1.0.203. ([\#17232](https://github.com/element-hq/synapse/issues/17232))
+* Bump twine from 5.0.0 to 5.1.0. ([\#17225](https://github.com/element-hq/synapse/issues/17225))
+* Bump types-psycopg2 from 2.9.21.20240311 to 2.9.21.20240417. ([\#17222](https://github.com/element-hq/synapse/issues/17222))
+* Bump types-pyopenssl from 24.0.0.20240311 to 24.1.0.20240425. ([\#17260](https://github.com/element-hq/synapse/issues/17260))
+
# Synapse 1.108.0 (2024-05-28)
No significant changes since 1.108.0rc1.
diff --git a/changelog.d/17083.misc b/changelog.d/17083.misc
deleted file mode 100644
index 7c7cebea4e..0000000000
--- a/changelog.d/17083.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve DB usage when fetching related events.
diff --git a/changelog.d/17147.feature b/changelog.d/17147.feature
deleted file mode 100644
index 7c2cdb6bdf..0000000000
--- a/changelog.d/17147.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add the ability to auto-accept invites on the behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details.
diff --git a/changelog.d/17164.bugfix b/changelog.d/17164.bugfix
deleted file mode 100644
index 597e2f14b0..0000000000
--- a/changelog.d/17164.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix deduplicating of membership events to not create unused state groups.
diff --git a/changelog.d/17167.feature b/changelog.d/17167.feature
deleted file mode 100644
index 5ad31db974..0000000000
--- a/changelog.d/17167.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for To-Device messages and device encryption info.
diff --git a/changelog.d/17176.misc b/changelog.d/17176.misc
deleted file mode 100644
index cc9f2a5202..0000000000
--- a/changelog.d/17176.misc
+++ /dev/null
@@ -1 +0,0 @@
-Log exceptions when failing to auto-join new user according to the `auto_join_rooms` option.
\ No newline at end of file
diff --git a/changelog.d/17204.doc b/changelog.d/17204.doc
deleted file mode 100644
index 5a5a8f5107..0000000000
--- a/changelog.d/17204.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update OIDC documentation: by default Matrix doesn't query userinfo endpoint, then claims should be put on id_token.
diff --git a/changelog.d/17211.misc b/changelog.d/17211.misc
deleted file mode 100644
index 144db03a40..0000000000
--- a/changelog.d/17211.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce work of calculating outbound device lists updates.
diff --git a/changelog.d/17213.feature b/changelog.d/17213.feature
deleted file mode 100644
index ca60afa8f3..0000000000
--- a/changelog.d/17213.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support MSC3916 by adding unstable media endpoints to `_matrix/client` (#17213).
\ No newline at end of file
diff --git a/changelog.d/17215.bugfix b/changelog.d/17215.bugfix
deleted file mode 100644
index 10981b798e..0000000000
--- a/changelog.d/17215.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where duplicate events could be sent down sync when using workers that are overloaded.
diff --git a/changelog.d/17216.misc b/changelog.d/17216.misc
deleted file mode 100644
index bd55eeaa33..0000000000
--- a/changelog.d/17216.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of calculating device lists changes in `/sync`.
diff --git a/changelog.d/17219.feature b/changelog.d/17219.feature
deleted file mode 100644
index f8277a89d8..0000000000
--- a/changelog.d/17219.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add logging to tasks managed by the task scheduler, showing CPU and database usage.
\ No newline at end of file
diff --git a/changelog.d/17226.misc b/changelog.d/17226.misc
deleted file mode 100644
index 7c023a5759..0000000000
--- a/changelog.d/17226.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move towards using `MultiWriterIdGenerator` everywhere.
diff --git a/changelog.d/17229.misc b/changelog.d/17229.misc
deleted file mode 100644
index d411550786..0000000000
--- a/changelog.d/17229.misc
+++ /dev/null
@@ -1 +0,0 @@
-Replaces all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`.
diff --git a/changelog.d/17238.misc b/changelog.d/17238.misc
deleted file mode 100644
index 261467e55c..0000000000
--- a/changelog.d/17238.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change the `allow_unsafe_locale` config option to also apply when setting up new databases.
diff --git a/changelog.d/17239.misc b/changelog.d/17239.misc
deleted file mode 100644
index 9fca36bb29..0000000000
--- a/changelog.d/17239.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module.
diff --git a/changelog.d/17240.bugfix b/changelog.d/17240.bugfix
deleted file mode 100644
index c596d270ce..0000000000
--- a/changelog.d/17240.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Ignore attempts to send to-device messages to bad users, to avoid log spam when we try to connect to the bad server.
diff --git a/changelog.d/17241.bugfix b/changelog.d/17241.bugfix
deleted file mode 100644
index 1b7f0bca94..0000000000
--- a/changelog.d/17241.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix handling of duplicate concurrent uploading of device one-time-keys.
diff --git a/changelog.d/17242.misc b/changelog.d/17242.misc
deleted file mode 100644
index 5bd627da57..0000000000
--- a/changelog.d/17242.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean out invalid destinations from `device_federation_outbox` table.
diff --git a/changelog.d/17246.misc b/changelog.d/17246.misc
deleted file mode 100644
index 9fca36bb29..0000000000
--- a/changelog.d/17246.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module.
diff --git a/changelog.d/17250.misc b/changelog.d/17250.misc
deleted file mode 100644
index 49834e83ba..0000000000
--- a/changelog.d/17250.misc
+++ /dev/null
@@ -1 +0,0 @@
-Stop logging errors when receiving invalid User IDs in key querys requests.
diff --git a/changelog.d/17251.bugfix b/changelog.d/17251.bugfix
deleted file mode 100644
index f573e01e87..0000000000
--- a/changelog.d/17251.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix reporting of default tags to Sentry, such as worker name. Broke in v1.108.0.
diff --git a/changelog.d/17252.bugfix b/changelog.d/17252.bugfix
deleted file mode 100644
index f3289d1568..0000000000
--- a/changelog.d/17252.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where typing updates would not be sent when using workers after a restart.
diff --git a/debian/changelog b/debian/changelog
index 8491b587e8..927248bdab 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.109.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.109.0rc1.
+
+ -- Synapse Packaging team Tue, 04 Jun 2024 09:42:46 +0100
+
matrix-synapse-py3 (1.108.0) stable; urgency=medium
* New Synapse release 1.108.0.
diff --git a/pyproject.toml b/pyproject.toml
index 9a3348be49..8cc99b8cba 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.108.0"
+version = "1.109.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From e2f8476044ef1e694e8e25c2ccb60711e65d518a Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Tue, 4 Jun 2024 09:47:28 +0100
Subject: [PATCH 143/503] Fix typo in CHANGES.md
---
CHANGES.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 0c61183d6b..092dbdbf2d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -4,7 +4,7 @@
- Add the ability to auto-accept invites on the behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details. ([\#17147](https://github.com/element-hq/synapse/issues/17147))
- Add experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for to-device messages and device encryption info. ([\#17167](https://github.com/element-hq/synapse/issues/17167))
-- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/issues/3916) by adding unstable media endpoints to `/_matrix/client` (#17213). ([\#17213](https://github.com/element-hq/synapse/issues/17213))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/issues/3916) by adding unstable media endpoints to `/_matrix/client`. ([\#17213](https://github.com/element-hq/synapse/issues/17213))
- Add logging to tasks managed by the task scheduler, showing CPU and database usage. ([\#17219](https://github.com/element-hq/synapse/issues/17219))
### Bugfixes
From 81cef38d4b47776d776577560421795afce85ca1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 4 Jun 2024 11:58:27 +0100
Subject: [PATCH 144/503] Bump sentry-sdk from 2.1.1 to 2.3.1 (#17263)
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 80924ccbb1..942e26701d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2387,13 +2387,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "2.1.1"
+version = "2.3.1"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
files = [
- {file = "sentry_sdk-2.1.1-py2.py3-none-any.whl", hash = "sha256:99aeb78fb76771513bd3b2829d12613130152620768d00cd3e45ac00cb17950f"},
- {file = "sentry_sdk-2.1.1.tar.gz", hash = "sha256:95d8c0bb41c8b0bc37ab202c2c4a295bb84398ee05f4cdce55051cd75b926ec1"},
+ {file = "sentry_sdk-2.3.1-py2.py3-none-any.whl", hash = "sha256:c5aeb095ba226391d337dd42a6f9470d86c9fc236ecc71cfc7cd1942b45010c6"},
+ {file = "sentry_sdk-2.3.1.tar.gz", hash = "sha256:139a71a19f5e9eb5d3623942491ce03cf8ebc14ea2e39ba3e6fe79560d8a5b1f"},
]
[package.dependencies]
@@ -2415,7 +2415,7 @@ django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
-grpcio = ["grpcio (>=1.21.1)"]
+grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"]
httpx = ["httpx (>=0.16.0)"]
huey = ["huey (>=2)"]
huggingface-hub = ["huggingface-hub (>=0.22)"]
From 7d8f0ef351e99adf602b3acb67b2516a02ff6918 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 4 Jun 2024 12:58:03 -0500
Subject: [PATCH 145/503] Use fully-qualified `PersistedEventPosition` when
returning `RoomsForUser` (#17265)
Use fully-qualified `PersistedEventPosition` (`instance_name` and `stream_ordering`) when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation.
Spawning from https://github.com/element-hq/synapse/pull/17187 where we want to utilize this change
---
changelog.d/17265.misc | 1 +
synapse/federation/federation_server.py | 4 +-
synapse/handlers/admin.py | 10 +---
synapse/handlers/initial_sync.py | 2 +-
synapse/handlers/pagination.py | 3 +-
synapse/handlers/room.py | 60 +-------------------
synapse/handlers/sync.py | 2 +-
synapse/storage/databases/main/roommember.py | 14 ++++-
synapse/storage/roommember.py | 2 +-
synapse/types/__init__.py | 57 +++++++++++++++++++
tests/replication/storage/test_events.py | 5 +-
11 files changed, 85 insertions(+), 75 deletions(-)
create mode 100644 changelog.d/17265.misc
diff --git a/changelog.d/17265.misc b/changelog.d/17265.misc
new file mode 100644
index 0000000000..e6d4d8b4ee
--- /dev/null
+++ b/changelog.d/17265.misc
@@ -0,0 +1 @@
+Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 7ffc650aa1..1932fa82a4 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -674,7 +674,7 @@ class FederationServer(FederationBase):
# This is in addition to the HS-level rate limiting applied by
# BaseFederationServlet.
# type-ignore: mypy doesn't seem able to deduce the type of the limiter(!?)
- await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type]
+ await self._room_member_handler._join_rate_per_room_limiter.ratelimit(
requester=None,
key=room_id,
update=False,
@@ -717,7 +717,7 @@ class FederationServer(FederationBase):
SynapseTags.SEND_JOIN_RESPONSE_IS_PARTIAL_STATE,
caller_supports_partial_state,
)
- await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type]
+ await self._room_member_handler._join_rate_per_room_limiter.ratelimit(
requester=None,
key=room_id,
update=False,
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 702d40332c..21d3bb37f3 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -126,13 +126,7 @@ class AdminHandler:
# Get all rooms the user is in or has been in
rooms = await self._store.get_rooms_for_local_user_where_membership_is(
user_id,
- membership_list=(
- Membership.JOIN,
- Membership.LEAVE,
- Membership.BAN,
- Membership.INVITE,
- Membership.KNOCK,
- ),
+ membership_list=Membership.LIST,
)
# We only try and fetch events for rooms the user has been in. If
@@ -179,7 +173,7 @@ class AdminHandler:
if room.membership == Membership.JOIN:
stream_ordering = self._store.get_room_max_stream_ordering()
else:
- stream_ordering = room.stream_ordering
+ stream_ordering = room.event_pos.stream
from_key = RoomStreamToken(topological=0, stream=0)
to_key = RoomStreamToken(stream=stream_ordering)
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index d99fc4bec0..84d6fecf31 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -199,7 +199,7 @@ class InitialSyncHandler:
)
elif event.membership == Membership.LEAVE:
room_end_token = RoomStreamToken(
- stream=event.stream_ordering,
+ stream=event.event_pos.stream,
)
deferred_room_state = run_in_background(
self._state_storage_controller.get_state_for_events,
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 6617105cdb..f7447b8ba5 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -27,7 +27,6 @@ from synapse.api.constants import Direction, EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.events.utils import SerializeEventConfig
-from synapse.handlers.room import ShutdownRoomParams, ShutdownRoomResponse
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -38,6 +37,8 @@ from synapse.types import (
JsonMapping,
Requester,
ScheduledTask,
+ ShutdownRoomParams,
+ ShutdownRoomResponse,
StreamKeyType,
TaskStatus,
)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 51739a2653..7f1b674d10 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -40,7 +40,6 @@ from typing import (
)
import attr
-from typing_extensions import TypedDict
import synapse.events.snapshot
from synapse.api.constants import (
@@ -81,6 +80,8 @@ from synapse.types import (
RoomAlias,
RoomID,
RoomStreamToken,
+ ShutdownRoomParams,
+ ShutdownRoomResponse,
StateMap,
StrCollection,
StreamKeyType,
@@ -1780,63 +1781,6 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
return self.store.get_current_room_stream_token_for_room_id(room_id)
-class ShutdownRoomParams(TypedDict):
- """
- Attributes:
- requester_user_id:
- User who requested the action. Will be recorded as putting the room on the
- blocking list.
- new_room_user_id:
- If set, a new room will be created with this user ID
- as the creator and admin, and all users in the old room will be
- moved into that room. If not set, no new room will be created
- and the users will just be removed from the old room.
- new_room_name:
- A string representing the name of the room that new users will
- be invited to. Defaults to `Content Violation Notification`
- message:
- A string containing the first message that will be sent as
- `new_room_user_id` in the new room. Ideally this will clearly
- convey why the original room was shut down.
- Defaults to `Sharing illegal content on this server is not
- permitted and rooms in violation will be blocked.`
- block:
- If set to `true`, this room will be added to a blocking list,
- preventing future attempts to join the room. Defaults to `false`.
- purge:
- If set to `true`, purge the given room from the database.
- force_purge:
- If set to `true`, the room will be purged from database
- even if there are still users joined to the room.
- """
-
- requester_user_id: Optional[str]
- new_room_user_id: Optional[str]
- new_room_name: Optional[str]
- message: Optional[str]
- block: bool
- purge: bool
- force_purge: bool
-
-
-class ShutdownRoomResponse(TypedDict):
- """
- Attributes:
- kicked_users: An array of users (`user_id`) that were kicked.
- failed_to_kick_users:
- An array of users (`user_id`) that that were not kicked.
- local_aliases:
- An array of strings representing the local aliases that were
- migrated from the old room to the new.
- new_room_id: A string representing the room ID of the new room.
- """
-
- kicked_users: List[str]
- failed_to_kick_users: List[str]
- local_aliases: List[str]
- new_room_id: Optional[str]
-
-
class RoomShutdownHandler:
DEFAULT_MESSAGE = (
"Sharing illegal content on this server is not permitted and rooms in"
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 1d7d9dfdd0..e815e0ea7f 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -2805,7 +2805,7 @@ class SyncHandler:
continue
leave_token = now_token.copy_and_replace(
- StreamKeyType.ROOM, RoomStreamToken(stream=event.stream_ordering)
+ StreamKeyType.ROOM, RoomStreamToken(stream=event.event_pos.stream)
)
room_entries.append(
RoomSyncResultBuilder(
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 9fddbb2caf..d8b54dc4e3 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -476,7 +476,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
)
sql = """
- SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering, r.room_version
+ SELECT room_id, e.sender, c.membership, event_id, e.instance_name, e.stream_ordering, r.room_version
FROM local_current_membership AS c
INNER JOIN events AS e USING (room_id, event_id)
INNER JOIN rooms AS r USING (room_id)
@@ -488,7 +488,17 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
)
txn.execute(sql, (user_id, *args))
- results = [RoomsForUser(*r) for r in txn]
+ results = [
+ RoomsForUser(
+ room_id=room_id,
+ sender=sender,
+ membership=membership,
+ event_id=event_id,
+ event_pos=PersistedEventPosition(instance_name, stream_ordering),
+ room_version_id=room_version,
+ )
+ for room_id, sender, membership, event_id, instance_name, stream_ordering, room_version in txn
+ ]
return results
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 7471f81a19..80c9630867 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -35,7 +35,7 @@ class RoomsForUser:
sender: str
membership: str
event_id: str
- stream_ordering: int
+ event_pos: PersistedEventPosition
room_version_id: str
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index 151658df53..3a89787cab 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -1279,3 +1279,60 @@ class ScheduledTask:
result: Optional[JsonMapping]
# Optional error that should be assigned a value when the status is FAILED
error: Optional[str]
+
+
+class ShutdownRoomParams(TypedDict):
+ """
+ Attributes:
+ requester_user_id:
+ User who requested the action. Will be recorded as putting the room on the
+ blocking list.
+ new_room_user_id:
+ If set, a new room will be created with this user ID
+ as the creator and admin, and all users in the old room will be
+ moved into that room. If not set, no new room will be created
+ and the users will just be removed from the old room.
+ new_room_name:
+ A string representing the name of the room that new users will
+ be invited to. Defaults to `Content Violation Notification`
+ message:
+ A string containing the first message that will be sent as
+ `new_room_user_id` in the new room. Ideally this will clearly
+ convey why the original room was shut down.
+ Defaults to `Sharing illegal content on this server is not
+ permitted and rooms in violation will be blocked.`
+ block:
+ If set to `true`, this room will be added to a blocking list,
+ preventing future attempts to join the room. Defaults to `false`.
+ purge:
+ If set to `true`, purge the given room from the database.
+ force_purge:
+ If set to `true`, the room will be purged from database
+ even if there are still users joined to the room.
+ """
+
+ requester_user_id: Optional[str]
+ new_room_user_id: Optional[str]
+ new_room_name: Optional[str]
+ message: Optional[str]
+ block: bool
+ purge: bool
+ force_purge: bool
+
+
+class ShutdownRoomResponse(TypedDict):
+ """
+ Attributes:
+ kicked_users: An array of users (`user_id`) that were kicked.
+ failed_to_kick_users:
+ An array of users (`user_id`) that were not kicked.
+ local_aliases:
+ An array of strings representing the local aliases that were
+ migrated from the old room to the new.
+ new_room_id: A string representing the room ID of the new room.
+ """
+
+ kicked_users: List[str]
+ failed_to_kick_users: List[str]
+ local_aliases: List[str]
+ new_room_id: Optional[str]
diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py
index 86c8f14d1b..4e41a1c912 100644
--- a/tests/replication/storage/test_events.py
+++ b/tests/replication/storage/test_events.py
@@ -154,7 +154,10 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
USER_ID,
"invite",
event.event_id,
- event.internal_metadata.stream_ordering,
+ PersistedEventPosition(
+ self.hs.get_instance_name(),
+ event.internal_metadata.stream_ordering,
+ ),
RoomVersions.V1.identifier,
)
],
From aabf577166546d98353ab9bdb6f0648193a94b85 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 5 Jun 2024 10:40:34 +0100
Subject: [PATCH 146/503] Handle hyphens in user dir search properly (#17254)
c.f. #16675
---
changelog.d/17254.bugfix | 1 +
.../storage/databases/main/user_directory.py | 66 +++++++++++++++++--
tests/handlers/test_user_directory.py | 39 +++++++++++
tests/storage/test_user_directory.py | 4 ++
4 files changed, 104 insertions(+), 6 deletions(-)
create mode 100644 changelog.d/17254.bugfix
diff --git a/changelog.d/17254.bugfix b/changelog.d/17254.bugfix
new file mode 100644
index 0000000000..b0d61309e2
--- /dev/null
+++ b/changelog.d/17254.bugfix
@@ -0,0 +1 @@
+Fix searching for users with their exact localpart whose ID includes a hyphen.
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 0513e7dc06..6e18f714d7 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -1281,7 +1281,7 @@ def _parse_words_with_regex(search_term: str) -> List[str]:
Break down search term into words, when we don't have ICU available.
See: `_parse_words`
"""
- return re.findall(r"([\w\-]+)", search_term, re.UNICODE)
+ return re.findall(r"([\w-]+)", search_term, re.UNICODE)
def _parse_words_with_icu(search_term: str) -> List[str]:
@@ -1303,15 +1303,69 @@ def _parse_words_with_icu(search_term: str) -> List[str]:
if j < 0:
break
- result = search_term[i:j]
+ # We want to make sure that we split on `@` and `:` specifically, as
+ # they occur in user IDs.
+ for result in re.split(r"[@:]+", search_term[i:j]):
+ results.append(result.strip())
+
+ i = j
+
+ # libicu will break up words that have punctuation in them, but to handle
+ # cases where user IDs have '-', '.' and '_' in them we want to *not* break
+ # those into words and instead allow the DB to tokenise them how it wants.
+ #
+ # In particular, user-71 in postgres gets tokenised to "user, -71", and this
+ # will not match a query for "user, 71".
+ new_results: List[str] = []
+ i = 0
+ while i < len(results):
+ curr = results[i]
+
+ prev = None
+ next = None
+ if i > 0:
+ prev = results[i - 1]
+ if i + 1 < len(results):
+ next = results[i + 1]
+
+ i += 1
# libicu considers spaces and punctuation between words as words, but we don't
# want to include those in results as they would result in syntax errors in SQL
# queries (e.g. "foo bar" would result in the search query including "foo & &
# bar").
- if len(re.findall(r"([\w\-]+)", result, re.UNICODE)):
- results.append(result)
+ if not curr:
+ continue
- i = j
+ if curr in ["-", ".", "_"]:
+ prefix = ""
+ suffix = ""
- return results
+ # Check if the next item is a word, and if so use it as the suffix.
+ # We check for if its a word as we don't want to concatenate
+ # multiple punctuation marks.
+ if next is not None and re.match(r"\w", next):
+ suffix = next
+ i += 1 # We're using next, so we skip it in the outer loop.
+ else:
+ # We want to avoid creating terms like "user-", as we should
+ # strip trailing punctuation.
+ continue
+
+ if prev and re.match(r"\w", prev) and new_results:
+ prefix = new_results[-1]
+ new_results.pop()
+
+ # We might not have a prefix here, but that's fine as we want to
+ # ensure that we don't strip preceding punctuation e.g. '-71'
+ # shouldn't be converted to '71'.
+
+ new_results.append(f"{prefix}{curr}{suffix}")
+ continue
+ elif not re.match(r"\w", curr):
+ # Ignore other punctuation
+ continue
+
+ new_results.append(curr)
+
+ return new_results
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 77c6cac449..878d9683b6 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -1061,6 +1061,45 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
{alice: ProfileInfo(display_name=None, avatar_url=MXC_DUMMY)},
)
+ def test_search_punctuation(self) -> None:
+ """Test that you can search for a user that includes punctuation"""
+
+ searching_user = self.register_user("searcher", "password")
+ searching_user_tok = self.login("searcher", "password")
+
+ room_id = self.helper.create_room_as(
+ searching_user,
+ room_version=RoomVersions.V1.identifier,
+ tok=searching_user_tok,
+ )
+
+ # We want to test searching for users of the form e.g. "user-1", with
+ # various punctuation. We also test both where the prefix is numeric and
+ # alphanumeric, as e.g. postgres tokenises "user-1" as "user" and "-1".
+ i = 1
+ for char in ["-", ".", "_"]:
+ for use_numeric in [False, True]:
+ if use_numeric:
+ prefix1 = f"{i}"
+ prefix2 = f"{i+1}"
+ else:
+ prefix1 = f"a{i}"
+ prefix2 = f"a{i+1}"
+
+ local_user_1 = self.register_user(f"user{char}{prefix1}", "password")
+ local_user_2 = self.register_user(f"user{char}{prefix2}", "password")
+
+ self._add_user_to_room(room_id, RoomVersions.V1, local_user_1)
+ self._add_user_to_room(room_id, RoomVersions.V1, local_user_2)
+
+ results = self.get_success(
+ self.handler.search_users(searching_user, local_user_1, 20)
+ )["results"]
+ received_user_id_ordering = [result["user_id"] for result in results]
+ self.assertSequenceEqual(received_user_id_ordering[:1], [local_user_1])
+
+ i += 2
+
class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
servlets = [
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 156a610faa..c26932069f 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -711,6 +711,10 @@ class UserDirectoryICUTestCase(HomeserverTestCase):
),
)
+ self.assertEqual(_parse_words_with_icu("user-1"), ["user-1"])
+ self.assertEqual(_parse_words_with_icu("user-ab"), ["user-ab"])
+ self.assertEqual(_parse_words_with_icu("user.--1"), ["user", "-1"])
+
def test_regex_word_boundary_punctuation(self) -> None:
"""
Tests the behaviour of punctuation with the non-ICU tokeniser
From fcbc79bb87d08147e86dafa0fee5a9aec4d3fc23 Mon Sep 17 00:00:00 2001
From: Shay
Date: Wed, 5 Jun 2024 05:43:36 -0700
Subject: [PATCH 147/503] Ratelimiting of remote media downloads (#17256)
---
changelog.d/17256.feature | 1 +
.../configuration/config_documentation.md | 18 ++
synapse/config/ratelimiting.py | 10 +
synapse/federation/federation_client.py | 7 +
synapse/federation/transport/client.py | 9 +
synapse/http/matrixfederationclient.py | 55 ++++-
synapse/media/media_repository.py | 43 +++-
synapse/media/thumbnailer.py | 6 +-
synapse/rest/client/media.py | 2 +
synapse/rest/media/download_resource.py | 8 +-
synapse/rest/media/thumbnail_resource.py | 2 +
tests/media/test_media_storage.py | 225 +++++++++++++++++-
12 files changed, 372 insertions(+), 14 deletions(-)
create mode 100644 changelog.d/17256.feature
diff --git a/changelog.d/17256.feature b/changelog.d/17256.feature
new file mode 100644
index 0000000000..6ec4cb7a31
--- /dev/null
+++ b/changelog.d/17256.feature
@@ -0,0 +1 @@
+ Improve ratelimiting in Synapse (#17256).
\ No newline at end of file
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 2c917d1f8e..d23f8c4c4f 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -1946,6 +1946,24 @@ Example configuration:
max_image_pixels: 35M
```
---
+### `remote_media_download_burst_count`
+
+Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads are penalized - if the bucket is full, ie a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. Defaults to 500MiB. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests.
+
+Example configuration:
+```yaml
+remote_media_download_burst_count: 200M
+```
+---
+### `remote_media_download_per_second`
+
+Works in conjunction with `remote_media_download_burst_count` to ratelimit remote media downloads - this configuration option determines the rate at which the "bucket" (see above) leaks in bytes per second. As requests are made to download remote media, the size of those requests in bytes is added to the bucket, and once the bucket has reached its capacity, no more requests will be allowed until a number of bytes has "drained" from the bucket. This setting determines the rate at which bytes drain from the bucket, with the practical effect that the larger the number, the faster the bucket leaks, allowing for more bytes downloaded over a shorter period of time. Defaults to 87KiB per second. See also `remote_media_download_burst_count`.
+
+Example configuration:
+```yaml
+remote_media_download_per_second: 40K
+```
+---
### `prevent_media_downloads_from`
A list of domains to never download media from. Media from these
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index d2cb4576df..3fa33f5373 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -218,3 +218,13 @@ class RatelimitConfig(Config):
"rc_media_create",
defaults={"per_second": 10, "burst_count": 50},
)
+
+ self.remote_media_downloads = RatelimitSettings(
+ key="rc_remote_media_downloads",
+ per_second=self.parse_size(
+ config.get("remote_media_download_per_second", "87K")
+ ),
+ burst_count=self.parse_size(
+ config.get("remote_media_download_burst_count", "500M")
+ ),
+ )
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index e613eb87a6..f0f5a37a57 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -56,6 +56,7 @@ from synapse.api.errors import (
SynapseError,
UnsupportedRoomVersionError,
)
+from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
@@ -1877,6 +1878,8 @@ class FederationClient(FederationBase):
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
+ download_ratelimiter: Ratelimiter,
+ ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
try:
return await self.transport_layer.download_media_v3(
@@ -1885,6 +1888,8 @@ class FederationClient(FederationBase):
output_stream=output_stream,
max_size=max_size,
max_timeout_ms=max_timeout_ms,
+ download_ratelimiter=download_ratelimiter,
+ ip_address=ip_address,
)
except HttpResponseException as e:
# If an error is received that is due to an unrecognised endpoint,
@@ -1905,6 +1910,8 @@ class FederationClient(FederationBase):
output_stream=output_stream,
max_size=max_size,
max_timeout_ms=max_timeout_ms,
+ download_ratelimiter=download_ratelimiter,
+ ip_address=ip_address,
)
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index de408f7f8d..af1336fe5f 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -43,6 +43,7 @@ import ijson
from synapse.api.constants import Direction, Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
+from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import RoomVersion
from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX,
@@ -819,6 +820,8 @@ class TransportLayerClient:
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
+ download_ratelimiter: Ratelimiter,
+ ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
path = f"/_matrix/media/r0/download/{destination}/{media_id}"
@@ -834,6 +837,8 @@ class TransportLayerClient:
"allow_remote": "false",
"timeout_ms": str(max_timeout_ms),
},
+ download_ratelimiter=download_ratelimiter,
+ ip_address=ip_address,
)
async def download_media_v3(
@@ -843,6 +848,8 @@ class TransportLayerClient:
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
+ download_ratelimiter: Ratelimiter,
+ ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
path = f"/_matrix/media/v3/download/{destination}/{media_id}"
@@ -862,6 +869,8 @@ class TransportLayerClient:
"allow_redirect": "true",
},
follow_redirects=True,
+ download_ratelimiter=download_ratelimiter,
+ ip_address=ip_address,
)
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index c73a589e6c..104b803b0f 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -57,7 +57,7 @@ from twisted.internet.interfaces import IReactorTime
from twisted.internet.task import Cooperator
from twisted.web.client import ResponseFailed
from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent, IBodyProducer, IResponse
+from twisted.web.iweb import UNKNOWN_LENGTH, IAgent, IBodyProducer, IResponse
import synapse.metrics
import synapse.util.retryutils
@@ -68,6 +68,7 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
+from synapse.api.ratelimiting import Ratelimiter
from synapse.crypto.context_factory import FederationPolicyForHTTPS
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import (
@@ -1411,9 +1412,11 @@ class MatrixFederationHttpClient:
destination: str,
path: str,
output_stream: BinaryIO,
+ download_ratelimiter: Ratelimiter,
+ ip_address: str,
+ max_size: int,
args: Optional[QueryParams] = None,
retry_on_dns_fail: bool = True,
- max_size: Optional[int] = None,
ignore_backoff: bool = False,
follow_redirects: bool = False,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
@@ -1422,6 +1425,10 @@ class MatrixFederationHttpClient:
destination: The remote server to send the HTTP request to.
path: The HTTP path to GET.
output_stream: File to write the response body to.
+ download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
+ requester IP
+ ip_address: IP address of the requester
+ max_size: maximum allowable size in bytes of the file
args: Optional dictionary used to create the query string.
ignore_backoff: true to ignore the historical backoff data
and try the request anyway.
@@ -1441,11 +1448,27 @@ class MatrixFederationHttpClient:
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
+ SynapseError: If the requested file exceeds ratelimits
"""
request = MatrixFederationRequest(
method="GET", destination=destination, path=path, query=args
)
+ # check for a minimum balance of 1MiB in ratelimiter before initiating request
+ send_req, _ = await download_ratelimiter.can_do_action(
+ requester=None, key=ip_address, n_actions=1048576, update=False
+ )
+
+ if not send_req:
+ msg = "Requested file size exceeds ratelimits"
+ logger.warning(
+ "{%s} [%s] %s",
+ request.txn_id,
+ request.destination,
+ msg,
+ )
+ raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+
response = await self._send_request(
request,
retry_on_dns_fail=retry_on_dns_fail,
@@ -1455,12 +1478,36 @@ class MatrixFederationHttpClient:
headers = dict(response.headers.getAllRawHeaders())
+ expected_size = response.length
+ # if we don't get an expected length then use the max length
+ if expected_size == UNKNOWN_LENGTH:
+ expected_size = max_size
+ logger.debug(
+ f"File size unknown, assuming file is max allowable size: {max_size}"
+ )
+
+ read_body, _ = await download_ratelimiter.can_do_action(
+ requester=None,
+ key=ip_address,
+ n_actions=expected_size,
+ )
+ if not read_body:
+ msg = "Requested file size exceeds ratelimits"
+ logger.warning(
+ "{%s} [%s] %s",
+ request.txn_id,
+ request.destination,
+ msg,
+ )
+ raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+
try:
- d = read_body_with_max_size(response, output_stream, max_size)
+ # add a byte of headroom to max size as function errs at >=
+ d = read_body_with_max_size(response, output_stream, expected_size + 1)
d.addTimeout(self.default_timeout_seconds, self.reactor)
length = await make_deferred_yieldable(d)
except BodyExceededMaxSize:
- msg = "Requested file is too large > %r bytes" % (max_size,)
+ msg = "Requested file is too large > %r bytes" % (expected_size,)
logger.warning(
"{%s} [%s] %s",
request.txn_id,
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 9c29e09653..6ed56099ca 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -42,6 +42,7 @@ from synapse.api.errors import (
SynapseError,
cs_error,
)
+from synapse.api.ratelimiting import Ratelimiter
from synapse.config.repository import ThumbnailRequirement
from synapse.http.server import respond_with_json
from synapse.http.site import SynapseRequest
@@ -111,6 +112,12 @@ class MediaRepository:
)
self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from
+ self.download_ratelimiter = Ratelimiter(
+ store=hs.get_storage_controllers().main,
+ clock=hs.get_clock(),
+ cfg=hs.config.ratelimiting.remote_media_downloads,
+ )
+
# List of StorageProviders where we should search for media and
# potentially upload to.
storage_providers = []
@@ -464,6 +471,7 @@ class MediaRepository:
media_id: str,
name: Optional[str],
max_timeout_ms: int,
+ ip_address: str,
) -> None:
"""Respond to requests for remote media.
@@ -475,6 +483,7 @@ class MediaRepository:
the filename in the Content-Disposition header of the response.
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
+ ip_address: the IP address of the requester
Returns:
Resolves once a response has successfully been written to request
@@ -500,7 +509,11 @@ class MediaRepository:
key = (server_name, media_id)
async with self.remote_media_linearizer.queue(key):
responder, media_info = await self._get_remote_media_impl(
- server_name, media_id, max_timeout_ms
+ server_name,
+ media_id,
+ max_timeout_ms,
+ self.download_ratelimiter,
+ ip_address,
)
# We deliberately stream the file outside the lock
@@ -517,7 +530,7 @@ class MediaRepository:
respond_404(request)
async def get_remote_media_info(
- self, server_name: str, media_id: str, max_timeout_ms: int
+ self, server_name: str, media_id: str, max_timeout_ms: int, ip_address: str
) -> RemoteMedia:
"""Gets the media info associated with the remote file, downloading
if necessary.
@@ -527,6 +540,7 @@ class MediaRepository:
media_id: The media ID of the content (as defined by the remote server).
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
+ ip_address: IP address of the requester
Returns:
The media info of the file
@@ -542,7 +556,11 @@ class MediaRepository:
key = (server_name, media_id)
async with self.remote_media_linearizer.queue(key):
responder, media_info = await self._get_remote_media_impl(
- server_name, media_id, max_timeout_ms
+ server_name,
+ media_id,
+ max_timeout_ms,
+ self.download_ratelimiter,
+ ip_address,
)
# Ensure we actually use the responder so that it releases resources
@@ -553,7 +571,12 @@ class MediaRepository:
return media_info
async def _get_remote_media_impl(
- self, server_name: str, media_id: str, max_timeout_ms: int
+ self,
+ server_name: str,
+ media_id: str,
+ max_timeout_ms: int,
+ download_ratelimiter: Ratelimiter,
+ ip_address: str,
) -> Tuple[Optional[Responder], RemoteMedia]:
"""Looks for media in local cache, if not there then attempt to
download from remote server.
@@ -564,6 +587,9 @@ class MediaRepository:
remote server).
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
+ download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
+ requester IP.
+ ip_address: the IP address of the requester
Returns:
A tuple of responder and the media info of the file.
@@ -596,7 +622,7 @@ class MediaRepository:
try:
media_info = await self._download_remote_file(
- server_name, media_id, max_timeout_ms
+ server_name, media_id, max_timeout_ms, download_ratelimiter, ip_address
)
except SynapseError:
raise
@@ -630,6 +656,8 @@ class MediaRepository:
server_name: str,
media_id: str,
max_timeout_ms: int,
+ download_ratelimiter: Ratelimiter,
+ ip_address: str,
) -> RemoteMedia:
"""Attempt to download the remote file from the given server name,
using the given file_id as the local id.
@@ -641,6 +669,9 @@ class MediaRepository:
locally generated.
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
+ download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
+ requester IP
+ ip_address: the IP address of the requester
Returns:
The media info of the file.
@@ -658,6 +689,8 @@ class MediaRepository:
output_stream=f,
max_size=self.max_upload_size,
max_timeout_ms=max_timeout_ms,
+ download_ratelimiter=download_ratelimiter,
+ ip_address=ip_address,
)
except RequestSendFailed as e:
logger.warning(
diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py
index cc3acf51e1..f8a9560784 100644
--- a/synapse/media/thumbnailer.py
+++ b/synapse/media/thumbnailer.py
@@ -359,9 +359,10 @@ class ThumbnailProvider:
desired_method: str,
desired_type: str,
max_timeout_ms: int,
+ ip_address: str,
) -> None:
media_info = await self.media_repo.get_remote_media_info(
- server_name, media_id, max_timeout_ms
+ server_name, media_id, max_timeout_ms, ip_address
)
if not media_info:
respond_404(request)
@@ -422,12 +423,13 @@ class ThumbnailProvider:
method: str,
m_type: str,
max_timeout_ms: int,
+ ip_address: str,
) -> None:
# TODO: Don't download the whole remote file
# We should proxy the thumbnail from the remote server instead of
# downloading the remote file and generating our own thumbnails.
media_info = await self.media_repo.get_remote_media_info(
- server_name, media_id, max_timeout_ms
+ server_name, media_id, max_timeout_ms, ip_address
)
if not media_info:
return
diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py
index 172d240783..0c089163c1 100644
--- a/synapse/rest/client/media.py
+++ b/synapse/rest/client/media.py
@@ -174,6 +174,7 @@ class UnstableThumbnailResource(RestServlet):
respond_404(request)
return
+ ip_address = request.getClientAddress().host
remote_resp_function = (
self.thumbnailer.select_or_generate_remote_thumbnail
if self.dynamic_thumbnails
@@ -188,6 +189,7 @@ class UnstableThumbnailResource(RestServlet):
method,
m_type,
max_timeout_ms,
+ ip_address,
)
self.media_repo.mark_recently_accessed(server_name, media_id)
diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py
index 8ba723c8d4..1628d58926 100644
--- a/synapse/rest/media/download_resource.py
+++ b/synapse/rest/media/download_resource.py
@@ -97,6 +97,12 @@ class DownloadResource(RestServlet):
respond_404(request)
return
+ ip_address = request.getClientAddress().host
await self.media_repo.get_remote_media(
- request, server_name, media_id, file_name, max_timeout_ms
+ request,
+ server_name,
+ media_id,
+ file_name,
+ max_timeout_ms,
+ ip_address,
)
diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py
index fe8fbb06e4..ce511c6dce 100644
--- a/synapse/rest/media/thumbnail_resource.py
+++ b/synapse/rest/media/thumbnail_resource.py
@@ -104,6 +104,7 @@ class ThumbnailResource(RestServlet):
respond_404(request)
return
+ ip_address = request.getClientAddress().host
remote_resp_function = (
self.thumbnail_provider.select_or_generate_remote_thumbnail
if self.dynamic_thumbnails
@@ -118,5 +119,6 @@ class ThumbnailResource(RestServlet):
method,
m_type,
max_timeout_ms,
+ ip_address,
)
self.media_repo.mark_recently_accessed(server_name, media_id)
diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
index 1bd51ceba2..46d20ce775 100644
--- a/tests/media/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -25,7 +25,7 @@ import tempfile
from binascii import unhexlify
from io import BytesIO
from typing import Any, BinaryIO, ClassVar, Dict, List, Optional, Tuple, Union
-from unittest.mock import Mock
+from unittest.mock import MagicMock, Mock, patch
from urllib import parse
import attr
@@ -37,9 +37,12 @@ from twisted.internet import defer
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactor
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import UNKNOWN_LENGTH, IResponse
from twisted.web.resource import Resource
from synapse.api.errors import Codes, HttpResponseException
+from synapse.api.ratelimiting import Ratelimiter
from synapse.events import EventBase
from synapse.http.types import QueryParams
from synapse.logging.context import make_deferred_yieldable
@@ -59,6 +62,7 @@ from synapse.util import Clock
from tests import unittest
from tests.server import FakeChannel
from tests.test_utils import SMALL_PNG
+from tests.unittest import override_config
from tests.utils import default_config
@@ -251,9 +255,11 @@ class MediaRepoTests(unittest.HomeserverTestCase):
destination: str,
path: str,
output_stream: BinaryIO,
+ download_ratelimiter: Ratelimiter,
+ ip_address: Any,
+ max_size: int,
args: Optional[QueryParams] = None,
retry_on_dns_fail: bool = True,
- max_size: Optional[int] = None,
ignore_backoff: bool = False,
follow_redirects: bool = False,
) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]]]]":
@@ -878,3 +884,218 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase):
tok=self.tok,
expect_code=400,
)
+
+
+class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ config = self.default_config()
+
+ self.storage_path = self.mktemp()
+ self.media_store_path = self.mktemp()
+ os.mkdir(self.storage_path)
+ os.mkdir(self.media_store_path)
+ config["media_store_path"] = self.media_store_path
+
+ provider_config = {
+ "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+ "store_local": True,
+ "store_synchronous": False,
+ "store_remote": True,
+ "config": {"directory": self.storage_path},
+ }
+
+ config["media_storage_providers"] = [provider_config]
+
+ return self.setup_test_homeserver(config=config)
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.repo = hs.get_media_repository()
+ self.client = hs.get_federation_http_client()
+ self.store = hs.get_datastores().main
+
+ def create_resource_dict(self) -> Dict[str, Resource]:
+ # We need to manually set the resource tree to include media, the
+ # default only does `/_matrix/client` APIs.
+ return {"/_matrix/media": self.hs.get_media_repository_resource()}
+
+ # mock actually reading file body
+ def read_body_with_max_size_30MiB(*args: Any, **kwargs: Any) -> Deferred:
+ d: Deferred = defer.Deferred()
+ d.callback(31457280)
+ return d
+
+ def read_body_with_max_size_50MiB(*args: Any, **kwargs: Any) -> Deferred:
+ d: Deferred = defer.Deferred()
+ d.callback(52428800)
+ return d
+
+ @patch(
+ "synapse.http.matrixfederationclient.read_body_with_max_size",
+ read_body_with_max_size_30MiB,
+ )
+ def test_download_ratelimit_default(self) -> None:
+ """
+ Test remote media download ratelimiting against default configuration - 500MB bucket
+ and 87KiB/second drain rate
+ """
+
+ # mock out actually sending the request, returns a 30MiB response
+ async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+ resp = MagicMock(spec=IResponse)
+ resp.code = 200
+ resp.length = 31457280
+ resp.headers = Headers({"Content-Type": ["application/octet-stream"]})
+ resp.phrase = b"OK"
+ return resp
+
+ self.client._send_request = _send_request # type: ignore
+
+ # first request should go through
+ channel = self.make_request(
+ "GET",
+ "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyz",
+ shorthand=False,
+ )
+ assert channel.code == 200
+
+ # next 15 should go through
+ for i in range(15):
+ channel2 = self.make_request(
+ "GET",
+ f"/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy{i}",
+ shorthand=False,
+ )
+ assert channel2.code == 200
+
+ # 17th will hit ratelimit
+ channel3 = self.make_request(
+ "GET",
+ "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyx",
+ shorthand=False,
+ )
+ assert channel3.code == 429
+
+ # however, a request from a different IP will go through
+ channel4 = self.make_request(
+ "GET",
+ "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyz",
+ shorthand=False,
+ client_ip="187.233.230.159",
+ )
+ assert channel4.code == 200
+
+ # at 87KiB/s it should take about 2 minutes for enough to drain from bucket that another
+ # 30MiB download is authorized - The last download was blocked at 503,316,480.
+ # The next download will be authorized when bucket hits 492,830,720
+ # (524,288,000 total capacity - 31,457,280 download size) so 503,316,480 - 492,830,720 ~= 10,485,760
+ # needs to drain before another download will be authorized, that will take ~=
+ # 2 minutes (10,485,760/89,088/60)
+ self.reactor.pump([2.0 * 60.0])
+
+ # enough has drained and next request goes through
+ channel5 = self.make_request(
+ "GET",
+ "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyb",
+ shorthand=False,
+ )
+ assert channel5.code == 200
+
+ @override_config(
+ {
+ "remote_media_download_per_second": "50M",
+ "remote_media_download_burst_count": "50M",
+ }
+ )
+ @patch(
+ "synapse.http.matrixfederationclient.read_body_with_max_size",
+ read_body_with_max_size_50MiB,
+ )
+ def test_download_rate_limit_config(self) -> None:
+ """
+ Test that download rate limit config options are correctly picked up and applied
+ """
+
+ async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+ resp = MagicMock(spec=IResponse)
+ resp.code = 200
+ resp.length = 52428800
+ resp.headers = Headers({"Content-Type": ["application/octet-stream"]})
+ resp.phrase = b"OK"
+ return resp
+
+ self.client._send_request = _send_request # type: ignore
+
+ # first request should go through
+ channel = self.make_request(
+ "GET",
+ "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyz",
+ shorthand=False,
+ )
+ assert channel.code == 200
+
+ # immediate second request should fail
+ channel = self.make_request(
+ "GET",
+ "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy1",
+ shorthand=False,
+ )
+ assert channel.code == 429
+
+ # advance half a second
+ self.reactor.pump([0.5])
+
+ # request still fails
+ channel = self.make_request(
+ "GET",
+ "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy2",
+ shorthand=False,
+ )
+ assert channel.code == 429
+
+ # advance another half second
+ self.reactor.pump([0.5])
+
+ # enough has drained from bucket and request is successful
+ channel = self.make_request(
+ "GET",
+ "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy3",
+ shorthand=False,
+ )
+ assert channel.code == 200
+
+ @patch(
+ "synapse.http.matrixfederationclient.read_body_with_max_size",
+ read_body_with_max_size_30MiB,
+ )
+ def test_download_ratelimit_max_size_sub(self) -> None:
+ """
+ Test that if no content-length is provided, the default max size is applied instead
+ """
+
+ # mock out actually sending the request
+ async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+ resp = MagicMock(spec=IResponse)
+ resp.code = 200
+ resp.length = UNKNOWN_LENGTH
+ resp.headers = Headers({"Content-Type": ["application/octet-stream"]})
+ resp.phrase = b"OK"
+ return resp
+
+ self.client._send_request = _send_request # type: ignore
+
+ # ten requests should go through using the max size (500MB/50MB)
+ for i in range(10):
+ channel2 = self.make_request(
+ "GET",
+ f"/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy{i}",
+ shorthand=False,
+ )
+ assert channel2.code == 200
+
+ # eleventh will hit ratelimit
+ channel3 = self.make_request(
+ "GET",
+ "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyx",
+ shorthand=False,
+ )
+ assert channel3.code == 429
From 3f06bbc0acaa7994b2df8c974e212f21f4a028ba Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Jun 2024 17:10:58 +0100
Subject: [PATCH 148/503] Always return OTK counts (#17275)
Broke in https://github.com/element-hq/synapse/pull/17215
---
changelog.d/17275.bugfix | 1 +
synapse/handlers/sync.py | 33 +++++++++++++++++++++++++++++----
2 files changed, 30 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/17275.bugfix
diff --git a/changelog.d/17275.bugfix b/changelog.d/17275.bugfix
new file mode 100644
index 0000000000..eb522bb997
--- /dev/null
+++ b/changelog.d/17275.bugfix
@@ -0,0 +1 @@
+Fix bug where OTKs were not always included in `/sync` response when using workers.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index e815e0ea7f..9d37e2a86f 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -285,7 +285,11 @@ class SyncResult:
)
@staticmethod
- def empty(next_batch: StreamToken) -> "SyncResult":
+ def empty(
+ next_batch: StreamToken,
+ device_one_time_keys_count: JsonMapping,
+ device_unused_fallback_key_types: List[str],
+ ) -> "SyncResult":
"Return a new empty result"
return SyncResult(
next_batch=next_batch,
@@ -297,8 +301,8 @@ class SyncResult:
archived=[],
to_device=[],
device_lists=DeviceListUpdates(),
- device_one_time_keys_count={},
- device_unused_fallback_key_types=[],
+ device_one_time_keys_count=device_one_time_keys_count,
+ device_unused_fallback_key_types=device_unused_fallback_key_types,
)
@@ -523,7 +527,28 @@ class SyncHandler:
logger.warning(
"Timed out waiting for worker to catch up. Returning empty response"
)
- return SyncResult.empty(since_token)
+ device_id = sync_config.device_id
+ one_time_keys_count: JsonMapping = {}
+ unused_fallback_key_types: List[str] = []
+ if device_id:
+ user_id = sync_config.user.to_string()
+ # TODO: We should have a way to let clients differentiate between the states of:
+ # * no change in OTK count since the provided since token
+ # * the server has zero OTKs left for this device
+ # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
+ one_time_keys_count = await self.store.count_e2e_one_time_keys(
+ user_id, device_id
+ )
+ unused_fallback_key_types = list(
+ await self.store.get_e2e_unused_fallback_key_types(
+ user_id, device_id
+ )
+ )
+
+ cache_context.should_cache = False # Don't cache empty responses
+ return SyncResult.empty(
+ since_token, one_time_keys_count, unused_fallback_key_types
+ )
# If we've spent significant time waiting to catch up, take it off
# the timeout.
From a963f579de02ffd4347cd3f013bd6fce816ba89c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Jun 2024 17:46:52 +0100
Subject: [PATCH 149/503] Don't try and resync devices for down hosts (#17273)
It's just a waste of time if we won't even query the remote host as it's
marked as down.
---
changelog.d/17273.misc | 1 +
synapse/handlers/e2e_keys.py | 24 ++++++++++++++++++------
2 files changed, 19 insertions(+), 6 deletions(-)
create mode 100644 changelog.d/17273.misc
diff --git a/changelog.d/17273.misc b/changelog.d/17273.misc
new file mode 100644
index 0000000000..2c1c6bc0d5
--- /dev/null
+++ b/changelog.d/17273.misc
@@ -0,0 +1 @@
+Don't try and resync devices for remote users whose servers are marked as down.
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 560530a7b3..7d4feecaf1 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -45,7 +45,10 @@ from synapse.types import (
from synapse.util import json_decoder
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.cancellation import cancellable
-from synapse.util.retryutils import NotRetryingDestination
+from synapse.util.retryutils import (
+ NotRetryingDestination,
+ filter_destinations_by_retry_limiter,
+)
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -268,10 +271,8 @@ class E2eKeysHandler:
"%d destinations to query devices for", len(remote_queries_not_in_cache)
)
- async def _query(
- destination_queries: Tuple[str, Dict[str, Iterable[str]]]
- ) -> None:
- destination, queries = destination_queries
+ async def _query(destination: str) -> None:
+ queries = remote_queries_not_in_cache[destination]
return await self._query_devices_for_destination(
results,
cross_signing_keys,
@@ -281,9 +282,20 @@ class E2eKeysHandler:
timeout,
)
+ # Only try and fetch keys for destinations that are not marked as
+ # down.
+ filtered_destinations = await filter_destinations_by_retry_limiter(
+ remote_queries_not_in_cache.keys(),
+ self.clock,
+ self.store,
+ # Let's give an arbitrary grace period for those hosts that are
+ # only recently down
+ retry_due_within_ms=60 * 1000,
+ )
+
await concurrently_execute(
_query,
- remote_queries_not_in_cache.items(),
+ filtered_destinations,
10,
delay_cancellation=True,
)
From ce9385819bea6b6e3b956e4a54412ace0bed305c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Jun 2024 17:47:02 +0100
Subject: [PATCH 150/503] Handle OTK uploads off master (#17271)
And fallback keys uploads. Only device keys need handling on master
---
changelog.d/17271.misc | 1 +
synapse/handlers/e2e_keys.py | 84 +++++++++++++++++++++++-------------
synapse/rest/client/keys.py | 13 ++----
3 files changed, 60 insertions(+), 38 deletions(-)
create mode 100644 changelog.d/17271.misc
diff --git a/changelog.d/17271.misc b/changelog.d/17271.misc
new file mode 100644
index 0000000000..915d717ad7
--- /dev/null
+++ b/changelog.d/17271.misc
@@ -0,0 +1 @@
+Handle OTK uploads off master.
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 7d4feecaf1..668cec513b 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -35,6 +35,7 @@ from synapse.api.errors import CodeMessageException, Codes, NotFoundError, Synap
from synapse.handlers.device import DeviceHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
+from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
from synapse.types import (
JsonDict,
JsonMapping,
@@ -89,6 +90,12 @@ class E2eKeysHandler:
edu_updater.incoming_signing_key_update,
)
+ self.device_key_uploader = self.upload_device_keys_for_user
+ else:
+ self.device_key_uploader = (
+ ReplicationUploadKeysForUserRestServlet.make_client(hs)
+ )
+
# doesn't really work as part of the generic query API, because the
# query request requires an object POST, but we abuse the
# "query handler" interface.
@@ -796,36 +803,17 @@ class E2eKeysHandler:
"one_time_keys": A mapping from algorithm to number of keys for that
algorithm, including those previously persisted.
"""
- # This can only be called from the main process.
- assert isinstance(self.device_handler, DeviceHandler)
-
time_now = self.clock.time_msec()
# TODO: Validate the JSON to make sure it has the right keys.
device_keys = keys.get("device_keys", None)
if device_keys:
- logger.info(
- "Updating device_keys for device %r for user %s at %d",
- device_id,
- user_id,
- time_now,
+ await self.device_key_uploader(
+ user_id=user_id,
+ device_id=device_id,
+ keys={"device_keys": device_keys},
)
- log_kv(
- {
- "message": "Updating device_keys for user.",
- "user_id": user_id,
- "device_id": device_id,
- }
- )
- # TODO: Sign the JSON with the server key
- changed = await self.store.set_e2e_device_keys(
- user_id, device_id, time_now, device_keys
- )
- if changed:
- # Only notify about device updates *if* the keys actually changed
- await self.device_handler.notify_device_update(user_id, [device_id])
- else:
- log_kv({"message": "Not updating device_keys for user", "user_id": user_id})
+
one_time_keys = keys.get("one_time_keys", None)
if one_time_keys:
log_kv(
@@ -861,6 +849,49 @@ class E2eKeysHandler:
{"message": "Did not update fallback_keys", "reason": "no keys given"}
)
+ result = await self.store.count_e2e_one_time_keys(user_id, device_id)
+
+ set_tag("one_time_key_counts", str(result))
+ return {"one_time_key_counts": result}
+
+ @tag_args
+ async def upload_device_keys_for_user(
+ self, user_id: str, device_id: str, keys: JsonDict
+ ) -> None:
+ """
+ Args:
+ user_id: user whose keys are being uploaded.
+ device_id: device whose keys are being uploaded.
+ keys: a dict containing the `device_keys` of a /keys/upload request.
+
+ """
+ # This can only be called from the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
+
+ time_now = self.clock.time_msec()
+
+ device_keys = keys["device_keys"]
+ logger.info(
+ "Updating device_keys for device %r for user %s at %d",
+ device_id,
+ user_id,
+ time_now,
+ )
+ log_kv(
+ {
+ "message": "Updating device_keys for user.",
+ "user_id": user_id,
+ "device_id": device_id,
+ }
+ )
+ # TODO: Sign the JSON with the server key
+ changed = await self.store.set_e2e_device_keys(
+ user_id, device_id, time_now, device_keys
+ )
+ if changed:
+ # Only notify about device updates *if* the keys actually changed
+ await self.device_handler.notify_device_update(user_id, [device_id])
+
# the device should have been registered already, but it may have been
# deleted due to a race with a DELETE request. Or we may be using an
# old access_token without an associated device_id. Either way, we
@@ -868,11 +899,6 @@ class E2eKeysHandler:
# keys without a corresponding device.
await self.device_handler.check_device_registered(user_id, device_id)
- result = await self.store.count_e2e_one_time_keys(user_id, device_id)
-
- set_tag("one_time_key_counts", str(result))
- return {"one_time_key_counts": result}
-
async def _upload_one_time_keys_for_user(
self, user_id: str, device_id: str, time_now: int, one_time_keys: JsonDict
) -> None:
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index a0017257ce..306db07b86 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -36,7 +36,6 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import log_kv, set_tag
-from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
from synapse.rest.client._base import client_patterns, interactive_auth_handler
from synapse.types import JsonDict, StreamToken
from synapse.util.cancellation import cancellable
@@ -105,13 +104,8 @@ class KeyUploadServlet(RestServlet):
self.auth = hs.get_auth()
self.e2e_keys_handler = hs.get_e2e_keys_handler()
self.device_handler = hs.get_device_handler()
-
- if hs.config.worker.worker_app is None:
- # if main process
- self.key_uploader = self.e2e_keys_handler.upload_keys_for_user
- else:
- # then a worker
- self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs)
+ self._clock = hs.get_clock()
+ self._store = hs.get_datastores().main
async def on_POST(
self, request: SynapseRequest, device_id: Optional[str]
@@ -151,9 +145,10 @@ class KeyUploadServlet(RestServlet):
400, "To upload keys, you must pass device_id when authenticating"
)
- result = await self.key_uploader(
+ result = await self.e2e_keys_handler.upload_keys_for_user(
user_id=user_id, device_id=device_id, keys=body
)
+
return 200, result
From 4a7c58642c2eaedbf59faa2e368a0dc3bf09ceb4 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 6 Jun 2024 14:44:32 -0500
Subject: [PATCH 151/503] Add Sliding Sync `/sync` endpoint (initial
implementation) (#17187)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
This iteration only focuses on returning the list of room IDs in the sliding window API (without sorting/filtering).
Rooms appear in the Sliding sync response based on:
- `invite`, `join`, `knock`, `ban` membership events
- Kicks (`leave` membership events where `sender` is different from the `user_id`/`state_key`)
- `newly_left` (rooms that were left during the given token range, > `from_token` and <= `to_token`)
- In order for bans/kicks to not show up, you need to `/forget` those rooms. This doesn't modify the event itself though and only adds the `forgotten` flag to `room_memberships` in Synapse. There isn't a way to tell when a room was forgotten at the moment so we can't factor it into the from/to range.
### Example request
`POST http://localhost:8008/_matrix/client/unstable/org.matrix.msc3575/sync`
```json
{
"lists": {
"foo-list": {
"ranges": [ [0, 99] ],
"sort": [ "by_notification_level", "by_recency", "by_name" ],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
["m.space.child", "*"]
],
"timeline_limit": 100
}
}
}
```
Response:
```json
{
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
"lists": {
"foo-list": {
"count": 1,
"ops": [
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
"!MmgikIyFzsuvtnbvVG:my.synapse.linux.server"
]
}
]
}
},
"rooms": {},
"extensions": {}
}
```
---
changelog.d/17187.feature | 1 +
synapse/api/constants.py | 2 +-
synapse/handlers/sliding_sync.py | 610 +++++++++++++++
synapse/handlers/sync.py | 19 +-
synapse/rest/client/models.py | 191 ++++-
synapse/rest/client/room.py | 3 +
synapse/rest/client/sync.py | 230 +++++-
synapse/server.py | 4 +
tests/handlers/test_sliding_sync.py | 1118 +++++++++++++++++++++++++++
tests/rest/client/test_sync.py | 134 +++-
tests/rest/client/utils.py | 5 +-
11 files changed, 2302 insertions(+), 15 deletions(-)
create mode 100644 changelog.d/17187.feature
create mode 100644 synapse/handlers/sliding_sync.py
create mode 100644 tests/handlers/test_sliding_sync.py
diff --git a/changelog.d/17187.feature b/changelog.d/17187.feature
new file mode 100644
index 0000000000..50383cb4a4
--- /dev/null
+++ b/changelog.d/17187.feature
@@ -0,0 +1 @@
+Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 0a9123c56b..542e4faaa1 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -50,7 +50,7 @@ class Membership:
KNOCK: Final = "knock"
LEAVE: Final = "leave"
BAN: Final = "ban"
- LIST: Final = (INVITE, JOIN, KNOCK, LEAVE, BAN)
+ LIST: Final = {INVITE, JOIN, KNOCK, LEAVE, BAN}
class PresenceState:
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
new file mode 100644
index 0000000000..34ae21ba50
--- /dev/null
+++ b/synapse/handlers/sliding_sync.py
@@ -0,0 +1,610 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import logging
+from enum import Enum
+from typing import TYPE_CHECKING, AbstractSet, Dict, Final, List, Optional, Tuple
+
+import attr
+from immutabledict import immutabledict
+
+from synapse._pydantic_compat import HAS_PYDANTIC_V2
+
+if TYPE_CHECKING or HAS_PYDANTIC_V2:
+ from pydantic.v1 import Extra
+else:
+ from pydantic import Extra
+
+from synapse.api.constants import Membership
+from synapse.events import EventBase
+from synapse.rest.client.models import SlidingSyncBody
+from synapse.types import JsonMapping, Requester, RoomStreamToken, StreamToken, UserID
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+def filter_membership_for_sync(*, membership: str, user_id: str, sender: str) -> bool:
+ """
+ Returns True if the membership event should be included in the sync response,
+ otherwise False.
+
+    Args:
+ membership: The membership state of the user in the room.
+ user_id: The user ID that the membership applies to
+ sender: The person who sent the membership event
+ """
+
+ # Everything except `Membership.LEAVE` because we want everything that's *still*
+    # relevant to the user. There are a few more things to include in the sync response
+ # (newly_left) but those are handled separately.
+ #
+ # This logic includes kicks (leave events where the sender is not the same user) and
+ # can be read as "anything that isn't a leave or a leave with a different sender".
+ return membership != Membership.LEAVE or sender != user_id
+
+
+class SlidingSyncConfig(SlidingSyncBody):
+ """
+ Inherit from `SlidingSyncBody` since we need all of the same fields and add a few
+ extra fields that we need in the handler
+ """
+
+ user: UserID
+ device_id: Optional[str]
+
+ # Pydantic config
+ class Config:
+ # By default, ignore fields that we don't recognise.
+ extra = Extra.ignore
+ # By default, don't allow fields to be reassigned after parsing.
+ allow_mutation = False
+ # Allow custom types like `UserID` to be used in the model
+ arbitrary_types_allowed = True
+
+
+class OperationType(Enum):
+ """
+ Represents the operation types in a Sliding Sync window.
+
+ Attributes:
+        SYNC: Sets a range of entries. Clients SHOULD discard what they previously knew about
+ entries in this range.
+ INSERT: Sets a single entry. If the position is not empty then clients MUST move
+ entries to the left or the right depending on where the closest empty space is.
+ DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move
+ places.
+ INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for
+ offline support, but they should be treated as empty when additional operations
+ which concern indexes in the range arrive from the server.
+ """
+
+ SYNC: Final = "SYNC"
+ INSERT: Final = "INSERT"
+ DELETE: Final = "DELETE"
+ INVALIDATE: Final = "INVALIDATE"
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class SlidingSyncResult:
+ """
+ The Sliding Sync result to be serialized to JSON for a response.
+
+ Attributes:
+ next_pos: The next position token in the sliding window to request (next_batch).
+ lists: Sliding window API. A map of list key to list results.
+ rooms: Room subscription API. A map of room ID to room subscription to room results.
+ extensions: Extensions API. A map of extension key to extension results.
+ """
+
+ @attr.s(slots=True, frozen=True, auto_attribs=True)
+ class RoomResult:
+ """
+ Attributes:
+ name: Room name or calculated room name.
+ avatar: Room avatar
+ heroes: List of stripped membership events (containing `user_id` and optionally
+ `avatar_url` and `displayname`) for the users used to calculate the room name.
+ initial: Flag which is set when this is the first time the server is sending this
+ data on this connection. Clients can use this flag to replace or update
+ their local state. When there is an update, servers MUST omit this flag
+ entirely and NOT send "initial":false as this is wasteful on bandwidth. The
+ absence of this flag means 'false'.
+ required_state: The current state of the room
+ timeline: Latest events in the room. The last event is the most recent
+ is_dm: Flag to specify whether the room is a direct-message room (most likely
+ between two people).
+ invite_state: Stripped state events. Same as `rooms.invite.$room_id.invite_state`
+ in sync v2, absent on joined/left rooms
+ prev_batch: A token that can be passed as a start parameter to the
+ `/rooms//messages` API to retrieve earlier messages.
+            limited: True if there are more events than fit between the given position and now.
+ Sync again to get more.
+ joined_count: The number of users with membership of join, including the client's
+ own user ID. (same as sync `v2 m.joined_member_count`)
+ invited_count: The number of users with membership of invite. (same as sync v2
+ `m.invited_member_count`)
+ notification_count: The total number of unread notifications for this room. (same
+ as sync v2)
+ highlight_count: The number of unread notifications for this room with the highlight
+ flag set. (same as sync v2)
+ num_live: The number of timeline events which have just occurred and are not historical.
+ The last N events are 'live' and should be treated as such. This is mostly
+ useful to determine whether a given @mention event should make a noise or not.
+ Clients cannot rely solely on the absence of `initial: true` to determine live
+ events because if a room not in the sliding window bumps into the window because
+ of an @mention it will have `initial: true` yet contain a single live event
+ (with potentially other old events in the timeline).
+ """
+
+ name: str
+ avatar: Optional[str]
+ heroes: Optional[List[EventBase]]
+ initial: bool
+ required_state: List[EventBase]
+ timeline: List[EventBase]
+ is_dm: bool
+ invite_state: List[EventBase]
+ prev_batch: StreamToken
+ limited: bool
+ joined_count: int
+ invited_count: int
+ notification_count: int
+ highlight_count: int
+ num_live: int
+
+ @attr.s(slots=True, frozen=True, auto_attribs=True)
+ class SlidingWindowList:
+ """
+ Attributes:
+ count: The total number of entries in the list. Always present if this list
+ is.
+ ops: The sliding list operations to perform.
+ """
+
+ @attr.s(slots=True, frozen=True, auto_attribs=True)
+ class Operation:
+ """
+ Attributes:
+ op: The operation type to perform.
+ range: Which index positions are affected by this operation. These are
+ both inclusive.
+ room_ids: Which room IDs are affected by this operation. These IDs match
+ up to the positions in the `range`, so the last room ID in this list
+ matches the 9th index. The room data is held in a separate object.
+ """
+
+ op: OperationType
+ range: Tuple[int, int]
+ room_ids: List[str]
+
+ count: int
+ ops: List[Operation]
+
+ next_pos: StreamToken
+ lists: Dict[str, SlidingWindowList]
+ rooms: Dict[str, RoomResult]
+ extensions: JsonMapping
+
+ def __bool__(self) -> bool:
+ """Make the result appear empty if there are no updates. This is used
+ to tell if the notifier needs to wait for more events when polling for
+ events.
+ """
+ return bool(self.lists or self.rooms or self.extensions)
+
+ @staticmethod
+ def empty(next_pos: StreamToken) -> "SlidingSyncResult":
+ "Return a new empty result"
+ return SlidingSyncResult(
+ next_pos=next_pos,
+ lists={},
+ rooms={},
+ extensions={},
+ )
+
+
+class SlidingSyncHandler:
+ def __init__(self, hs: "HomeServer"):
+ self.clock = hs.get_clock()
+ self.store = hs.get_datastores().main
+ self.auth_blocking = hs.get_auth_blocking()
+ self.notifier = hs.get_notifier()
+ self.event_sources = hs.get_event_sources()
+ self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
+
+ async def wait_for_sync_for_user(
+ self,
+ requester: Requester,
+ sync_config: SlidingSyncConfig,
+ from_token: Optional[StreamToken] = None,
+ timeout_ms: int = 0,
+ ) -> SlidingSyncResult:
+ """Get the sync for a client if we have new data for it now. Otherwise
+ wait for new data to arrive on the server. If the timeout expires, then
+ return an empty sync result.
+ """
+ # If the user is not part of the mau group, then check that limits have
+ # not been exceeded (if not part of the group by this point, almost certain
+ # auth_blocking will occur)
+ await self.auth_blocking.check_auth_blocking(requester=requester)
+
+ # TODO: If the To-Device extension is enabled and we have a `from_token`, delete
+ # any to-device messages before that token (since we now know that the device
+ # has received them). (see sync v2 for how to do this)
+
+ # If we're working with a user-provided token, we need to make sure to wait for
+ # this worker to catch up with the token so we don't skip past any incoming
+ # events or future events if the user is nefariously, manually modifying the
+ # token.
+ if from_token is not None:
+ # We need to make sure this worker has caught up with the token. If
+ # this returns false, it means we timed out waiting, and we should
+ # just return an empty response.
+ before_wait_ts = self.clock.time_msec()
+ if not await self.notifier.wait_for_stream_token(from_token):
+ logger.warning(
+ "Timed out waiting for worker to catch up. Returning empty response"
+ )
+ return SlidingSyncResult.empty(from_token)
+
+ # If we've spent significant time waiting to catch up, take it off
+ # the timeout.
+ after_wait_ts = self.clock.time_msec()
+ if after_wait_ts - before_wait_ts > 1_000:
+ timeout_ms -= after_wait_ts - before_wait_ts
+ timeout_ms = max(timeout_ms, 0)
+
+ # We're going to respond immediately if the timeout is 0 or if this is an
+ # initial sync (without a `from_token`) so we can avoid calling
+ # `notifier.wait_for_events()`.
+ if timeout_ms == 0 or from_token is None:
+ now_token = self.event_sources.get_current_token()
+ result = await self.current_sync_for_user(
+ sync_config,
+ from_token=from_token,
+ to_token=now_token,
+ )
+ else:
+ # Otherwise, we wait for something to happen and report it to the user.
+ async def current_sync_callback(
+ before_token: StreamToken, after_token: StreamToken
+ ) -> SlidingSyncResult:
+ return await self.current_sync_for_user(
+ sync_config,
+ from_token=from_token,
+ to_token=after_token,
+ )
+
+ result = await self.notifier.wait_for_events(
+ sync_config.user.to_string(),
+ timeout_ms,
+ current_sync_callback,
+ from_token=from_token,
+ )
+
+ return result
+
+ async def current_sync_for_user(
+ self,
+ sync_config: SlidingSyncConfig,
+ to_token: StreamToken,
+ from_token: Optional[StreamToken] = None,
+ ) -> SlidingSyncResult:
+ """
+ Generates the response body of a Sliding Sync result, represented as a
+ `SlidingSyncResult`.
+ """
+ user_id = sync_config.user.to_string()
+ app_service = self.store.get_app_service_by_user_id(user_id)
+ if app_service:
+ # We no longer support AS users using /sync directly.
+ # See https://github.com/matrix-org/matrix-doc/issues/1144
+ raise NotImplementedError()
+
+ # Get all of the room IDs that the user should be able to see in the sync
+ # response
+ room_id_set = await self.get_sync_room_ids_for_user(
+ sync_config.user,
+ from_token=from_token,
+ to_token=to_token,
+ )
+
+ # Assemble sliding window lists
+ lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
+ if sync_config.lists:
+ for list_key, list_config in sync_config.lists.items():
+ # TODO: Apply filters
+ #
+ # TODO: Exclude partially stated rooms unless the `required_state` has
+ # `["m.room.member", "$LAZY"]`
+ filtered_room_ids = room_id_set
+ # TODO: Apply sorts
+ sorted_room_ids = sorted(filtered_room_ids)
+
+ ops: List[SlidingSyncResult.SlidingWindowList.Operation] = []
+ if list_config.ranges:
+ for range in list_config.ranges:
+ ops.append(
+ SlidingSyncResult.SlidingWindowList.Operation(
+ op=OperationType.SYNC,
+ range=range,
+ room_ids=sorted_room_ids[range[0] : range[1]],
+ )
+ )
+
+ lists[list_key] = SlidingSyncResult.SlidingWindowList(
+ count=len(sorted_room_ids),
+ ops=ops,
+ )
+
+ return SlidingSyncResult(
+ next_pos=to_token,
+ lists=lists,
+ # TODO: Gather room data for rooms in lists and `sync_config.room_subscriptions`
+ rooms={},
+ extensions={},
+ )
+
+ async def get_sync_room_ids_for_user(
+ self,
+ user: UserID,
+ to_token: StreamToken,
+ from_token: Optional[StreamToken] = None,
+ ) -> AbstractSet[str]:
+ """
+ Fetch room IDs that should be listed for this user in the sync response (the
+ full room list that will be filtered, sorted, and sliced).
+
+ We're looking for rooms where the user has the following state in the token
+ range (> `from_token` and <= `to_token`):
+
+ - `invite`, `join`, `knock`, `ban` membership events
+ - Kicks (`leave` membership events where `sender` is different from the
+ `user_id`/`state_key`)
+ - `newly_left` (rooms that were left during the given token range)
+ - In order for bans/kicks to not show up in sync, you need to `/forget` those
+ rooms. This doesn't modify the event itself though and only adds the
+ `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way
+ to tell when a room was forgotten at the moment so we can't factor it into the
+ from/to range.
+ """
+ user_id = user.to_string()
+
+        # First grab a current snapshot of rooms for the user
+ # (also handles forgotten rooms)
+ room_for_user_list = await self.store.get_rooms_for_local_user_where_membership_is(
+ user_id=user_id,
+ # We want to fetch any kind of membership (joined and left rooms) in order
+ # to get the `event_pos` of the latest room membership event for the
+ # user.
+ #
+ # We will filter out the rooms that don't belong below (see
+ # `filter_membership_for_sync`)
+ membership_list=Membership.LIST,
+ excluded_rooms=self.rooms_to_exclude_globally,
+ )
+
+        # If the user has never joined any rooms before, we can just return an empty set
+ if not room_for_user_list:
+ return set()
+
+ # Our working list of rooms that can show up in the sync response
+ sync_room_id_set = {
+ room_for_user.room_id
+ for room_for_user in room_for_user_list
+ if filter_membership_for_sync(
+ membership=room_for_user.membership,
+ user_id=user_id,
+ sender=room_for_user.sender,
+ )
+ }
+
+ # Get the `RoomStreamToken` that represents the spot we queried up to when we got
+ # our membership snapshot from `get_rooms_for_local_user_where_membership_is()`.
+ #
+ # First, we need to get the max stream_ordering of each event persister instance
+ # that we queried events from.
+ instance_to_max_stream_ordering_map: Dict[str, int] = {}
+ for room_for_user in room_for_user_list:
+ instance_name = room_for_user.event_pos.instance_name
+ stream_ordering = room_for_user.event_pos.stream
+
+ current_instance_max_stream_ordering = (
+ instance_to_max_stream_ordering_map.get(instance_name)
+ )
+ if (
+ current_instance_max_stream_ordering is None
+ or stream_ordering > current_instance_max_stream_ordering
+ ):
+ instance_to_max_stream_ordering_map[instance_name] = stream_ordering
+
+ # Then assemble the `RoomStreamToken`
+ membership_snapshot_token = RoomStreamToken(
+ # Minimum position in the `instance_map`
+ stream=min(instance_to_max_stream_ordering_map.values()),
+ instance_map=immutabledict(instance_to_max_stream_ordering_map),
+ )
+
+ # If our `to_token` is already the same or ahead of the latest room membership
+ # for the user, we can just straight-up return the room list (nothing has
+ # changed)
+ if membership_snapshot_token.is_before_or_eq(to_token.room_key):
+ return sync_room_id_set
+
+        # Since we fetched the user's room list at some point in time after the from/to
+ # tokens, we need to revert/rewind some membership changes to match the point in
+ # time of the `to_token`. In particular, we need to make these fixups:
+ #
+ # - 1a) Remove rooms that the user joined after the `to_token`
+ # - 1b) Add back rooms that the user left after the `to_token`
+ # - 2) Add back newly_left rooms (> `from_token` and <= `to_token`)
+ #
+ # Below, we're doing two separate lookups for membership changes. We could
+ # request everything for both fixups in one range, [`from_token.room_key`,
+ # `membership_snapshot_token`), but we want to avoid raw `stream_ordering`
+ # comparison without `instance_name` (which is flawed). We could refactor
+ # `event.internal_metadata` to include `instance_name` but it might turn out a
+ # little difficult and a bigger, broader Synapse change than we want to make.
+
+ # 1) -----------------------------------------------------
+
+ # 1) Fetch membership changes that fall in the range from `to_token` up to
+ # `membership_snapshot_token`
+ membership_change_events_after_to_token = (
+ await self.store.get_membership_changes_for_user(
+ user_id,
+ from_key=to_token.room_key,
+ to_key=membership_snapshot_token,
+ excluded_rooms=self.rooms_to_exclude_globally,
+ )
+ )
+
+ # 1) Assemble a list of the last membership events in some given ranges. Someone
+ # could have left and joined multiple times during the given range but we only
+ # care about end-result so we grab the last one.
+ last_membership_change_by_room_id_after_to_token: Dict[str, EventBase] = {}
+ # We also need the first membership event after the `to_token` so we can step
+ # backward to the previous membership that would apply to the from/to range.
+ first_membership_change_by_room_id_after_to_token: Dict[str, EventBase] = {}
+ for event in membership_change_events_after_to_token:
+ last_membership_change_by_room_id_after_to_token[event.room_id] = event
+ # Only set if we haven't already set it
+ first_membership_change_by_room_id_after_to_token.setdefault(
+ event.room_id, event
+ )
+
+ # 1) Fixup
+ for (
+ last_membership_change_after_to_token
+ ) in last_membership_change_by_room_id_after_to_token.values():
+ room_id = last_membership_change_after_to_token.room_id
+
+ # We want to find the first membership change after the `to_token` then step
+ # backward to know the membership in the from/to range.
+ first_membership_change_after_to_token = (
+ first_membership_change_by_room_id_after_to_token.get(room_id)
+ )
+ assert first_membership_change_after_to_token is not None, (
+ "If there was a `last_membership_change_after_to_token` that we're iterating over, "
+ + "then there should be corresponding a first change. For example, even if there "
+ + "is only one event after the `to_token`, the first and last event will be same event. "
+ + "This is probably a mistake in assembling the `last_membership_change_by_room_id_after_to_token`"
+ + "/`first_membership_change_by_room_id_after_to_token` dicts above."
+ )
+ # TODO: Instead of reading from `unsigned`, refactor this to use the
+ # `current_state_delta_stream` table in the future. Probably a new
+ # `get_membership_changes_for_user()` function that uses
+ # `current_state_delta_stream` with a join to `room_memberships`. This would
+ # help in state reset scenarios since `prev_content` is looking at the
+ # current branch vs the current room state. This is all just data given to
+ # the client so no real harm to data integrity, but we'd like to be nice to
+ # the client. Since the `current_state_delta_stream` table is new, it
+ # doesn't have all events in it. Since this is Sliding Sync, if we ever need
+ # to, we can signal the client to throw all of their state away by sending
+ # "operation: RESET".
+ prev_content = first_membership_change_after_to_token.unsigned.get(
+ "prev_content", {}
+ )
+ prev_membership = prev_content.get("membership", None)
+ prev_sender = first_membership_change_after_to_token.unsigned.get(
+ "prev_sender", None
+ )
+
+ # Check if the previous membership (membership that applies to the from/to
+ # range) should be included in our `sync_room_id_set`
+ should_prev_membership_be_included = (
+ prev_membership is not None
+ and prev_sender is not None
+ and filter_membership_for_sync(
+ membership=prev_membership,
+ user_id=user_id,
+ sender=prev_sender,
+ )
+ )
+
+ # Check if the last membership (membership that applies to our snapshot) was
+ # already included in our `sync_room_id_set`
+ was_last_membership_already_included = filter_membership_for_sync(
+ membership=last_membership_change_after_to_token.membership,
+ user_id=user_id,
+ sender=last_membership_change_after_to_token.sender,
+ )
+
+ # 1a) Add back rooms that the user left after the `to_token`
+ #
+ # For example, if the last membership event after the `to_token` is a leave
+ # event, then the room was excluded from `sync_room_id_set` when we first
+ # crafted it above. We should add these rooms back as long as the user also
+ # was part of the room before the `to_token`.
+ if (
+ not was_last_membership_already_included
+ and should_prev_membership_be_included
+ ):
+ sync_room_id_set.add(room_id)
+ # 1b) Remove rooms that the user joined (hasn't left) after the `to_token`
+ #
+ # For example, if the last membership event after the `to_token` is a "join"
+ # event, then the room was included `sync_room_id_set` when we first crafted
+ # it above. We should remove these rooms as long as the user also wasn't
+ # part of the room before the `to_token`.
+ elif (
+ was_last_membership_already_included
+ and not should_prev_membership_be_included
+ ):
+ sync_room_id_set.discard(room_id)
+
+ # 2) -----------------------------------------------------
+ # We fix-up newly_left rooms after the first fixup because it may have removed
+ # some left rooms that we can figure out our newly_left in the following code
+
+ # 2) Fetch membership changes that fall in the range from `from_token` up to `to_token`
+ membership_change_events_in_from_to_range = []
+ if from_token:
+ membership_change_events_in_from_to_range = (
+ await self.store.get_membership_changes_for_user(
+ user_id,
+ from_key=from_token.room_key,
+ to_key=to_token.room_key,
+ excluded_rooms=self.rooms_to_exclude_globally,
+ )
+ )
+
+ # 2) Assemble a list of the last membership events in some given ranges. Someone
+ # could have left and joined multiple times during the given range but we only
+ # care about end-result so we grab the last one.
+ last_membership_change_by_room_id_in_from_to_range: Dict[str, EventBase] = {}
+ for event in membership_change_events_in_from_to_range:
+ last_membership_change_by_room_id_in_from_to_range[event.room_id] = event
+
+ # 2) Fixup
+ for (
+ last_membership_change_in_from_to_range
+ ) in last_membership_change_by_room_id_in_from_to_range.values():
+ room_id = last_membership_change_in_from_to_range.room_id
+
+ # 2) Add back newly_left rooms (> `from_token` and <= `to_token`). We
+ # include newly_left rooms because the last event that the user should see
+ # is their own leave event
+ if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
+ sync_room_id_set.add(room_id)
+
+ return sync_room_id_set
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 9d37e2a86f..39964726c5 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -2002,7 +2002,7 @@ class SyncHandler:
"""
user_id = sync_config.user.to_string()
- # Note: we get the users room list *before* we get the current token, this
+ # Note: we get the users room list *before* we get the `now_token`, this
# avoids checking back in history if rooms are joined after the token is fetched.
token_before_rooms = self.event_sources.get_current_token()
mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id))
@@ -2014,10 +2014,10 @@ class SyncHandler:
now_token = self.event_sources.get_current_token()
log_kv({"now_token": now_token})
- # Since we fetched the users room list before the token, there's a small window
- # during which membership events may have been persisted, so we fetch these now
- # and modify the joined room list for any changes between the get_rooms_for_user
- # call and the get_current_token call.
+ # Since we fetched the users room list before calculating the `now_token` (see
+ # above), there's a small window during which membership events may have been
+ # persisted, so we fetch these now and modify the joined room list for any
+ # changes between the get_rooms_for_user call and the get_current_token call.
membership_change_events = []
if since_token:
membership_change_events = await self.store.get_membership_changes_for_user(
@@ -2027,16 +2027,19 @@ class SyncHandler:
self.rooms_to_exclude_globally,
)
- mem_last_change_by_room_id: Dict[str, EventBase] = {}
+ last_membership_change_by_room_id: Dict[str, EventBase] = {}
for event in membership_change_events:
- mem_last_change_by_room_id[event.room_id] = event
+ last_membership_change_by_room_id[event.room_id] = event
# For the latest membership event in each room found, add/remove the room ID
# from the joined room list accordingly. In this case we only care if the
# latest change is JOIN.
- for room_id, event in mem_last_change_by_room_id.items():
+ for room_id, event in last_membership_change_by_room_id.items():
assert event.internal_metadata.stream_ordering
+ # As a shortcut, skip any events that happened before we got our
+ # `get_rooms_for_user()` snapshot (any changes are already represented
+ # in that list).
if (
event.internal_metadata.stream_ordering
< token_before_rooms.room_key.stream
diff --git a/synapse/rest/client/models.py b/synapse/rest/client/models.py
index fc1aed2889..5433ed91ef 100644
--- a/synapse/rest/client/models.py
+++ b/synapse/rest/client/models.py
@@ -18,14 +18,30 @@
# [This file includes modifications made by New Vector Limited]
#
#
-from typing import TYPE_CHECKING, Dict, Optional
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
from synapse._pydantic_compat import HAS_PYDANTIC_V2
if TYPE_CHECKING or HAS_PYDANTIC_V2:
- from pydantic.v1 import Extra, StrictInt, StrictStr, constr, validator
+ from pydantic.v1 import (
+ Extra,
+ StrictBool,
+ StrictInt,
+ StrictStr,
+ conint,
+ constr,
+ validator,
+ )
else:
- from pydantic import Extra, StrictInt, StrictStr, constr, validator
+ from pydantic import (
+ Extra,
+ StrictBool,
+ StrictInt,
+ StrictStr,
+ conint,
+ constr,
+ validator,
+ )
from synapse.rest.models import RequestBodyModel
from synapse.util.threepids import validate_email
@@ -97,3 +113,172 @@ else:
class MsisdnRequestTokenBody(ThreepidRequestTokenBody):
country: ISO3116_1_Alpha_2
phone_number: StrictStr
+
+
+class SlidingSyncBody(RequestBodyModel):
+ """
+ Sliding Sync API request body.
+
+ Attributes:
+ lists: Sliding window API. A map of list key to list information
+ (:class:`SlidingSyncList`). Max lists: 100. The list keys should be
+ arbitrary strings which the client is using to refer to the list. Keep this
+ small as it needs to be sent a lot. Max length: 64 bytes.
+ room_subscriptions: Room subscription API. A map of room ID to room subscription
+ information. Used to subscribe to a specific room. Sometimes clients know
+ exactly which room they want to get information about e.g by following a
+ permalink or by refreshing a webapp currently viewing a specific room. The
+ sliding window API alone is insufficient for this use case because there's
+ no way to say "please track this room explicitly".
+ extensions: Extensions API. A map of extension key to extension config.
+ """
+
+ class CommonRoomParameters(RequestBodyModel):
+ """
+ Common parameters shared between the sliding window and room subscription APIs.
+
+ Attributes:
+ required_state: Required state for each room returned. An array of event
+ type and state key tuples. Elements in this array are ORd together to
+ produce the final set of state events to return. One unique exception is
+ when you request all state events via `["*", "*"]`. When used, all state
+ events are returned by default, and additional entries FILTER OUT the
+ returned set of state events. These additional entries cannot use `*`
+ themselves. For example, `["*", "*"], ["m.room.member",
+ "@alice:example.com"]` will *exclude* every `m.room.member` event
+ *except* for `@alice:example.com`, and include every other state event.
+ In addition, `["*", "*"], ["m.space.child", "*"]` is an error, the
+ `m.space.child` filter is not required as it would have been returned
+ anyway.
+ timeline_limit: The maximum number of timeline events to return per response.
+ (Max 1000 messages)
+ include_old_rooms: Determines if `predecessor` rooms are included in the
+ `rooms` response. The user MUST be joined to old rooms for them to show up
+ in the response.
+ """
+
+ class IncludeOldRooms(RequestBodyModel):
+ timeline_limit: StrictInt
+ required_state: List[Tuple[StrictStr, StrictStr]]
+
+ required_state: List[Tuple[StrictStr, StrictStr]]
+ # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
+ if TYPE_CHECKING:
+ timeline_limit: int
+ else:
+ timeline_limit: conint(le=1000, strict=True) # type: ignore[valid-type]
+ include_old_rooms: Optional[IncludeOldRooms] = None
+
+ class SlidingSyncList(CommonRoomParameters):
+ """
+ Attributes:
+ ranges: Sliding window ranges. If this field is missing, no sliding window
+ is used and all rooms are returned in this list. Integers are
+ *inclusive*.
+ sort: How the list should be sorted on the server. The first value is
+ applied first, then tiebreaks are performed with each subsequent sort
+ listed.
+
+ FIXME: Furthermore, it's not currently defined how servers should behave
+ if they encounter a filter or sort operation they do not recognise. If
+ the server rejects the request with an HTTP 400 then that will break
+ backwards compatibility with new clients vs old servers. However, the
+ client would be otherwise unaware that only some of the sort/filter
+ operations have taken effect. We may need to include a "warnings"
+ section to indicate which sort/filter operations are unrecognised,
+ allowing for some form of graceful degradation of service.
+ -- https://github.com/matrix-org/matrix-spec-proposals/blob/kegan/sync-v3/proposals/3575-sync.md#filter-and-sort-extensions
+
+ slow_get_all_rooms: Just get all rooms (for clients that don't want to deal with
+ sliding windows). When true, the `ranges` and `sort` fields are ignored.
+ required_state: Required state for each room returned. An array of event
+ type and state key tuples. Elements in this array are ORd together to
+ produce the final set of state events to return.
+
+ One unique exception is when you request all state events via `["*",
+ "*"]`. When used, all state events are returned by default, and
+ additional entries FILTER OUT the returned set of state events. These
+ additional entries cannot use `*` themselves. For example, `["*", "*"],
+ ["m.room.member", "@alice:example.com"]` will *exclude* every
+ `m.room.member` event *except* for `@alice:example.com`, and include
+ every other state event. In addition, `["*", "*"], ["m.space.child",
+ "*"]` is an error, the `m.space.child` filter is not required as it
+ would have been returned anyway.
+
+ Room members can be lazily-loaded by using the special `$LAZY` state key
+ (`["m.room.member", "$LAZY"]`). Typically, when you view a room, you
+ want to retrieve all state events except for m.room.member events which
+ you want to lazily load. To get this behaviour, clients can send the
+ following::
+
+ {
+ "required_state": [
+ // activate lazy loading
+ ["m.room.member", "$LAZY"],
+ // request all state events _except_ for m.room.member
+ events which are lazily loaded
+ ["*", "*"]
+ ]
+ }
+
+ timeline_limit: The maximum number of timeline events to return per response.
+ include_old_rooms: Determines if `predecessor` rooms are included in the
+ `rooms` response. The user MUST be joined to old rooms for them to show up
+ in the response.
+ include_heroes: Return a stripped variant of membership events (containing
+ `user_id` and optionally `avatar_url` and `displayname`) for the users used
+ to calculate the room name.
+ filters: Filters to apply to the list before sorting.
+ bump_event_types: Allowlist of event types which should be considered recent activity
+ when sorting `by_recency`. By omitting event types from this field,
+ clients can ensure that uninteresting events (e.g. a profile rename) do
+ not cause a room to jump to the top of its list(s). Empty or omitted
+ `bump_event_types` have no effect—all events in a room will be
+ considered recent activity.
+ """
+
+ class Filters(RequestBodyModel):
+ is_dm: Optional[StrictBool] = None
+ spaces: Optional[List[StrictStr]] = None
+ is_encrypted: Optional[StrictBool] = None
+ is_invite: Optional[StrictBool] = None
+ room_types: Optional[List[Union[StrictStr, None]]] = None
+ not_room_types: Optional[List[StrictStr]] = None
+ room_name_like: Optional[StrictStr] = None
+ tags: Optional[List[StrictStr]] = None
+ not_tags: Optional[List[StrictStr]] = None
+
+ # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
+ if TYPE_CHECKING:
+ ranges: Optional[List[Tuple[int, int]]] = None
+ else:
+ ranges: Optional[List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]] = None # type: ignore[valid-type]
+ sort: Optional[List[StrictStr]] = None
+ slow_get_all_rooms: Optional[StrictBool] = False
+ include_heroes: Optional[StrictBool] = False
+ filters: Optional[Filters] = None
+ bump_event_types: Optional[List[StrictStr]] = None
+
+ class RoomSubscription(CommonRoomParameters):
+ pass
+
+ class Extension(RequestBodyModel):
+ enabled: Optional[StrictBool] = False
+ lists: Optional[List[StrictStr]] = None
+ rooms: Optional[List[StrictStr]] = None
+
+ # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
+ if TYPE_CHECKING:
+ lists: Optional[Dict[str, SlidingSyncList]] = None
+ else:
+ lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = None # type: ignore[valid-type]
+ room_subscriptions: Optional[Dict[StrictStr, RoomSubscription]] = None
+ extensions: Optional[Dict[StrictStr, Extension]] = None
+
+ @validator("lists")
+ def lists_length_check(
+ cls, value: Optional[Dict[str, SlidingSyncList]]
+ ) -> Optional[Dict[str, SlidingSyncList]]:
+ if value is not None:
+ assert len(value) <= 100, f"Max lists: 100 but saw {len(value)}"
+ return value
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index fb4d44211e..61fdf71a27 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -292,6 +292,9 @@ class RoomStateEventRestServlet(RestServlet):
try:
if event_type == EventTypes.Member:
membership = content.get("membership", None)
+ if not isinstance(membership, str):
+ raise SynapseError(400, "Invalid membership (must be a string)")
+
event_id, _ = await self.room_member_handler.update_membership(
requester,
target=UserID.from_string(state_key),
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 27ea943e31..385b102b3d 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -33,6 +33,7 @@ from synapse.events.utils import (
format_event_raw,
)
from synapse.handlers.presence import format_user_presence_state
+from synapse.handlers.sliding_sync import SlidingSyncConfig, SlidingSyncResult
from synapse.handlers.sync import (
ArchivedSyncResult,
InvitedSyncResult,
@@ -43,9 +44,16 @@ from synapse.handlers.sync import (
SyncVersion,
)
from synapse.http.server import HttpServer
-from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
+from synapse.http.servlet import (
+ RestServlet,
+ parse_and_validate_json_object_from_request,
+ parse_boolean,
+ parse_integer,
+ parse_string,
+)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace_with_opname
+from synapse.rest.client.models import SlidingSyncBody
from synapse.types import JsonDict, Requester, StreamToken
from synapse.util import json_decoder
from synapse.util.caches.lrucache import LruCache
@@ -735,8 +743,228 @@ class SlidingSyncE2eeRestServlet(RestServlet):
return 200, response
+class SlidingSyncRestServlet(RestServlet):
+ """
+ API endpoint for MSC3575 Sliding Sync `/sync`. Allows for clients to request a
+ subset (sliding window) of rooms, state, and timeline events (just what they need)
+ in order to bootstrap quickly and subscribe to only what the client cares about.
+ Because the client can specify what it cares about, we can respond quickly and skip
+ all of the work we would normally have to do with a sync v2 response.
+
+ Request query parameters:
+ timeout: How long to wait for new events in milliseconds.
+ pos: Stream position token when asking for incremental deltas.
+
+ Request body::
+ {
+ // Sliding Window API
+ "lists": {
+ "foo-list": {
+ "ranges": [ [0, 99] ],
+ "sort": [ "by_notification_level", "by_recency", "by_name" ],
+ "required_state": [
+ ["m.room.join_rules", ""],
+ ["m.room.history_visibility", ""],
+ ["m.space.child", "*"]
+ ],
+ "timeline_limit": 10,
+ "filters": {
+ "is_dm": true
+ },
+ "bump_event_types": [ "m.room.message", "m.room.encrypted" ],
+ }
+ },
+ // Room Subscriptions API
+ "room_subscriptions": {
+ "!sub1:bar": {
+ "required_state": [ ["*","*"] ],
+ "timeline_limit": 10,
+ "include_old_rooms": {
+ "timeline_limit": 1,
+ "required_state": [ ["m.room.tombstone", ""], ["m.room.create", ""] ],
+ }
+ }
+ },
+ // Extensions API
+ "extensions": {}
+ }
+
+ Response JSON::
+ {
+ "next_pos": "s58_224_0_13_10_1_1_16_0_1",
+ "lists": {
+ "foo-list": {
+ "count": 1337,
+ "ops": [{
+ "op": "SYNC",
+ "range": [0, 99],
+ "room_ids": [
+ "!foo:bar",
+ // ... 99 more room IDs
+ ]
+ }]
+ }
+ },
+ // Aggregated rooms from lists and room subscriptions
+ "rooms": {
+ // Room from room subscription
+ "!sub1:bar": {
+ "name": "Alice and Bob",
+ "avatar": "mxc://...",
+ "initial": true,
+ "required_state": [
+ {"sender":"@alice:example.com","type":"m.room.create", "state_key":"", "content":{"creator":"@alice:example.com"}},
+ {"sender":"@alice:example.com","type":"m.room.join_rules", "state_key":"", "content":{"join_rule":"invite"}},
+ {"sender":"@alice:example.com","type":"m.room.history_visibility", "state_key":"", "content":{"history_visibility":"joined"}},
+ {"sender":"@alice:example.com","type":"m.room.member", "state_key":"@alice:example.com", "content":{"membership":"join"}}
+ ],
+ "timeline": [
+ {"sender":"@alice:example.com","type":"m.room.create", "state_key":"", "content":{"creator":"@alice:example.com"}},
+ {"sender":"@alice:example.com","type":"m.room.join_rules", "state_key":"", "content":{"join_rule":"invite"}},
+ {"sender":"@alice:example.com","type":"m.room.history_visibility", "state_key":"", "content":{"history_visibility":"joined"}},
+ {"sender":"@alice:example.com","type":"m.room.member", "state_key":"@alice:example.com", "content":{"membership":"join"}},
+ {"sender":"@alice:example.com","type":"m.room.message", "content":{"body":"A"}},
+ {"sender":"@alice:example.com","type":"m.room.message", "content":{"body":"B"}},
+ ],
+ "prev_batch": "t111_222_333",
+ "joined_count": 41,
+ "invited_count": 1,
+ "notification_count": 1,
+ "highlight_count": 0
+ },
+ // rooms from list
+ "!foo:bar": {
+ "name": "The calculated room name",
+ "avatar": "mxc://...",
+ "initial": true,
+ "required_state": [
+ {"sender":"@alice:example.com","type":"m.room.join_rules", "state_key":"", "content":{"join_rule":"invite"}},
+ {"sender":"@alice:example.com","type":"m.room.history_visibility", "state_key":"", "content":{"history_visibility":"joined"}},
+ {"sender":"@alice:example.com","type":"m.space.child", "state_key":"!foo:example.com", "content":{"via":["example.com"]}},
+ {"sender":"@alice:example.com","type":"m.space.child", "state_key":"!bar:example.com", "content":{"via":["example.com"]}},
+ {"sender":"@alice:example.com","type":"m.space.child", "state_key":"!baz:example.com", "content":{"via":["example.com"]}}
+ ],
+ "timeline": [
+ {"sender":"@alice:example.com","type":"m.room.join_rules", "state_key":"", "content":{"join_rule":"invite"}},
+ {"sender":"@alice:example.com","type":"m.room.message", "content":{"body":"A"}},
+ {"sender":"@alice:example.com","type":"m.room.message", "content":{"body":"B"}},
+ {"sender":"@alice:example.com","type":"m.room.message", "content":{"body":"C"}},
+ {"sender":"@alice:example.com","type":"m.room.message", "content":{"body":"D"}},
+ ],
+ "prev_batch": "t111_222_333",
+ "joined_count": 4,
+ "invited_count": 0,
+ "notification_count": 54,
+ "highlight_count": 3
+ },
+ // ... 99 more items
+ },
+ "extensions": {}
+ }
+ """
+
+ PATTERNS = client_patterns(
+ "/org.matrix.msc3575/sync$", releases=[], v1=False, unstable=True
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastores().main
+ self.filtering = hs.get_filtering()
+ self.sliding_sync_handler = hs.get_sliding_sync_handler()
+
+ # TODO: Update this to `on_GET` once we figure out how we want to handle params
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
+ user = requester.user
+ device_id = requester.device_id
+
+ timeout = parse_integer(request, "timeout", default=0)
+ # Position in the stream
+ from_token_string = parse_string(request, "pos")
+
+ from_token = None
+ if from_token_string is not None:
+ from_token = await StreamToken.from_string(self.store, from_token_string)
+
+ # TODO: We currently don't know whether we're going to use sticky params or
+ # maybe some filters like sync v2 where they are built up once and referenced
+ # by filter ID. For now, we will just prototype with always passing everything
+ # in.
+ body = parse_and_validate_json_object_from_request(request, SlidingSyncBody)
+ logger.info("Sliding sync request: %r", body)
+
+ sync_config = SlidingSyncConfig(
+ user=user,
+ device_id=device_id,
+ # FIXME: Currently, we're just manually copying the fields from the
+ # `SlidingSyncBody` into the config. How can we guarantee into the future
+ # that we don't forget any? I would like something more structured like
+ # `copy_attributes(from=body, to=config)`
+ lists=body.lists,
+ room_subscriptions=body.room_subscriptions,
+ extensions=body.extensions,
+ )
+
+ sliding_sync_results = await self.sliding_sync_handler.wait_for_sync_for_user(
+ requester,
+ sync_config,
+ from_token,
+ timeout,
+ )
+
+ # The client may have disconnected by now; don't bother to serialize the
+ # response if so.
+ if request._disconnected:
+ logger.info("Client has disconnected; not serializing response.")
+ return 200, {}
+
+ response_content = await self.encode_response(sliding_sync_results)
+
+ return 200, response_content
+
+ # TODO: Is there a better way to encode things?
+ async def encode_response(
+ self,
+ sliding_sync_result: SlidingSyncResult,
+ ) -> JsonDict:
+ response: JsonDict = defaultdict(dict)
+
+ response["next_pos"] = await sliding_sync_result.next_pos.to_string(self.store)
+ serialized_lists = self.encode_lists(sliding_sync_result.lists)
+ if serialized_lists:
+ response["lists"] = serialized_lists
+ response["rooms"] = {} # TODO: sliding_sync_result.rooms
+ response["extensions"] = {} # TODO: sliding_sync_result.extensions
+
+ return response
+
+ def encode_lists(
+ self, lists: Dict[str, SlidingSyncResult.SlidingWindowList]
+ ) -> JsonDict:
+ def encode_operation(
+ operation: SlidingSyncResult.SlidingWindowList.Operation,
+ ) -> JsonDict:
+ return {
+ "op": operation.op.value,
+ "range": operation.range,
+ "room_ids": operation.room_ids,
+ }
+
+ serialized_lists = {}
+ for list_key, list_result in lists.items():
+ serialized_lists[list_key] = {
+ "count": list_result.count,
+ "ops": [encode_operation(op) for op in list_result.ops],
+ }
+
+ return serialized_lists
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
SyncRestServlet(hs).register(http_server)
if hs.config.experimental.msc3575_enabled:
+ SlidingSyncRestServlet(hs).register(http_server)
SlidingSyncE2eeRestServlet(hs).register(http_server)
diff --git a/synapse/server.py b/synapse/server.py
index 95e319d2e6..ae927c3904 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -109,6 +109,7 @@ from synapse.handlers.room_summary import RoomSummaryHandler
from synapse.handlers.search import SearchHandler
from synapse.handlers.send_email import SendEmailHandler
from synapse.handlers.set_password import SetPasswordHandler
+from synapse.handlers.sliding_sync import SlidingSyncHandler
from synapse.handlers.sso import SsoHandler
from synapse.handlers.stats import StatsHandler
from synapse.handlers.sync import SyncHandler
@@ -554,6 +555,9 @@ class HomeServer(metaclass=abc.ABCMeta):
def get_sync_handler(self) -> SyncHandler:
return SyncHandler(self)
+ def get_sliding_sync_handler(self) -> SlidingSyncHandler:
+ return SlidingSyncHandler(self)
+
@cache_in_self
def get_room_list_handler(self) -> RoomListHandler:
return RoomListHandler(self)
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
new file mode 100644
index 0000000000..5c27474b96
--- /dev/null
+++ b/tests/handlers/test_sliding_sync.py
@@ -0,0 +1,1118 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import logging
+from unittest.mock import patch
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.room_versions import RoomVersions
+from synapse.rest import admin
+from synapse.rest.client import knock, login, room
+from synapse.server import HomeServer
+from synapse.storage.util.id_generators import MultiWriterIdGenerator
+from synapse.types import JsonDict, UserID
+from synapse.util import Clock
+
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.unittest import HomeserverTestCase
+
+logger = logging.getLogger(__name__)
+
+
+class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
+ """
+ Tests Sliding Sync handler `get_sync_room_ids_for_user()` to make sure it returns
+ the correct list of rooms IDs.
+ """
+
+ servlets = [
+ admin.register_servlets,
+ knock.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ # Enable sliding sync
+ config["experimental_features"] = {"msc3575_enabled": True}
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
+ self.store = self.hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+
+ def test_no_rooms(self) -> None:
+ """
+ Test when the user has never joined any rooms before
+ """
+ user1_id = self.register_user("user1", "pass")
+ # user1_tok = self.login(user1_id, "pass")
+
+ now_token = self.event_sources.get_current_token()
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=now_token,
+ to_token=now_token,
+ )
+ )
+
+ self.assertEqual(room_id_results, set())
+
+ def test_get_newly_joined_room(self) -> None:
+ """
+ Test that rooms that the user has newly_joined show up. newly_joined is when you
+ join after the `from_token` and <= `to_token`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ before_room_token = self.event_sources.get_current_token()
+
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+ after_room_token = self.event_sources.get_current_token()
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_room_token,
+ to_token=after_room_token,
+ )
+ )
+
+ self.assertEqual(room_id_results, {room_id})
+
+ def test_get_already_joined_room(self) -> None:
+ """
+ Test that rooms that the user is already joined show up.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+ after_room_token = self.event_sources.get_current_token()
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_room_token,
+ to_token=after_room_token,
+ )
+ )
+
+ self.assertEqual(room_id_results, {room_id})
+
+ def test_get_invited_banned_knocked_room(self) -> None:
+ """
+ Test that rooms that the user is invited to, banned from, and knocked on show
+ up.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ before_room_token = self.event_sources.get_current_token()
+
+ # Setup the invited room (user2 invites user1 to the room)
+ invited_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.invite(invited_room_id, targ=user1_id, tok=user2_tok)
+
+ # Setup the ban room (user2 bans user1 from the room)
+ ban_room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, is_public=True
+ )
+ self.helper.join(ban_room_id, user1_id, tok=user1_tok)
+ self.helper.ban(ban_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+
+ # Setup the knock room (user1 knocks on the room)
+ knock_room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, room_version=RoomVersions.V7.identifier
+ )
+ self.helper.send_state(
+ knock_room_id,
+ EventTypes.JoinRules,
+ {"join_rule": JoinRules.KNOCK},
+ tok=user2_tok,
+ )
+ # User1 knocks on the room
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/knock/%s" % (knock_room_id,),
+ b"{}",
+ user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ after_room_token = self.event_sources.get_current_token()
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_room_token,
+ to_token=after_room_token,
+ )
+ )
+
+ # Ensure that the invited, ban, and knock rooms show up
+ self.assertEqual(
+ room_id_results,
+ {
+ invited_room_id,
+ ban_room_id,
+ knock_room_id,
+ },
+ )
+
+ def test_get_kicked_room(self) -> None:
+ """
+ Test that a room that the user was kicked from still shows up. When the user
+ comes back to their client, they should see that they were kicked.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Setup the kick room (user2 kicks user1 from the room)
+ kick_room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, is_public=True
+ )
+ self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+ # Kick user1 from the room
+ self.helper.change_membership(
+ room=kick_room_id,
+ src=user2_id,
+ targ=user1_id,
+ tok=user2_tok,
+ membership=Membership.LEAVE,
+ extra_data={
+ "reason": "Bad manners",
+ },
+ )
+
+ after_kick_token = self.event_sources.get_current_token()
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_kick_token,
+ to_token=after_kick_token,
+ )
+ )
+
+ # The kicked room should show up
+ self.assertEqual(room_id_results, {kick_room_id})
+
+ def test_forgotten_rooms(self) -> None:
+ """
+ Forgotten rooms do not show up even if we forget after the from/to range.
+
+ Ideally, we would be able to track when the `/forget` happens and apply it
+ accordingly in the token range but the forgotten flag is only an extra bool in
+ the `room_memberships` table.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Setup a normal room that we leave. This won't show up in the sync response
+ # because we left it before our token but is good to check anyway.
+ leave_room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, is_public=True
+ )
+ self.helper.join(leave_room_id, user1_id, tok=user1_tok)
+ self.helper.leave(leave_room_id, user1_id, tok=user1_tok)
+
+ # Setup the ban room (user2 bans user1 from the room)
+ ban_room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, is_public=True
+ )
+ self.helper.join(ban_room_id, user1_id, tok=user1_tok)
+ self.helper.ban(ban_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+
+ # Setup the kick room (user2 kicks user1 from the room)
+ kick_room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, is_public=True
+ )
+ self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+ # Kick user1 from the room
+ self.helper.change_membership(
+ room=kick_room_id,
+ src=user2_id,
+ targ=user1_id,
+ tok=user2_tok,
+ membership=Membership.LEAVE,
+ extra_data={
+ "reason": "Bad manners",
+ },
+ )
+
+ before_room_forgets = self.event_sources.get_current_token()
+
+ # Forget the room after we already have our tokens. This doesn't change
+ # the membership event itself but will mark it internally in Synapse
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/r0/rooms/{leave_room_id}/forget",
+ content={},
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/r0/rooms/{ban_room_id}/forget",
+ content={},
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/r0/rooms/{kick_room_id}/forget",
+ content={},
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_room_forgets,
+ to_token=before_room_forgets,
+ )
+ )
+
+ # We shouldn't see the room because it was forgotten
+ self.assertEqual(room_id_results, set())
+
+ def test_only_newly_left_rooms_show_up(self) -> None:
+ """
+ Test that newly_left rooms still show up in the sync response but rooms that
+ were left before the `from_token` don't show up. See condition "2)" comments in
+ the `get_sync_room_ids_for_user` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Leave before we calculate the `from_token`
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Leave during the from_token/to_token range (newly_left)
+ room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room2_token = self.event_sources.get_current_token()
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_room1_token,
+ to_token=after_room2_token,
+ )
+ )
+
+ # Only the newly_left room should show up
+ self.assertEqual(room_id_results, {room_id2})
+
+ def test_no_joins_after_to_token(self) -> None:
+ """
+ Rooms we join after the `to_token` should *not* show up. See condition "1b)"
+ comments in the `get_sync_room_ids_for_user()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ before_room1_token = self.event_sources.get_current_token()
+
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Room join after our `to_token` shouldn't show up
+ room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+ _ = room_id2
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_join_during_range_and_left_room_after_to_token(self) -> None:
+ """
+ Room still shows up if we left the room but were joined during the
+ from_token/to_token. See condition "1a)" comments in the
+ `get_sync_room_ids_for_user()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ before_room1_token = self.event_sources.get_current_token()
+
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Leave the room after we already have our tokens
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ # We should still see the room because we were joined during the
+ # from_token/to_token time period.
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_join_before_range_and_left_room_after_to_token(self) -> None:
+ """
+ Room still shows up if we left the room but were joined before the `from_token`
+ so it should show up. See condition "1a)" comments in the
+ `get_sync_room_ids_for_user()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Leave the room after we already have our tokens
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ # We should still see the room because we were joined before the `from_token`
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_kicked_before_range_and_left_after_to_token(self) -> None:
+ """
+ Room still shows up if we left the room but were kicked before the `from_token`
+ so it should show up. See condition "1a)" comments in the
+ `get_sync_room_ids_for_user()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Setup the kick room (user2 kicks user1 from the room)
+ kick_room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, is_public=True
+ )
+ self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+ # Kick user1 from the room
+ self.helper.change_membership(
+ room=kick_room_id,
+ src=user2_id,
+ targ=user1_id,
+ tok=user2_tok,
+ membership=Membership.LEAVE,
+ extra_data={
+ "reason": "Bad manners",
+ },
+ )
+
+ after_kick_token = self.event_sources.get_current_token()
+
+ # Leave the room after we already have our tokens
+ #
+ # We have to join before we can leave (leave -> leave isn't a valid transition
+ # or at least it doesn't work in Synapse, 403 forbidden)
+ self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+ self.helper.leave(kick_room_id, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_kick_token,
+ to_token=after_kick_token,
+ )
+ )
+
+        # We should still see the room because we were kicked before the from/to range
+ self.assertEqual(room_id_results, {kick_room_id})
+
+ def test_newly_left_during_range_and_join_leave_after_to_token(self) -> None:
+ """
+ Newly left room should show up. But we're also testing that joining and leaving
+ after the `to_token` doesn't mess with the results. See condition "2)" and "1a)"
+ comments in the `get_sync_room_ids_for_user()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ before_room1_token = self.event_sources.get_current_token()
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # Join and leave the room during the from/to range
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Join and leave the room after we already have our tokens
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ # Room should still show up because it's newly_left during the from/to range
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_newly_left_during_range_and_join_after_to_token(self) -> None:
+ """
+ Newly left room should show up. But we're also testing that joining after the
+ `to_token` doesn't mess with the results. See condition "2)" and "1b)" comments
+ in the `get_sync_room_ids_for_user()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ before_room1_token = self.event_sources.get_current_token()
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # Join and leave the room during the from/to range
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Join the room after we already have our tokens
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ # Room should still show up because it's newly_left during the from/to range
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_no_from_token(self) -> None:
+ """
+        Test that if we don't provide a `from_token`, we get all the rooms that we're
+ joined to up to the `to_token`.
+
+ Providing `from_token` only really has the effect that it adds `newly_left`
+ rooms to the response.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+
+ # Join room1
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Join and leave the room2 before the `to_token`
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+ self.helper.leave(room_id2, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Join the room2 after we already have our tokens
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=None,
+ to_token=after_room1_token,
+ )
+ )
+
+ # Only rooms we were joined to before the `to_token` should show up
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_from_token_ahead_of_to_token(self) -> None:
+ """
+ Test when the provided `from_token` comes after the `to_token`. We should
+ basically expect the same result as having no `from_token`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+
+ # Join room1 before `before_room_token`
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Join and leave the room2 before `before_room_token`
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+ self.helper.leave(room_id2, user1_id, tok=user1_tok)
+
+ # Note: These are purposely swapped. The `from_token` should come after
+ # the `to_token` in this test
+ to_token = self.event_sources.get_current_token()
+
+ # Join room2 after `before_room_token`
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+ # --------
+
+ # Join room3 after `before_room_token`
+ self.helper.join(room_id3, user1_id, tok=user1_tok)
+
+ # Join and leave the room4 after `before_room_token`
+ self.helper.join(room_id4, user1_id, tok=user1_tok)
+ self.helper.leave(room_id4, user1_id, tok=user1_tok)
+
+ # Note: These are purposely swapped. The `from_token` should come after the
+ # `to_token` in this test
+ from_token = self.event_sources.get_current_token()
+
+ # Join the room4 after we already have our tokens
+ self.helper.join(room_id4, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=from_token,
+ to_token=to_token,
+ )
+ )
+
+ # Only rooms we were joined to before the `to_token` should show up
+ #
+ # There won't be any newly_left rooms because the `from_token` is ahead of the
+ # `to_token` and that range will give no membership changes to check.
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_leave_before_range_and_join_leave_after_to_token(self) -> None:
+ """
+ Old left room shouldn't show up. But we're also testing that joining and leaving
+ after the `to_token` doesn't mess with the results. See condition "1a)" comments
+ in the `get_sync_room_ids_for_user()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # Join and leave the room before the from/to range
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Join and leave the room after we already have our tokens
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ # Room shouldn't show up because it was left before the `from_token`
+ self.assertEqual(room_id_results, set())
+
+ def test_leave_before_range_and_join_after_to_token(self) -> None:
+ """
+ Old left room shouldn't show up. But we're also testing that joining after the
+ `to_token` doesn't mess with the results. See condition "1b)" comments in the
+ `get_sync_room_ids_for_user()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # Join and leave the room before the from/to range
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Join the room after we already have our tokens
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ # Room shouldn't show up because it was left before the `from_token`
+ self.assertEqual(room_id_results, set())
+
+ def test_join_leave_multiple_times_during_range_and_after_to_token(
+ self,
+ ) -> None:
+ """
+ Join and leave multiple times shouldn't affect rooms from showing up. It just
+ matters that we were joined or newly_left in the from/to range. But we're also
+ testing that joining and leaving after the `to_token` doesn't mess with the
+ results.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ before_room1_token = self.event_sources.get_current_token()
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # Join, leave, join back to the room before the from/to range
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Leave and Join the room multiple times after we already have our tokens
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ # Room should show up because it was newly_left and joined during the from/to range
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_join_leave_multiple_times_before_range_and_after_to_token(
+ self,
+ ) -> None:
+ """
+ Join and leave multiple times before the from/to range shouldn't affect rooms
+ from showing up. It just matters that we were joined or newly_left in the
+ from/to range. But we're also testing that joining and leaving after the
+ `to_token` doesn't mess with the results.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # Join, leave, join back to the room before the from/to range
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Leave and Join the room multiple times after we already have our tokens
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ # Room should show up because we were joined before the from/to range
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_invite_before_range_and_join_leave_after_to_token(
+ self,
+ ) -> None:
+ """
+ Make it look like we joined after the token range but we were invited before the
+ from/to range so the room should still show up. See condition "1a)" comments in
+ the `get_sync_room_ids_for_user()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+
+ # Invited to the room before the token
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Join and leave the room after we already have our tokens
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
+
+ # Room should show up because we were invited before the from/to range
+ self.assertEqual(room_id_results, {room_id1})
+
+ def test_multiple_rooms_are_not_confused(
+ self,
+ ) -> None:
+ """
+ Test that multiple rooms are not confused as we fixup the list. This test is
+ spawning from a real world bug in the code where I was accidentally using
+ `event.room_id` in one of the fix-up loops but the `event` being referenced was
+ actually from a different loop.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # We create the room with user2 so the room isn't left with no members when we
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+
+ # Invited and left the room before the token
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ # Invited to room2
+ self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok)
+
+ before_room3_token = self.event_sources.get_current_token()
+
+ # Invited and left room3 during the from/to range
+ room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ self.helper.invite(room_id3, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.leave(room_id3, user1_id, tok=user1_tok)
+
+ after_room3_token = self.event_sources.get_current_token()
+
+ # Join and leave the room after we already have our tokens
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ # Leave room2
+ self.helper.leave(room_id2, user1_id, tok=user1_tok)
+ # Leave room3
+ self.helper.leave(room_id3, user1_id, tok=user1_tok)
+
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_room3_token,
+ to_token=after_room3_token,
+ )
+ )
+
+ self.assertEqual(
+ room_id_results,
+ {
+ # `room_id1` shouldn't show up because we left before the from/to range
+ #
+ # Room should show up because we were invited before the from/to range
+ room_id2,
+ # Room should show up because it was newly_left during the from/to range
+ room_id3,
+ },
+ )
+
+
+class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
+ """
+ Tests Sliding Sync handler `get_sync_room_ids_for_user()` to make sure it works with
+ sharded event stream_writers enabled
+ """
+
+ servlets = [
+ admin.register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def default_config(self) -> dict:
+ config = super().default_config()
+ # Enable sliding sync
+ config["experimental_features"] = {"msc3575_enabled": True}
+
+        # Enable sharded event stream_writers
+ config["stream_writers"] = {"events": ["worker1", "worker2", "worker3"]}
+ config["instance_map"] = {
+ "main": {"host": "testserv", "port": 8765},
+ "worker1": {"host": "testserv", "port": 1001},
+ "worker2": {"host": "testserv", "port": 1002},
+ "worker3": {"host": "testserv", "port": 1003},
+ }
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
+ self.store = self.hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+
+ def _create_room(self, room_id: str, user_id: str, tok: str) -> None:
+ """
+        Create a room with a specific room_id. We use this so that we have a
+ consistent room_id across test runs that hashes to the same value and will be
+ sharded to a known worker in the tests.
+ """
+
+ # We control the room ID generation by patching out the
+ # `_generate_room_id` method
+ with patch(
+ "synapse.handlers.room.RoomCreationHandler._generate_room_id"
+ ) as mock:
+ mock.side_effect = lambda: room_id
+ self.helper.create_room_as(user_id, tok=tok)
+
+ def test_sharded_event_persisters(self) -> None:
+ """
+ This test should catch bugs that would come from flawed stream position
+ (`stream_ordering`) comparisons or making `RoomStreamToken`'s naively. To
+ compare event positions properly, you need to consider both the `instance_name`
+ and `stream_ordering` together.
+
+ The test creates three event persister workers and a room that is sharded to
+ each worker. On worker2, we make the event stream position stuck so that it lags
+ behind the other workers and we start getting `RoomStreamToken` that have an
+        `instance_map` component (i.e. `m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}`).
+
+ We then send some events to advance the stream positions of worker1 and worker3
+ but worker2 is lagging behind because it's stuck. We are specifically testing
+ that `get_sync_room_ids_for_user(from_token=xxx, to_token=xxx)` should work
+ correctly in these adverse conditions.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {"worker_name": "worker1"},
+ )
+
+ worker_hs2 = self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {"worker_name": "worker2"},
+ )
+
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {"worker_name": "worker3"},
+ )
+
+ # Specially crafted room IDs that get persisted on different workers.
+ #
+ # Sharded to worker1
+ room_id1 = "!fooo:test"
+ # Sharded to worker2
+ room_id2 = "!bar:test"
+ # Sharded to worker3
+ room_id3 = "!quux:test"
+
+ # Create rooms on the different workers.
+ self._create_room(room_id1, user2_id, user2_tok)
+ self._create_room(room_id2, user2_id, user2_tok)
+ self._create_room(room_id3, user2_id, user2_tok)
+ join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
+ # Leave room2
+ self.helper.leave(room_id2, user1_id, tok=user1_tok)
+ join_response3 = self.helper.join(room_id3, user1_id, tok=user1_tok)
+ # Leave room3
+ self.helper.leave(room_id3, user1_id, tok=user1_tok)
+
+ # Ensure that the events were sharded to different workers.
+ pos1 = self.get_success(
+ self.store.get_position_for_event(join_response1["event_id"])
+ )
+ self.assertEqual(pos1.instance_name, "worker1")
+ pos2 = self.get_success(
+ self.store.get_position_for_event(join_response2["event_id"])
+ )
+ self.assertEqual(pos2.instance_name, "worker2")
+ pos3 = self.get_success(
+ self.store.get_position_for_event(join_response3["event_id"])
+ )
+ self.assertEqual(pos3.instance_name, "worker3")
+
+ before_stuck_activity_token = self.event_sources.get_current_token()
+
+ # We now gut wrench into the events stream `MultiWriterIdGenerator` on worker2 to
+ # mimic it getting stuck persisting an event. This ensures that when we send an
+ # event on worker1/worker3 we end up in a state where worker2 events stream
+ # position lags that on worker1/worker3, resulting in a RoomStreamToken with a
+ # non-empty `instance_map` component.
+ #
+ # Worker2's event stream position will not advance until we call `__aexit__`
+ # again.
+ worker_store2 = worker_hs2.get_datastores().main
+ assert isinstance(worker_store2._stream_id_gen, MultiWriterIdGenerator)
+ actx = worker_store2._stream_id_gen.get_next()
+ self.get_success(actx.__aenter__())
+
+ # For room_id1/worker1: leave and join the room to advance the stream position
+ # and generate membership changes.
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ # For room_id2/worker2: which is currently stuck, join the room.
+ join_on_worker2_response = self.helper.join(room_id2, user1_id, tok=user1_tok)
+ # For room_id3/worker3: leave and join the room to advance the stream position
+ # and generate membership changes.
+ self.helper.leave(room_id3, user1_id, tok=user1_tok)
+ join_on_worker3_response = self.helper.join(room_id3, user1_id, tok=user1_tok)
+
+ # Get a token while things are stuck after our activity
+ stuck_activity_token = self.event_sources.get_current_token()
+ logger.info("stuck_activity_token %s", stuck_activity_token)
+ # Let's make sure we're working with a token that has an `instance_map`
+ self.assertNotEqual(len(stuck_activity_token.room_key.instance_map), 0)
+
+ # Just double check that the join event on worker2 (that is stuck) happened
+ # after the position recorded for worker2 in the token but before the max
+ # position in the token. This is crucial for the behavior we're trying to test.
+ join_on_worker2_pos = self.get_success(
+ self.store.get_position_for_event(join_on_worker2_response["event_id"])
+ )
+ logger.info("join_on_worker2_pos %s", join_on_worker2_pos)
+        # Ensure the join technically came after our token
+ self.assertGreater(
+ join_on_worker2_pos.stream,
+ stuck_activity_token.room_key.get_stream_pos_for_instance("worker2"),
+ )
+ # But less than the max stream position of some other worker
+ self.assertLess(
+ join_on_worker2_pos.stream,
+ # max
+ stuck_activity_token.room_key.get_max_stream_pos(),
+ )
+
+ # Just double check that the join event on worker3 happened after the min stream
+ # value in the token but still within the position recorded for worker3. This is
+ # crucial for the behavior we're trying to test.
+ join_on_worker3_pos = self.get_success(
+ self.store.get_position_for_event(join_on_worker3_response["event_id"])
+ )
+ logger.info("join_on_worker3_pos %s", join_on_worker3_pos)
+ # Ensure the join came after the min but still encapsulated by the token
+ self.assertGreaterEqual(
+ join_on_worker3_pos.stream,
+ # min
+ stuck_activity_token.room_key.stream,
+ )
+ self.assertLessEqual(
+ join_on_worker3_pos.stream,
+ stuck_activity_token.room_key.get_stream_pos_for_instance("worker3"),
+ )
+
+ # We finish the fake persisting an event we started above and advance worker2's
+ # event stream position (unstuck worker2).
+ self.get_success(actx.__aexit__(None, None, None))
+
+ # The function under test
+ room_id_results = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_stuck_activity_token,
+ to_token=stuck_activity_token,
+ )
+ )
+
+ self.assertEqual(
+ room_id_results,
+ {
+ room_id1,
+ # room_id2 shouldn't show up because we left before the from/to range
+ # and the join event during the range happened while worker2 was stuck.
+ # This means that from the perspective of the master, where the
+ # `stuck_activity_token` is generated, the stream position for worker2
+ # wasn't advanced to the join yet. Looking at the `instance_map`, the
+                # join technically comes after `stuck_activity_token`.
+ #
+ # room_id2,
+ room_id3,
+ },
+ )
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index daeb1d3ddd..a20a3fb40d 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -34,7 +34,7 @@ from synapse.api.constants import (
)
from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync
from synapse.server import HomeServer
-from synapse.types import JsonDict
+from synapse.types import JsonDict, RoomStreamToken, StreamKeyType
from synapse.util import Clock
from tests import unittest
@@ -1204,3 +1204,135 @@ class ExcludeRoomTestCase(unittest.HomeserverTestCase):
self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["join"])
self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"])
+
+
+class SlidingSyncTestCase(unittest.HomeserverTestCase):
+ """
+ Tests regarding MSC3575 Sliding Sync `/sync` endpoint.
+ """
+
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ sync.register_servlets,
+ devices.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ # Enable sliding sync
+ config["experimental_features"] = {"msc3575_enabled": True}
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.sync_endpoint = "/_matrix/client/unstable/org.matrix.msc3575/sync"
+ self.store = hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+
+ def test_sync_list(self) -> None:
+ """
+ Test that room IDs show up in the Sliding Sync lists
+ """
+ alice_user_id = self.register_user("alice", "correcthorse")
+ alice_access_token = self.login(alice_user_id, "correcthorse")
+
+ room_id = self.helper.create_room_as(
+ alice_user_id, tok=alice_access_token, is_public=True
+ )
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 99]],
+ "sort": ["by_notification_level", "by_recency", "by_name"],
+ "required_state": [
+ ["m.room.join_rules", ""],
+ ["m.room.history_visibility", ""],
+ ["m.space.child", "*"],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ },
+ access_token=alice_access_token,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Make sure it has the foo-list we requested
+ self.assertListEqual(
+ list(channel.json_body["lists"].keys()),
+ ["foo-list"],
+ channel.json_body["lists"].keys(),
+ )
+
+ # Make sure the list includes the room we are joined to
+ self.assertListEqual(
+ list(channel.json_body["lists"]["foo-list"]["ops"]),
+ [
+ {
+ "op": "SYNC",
+ "range": [0, 99],
+ "room_ids": [room_id],
+ }
+ ],
+ channel.json_body["lists"]["foo-list"],
+ )
+
+ def test_wait_for_sync_token(self) -> None:
+ """
+ Test that worker will wait until it catches up to the given token
+ """
+ alice_user_id = self.register_user("alice", "correcthorse")
+ alice_access_token = self.login(alice_user_id, "correcthorse")
+
+ # Create a future token that will cause us to wait. Since we never send a new
+ # event to reach that future stream_ordering, the worker will wait until the
+ # full timeout.
+ current_token = self.event_sources.get_current_token()
+ future_position_token = current_token.copy_and_replace(
+ StreamKeyType.ROOM,
+ RoomStreamToken(stream=current_token.room_key.stream + 1),
+ )
+
+ future_position_token_serialized = self.get_success(
+ future_position_token.to_string(self.store)
+ )
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint + f"?pos={future_position_token_serialized}",
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 99]],
+ "sort": ["by_notification_level", "by_recency", "by_name"],
+ "required_state": [
+ ["m.room.join_rules", ""],
+ ["m.room.history_visibility", ""],
+ ["m.space.child", "*"],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ },
+ access_token=alice_access_token,
+ await_result=False,
+ )
+ # Block for 10 seconds to make `notifier.wait_for_stream_token(from_token)`
+ # timeout
+ with self.assertRaises(TimedOutException):
+ channel.await_result(timeout_ms=9900)
+ channel.await_result(timeout_ms=200)
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # We expect the `next_pos` in the result to be the same as what we requested
+ # with because we weren't able to find anything new yet.
+ self.assertEqual(
+ channel.json_body["next_pos"], future_position_token_serialized
+ )
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index 7362bde7ab..f0ba40a1f1 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -330,9 +330,12 @@ class RestHelper:
data,
)
- assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
+ assert (
+ channel.code == expect_code
+ ), "Expected: %d, got: %d, PUT %s -> resp: %r" % (
expect_code,
channel.code,
+ path,
channel.result["body"],
)
From 17d6c2828557cb39f24bfbf2816b45fe1990122d Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Fri, 7 Jun 2024 12:01:21 +0100
Subject: [PATCH 152/503] Add debug logging for when room keys are uploaded,
including whether they are replacing other room keys. (#17266)
Fixes: #17013
Add logging for whether room keys are replaced
This is motivated by the Crypto team who need to diagnose crypto issues.
The existing opentracing logging is not enough because it is not enabled
for all users.
---
changelog.d/17266.misc | 1 +
synapse/handlers/e2e_room_keys.py | 18 ++++++++++++++++++
2 files changed, 19 insertions(+)
create mode 100644 changelog.d/17266.misc
diff --git a/changelog.d/17266.misc b/changelog.d/17266.misc
new file mode 100644
index 0000000000..ce8c4ab086
--- /dev/null
+++ b/changelog.d/17266.misc
@@ -0,0 +1 @@
+Add debug logging for when room keys are uploaded, including whether they are replacing other room keys.
\ No newline at end of file
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index e76a51ba30..99f9f6e64a 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -247,6 +247,12 @@ class E2eRoomKeysHandler:
if current_room_key:
if self._should_replace_room_key(current_room_key, room_key):
log_kv({"message": "Replacing room key."})
+ logger.debug(
+ "Replacing room key. room=%s session=%s user=%s",
+ room_id,
+ session_id,
+ user_id,
+ )
# updates are done one at a time in the DB, so send
# updates right away rather than batching them up,
# like we do with the inserts
@@ -256,6 +262,12 @@ class E2eRoomKeysHandler:
changed = True
else:
log_kv({"message": "Not replacing room_key."})
+ logger.debug(
+ "Not replacing room key. room=%s session=%s user=%s",
+ room_id,
+ session_id,
+ user_id,
+ )
else:
log_kv(
{
@@ -265,6 +277,12 @@ class E2eRoomKeysHandler:
}
)
log_kv({"message": "Replacing room key."})
+ logger.debug(
+ "Inserting new room key. room=%s session=%s user=%s",
+ room_id,
+ session_id,
+ user_id,
+ )
to_insert.append((room_id, session_id, room_key))
changed = True
From ab94bce02cc6c268d8c3b693cbbbacd8ef926481 Mon Sep 17 00:00:00 2001
From: Shay
Date: Fri, 7 Jun 2024 05:54:28 -0700
Subject: [PATCH 153/503] Support MSC3916 by adding a federation `/download`
endpoint (#17172)
---
changelog.d/17172.feature | 2 +
.../federation/transport/server/__init__.py | 24 ++
synapse/federation/transport/server/_base.py | 24 +-
.../federation/transport/server/federation.py | 41 +++
synapse/media/_base.py | 63 ++++-
synapse/media/media_repository.py | 18 +-
synapse/media/media_storage.py | 223 ++++++++++++++++-
synapse/media/storage_provider.py | 40 ++-
tests/federation/test_federation_media.py | 234 ++++++++++++++++++
tests/media/test_media_storage.py | 14 +-
10 files changed, 659 insertions(+), 24 deletions(-)
create mode 100644 changelog.d/17172.feature
create mode 100644 tests/federation/test_federation_media.py
diff --git a/changelog.d/17172.feature b/changelog.d/17172.feature
new file mode 100644
index 0000000000..245dea815c
--- /dev/null
+++ b/changelog.d/17172.feature
@@ -0,0 +1,2 @@
+Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
+by adding a federation /download endpoint (#17172).
\ No newline at end of file
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index bac569e977..266675c9b8 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -19,6 +19,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
+import inspect
import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Type
@@ -33,6 +34,7 @@ from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet,
FederationUnstableClientKeysClaimServlet,
+ FederationUnstableMediaDownloadServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
@@ -315,6 +317,28 @@ def register_servlets(
):
continue
+ if servletclass == FederationUnstableMediaDownloadServlet:
+ if (
+ not hs.config.server.enable_media_repo
+ or not hs.config.experimental.msc3916_authenticated_media_enabled
+ ):
+ continue
+
+ # don't load the endpoint if the storage provider is incompatible
+ media_repo = hs.get_media_repository()
+ load_download_endpoint = True
+ for provider in media_repo.media_storage.storage_providers:
+ signature = inspect.signature(provider.backend.fetch)
+ if "federation" not in signature.parameters:
+ logger.warning(
+ f"Federation media `/download` endpoint will not be enabled as storage provider {provider.backend} is not compatible with this endpoint."
+ )
+ load_download_endpoint = False
+ break
+
+ if not load_download_endpoint:
+ continue
+
servletclass(
hs=hs,
authenticator=authenticator,
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index db0f5076a9..4e2717b565 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -360,13 +360,29 @@ class BaseFederationServlet:
"request"
)
return None
+ if (
+ func.__self__.__class__.__name__ # type: ignore
+ == "FederationUnstableMediaDownloadServlet"
+ ):
+ response = await func(
+ origin, content, request, *args, **kwargs
+ )
+ else:
+ response = await func(
+ origin, content, request.args, *args, **kwargs
+ )
+ else:
+ if (
+ func.__self__.__class__.__name__ # type: ignore
+ == "FederationUnstableMediaDownloadServlet"
+ ):
+ response = await func(
+ origin, content, request, *args, **kwargs
+ )
+ else:
response = await func(
origin, content, request.args, *args, **kwargs
)
- else:
- response = await func(
- origin, content, request.args, *args, **kwargs
- )
finally:
# if we used the origin's context as the parent, add a new span using
# the servlet span as a parent, so that we have a link
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index a59734785f..1f02451efa 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -44,10 +44,13 @@ from synapse.federation.transport.server._base import (
)
from synapse.http.servlet import (
parse_boolean_from_args,
+ parse_integer,
parse_integer_from_args,
parse_string_from_args,
parse_strings_from_args,
)
+from synapse.http.site import SynapseRequest
+from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION
from synapse.util.ratelimitutils import FederationRateLimiter
@@ -787,6 +790,43 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
return 200, {"account_statuses": statuses, "failures": failures}
+class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
+ """
+ Implementation of new federation media `/download` endpoint outlined in MSC3916. Returns
+ a multipart/form-data response consisting of a JSON object and the requested media
+ item. This endpoint only returns local media.
+ """
+
+ PATH = "/media/download/(?P[^/]*)"
+ PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3916"
+ RATELIMIT = True
+
+ def __init__(
+ self,
+ hs: "HomeServer",
+ ratelimiter: FederationRateLimiter,
+ authenticator: Authenticator,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.media_repo = self.hs.get_media_repository()
+
+ async def on_GET(
+ self,
+ origin: Optional[str],
+ content: Literal[None],
+ request: SynapseRequest,
+ media_id: str,
+ ) -> None:
+ max_timeout_ms = parse_integer(
+ request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
+ )
+ max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
+ await self.media_repo.get_local_media(
+ request, media_id, None, max_timeout_ms, federation=True
+ )
+
+
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationSendServlet,
FederationEventServlet,
@@ -818,4 +858,5 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationV1SendKnockServlet,
FederationMakeKnockServlet,
FederationAccountStatusServlet,
+ FederationUnstableMediaDownloadServlet,
)
diff --git a/synapse/media/_base.py b/synapse/media/_base.py
index 3fbed6062f..19bca94170 100644
--- a/synapse/media/_base.py
+++ b/synapse/media/_base.py
@@ -25,7 +25,16 @@ import os
import urllib
from abc import ABC, abstractmethod
from types import TracebackType
-from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
+from typing import (
+ TYPE_CHECKING,
+ Awaitable,
+ Dict,
+ Generator,
+ List,
+ Optional,
+ Tuple,
+ Type,
+)
import attr
@@ -39,6 +48,11 @@ from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.util.stringutils import is_ascii
+if TYPE_CHECKING:
+ from synapse.media.media_storage import MultipartResponder
+ from synapse.storage.databases.main.media_repository import LocalMedia
+
+
logger = logging.getLogger(__name__)
# list all text content types that will have the charset default to UTF-8 when
@@ -260,6 +274,53 @@ def _can_encode_filename_as_token(x: str) -> bool:
return True
+async def respond_with_multipart_responder(
+ request: SynapseRequest,
+ responder: "Optional[MultipartResponder]",
+ media_info: "LocalMedia",
+) -> None:
+ """
+ Responds via a Multipart responder for the federation media `/download` requests
+
+ Args:
+ request: the federation request to respond to
+ responder: the Multipart responder which will send the response
+ media_info: metadata about the media item
+ """
+ if not responder:
+ respond_404(request)
+ return
+
+ # If we have a responder we *must* use it as a context manager.
+ with responder:
+ if request._disconnected:
+ logger.warning(
+ "Not sending response to request %s, already disconnected.", request
+ )
+ return
+
+ logger.debug("Responding to media request with responder %s", responder)
+ if media_info.media_length is not None:
+ request.setHeader(b"Content-Length", b"%d" % (media_info.media_length,))
+ request.setHeader(
+ b"Content-Type", b"multipart/mixed; boundary=%s" % responder.boundary
+ )
+
+ try:
+ await responder.write_to_consumer(request)
+ except Exception as e:
+ # The majority of the time this will be due to the client having gone
+ # away. Unfortunately, Twisted simply throws a generic exception at us
+ # in that case.
+ logger.warning("Failed to write to consumer: %s %s", type(e), e)
+
+ # Unregister the producer, if it has one, so Twisted doesn't complain
+ if request.producer:
+ request.unregisterProducer()
+
+ finish_request(request)
+
+
async def respond_with_responder(
request: SynapseRequest,
responder: "Optional[Responder]",
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 6ed56099ca..c335e518a0 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -54,10 +54,11 @@ from synapse.media._base import (
ThumbnailInfo,
get_filename_from_headers,
respond_404,
+ respond_with_multipart_responder,
respond_with_responder,
)
from synapse.media.filepath import MediaFilePaths
-from synapse.media.media_storage import MediaStorage
+from synapse.media.media_storage import MediaStorage, MultipartResponder
from synapse.media.storage_provider import StorageProviderWrapper
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
from synapse.media.url_previewer import UrlPreviewer
@@ -429,6 +430,7 @@ class MediaRepository:
media_id: str,
name: Optional[str],
max_timeout_ms: int,
+ federation: bool = False,
) -> None:
"""Responds to requests for local media, if exists, or returns 404.
@@ -440,6 +442,7 @@ class MediaRepository:
the filename in the Content-Disposition header of the response.
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
+ federation: whether the local media being fetched is for a federation request
Returns:
Resolves once a response has successfully been written to request
@@ -459,10 +462,17 @@ class MediaRepository:
file_info = FileInfo(None, media_id, url_cache=bool(url_cache))
- responder = await self.media_storage.fetch_media(file_info)
- await respond_with_responder(
- request, responder, media_type, media_length, upload_name
+ responder = await self.media_storage.fetch_media(
+ file_info, media_info, federation
)
+ if federation:
+ # this really should be a Multipart responder but just in case
+ assert isinstance(responder, MultipartResponder)
+ await respond_with_multipart_responder(request, responder, media_info)
+ else:
+ await respond_with_responder(
+ request, responder, media_type, media_length, upload_name
+ )
async def get_remote_media(
self,
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
index b3cd3fd8f4..2f55d12b6b 100644
--- a/synapse/media/media_storage.py
+++ b/synapse/media/media_storage.py
@@ -19,9 +19,12 @@
#
#
import contextlib
+import json
import logging
import os
import shutil
+from contextlib import closing
+from io import BytesIO
from types import TracebackType
from typing import (
IO,
@@ -30,14 +33,19 @@ from typing import (
AsyncIterator,
BinaryIO,
Callable,
+ List,
Optional,
Sequence,
Tuple,
Type,
+ Union,
)
+from uuid import uuid4
import attr
+from zope.interface import implementer
+from twisted.internet import defer, interfaces
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender
@@ -48,15 +56,19 @@ from synapse.logging.opentracing import start_active_span, trace, trace_with_opn
from synapse.util import Clock
from synapse.util.file_consumer import BackgroundFileConsumer
+from ..storage.databases.main.media_repository import LocalMedia
+from ..types import JsonDict
from ._base import FileInfo, Responder
from .filepath import MediaFilePaths
if TYPE_CHECKING:
- from synapse.media.storage_provider import StorageProvider
+ from synapse.media.storage_provider import StorageProviderWrapper
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
+CRLF = b"\r\n"
+
class MediaStorage:
"""Responsible for storing/fetching files from local sources.
@@ -73,7 +85,7 @@ class MediaStorage:
hs: "HomeServer",
local_media_directory: str,
filepaths: MediaFilePaths,
- storage_providers: Sequence["StorageProvider"],
+ storage_providers: Sequence["StorageProviderWrapper"],
):
self.hs = hs
self.reactor = hs.get_reactor()
@@ -169,15 +181,23 @@ class MediaStorage:
raise e from None
- async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
+ async def fetch_media(
+ self,
+ file_info: FileInfo,
+ media_info: Optional[LocalMedia] = None,
+ federation: bool = False,
+ ) -> Optional[Responder]:
"""Attempts to fetch media described by file_info from the local cache
and configured storage providers.
Args:
- file_info
+ file_info: Metadata about the media file
+ media_info: Metadata about the media item
+ federation: Whether this file is being fetched for a federation request
Returns:
- Returns a Responder if the file was found, otherwise None.
+ If the file was found returns a Responder (a Multipart Responder if the requested
+ file is for the federation /download endpoint), otherwise None.
"""
paths = [self._file_info_to_path(file_info)]
@@ -197,12 +217,19 @@ class MediaStorage:
local_path = os.path.join(self.local_media_directory, path)
if os.path.exists(local_path):
logger.debug("responding with local file %s", local_path)
- return FileResponder(open(local_path, "rb"))
+ if federation:
+ assert media_info is not None
+ boundary = uuid4().hex.encode("ascii")
+ return MultipartResponder(
+ open(local_path, "rb"), media_info, boundary
+ )
+ else:
+ return FileResponder(open(local_path, "rb"))
logger.debug("local file %s did not exist", local_path)
for provider in self.storage_providers:
for path in paths:
- res: Any = await provider.fetch(path, file_info)
+ res: Any = await provider.fetch(path, file_info, media_info, federation)
if res:
logger.debug("Streaming %s from %s", path, provider)
return res
@@ -316,7 +343,7 @@ class FileResponder(Responder):
"""Wraps an open file that can be sent to a request.
Args:
- open_file: A file like object to be streamed ot the client,
+ open_file: A file like object to be streamed to the client,
is closed when finished streaming.
"""
@@ -337,6 +364,38 @@ class FileResponder(Responder):
self.open_file.close()
+class MultipartResponder(Responder):
+ """Wraps an open file, formats the response according to MSC3916 and sends it to a
+ federation request.
+
+ Args:
+ open_file: A file like object to be streamed to the client,
+ is closed when finished streaming.
+ media_info: metadata about the media item
+ boundary: bytes to use for the multipart response boundary
+ """
+
+ def __init__(self, open_file: IO, media_info: LocalMedia, boundary: bytes) -> None:
+ self.open_file = open_file
+ self.media_info = media_info
+ self.boundary = boundary
+
+ def write_to_consumer(self, consumer: IConsumer) -> Deferred:
+ return make_deferred_yieldable(
+ MultipartFileSender().beginFileTransfer(
+ self.open_file, consumer, self.media_info.media_type, {}, self.boundary
+ )
+ )
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.open_file.close()
+
+
class SpamMediaException(NotFoundError):
"""The media was blocked by a spam checker, so we simply 404 the request (in
the same way as if it was quarantined).
@@ -370,3 +429,151 @@ class ReadableFileWrapper:
# We yield to the reactor by sleeping for 0 seconds.
await self.clock.sleep(0)
+
+
+@implementer(interfaces.IProducer)
+class MultipartFileSender:
+ """
+ A producer that sends the contents of a file to a federation request in the format
+ outlined in MSC3916 - a multipart/format-data response where the first field is a
+ JSON object and the second is the requested file.
+
+ This is a slight re-writing of twisted.protocols.basic.FileSender to achieve the format
+ outlined above.
+ """
+
+ CHUNK_SIZE = 2**14
+
+ lastSent = ""
+ deferred: Optional[defer.Deferred] = None
+
+ def beginFileTransfer(
+ self,
+ file: IO,
+ consumer: IConsumer,
+ file_content_type: str,
+ json_object: JsonDict,
+ boundary: bytes,
+ ) -> Deferred:
+ """
+ Begin transferring a file
+
+ Args:
+ file: The file object to read data from
+ consumer: The synapse request to write the data to
+ file_content_type: The content-type of the file
+ json_object: The JSON object to write to the first field of the response
+ boundary: bytes to be used as the multipart/form-data boundary
+
+ Returns: A deferred whose callback will be invoked when the file has
+ been completely written to the consumer. The last byte written to the
+ consumer is passed to the callback.
+ """
+ self.file: Optional[IO] = file
+ self.consumer = consumer
+ self.json_field = json_object
+ self.json_field_written = False
+ self.content_type_written = False
+ self.file_content_type = file_content_type
+ self.boundary = boundary
+ self.deferred: Deferred = defer.Deferred()
+ self.consumer.registerProducer(self, False)
+ # while it's not entirely clear why this assignment is necessary, it mirrors
+ # the behavior in FileSender.beginFileTransfer and thus is preserved here
+ deferred = self.deferred
+ return deferred
+
+ def resumeProducing(self) -> None:
+ # write the first field, which will always be a json field
+ if not self.json_field_written:
+ self.consumer.write(CRLF + b"--" + self.boundary + CRLF)
+
+ content_type = Header(b"Content-Type", b"application/json")
+ self.consumer.write(bytes(content_type) + CRLF)
+
+ json_field = json.dumps(self.json_field)
+ json_bytes = json_field.encode("utf-8")
+ self.consumer.write(json_bytes)
+ self.consumer.write(CRLF + b"--" + self.boundary + CRLF)
+
+ self.json_field_written = True
+
+ chunk: Any = ""
+ if self.file:
+ # if we haven't written the content type yet, do so
+ if not self.content_type_written:
+ type = self.file_content_type.encode("utf-8")
+ content_type = Header(b"Content-Type", type)
+ self.consumer.write(bytes(content_type) + CRLF)
+ self.content_type_written = True
+
+ chunk = self.file.read(self.CHUNK_SIZE)
+
+ if not chunk:
+ # we've reached the end of the file
+ self.consumer.write(CRLF + b"--" + self.boundary + b"--" + CRLF)
+ self.file = None
+ self.consumer.unregisterProducer()
+
+ if self.deferred:
+ self.deferred.callback(self.lastSent)
+ self.deferred = None
+ return
+
+ self.consumer.write(chunk)
+ self.lastSent = chunk[-1:]
+
+ def pauseProducing(self) -> None:
+ pass
+
+ def stopProducing(self) -> None:
+ if self.deferred:
+ self.deferred.errback(Exception("Consumer asked us to stop producing"))
+ self.deferred = None
+
+
+class Header:
+ """
+ `Header` is a tiny wrapper class that produces
+ request headers. We can't use standard python header
+ class because it encodes unicode fields using =? bla bla ?=
+ encoding, which is correct, but no one in HTTP world expects
+ that, everyone wants utf-8 raw bytes. (stolen from treq.multipart)
+
+ """
+
+ def __init__(
+ self,
+ name: bytes,
+ value: Any,
+ params: Optional[List[Tuple[Any, Any]]] = None,
+ ):
+ self.name = name
+ self.value = value
+ self.params = params or []
+
+ def add_param(self, name: Any, value: Any) -> None:
+ self.params.append((name, value))
+
+ def __bytes__(self) -> bytes:
+ with closing(BytesIO()) as h:
+ h.write(self.name + b": " + escape(self.value).encode("us-ascii"))
+ if self.params:
+ for name, val in self.params:
+ h.write(b"; ")
+ h.write(escape(name).encode("us-ascii"))
+ h.write(b"=")
+ h.write(b'"' + escape(val).encode("utf-8") + b'"')
+ h.seek(0)
+ return h.read()
+
+
+def escape(value: Union[str, bytes]) -> str:
+ """
+ This function prevents header values from corrupting the request,
+ a newline in the file name parameter makes form-data request unreadable
+ for a majority of parsers. (stolen from treq.multipart)
+ """
+ if isinstance(value, bytes):
+ value = value.decode("utf-8")
+ return value.replace("\r", "").replace("\n", "").replace('"', '\\"')
diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py
index 06e5d27a53..a2d50adf65 100644
--- a/synapse/media/storage_provider.py
+++ b/synapse/media/storage_provider.py
@@ -24,14 +24,16 @@ import logging
import os
import shutil
from typing import TYPE_CHECKING, Callable, Optional
+from uuid import uuid4
from synapse.config._base import Config
from synapse.logging.context import defer_to_thread, run_in_background
from synapse.logging.opentracing import start_active_span, trace_with_opname
from synapse.util.async_helpers import maybe_awaitable
+from ..storage.databases.main.media_repository import LocalMedia
from ._base import FileInfo, Responder
-from .media_storage import FileResponder
+from .media_storage import FileResponder, MultipartResponder
logger = logging.getLogger(__name__)
@@ -55,13 +57,21 @@ class StorageProvider(metaclass=abc.ABCMeta):
"""
@abc.abstractmethod
- async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
+ async def fetch(
+ self,
+ path: str,
+ file_info: FileInfo,
+ media_info: Optional[LocalMedia] = None,
+ federation: bool = False,
+ ) -> Optional[Responder]:
"""Attempt to fetch the file described by file_info and stream it
into writer.
Args:
path: Relative path of file in local cache
file_info: The metadata of the file.
+ media_info: metadata of the media item
+ federation: Whether the requested media is for a federation request
Returns:
Returns a Responder if the provider has the file, otherwise returns None.
@@ -124,7 +134,13 @@ class StorageProviderWrapper(StorageProvider):
run_in_background(store)
@trace_with_opname("StorageProviderWrapper.fetch")
- async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
+ async def fetch(
+ self,
+ path: str,
+ file_info: FileInfo,
+ media_info: Optional[LocalMedia] = None,
+ federation: bool = False,
+ ) -> Optional[Responder]:
if file_info.url_cache:
# Files in the URL preview cache definitely aren't stored here,
# so avoid any potentially slow I/O or network access.
@@ -132,7 +148,9 @@ class StorageProviderWrapper(StorageProvider):
# store_file is supposed to return an Awaitable, but guard
# against improper implementations.
- return await maybe_awaitable(self.backend.fetch(path, file_info))
+ return await maybe_awaitable(
+ self.backend.fetch(path, file_info, media_info, federation)
+ )
class FileStorageProviderBackend(StorageProvider):
@@ -172,11 +190,23 @@ class FileStorageProviderBackend(StorageProvider):
)
@trace_with_opname("FileStorageProviderBackend.fetch")
- async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
+ async def fetch(
+ self,
+ path: str,
+ file_info: FileInfo,
+ media_info: Optional[LocalMedia] = None,
+ federation: bool = False,
+ ) -> Optional[Responder]:
"""See StorageProvider.fetch"""
backup_fname = os.path.join(self.base_directory, path)
if os.path.isfile(backup_fname):
+ if federation:
+ assert media_info is not None
+ boundary = uuid4().hex.encode("ascii")
+ return MultipartResponder(
+ open(backup_fname, "rb"), media_info, boundary
+ )
return FileResponder(open(backup_fname, "rb"))
return None
diff --git a/tests/federation/test_federation_media.py b/tests/federation/test_federation_media.py
new file mode 100644
index 0000000000..1c89d19e99
--- /dev/null
+++ b/tests/federation/test_federation_media.py
@@ -0,0 +1,234 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import io
+import os
+import shutil
+import tempfile
+from typing import Optional
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.media._base import FileInfo, Responder
+from synapse.media.filepath import MediaFilePaths
+from synapse.media.media_storage import MediaStorage
+from synapse.media.storage_provider import (
+ FileStorageProviderBackend,
+ StorageProviderWrapper,
+)
+from synapse.server import HomeServer
+from synapse.storage.databases.main.media_repository import LocalMedia
+from synapse.types import JsonDict, UserID
+from synapse.util import Clock
+
+from tests import unittest
+from tests.test_utils import SMALL_PNG
+from tests.unittest import override_config
+
+
+class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase):
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ super().prepare(reactor, clock, hs)
+ self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
+ self.addCleanup(shutil.rmtree, self.test_dir)
+ self.primary_base_path = os.path.join(self.test_dir, "primary")
+ self.secondary_base_path = os.path.join(self.test_dir, "secondary")
+
+ hs.config.media.media_store_path = self.primary_base_path
+
+ storage_providers = [
+ StorageProviderWrapper(
+ FileStorageProviderBackend(hs, self.secondary_base_path),
+ store_local=True,
+ store_remote=False,
+ store_synchronous=True,
+ )
+ ]
+
+ self.filepaths = MediaFilePaths(self.primary_base_path)
+ self.media_storage = MediaStorage(
+ hs, self.primary_base_path, self.filepaths, storage_providers
+ )
+ self.media_repo = hs.get_media_repository()
+
+ @override_config(
+ {"experimental_features": {"msc3916_authenticated_media_enabled": True}}
+ )
+ def test_file_download(self) -> None:
+ content = io.BytesIO(b"file_to_stream")
+ content_uri = self.get_success(
+ self.media_repo.create_content(
+ "text/plain",
+ "test_upload",
+ content,
+ 46,
+ UserID.from_string("@user_id:whatever.org"),
+ )
+ )
+ # test with a text file
+ channel = self.make_signed_federation_request(
+ "GET",
+ f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
+ )
+ self.pump()
+ self.assertEqual(200, channel.code)
+
+ content_type = channel.headers.getRawHeaders("content-type")
+ assert content_type is not None
+ assert "multipart/mixed" in content_type[0]
+ assert "boundary" in content_type[0]
+
+ # extract boundary
+ boundary = content_type[0].split("boundary=")[1]
+ # split on boundary and check that json field and expected value exist
+ stripped = channel.text_body.split("\r\n" + "--" + boundary)
+ # TODO: the json object expected will change once MSC3911 is implemented, currently
+ # {} is returned for all requests as a placeholder (per MSC3916)
+ found_json = any(
+ "\r\nContent-Type: application/json\r\n{}" in field for field in stripped
+ )
+ self.assertTrue(found_json)
+
+ # check that text file and expected value exist
+ found_file = any(
+ "\r\nContent-Type: text/plain\r\nfile_to_stream" in field
+ for field in stripped
+ )
+ self.assertTrue(found_file)
+
+ content = io.BytesIO(SMALL_PNG)
+ content_uri = self.get_success(
+ self.media_repo.create_content(
+ "image/png",
+ "test_png_upload",
+ content,
+ 67,
+ UserID.from_string("@user_id:whatever.org"),
+ )
+ )
+ # test with an image file
+ channel = self.make_signed_federation_request(
+ "GET",
+ f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
+ )
+ self.pump()
+ self.assertEqual(200, channel.code)
+
+ content_type = channel.headers.getRawHeaders("content-type")
+ assert content_type is not None
+ assert "multipart/mixed" in content_type[0]
+ assert "boundary" in content_type[0]
+
+ # extract boundary
+ boundary = content_type[0].split("boundary=")[1]
+ # split on boundary and check that json field and expected value exist
+ body = channel.result.get("body")
+ assert body is not None
+ stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8"))
+ found_json = any(
+ b"\r\nContent-Type: application/json\r\n{}" in field
+ for field in stripped_bytes
+ )
+ self.assertTrue(found_json)
+
+ # check that png file exists and matches what was uploaded
+ found_file = any(SMALL_PNG in field for field in stripped_bytes)
+ self.assertTrue(found_file)
+
+ @override_config(
+ {"experimental_features": {"msc3916_authenticated_media_enabled": False}}
+ )
+ def test_disable_config(self) -> None:
+ content = io.BytesIO(b"file_to_stream")
+ content_uri = self.get_success(
+ self.media_repo.create_content(
+ "text/plain",
+ "test_upload",
+ content,
+ 46,
+ UserID.from_string("@user_id:whatever.org"),
+ )
+ )
+ channel = self.make_signed_federation_request(
+ "GET",
+ f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
+ )
+ self.pump()
+ self.assertEqual(404, channel.code)
+ self.assertEqual(channel.json_body.get("errcode"), "M_UNRECOGNIZED")
+
+
+class FakeFileStorageProviderBackend:
+ """
+ Fake storage provider stub with incompatible `fetch` signature for testing
+ """
+
+ def __init__(self, hs: "HomeServer", config: str):
+ self.hs = hs
+ self.cache_directory = hs.config.media.media_store_path
+ self.base_directory = config
+
+ def __str__(self) -> str:
+ return "FakeFileStorageProviderBackend[%s]" % (self.base_directory,)
+
+ async def fetch(
+ self, path: str, file_info: FileInfo, media_info: Optional[LocalMedia] = None
+ ) -> Optional[Responder]:
+ pass
+
+
+TEST_DIR = tempfile.mkdtemp(prefix="synapse-tests-")
+
+
+class FederationUnstableMediaEndpointCompatibilityTest(
+ unittest.FederatingHomeserverTestCase
+):
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ super().prepare(reactor, clock, hs)
+ self.test_dir = TEST_DIR
+ self.addCleanup(shutil.rmtree, self.test_dir)
+ self.media_repo = hs.get_media_repository()
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ primary_base_path = os.path.join(TEST_DIR, "primary")
+ config["media_storage_providers"] = [
+ {
+ "module": "tests.federation.test_federation_media.FakeFileStorageProviderBackend",
+ "store_local": "True",
+ "store_remote": "False",
+ "store_synchronous": "False",
+ "config": {"directory": primary_base_path},
+ }
+ ]
+ return config
+
+ @override_config(
+ {"experimental_features": {"msc3916_authenticated_media_enabled": True}}
+ )
+ def test_incompatible_storage_provider_fails_to_load_endpoint(self) -> None:
+ channel = self.make_signed_federation_request(
+ "GET",
+ "/_matrix/federation/unstable/org.matrix.msc3916/media/download/xyz",
+ )
+ self.pump()
+ self.assertEqual(404, channel.code)
+ self.assertEqual(channel.json_body.get("errcode"), "M_UNRECOGNIZED")
diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
index 46d20ce775..47a89e9c66 100644
--- a/tests/media/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -49,7 +49,10 @@ from synapse.logging.context import make_deferred_yieldable
from synapse.media._base import FileInfo, ThumbnailInfo
from synapse.media.filepath import MediaFilePaths
from synapse.media.media_storage import MediaStorage, ReadableFileWrapper
-from synapse.media.storage_provider import FileStorageProviderBackend
+from synapse.media.storage_provider import (
+ FileStorageProviderBackend,
+ StorageProviderWrapper,
+)
from synapse.media.thumbnailer import ThumbnailProvider
from synapse.module_api import ModuleApi
from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
@@ -78,7 +81,14 @@ class MediaStorageTests(unittest.HomeserverTestCase):
hs.config.media.media_store_path = self.primary_base_path
- storage_providers = [FileStorageProviderBackend(hs, self.secondary_base_path)]
+ storage_providers = [
+ StorageProviderWrapper(
+ FileStorageProviderBackend(hs, self.secondary_base_path),
+ store_local=True,
+ store_remote=False,
+ store_synchronous=True,
+ )
+ ]
self.filepaths = MediaFilePaths(self.primary_base_path)
self.media_storage = MediaStorage(
From 265ee88f34e8aec4c242af5b6c428bd1331fa354 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 10 Jun 2024 09:48:38 -0500
Subject: [PATCH 154/503] Wrong retention policy being used when filtering
events (lint `ControlVarUsedAfterBlockViolation` `WPS441`) (#17272)
Fix loop var being used outside block.
Before this change, we were always using the last room_id's retention policy for all events being filtered.
I found this bug with the [new lint rule, `ControlVarUsedAfterBlockViolation` `WPS441`](https://github.com/astral-sh/ruff/pull/11769), that I re-implemented in `ruff`. Shout-out to @reivilibre for all the help in the beginning!
### Pull Request Checklist
* [x] Pull request is based on the develop branch
* [x] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [x] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct
(run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---
changelog.d/17272.bugfix | 1 +
synapse/visibility.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17272.bugfix
diff --git a/changelog.d/17272.bugfix b/changelog.d/17272.bugfix
new file mode 100644
index 0000000000..83e7ca426a
--- /dev/null
+++ b/changelog.d/17272.bugfix
@@ -0,0 +1 @@
+Fix wrong retention policy being used when filtering events.
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 09a947ef15..c891bd845b 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -151,7 +151,7 @@ async def filter_events_for_client(
filter_send_to_client=filter_send_to_client,
sender_ignored=event.sender in ignore_list,
always_include_ids=always_include_ids,
- retention_policy=retention_policies[room_id],
+ retention_policy=retention_policies[event.room_id],
state=state_after_event,
is_peeking=is_peeking,
sender_erased=erased_senders.get(event.sender, False),
From 06953bc193530780a7686b2aee9632a4ed1d604f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Jun 2024 17:10:58 +0100
Subject: [PATCH 155/503] Always return OTK counts (#17275)
Broke in https://github.com/element-hq/synapse/pull/17215
---
changelog.d/17275.bugfix | 1 +
synapse/handlers/sync.py | 33 +++++++++++++++++++++++++++++----
2 files changed, 30 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/17275.bugfix
diff --git a/changelog.d/17275.bugfix b/changelog.d/17275.bugfix
new file mode 100644
index 0000000000..eb522bb997
--- /dev/null
+++ b/changelog.d/17275.bugfix
@@ -0,0 +1 @@
+Fix bug where OTKs were not always included in `/sync` response when using workers.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 1d7d9dfdd0..6389c51b1c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -285,7 +285,11 @@ class SyncResult:
)
@staticmethod
- def empty(next_batch: StreamToken) -> "SyncResult":
+ def empty(
+ next_batch: StreamToken,
+ device_one_time_keys_count: JsonMapping,
+ device_unused_fallback_key_types: List[str],
+ ) -> "SyncResult":
"Return a new empty result"
return SyncResult(
next_batch=next_batch,
@@ -297,8 +301,8 @@ class SyncResult:
archived=[],
to_device=[],
device_lists=DeviceListUpdates(),
- device_one_time_keys_count={},
- device_unused_fallback_key_types=[],
+ device_one_time_keys_count=device_one_time_keys_count,
+ device_unused_fallback_key_types=device_unused_fallback_key_types,
)
@@ -523,7 +527,28 @@ class SyncHandler:
logger.warning(
"Timed out waiting for worker to catch up. Returning empty response"
)
- return SyncResult.empty(since_token)
+ device_id = sync_config.device_id
+ one_time_keys_count: JsonMapping = {}
+ unused_fallback_key_types: List[str] = []
+ if device_id:
+ user_id = sync_config.user.to_string()
+ # TODO: We should have a way to let clients differentiate between the states of:
+ # * no change in OTK count since the provided since token
+ # * the server has zero OTKs left for this device
+ # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
+ one_time_keys_count = await self.store.count_e2e_one_time_keys(
+ user_id, device_id
+ )
+ unused_fallback_key_types = list(
+ await self.store.get_e2e_unused_fallback_key_types(
+ user_id, device_id
+ )
+ )
+
+ cache_context.should_cache = False # Don't cache empty responses
+ return SyncResult.empty(
+ since_token, one_time_keys_count, unused_fallback_key_types
+ )
# If we've spent significant time waiting to catch up, take it off
# the timeout.
From b84e31375b0ca80970a6c56e3e0e86a5c63af025 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 10 Jun 2024 15:55:42 +0100
Subject: [PATCH 156/503] Update changelog
---
changelog.d/17275.bugfix | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/changelog.d/17275.bugfix b/changelog.d/17275.bugfix
index eb522bb997..04e8ab5eff 100644
--- a/changelog.d/17275.bugfix
+++ b/changelog.d/17275.bugfix
@@ -1 +1 @@
-Fix bug where OTKs were not always included in `/sync` response when using workers.
+Fix bug where OTKs were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1.
From 8c4937b216309eec62f9262f23162878fa3b772c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 10 Jun 2024 15:56:57 +0100
Subject: [PATCH 157/503] Fix bug where device lists would break sync (#17292)
If the stream ID in the unconverted table is ahead of the device lists
ID gen, then it can break all /sync requests that had an ID from ahead
of the table.
The fix is to make sure we add the unconverted table to the list of
tables we check at start up.
Broke in https://github.com/element-hq/synapse/pull/17229
---
changelog.d/17292.bugfix | 1 +
synapse/storage/databases/main/devices.py | 28 +++++++++++++------
.../05_add_instance_names_converted_pos.sql | 16 +++++++++++
3 files changed, 36 insertions(+), 9 deletions(-)
create mode 100644 changelog.d/17292.bugfix
create mode 100644 synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql
diff --git a/changelog.d/17292.bugfix b/changelog.d/17292.bugfix
new file mode 100644
index 0000000000..c067a98ce8
--- /dev/null
+++ b/changelog.d/17292.bugfix
@@ -0,0 +1 @@
+Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1.
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 1c771e48f7..40187496e2 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -108,6 +108,11 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
("device_lists_outbound_pokes", "instance_name", "stream_id"),
("device_lists_changes_in_room", "instance_name", "stream_id"),
("device_lists_remote_pending", "instance_name", "stream_id"),
+ (
+ "device_lists_changes_converted_stream_position",
+ "instance_name",
+ "stream_id",
+ ),
],
sequence_name="device_lists_sequence",
writers=["master"],
@@ -2394,15 +2399,16 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
`FALSE` have not been converted.
"""
- return cast(
- Tuple[int, str],
- await self.db_pool.simple_select_one(
- table="device_lists_changes_converted_stream_position",
- keyvalues={},
- retcols=["stream_id", "room_id"],
- desc="get_device_change_last_converted_pos",
- ),
+ # There should be only one row in this table, though we want to
+ # future-proof ourselves for when we have multiple rows (one for each
+ # instance). So to handle that case we take the minimum of all rows.
+ rows = await self.db_pool.simple_select_list(
+ table="device_lists_changes_converted_stream_position",
+ keyvalues={},
+ retcols=["stream_id", "room_id"],
+ desc="get_device_change_last_converted_pos",
)
+ return cast(Tuple[int, str], min(rows))
async def set_device_change_last_converted_pos(
self,
@@ -2417,6 +2423,10 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
await self.db_pool.simple_update_one(
table="device_lists_changes_converted_stream_position",
keyvalues={},
- updatevalues={"stream_id": stream_id, "room_id": room_id},
+ updatevalues={
+ "stream_id": stream_id,
+ "instance_name": self._instance_name,
+ "room_id": room_id,
+ },
desc="set_device_change_last_converted_pos",
)
diff --git a/synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql b/synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql
new file mode 100644
index 0000000000..c3f2b6a1dd
--- /dev/null
+++ b/synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql
@@ -0,0 +1,16 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- .
+
+-- Add `instance_name` columns to stream tables to allow them to be used with
+-- `MultiWriterIdGenerator`
+ALTER TABLE device_lists_changes_converted_stream_position ADD COLUMN instance_name TEXT;
From dad155972160cec2a8c166e2f713064b7c6ca299 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 10 Jun 2024 15:03:50 -0500
Subject: [PATCH 158/503] Reorganize Pydantic models and types used in handlers
(#17279)
Spawning from https://github.com/element-hq/synapse/pull/17187#discussion_r1619492779 around wanting to put `SlidingSyncBody` (parse the request in the rest layer), `SlidingSyncConfig` (from the rest layer, pass to the handler), `SlidingSyncResponse` (pass the response from the handler back to the rest layer to respond) somewhere that doesn't contaminate the imports and cause circular import issues.
- Moved Pydantic parsing models to `synapse/types/rest`
- Moved handler types to `synapse/types/handlers`
---
changelog.d/17279.misc | 1 +
synapse/events/validator.py | 2 +-
synapse/handlers/pagination.py | 3 +-
synapse/handlers/room.py | 3 +-
synapse/handlers/sliding_sync.py | 175 +-----------
synapse/rest/client/account.py | 6 +-
synapse/rest/client/devices.py | 4 +-
synapse/rest/client/directory.py | 2 +-
synapse/rest/client/sync.py | 2 +-
synapse/rest/key/v2/remote_key_resource.py | 2 +-
synapse/types/__init__.py | 57 ----
synapse/types/handlers/__init__.py | 252 ++++++++++++++++++
.../models.py => types/rest/__init__.py} | 0
.../rest/client/__init__.py} | 2 +-
tests/rest/client/test_models.py | 2 +-
15 files changed, 269 insertions(+), 244 deletions(-)
create mode 100644 changelog.d/17279.misc
create mode 100644 synapse/types/handlers/__init__.py
rename synapse/{rest/models.py => types/rest/__init__.py} (100%)
rename synapse/{rest/client/models.py => types/rest/client/__init__.py} (99%)
diff --git a/changelog.d/17279.misc b/changelog.d/17279.misc
new file mode 100644
index 0000000000..2090b11d7f
--- /dev/null
+++ b/changelog.d/17279.misc
@@ -0,0 +1 @@
+Re-organize Pydantic models and types used in handlers.
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 62f0b67dbd..73b63b77f2 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -47,9 +47,9 @@ from synapse.events.utils import (
validate_canonicaljson,
)
from synapse.http.servlet import validate_json_object
-from synapse.rest.models import RequestBodyModel
from synapse.storage.controllers.state import server_acl_evaluator_from_event
from synapse.types import EventID, JsonDict, RoomID, StrCollection, UserID
+from synapse.types.rest import RequestBodyModel
class EventValidator:
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index f7447b8ba5..dab3f90e74 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -37,11 +37,10 @@ from synapse.types import (
JsonMapping,
Requester,
ScheduledTask,
- ShutdownRoomParams,
- ShutdownRoomResponse,
StreamKeyType,
TaskStatus,
)
+from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock
from synapse.visibility import filter_events_for_client
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 7f1b674d10..203209427b 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -80,8 +80,6 @@ from synapse.types import (
RoomAlias,
RoomID,
RoomStreamToken,
- ShutdownRoomParams,
- ShutdownRoomResponse,
StateMap,
StrCollection,
StreamKeyType,
@@ -89,6 +87,7 @@ from synapse.types import (
UserID,
create_requester,
)
+from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
from synapse.types.state import StateFilter
from synapse.util import stringutils
from synapse.util.caches.response_cache import ResponseCache
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 34ae21ba50..1c37f83a2b 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -18,23 +18,14 @@
#
#
import logging
-from enum import Enum
-from typing import TYPE_CHECKING, AbstractSet, Dict, Final, List, Optional, Tuple
+from typing import TYPE_CHECKING, AbstractSet, Dict, List, Optional
-import attr
from immutabledict import immutabledict
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
- from pydantic.v1 import Extra
-else:
- from pydantic import Extra
-
from synapse.api.constants import Membership
from synapse.events import EventBase
-from synapse.rest.client.models import SlidingSyncBody
-from synapse.types import JsonMapping, Requester, RoomStreamToken, StreamToken, UserID
+from synapse.types import Requester, RoomStreamToken, StreamToken, UserID
+from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -62,166 +53,6 @@ def filter_membership_for_sync(*, membership: str, user_id: str, sender: str) ->
return membership != Membership.LEAVE or sender != user_id
-class SlidingSyncConfig(SlidingSyncBody):
- """
- Inherit from `SlidingSyncBody` since we need all of the same fields and add a few
- extra fields that we need in the handler
- """
-
- user: UserID
- device_id: Optional[str]
-
- # Pydantic config
- class Config:
- # By default, ignore fields that we don't recognise.
- extra = Extra.ignore
- # By default, don't allow fields to be reassigned after parsing.
- allow_mutation = False
- # Allow custom types like `UserID` to be used in the model
- arbitrary_types_allowed = True
-
-
-class OperationType(Enum):
- """
- Represents the operation types in a Sliding Sync window.
-
- Attributes:
- SYNC: Sets a range of entries. Clients SHOULD discard what they previous knew about
- entries in this range.
- INSERT: Sets a single entry. If the position is not empty then clients MUST move
- entries to the left or the right depending on where the closest empty space is.
- DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move
- places.
- INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for
- offline support, but they should be treated as empty when additional operations
- which concern indexes in the range arrive from the server.
- """
-
- SYNC: Final = "SYNC"
- INSERT: Final = "INSERT"
- DELETE: Final = "DELETE"
- INVALIDATE: Final = "INVALIDATE"
-
-
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class SlidingSyncResult:
- """
- The Sliding Sync result to be serialized to JSON for a response.
-
- Attributes:
- next_pos: The next position token in the sliding window to request (next_batch).
- lists: Sliding window API. A map of list key to list results.
- rooms: Room subscription API. A map of room ID to room subscription to room results.
- extensions: Extensions API. A map of extension key to extension results.
- """
-
- @attr.s(slots=True, frozen=True, auto_attribs=True)
- class RoomResult:
- """
- Attributes:
- name: Room name or calculated room name.
- avatar: Room avatar
- heroes: List of stripped membership events (containing `user_id` and optionally
- `avatar_url` and `displayname`) for the users used to calculate the room name.
- initial: Flag which is set when this is the first time the server is sending this
- data on this connection. Clients can use this flag to replace or update
- their local state. When there is an update, servers MUST omit this flag
- entirely and NOT send "initial":false as this is wasteful on bandwidth. The
- absence of this flag means 'false'.
- required_state: The current state of the room
- timeline: Latest events in the room. The last event is the most recent
- is_dm: Flag to specify whether the room is a direct-message room (most likely
- between two people).
- invite_state: Stripped state events. Same as `rooms.invite.$room_id.invite_state`
- in sync v2, absent on joined/left rooms
- prev_batch: A token that can be passed as a start parameter to the
- `/rooms//messages` API to retrieve earlier messages.
- limited: True if their are more events than fit between the given position and now.
- Sync again to get more.
- joined_count: The number of users with membership of join, including the client's
- own user ID. (same as sync `v2 m.joined_member_count`)
- invited_count: The number of users with membership of invite. (same as sync v2
- `m.invited_member_count`)
- notification_count: The total number of unread notifications for this room. (same
- as sync v2)
- highlight_count: The number of unread notifications for this room with the highlight
- flag set. (same as sync v2)
- num_live: The number of timeline events which have just occurred and are not historical.
- The last N events are 'live' and should be treated as such. This is mostly
- useful to determine whether a given @mention event should make a noise or not.
- Clients cannot rely solely on the absence of `initial: true` to determine live
- events because if a room not in the sliding window bumps into the window because
- of an @mention it will have `initial: true` yet contain a single live event
- (with potentially other old events in the timeline).
- """
-
- name: str
- avatar: Optional[str]
- heroes: Optional[List[EventBase]]
- initial: bool
- required_state: List[EventBase]
- timeline: List[EventBase]
- is_dm: bool
- invite_state: List[EventBase]
- prev_batch: StreamToken
- limited: bool
- joined_count: int
- invited_count: int
- notification_count: int
- highlight_count: int
- num_live: int
-
- @attr.s(slots=True, frozen=True, auto_attribs=True)
- class SlidingWindowList:
- """
- Attributes:
- count: The total number of entries in the list. Always present if this list
- is.
- ops: The sliding list operations to perform.
- """
-
- @attr.s(slots=True, frozen=True, auto_attribs=True)
- class Operation:
- """
- Attributes:
- op: The operation type to perform.
- range: Which index positions are affected by this operation. These are
- both inclusive.
- room_ids: Which room IDs are affected by this operation. These IDs match
- up to the positions in the `range`, so the last room ID in this list
- matches the 9th index. The room data is held in a separate object.
- """
-
- op: OperationType
- range: Tuple[int, int]
- room_ids: List[str]
-
- count: int
- ops: List[Operation]
-
- next_pos: StreamToken
- lists: Dict[str, SlidingWindowList]
- rooms: Dict[str, RoomResult]
- extensions: JsonMapping
-
- def __bool__(self) -> bool:
- """Make the result appear empty if there are no updates. This is used
- to tell if the notifier needs to wait for more events when polling for
- events.
- """
- return bool(self.lists or self.rooms or self.extensions)
-
- @staticmethod
- def empty(next_pos: StreamToken) -> "SlidingSyncResult":
- "Return a new empty result"
- return SlidingSyncResult(
- next_pos=next_pos,
- lists={},
- rooms={},
- extensions={},
- )
-
-
class SlidingSyncHandler:
def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 6ac07d354c..8daa449f9e 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -56,14 +56,14 @@ from synapse.http.servlet import (
from synapse.http.site import SynapseRequest
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
-from synapse.rest.client.models import (
+from synapse.types import JsonDict
+from synapse.types.rest import RequestBodyModel
+from synapse.types.rest.client import (
AuthenticationData,
ClientSecretStr,
EmailRequestTokenBody,
MsisdnRequestTokenBody,
)
-from synapse.rest.models import RequestBodyModel
-from synapse.types import JsonDict
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import assert_valid_client_secret, random_string
from synapse.util.threepids import check_3pid_allowed, validate_email
diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
index b1b803549e..8313d687b7 100644
--- a/synapse/rest/client/devices.py
+++ b/synapse/rest/client/devices.py
@@ -42,9 +42,9 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns, interactive_auth_handler
-from synapse.rest.client.models import AuthenticationData
-from synapse.rest.models import RequestBodyModel
from synapse.types import JsonDict
+from synapse.types.rest import RequestBodyModel
+from synapse.types.rest.client import AuthenticationData
if TYPE_CHECKING:
from synapse.server import HomeServer
diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py
index 8099fdf3e4..11fdd0f7c6 100644
--- a/synapse/rest/client/directory.py
+++ b/synapse/rest/client/directory.py
@@ -41,8 +41,8 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
-from synapse.rest.models import RequestBodyModel
from synapse.types import JsonDict, RoomAlias
+from synapse.types.rest import RequestBodyModel
if TYPE_CHECKING:
from synapse.server import HomeServer
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 385b102b3d..1b0ac20d94 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -53,8 +53,8 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace_with_opname
-from synapse.rest.client.models import SlidingSyncBody
from synapse.types import JsonDict, Requester, StreamToken
+from synapse.types.rest.client import SlidingSyncBody
from synapse.util import json_decoder
from synapse.util.caches.lrucache import LruCache
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index dc7325fc57..a411ed614e 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -41,9 +41,9 @@ from synapse.http.servlet import (
parse_and_validate_json_object_from_request,
parse_integer,
)
-from synapse.rest.models import RequestBodyModel
from synapse.storage.keys import FetchKeyResultForRemote
from synapse.types import JsonDict
+from synapse.types.rest import RequestBodyModel
from synapse.util import json_decoder
from synapse.util.async_helpers import yieldable_gather_results
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index 3a89787cab..151658df53 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -1279,60 +1279,3 @@ class ScheduledTask:
result: Optional[JsonMapping]
# Optional error that should be assigned a value when the status is FAILED
error: Optional[str]
-
-
-class ShutdownRoomParams(TypedDict):
- """
- Attributes:
- requester_user_id:
- User who requested the action. Will be recorded as putting the room on the
- blocking list.
- new_room_user_id:
- If set, a new room will be created with this user ID
- as the creator and admin, and all users in the old room will be
- moved into that room. If not set, no new room will be created
- and the users will just be removed from the old room.
- new_room_name:
- A string representing the name of the room that new users will
- be invited to. Defaults to `Content Violation Notification`
- message:
- A string containing the first message that will be sent as
- `new_room_user_id` in the new room. Ideally this will clearly
- convey why the original room was shut down.
- Defaults to `Sharing illegal content on this server is not
- permitted and rooms in violation will be blocked.`
- block:
- If set to `true`, this room will be added to a blocking list,
- preventing future attempts to join the room. Defaults to `false`.
- purge:
- If set to `true`, purge the given room from the database.
- force_purge:
- If set to `true`, the room will be purged from database
- even if there are still users joined to the room.
- """
-
- requester_user_id: Optional[str]
- new_room_user_id: Optional[str]
- new_room_name: Optional[str]
- message: Optional[str]
- block: bool
- purge: bool
- force_purge: bool
-
-
-class ShutdownRoomResponse(TypedDict):
- """
- Attributes:
- kicked_users: An array of users (`user_id`) that were kicked.
- failed_to_kick_users:
- An array of users (`user_id`) that that were not kicked.
- local_aliases:
- An array of strings representing the local aliases that were
- migrated from the old room to the new.
- new_room_id: A string representing the room ID of the new room.
- """
-
- kicked_users: List[str]
- failed_to_kick_users: List[str]
- local_aliases: List[str]
- new_room_id: Optional[str]
diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py
new file mode 100644
index 0000000000..1d65551d5b
--- /dev/null
+++ b/synapse/types/handlers/__init__.py
@@ -0,0 +1,252 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+from enum import Enum
+from typing import TYPE_CHECKING, Dict, Final, List, Optional, Tuple
+
+import attr
+from typing_extensions import TypedDict
+
+from synapse._pydantic_compat import HAS_PYDANTIC_V2
+
+if TYPE_CHECKING or HAS_PYDANTIC_V2:
+ from pydantic.v1 import Extra
+else:
+ from pydantic import Extra
+
+from synapse.events import EventBase
+from synapse.types import JsonMapping, StreamToken, UserID
+from synapse.types.rest.client import SlidingSyncBody
+
+
+class ShutdownRoomParams(TypedDict):
+ """
+ Attributes:
+ requester_user_id:
+ User who requested the action. Will be recorded as putting the room on the
+ blocking list.
+ new_room_user_id:
+ If set, a new room will be created with this user ID
+ as the creator and admin, and all users in the old room will be
+ moved into that room. If not set, no new room will be created
+ and the users will just be removed from the old room.
+ new_room_name:
+ A string representing the name of the room that new users will
+ be invited to. Defaults to `Content Violation Notification`
+ message:
+ A string containing the first message that will be sent as
+ `new_room_user_id` in the new room. Ideally this will clearly
+ convey why the original room was shut down.
+ Defaults to `Sharing illegal content on this server is not
+ permitted and rooms in violation will be blocked.`
+ block:
+ If set to `true`, this room will be added to a blocking list,
+ preventing future attempts to join the room. Defaults to `false`.
+ purge:
+ If set to `true`, purge the given room from the database.
+ force_purge:
+ If set to `true`, the room will be purged from database
+ even if there are still users joined to the room.
+ """
+
+ requester_user_id: Optional[str]
+ new_room_user_id: Optional[str]
+ new_room_name: Optional[str]
+ message: Optional[str]
+ block: bool
+ purge: bool
+ force_purge: bool
+
+
+class ShutdownRoomResponse(TypedDict):
+ """
+ Attributes:
+ kicked_users: An array of users (`user_id`) that were kicked.
+ failed_to_kick_users:
+            An array of users (`user_id`) that were not kicked.
+ local_aliases:
+ An array of strings representing the local aliases that were
+ migrated from the old room to the new.
+ new_room_id: A string representing the room ID of the new room.
+ """
+
+ kicked_users: List[str]
+ failed_to_kick_users: List[str]
+ local_aliases: List[str]
+ new_room_id: Optional[str]
+
+
+class SlidingSyncConfig(SlidingSyncBody):
+ """
+ Inherit from `SlidingSyncBody` since we need all of the same fields and add a few
+ extra fields that we need in the handler
+ """
+
+ user: UserID
+ device_id: Optional[str]
+
+ # Pydantic config
+ class Config:
+ # By default, ignore fields that we don't recognise.
+ extra = Extra.ignore
+ # By default, don't allow fields to be reassigned after parsing.
+ allow_mutation = False
+ # Allow custom types like `UserID` to be used in the model
+ arbitrary_types_allowed = True
+
+
+class OperationType(Enum):
+ """
+ Represents the operation types in a Sliding Sync window.
+
+ Attributes:
+        SYNC: Sets a range of entries. Clients SHOULD discard what they previously knew about
+ entries in this range.
+ INSERT: Sets a single entry. If the position is not empty then clients MUST move
+ entries to the left or the right depending on where the closest empty space is.
+ DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move
+ places.
+ INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for
+ offline support, but they should be treated as empty when additional operations
+ which concern indexes in the range arrive from the server.
+ """
+
+ SYNC: Final = "SYNC"
+ INSERT: Final = "INSERT"
+ DELETE: Final = "DELETE"
+ INVALIDATE: Final = "INVALIDATE"
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class SlidingSyncResult:
+ """
+ The Sliding Sync result to be serialized to JSON for a response.
+
+ Attributes:
+ next_pos: The next position token in the sliding window to request (next_batch).
+ lists: Sliding window API. A map of list key to list results.
+ rooms: Room subscription API. A map of room ID to room subscription to room results.
+ extensions: Extensions API. A map of extension key to extension results.
+ """
+
+ @attr.s(slots=True, frozen=True, auto_attribs=True)
+ class RoomResult:
+ """
+ Attributes:
+ name: Room name or calculated room name.
+ avatar: Room avatar
+ heroes: List of stripped membership events (containing `user_id` and optionally
+ `avatar_url` and `displayname`) for the users used to calculate the room name.
+ initial: Flag which is set when this is the first time the server is sending this
+ data on this connection. Clients can use this flag to replace or update
+ their local state. When there is an update, servers MUST omit this flag
+ entirely and NOT send "initial":false as this is wasteful on bandwidth. The
+ absence of this flag means 'false'.
+ required_state: The current state of the room
+ timeline: Latest events in the room. The last event is the most recent
+ is_dm: Flag to specify whether the room is a direct-message room (most likely
+ between two people).
+ invite_state: Stripped state events. Same as `rooms.invite.$room_id.invite_state`
+ in sync v2, absent on joined/left rooms
+ prev_batch: A token that can be passed as a start parameter to the
+ `/rooms//messages` API to retrieve earlier messages.
+            limited: True if there are more events than fit between the given position and now.
+ Sync again to get more.
+ joined_count: The number of users with membership of join, including the client's
+                own user ID. (same as sync v2 `m.joined_member_count`)
+ invited_count: The number of users with membership of invite. (same as sync v2
+ `m.invited_member_count`)
+ notification_count: The total number of unread notifications for this room. (same
+ as sync v2)
+ highlight_count: The number of unread notifications for this room with the highlight
+ flag set. (same as sync v2)
+ num_live: The number of timeline events which have just occurred and are not historical.
+ The last N events are 'live' and should be treated as such. This is mostly
+ useful to determine whether a given @mention event should make a noise or not.
+ Clients cannot rely solely on the absence of `initial: true` to determine live
+ events because if a room not in the sliding window bumps into the window because
+ of an @mention it will have `initial: true` yet contain a single live event
+ (with potentially other old events in the timeline).
+ """
+
+ name: str
+ avatar: Optional[str]
+ heroes: Optional[List[EventBase]]
+ initial: bool
+ required_state: List[EventBase]
+ timeline: List[EventBase]
+ is_dm: bool
+ invite_state: List[EventBase]
+ prev_batch: StreamToken
+ limited: bool
+ joined_count: int
+ invited_count: int
+ notification_count: int
+ highlight_count: int
+ num_live: int
+
+ @attr.s(slots=True, frozen=True, auto_attribs=True)
+ class SlidingWindowList:
+ """
+ Attributes:
+ count: The total number of entries in the list. Always present if this list
+ is.
+ ops: The sliding list operations to perform.
+ """
+
+ @attr.s(slots=True, frozen=True, auto_attribs=True)
+ class Operation:
+ """
+ Attributes:
+ op: The operation type to perform.
+ range: Which index positions are affected by this operation. These are
+ both inclusive.
+ room_ids: Which room IDs are affected by this operation. These IDs match
+ up to the positions in the `range`, so the last room ID in this list
+ matches the 9th index. The room data is held in a separate object.
+ """
+
+ op: OperationType
+ range: Tuple[int, int]
+ room_ids: List[str]
+
+ count: int
+ ops: List[Operation]
+
+ next_pos: StreamToken
+ lists: Dict[str, SlidingWindowList]
+ rooms: Dict[str, RoomResult]
+ extensions: JsonMapping
+
+ def __bool__(self) -> bool:
+ """Make the result appear empty if there are no updates. This is used
+ to tell if the notifier needs to wait for more events when polling for
+ events.
+ """
+ return bool(self.lists or self.rooms or self.extensions)
+
+ @staticmethod
+ def empty(next_pos: StreamToken) -> "SlidingSyncResult":
+ "Return a new empty result"
+ return SlidingSyncResult(
+ next_pos=next_pos,
+ lists={},
+ rooms={},
+ extensions={},
+ )
diff --git a/synapse/rest/models.py b/synapse/types/rest/__init__.py
similarity index 100%
rename from synapse/rest/models.py
rename to synapse/types/rest/__init__.py
diff --git a/synapse/rest/client/models.py b/synapse/types/rest/client/__init__.py
similarity index 99%
rename from synapse/rest/client/models.py
rename to synapse/types/rest/client/__init__.py
index 5433ed91ef..ef261518a0 100644
--- a/synapse/rest/client/models.py
+++ b/synapse/types/rest/client/__init__.py
@@ -43,7 +43,7 @@ else:
validator,
)
-from synapse.rest.models import RequestBodyModel
+from synapse.types.rest import RequestBodyModel
from synapse.util.threepids import validate_email
diff --git a/tests/rest/client/test_models.py b/tests/rest/client/test_models.py
index 534dd7bcf4..f8a56c80ca 100644
--- a/tests/rest/client/test_models.py
+++ b/tests/rest/client/test_models.py
@@ -24,7 +24,7 @@ from typing import TYPE_CHECKING
from typing_extensions import Literal
from synapse._pydantic_compat import HAS_PYDANTIC_V2
-from synapse.rest.client.models import EmailRequestTokenBody
+from synapse.types.rest.client import EmailRequestTokenBody
if TYPE_CHECKING or HAS_PYDANTIC_V2:
from pydantic.v1 import BaseModel, ValidationError
From 491365f19997c2ca1c42d54242cc11c5d8d0e52d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 11 Jun 2024 10:47:03 +0100
Subject: [PATCH 159/503] Bump types-pillow from 10.2.0.20240423 to
10.2.0.20240520 (#17285)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 942e26701d..54674cc7d9 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2844,13 +2844,13 @@ files = [
[[package]]
name = "types-pillow"
-version = "10.2.0.20240423"
+version = "10.2.0.20240520"
description = "Typing stubs for Pillow"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-Pillow-10.2.0.20240423.tar.gz", hash = "sha256:696e68b9b6a58548fc307a8669830469237c5b11809ddf978ac77fafa79251cd"},
- {file = "types_Pillow-10.2.0.20240423-py3-none-any.whl", hash = "sha256:bd12923093b96c91d523efcdb66967a307f1a843bcfaf2d5a529146c10a9ced3"},
+ {file = "types-Pillow-10.2.0.20240520.tar.gz", hash = "sha256:130b979195465fa1e1676d8e81c9c7c30319e8e95b12fae945e8f0d525213107"},
+ {file = "types_Pillow-10.2.0.20240520-py3-none-any.whl", hash = "sha256:33c36494b380e2a269bb742181bea5d9b00820367822dbd3760f07210a1da23d"},
]
[[package]]
From 9e59d18022f20ef8f6c9f9644a59fdc4e08d9cbf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 11 Jun 2024 10:50:03 +0100
Subject: [PATCH 160/503] Bump dawidd6/action-download-artifact from 3.1.4 to 5
(#17289)
---
.github/workflows/docs-pr-netlify.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
index 277083ded3..a724816392 100644
--- a/.github/workflows/docs-pr-netlify.yaml
+++ b/.github/workflows/docs-pr-netlify.yaml
@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
- uses: dawidd6/action-download-artifact@09f2f74827fd3a8607589e5ad7f9398816f540fe # v3.1.4
+ uses: dawidd6/action-download-artifact@deb3bb83256a78589fef6a7b942e5f2573ad7c13 # v5
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
From 863578bfcf0136ccf527d13533198fc707e5b604 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 11 Jun 2024 10:50:13 +0100
Subject: [PATCH 161/503] Bump regex from 1.10.4 to 1.10.5 (#17290)
---
Cargo.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index e3e63fc205..7472e16291 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -444,9 +444,9 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.10.4"
+version = "1.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
+checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
dependencies = [
"aho-corasick",
"memchr",
From a8069e9739fdef8c7200d0d33a38b85100398136 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Tue, 11 Jun 2024 15:22:21 +0200
Subject: [PATCH 162/503] 1.109.0rc2
---
CHANGES.md | 10 ++++++++++
changelog.d/17275.bugfix | 1 -
changelog.d/17292.bugfix | 1 -
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
5 files changed, 17 insertions(+), 3 deletions(-)
delete mode 100644 changelog.d/17275.bugfix
delete mode 100644 changelog.d/17292.bugfix
diff --git a/CHANGES.md b/CHANGES.md
index 092dbdbf2d..3116e1b2a8 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,13 @@
+# Synapse 1.109.0rc2 (2024-06-11)
+
+### Bugfixes
+
+- Fix bug where OTKs were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
+- Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292))
+
+
+
+
# Synapse 1.109.0rc1 (2024-06-04)
### Features
diff --git a/changelog.d/17275.bugfix b/changelog.d/17275.bugfix
deleted file mode 100644
index 04e8ab5eff..0000000000
--- a/changelog.d/17275.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where OTKs were not always included in `/sync` response when using workers. Introduced v1.109.0rc1.
diff --git a/changelog.d/17292.bugfix b/changelog.d/17292.bugfix
deleted file mode 100644
index c067a98ce8..0000000000
--- a/changelog.d/17292.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1.
diff --git a/debian/changelog b/debian/changelog
index 927248bdab..ac2536749d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.109.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.109.0rc2.
+
+ -- Synapse Packaging team Tue, 11 Jun 2024 13:20:17 +0000
+
matrix-synapse-py3 (1.109.0~rc1) stable; urgency=medium
* New Synapse release 1.109.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index 8cc99b8cba..f4f7f70603 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.109.0rc1"
+version = "1.109.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From e6816babf6ce37cca4fbd9d67e5b2a0b06f65d1a Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Tue, 11 Jun 2024 15:39:30 +0200
Subject: [PATCH 163/503] CHANGES.md: s/OTKs/one-time-keys/
---
CHANGES.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 3116e1b2a8..61c6170c62 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,7 +2,7 @@
### Bugfixes
-- Fix bug where OTKs were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
+- Fix bug where one-time-keys were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
- Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292))
From d0f90bd04e1d83983de5e8327acd214b9a2b8d43 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 12 Jun 2024 10:52:14 +0100
Subject: [PATCH 164/503] Bump jinja2 from 3.1.3 to 3.1.4 (#17287)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 54674cc7d9..76463d493c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -912,13 +912,13 @@ trio = ["async_generator", "trio"]
[[package]]
name = "jinja2"
-version = "3.1.3"
+version = "3.1.4"
description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
files = [
- {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"},
- {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"},
+ {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"},
+ {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"},
]
[package.dependencies]
From 0edf1cacf72c2307b0a3611b6fbef1033cbc2b2a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 12 Jun 2024 10:52:26 +0100
Subject: [PATCH 165/503] Bump types-jsonschema from 4.21.0.20240311 to
4.22.0.20240610 (#17288)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 76463d493c..028a11f527 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2808,13 +2808,13 @@ files = [
[[package]]
name = "types-jsonschema"
-version = "4.21.0.20240311"
+version = "4.22.0.20240610"
description = "Typing stubs for jsonschema"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-jsonschema-4.21.0.20240311.tar.gz", hash = "sha256:f7165ce70abd91df490c73b089873afd2899c5e56430ee495b64f851ad01f287"},
- {file = "types_jsonschema-4.21.0.20240311-py3-none-any.whl", hash = "sha256:e872f5661513824edf9698f73a66c9c114713d93eab58699bd0532e7e6db5750"},
+ {file = "types-jsonschema-4.22.0.20240610.tar.gz", hash = "sha256:f82ab9fe756e3a2642ea9712c46b403ce61eb380b939b696cff3252af42f65b0"},
+ {file = "types_jsonschema-4.22.0.20240610-py3-none-any.whl", hash = "sha256:89996b9bd1928f820a0e252b2844be21cd2e55d062b6fa1048d88453006ad89e"},
]
[package.dependencies]
From f1c4dfb08b530f2bfaf9c6723ce69ccd231a3370 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Wed, 12 Jun 2024 04:27:46 -0600
Subject: [PATCH 166/503] Add report room API (MSC4151) (#17270)
https://github.com/matrix-org/matrix-spec-proposals/pull/4151
This is intended to be enabled by default for immediate use. When FCP is
complete, the unstable endpoint will be dropped and stable endpoint
supported instead - no backwards compatibility is expected for the
unstable endpoint.
---
changelog.d/17270.feature | 1 +
synapse/config/experimental.py | 3 +
synapse/rest/__init__.py | 4 +-
.../client/{report_event.py => reporting.py} | 57 +++++++++++-
synapse/rest/client/versions.py | 2 +
synapse/storage/databases/main/room.py | 32 +++++++
.../main/delta/85/06_add_room_reports.sql | 20 ++++
tests/rest/admin/test_event_reports.py | 6 +-
...test_report_event.py => test_reporting.py} | 93 ++++++++++++++++++-
9 files changed, 210 insertions(+), 8 deletions(-)
create mode 100644 changelog.d/17270.feature
rename synapse/rest/client/{report_event.py => reporting.py} (65%)
create mode 100644 synapse/storage/schema/main/delta/85/06_add_room_reports.sql
rename tests/rest/client/{test_report_event.py => test_reporting.py} (64%)
diff --git a/changelog.d/17270.feature b/changelog.d/17270.feature
new file mode 100644
index 0000000000..4ea5e7be85
--- /dev/null
+++ b/changelog.d/17270.feature
@@ -0,0 +1 @@
+Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 75fe6d7b24..5fe5b951dd 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -443,3 +443,6 @@ class ExperimentalConfig(Config):
self.msc3916_authenticated_media_enabled = experimental.get(
"msc3916_authenticated_media_enabled", False
)
+
+ # MSC4151: Report room API (Client-Server API)
+ self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 534dc0e276..0024ccf708 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -53,7 +53,7 @@ from synapse.rest.client import (
register,
relations,
rendezvous,
- report_event,
+ reporting,
room,
room_keys,
room_upgrade_rest_servlet,
@@ -128,7 +128,7 @@ class ClientRestResource(JsonResource):
tags.register_servlets(hs, client_resource)
account_data.register_servlets(hs, client_resource)
if is_main_process:
- report_event.register_servlets(hs, client_resource)
+ reporting.register_servlets(hs, client_resource)
openid.register_servlets(hs, client_resource)
notifications.register_servlets(hs, client_resource)
devices.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/reporting.py
similarity index 65%
rename from synapse/rest/client/report_event.py
rename to synapse/rest/client/reporting.py
index 447281931e..a95b83b14d 100644
--- a/synapse/rest/client/report_event.py
+++ b/synapse/rest/client/reporting.py
@@ -23,17 +23,28 @@ import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
+from synapse._pydantic_compat import HAS_PYDANTIC_V2
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.servlet import (
+ RestServlet,
+ parse_and_validate_json_object_from_request,
+ parse_json_object_from_request,
+)
from synapse.http.site import SynapseRequest
from synapse.types import JsonDict
+from synapse.types.rest import RequestBodyModel
from ._base import client_patterns
if TYPE_CHECKING:
from synapse.server import HomeServer
+if TYPE_CHECKING or HAS_PYDANTIC_V2:
+ from pydantic.v1 import StrictStr
+else:
+ from pydantic import StrictStr
+
logger = logging.getLogger(__name__)
@@ -95,5 +106,49 @@ class ReportEventRestServlet(RestServlet):
return 200, {}
+class ReportRoomRestServlet(RestServlet):
+ # https://github.com/matrix-org/matrix-spec-proposals/pull/4151
+ PATTERNS = client_patterns(
+ "/org.matrix.msc4151/rooms/(?P[^/]*)/report$",
+ releases=[],
+ v1=False,
+ unstable=True,
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.clock = hs.get_clock()
+ self.store = hs.get_datastores().main
+
+ class PostBody(RequestBodyModel):
+ reason: StrictStr
+
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+ user_id = requester.user.to_string()
+
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
+
+ room = await self.store.get_room(room_id)
+ if room is None:
+ raise NotFoundError("Room does not exist")
+
+ await self.store.add_room_report(
+ room_id=room_id,
+ user_id=user_id,
+ reason=body.reason,
+ received_ts=self.clock.time_msec(),
+ )
+
+ return 200, {}
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReportEventRestServlet(hs).register(http_server)
+
+ if hs.config.experimental.msc4151_enabled:
+ ReportRoomRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 56de6906d0..f428158139 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -149,6 +149,8 @@ class VersionsRestServlet(RestServlet):
is not None
)
),
+ # MSC4151: Report room API (Client-Server API)
+ "org.matrix.msc4151": self.config.experimental.msc4151_enabled,
},
},
)
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 616c941687..b8a71c803e 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -2207,6 +2207,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
super().__init__(database, db_conn, hs)
self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")
+ self._room_reports_id_gen = IdGenerator(db_conn, "room_reports", "id")
self._instance_name = hs.get_instance_name()
@@ -2416,6 +2417,37 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
)
return next_id
+ async def add_room_report(
+ self,
+ room_id: str,
+ user_id: str,
+ reason: str,
+ received_ts: int,
+ ) -> int:
+ """Add a room report
+
+ Args:
+ room_id: The room ID being reported.
+ user_id: User who reports the room.
+ reason: Description that the user specifies.
+ received_ts: Time when the user submitted the report (milliseconds).
+ Returns:
+ Id of the room report.
+ """
+ next_id = self._room_reports_id_gen.get_next()
+ await self.db_pool.simple_insert(
+ table="room_reports",
+ values={
+ "id": next_id,
+ "received_ts": received_ts,
+ "room_id": room_id,
+ "user_id": user_id,
+ "reason": reason,
+ },
+ desc="add_room_report",
+ )
+ return next_id
+
async def block_room(self, room_id: str, user_id: str) -> None:
"""Marks the room as blocked.
diff --git a/synapse/storage/schema/main/delta/85/06_add_room_reports.sql b/synapse/storage/schema/main/delta/85/06_add_room_reports.sql
new file mode 100644
index 0000000000..f7b45276cf
--- /dev/null
+++ b/synapse/storage/schema/main/delta/85/06_add_room_reports.sql
@@ -0,0 +1,20 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- .
+
+CREATE TABLE room_reports (
+ id BIGINT NOT NULL PRIMARY KEY,
+ received_ts BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ reason TEXT NOT NULL
+);
diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py
index a0f978911a..feb410a11d 100644
--- a/tests/rest/admin/test_event_reports.py
+++ b/tests/rest/admin/test_event_reports.py
@@ -24,7 +24,7 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.errors import Codes
-from synapse.rest.client import login, report_event, room
+from synapse.rest.client import login, reporting, room
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util import Clock
@@ -37,7 +37,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
- report_event.register_servlets,
+ reporting.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -453,7 +453,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
- report_event.register_servlets,
+ reporting.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_reporting.py
similarity index 64%
rename from tests/rest/client/test_report_event.py
rename to tests/rest/client/test_reporting.py
index 5903771e52..009deb9cb0 100644
--- a/tests/rest/client/test_report_event.py
+++ b/tests/rest/client/test_reporting.py
@@ -22,7 +22,7 @@
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
-from synapse.rest.client import login, report_event, room
+from synapse.rest.client import login, reporting, room
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util import Clock
@@ -35,7 +35,7 @@ class ReportEventTestCase(unittest.HomeserverTestCase):
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
- report_event.register_servlets,
+ reporting.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -139,3 +139,92 @@ class ReportEventTestCase(unittest.HomeserverTestCase):
"POST", self.report_path, data, access_token=self.other_user_tok
)
self.assertEqual(response_status, channel.code, msg=channel.result["body"])
+
+
+class ReportRoomTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ reporting.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.other_user = self.register_user("user", "pass")
+ self.other_user_tok = self.login("user", "pass")
+
+ self.room_id = self.helper.create_room_as(
+ self.other_user, tok=self.other_user_tok, is_public=True
+ )
+ self.report_path = (
+ f"/_matrix/client/unstable/org.matrix.msc4151/rooms/{self.room_id}/report"
+ )
+
+ @unittest.override_config(
+ {
+ "experimental_features": {"msc4151_enabled": True},
+ }
+ )
+ def test_reason_str(self) -> None:
+ data = {"reason": "this makes me sad"}
+ self._assert_status(200, data)
+
+ @unittest.override_config(
+ {
+ "experimental_features": {"msc4151_enabled": True},
+ }
+ )
+ def test_no_reason(self) -> None:
+ data = {"not_reason": "for typechecking"}
+ self._assert_status(400, data)
+
+ @unittest.override_config(
+ {
+ "experimental_features": {"msc4151_enabled": True},
+ }
+ )
+ def test_reason_nonstring(self) -> None:
+ data = {"reason": 42}
+ self._assert_status(400, data)
+
+ @unittest.override_config(
+ {
+ "experimental_features": {"msc4151_enabled": True},
+ }
+ )
+ def test_reason_null(self) -> None:
+ data = {"reason": None}
+ self._assert_status(400, data)
+
+ @unittest.override_config(
+ {
+ "experimental_features": {"msc4151_enabled": True},
+ }
+ )
+ def test_cannot_report_nonexistent_room(self) -> None:
+ """
+        Tests that we don't accept room reports for rooms which do not exist.
+ """
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc4151/rooms/!bloop:example.org/report",
+ {"reason": "i am very sad"},
+ access_token=self.other_user_tok,
+ shorthand=False,
+ )
+ self.assertEqual(404, channel.code, msg=channel.result["body"])
+ self.assertEqual(
+ "Room does not exist",
+ channel.json_body["error"],
+ msg=channel.result["body"],
+ )
+
+ def _assert_status(self, response_status: int, data: JsonDict) -> None:
+ channel = self.make_request(
+ "POST",
+ self.report_path,
+ data,
+ access_token=self.other_user_tok,
+ shorthand=False,
+ )
+ self.assertEqual(response_status, channel.code, msg=channel.result["body"])
From 5db3eec5bcd3bc2b92c44e784264dfb5abaf89f6 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Thu, 13 Jun 2024 15:49:00 +0200
Subject: [PATCH 167/503] Clarify that MSC4151 is enabled on matrix.org
(#17296)
This clarifies in the comments that the MSC is being used in matrix.org
See #17270
---
changelog.d/17296.feature | 1 +
synapse/rest/client/reporting.py | 10 +++++++++-
2 files changed, 10 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17296.feature
diff --git a/changelog.d/17296.feature b/changelog.d/17296.feature
new file mode 100644
index 0000000000..4ea5e7be85
--- /dev/null
+++ b/changelog.d/17296.feature
@@ -0,0 +1 @@
+Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.
diff --git a/synapse/rest/client/reporting.py b/synapse/rest/client/reporting.py
index a95b83b14d..4eee53e5a8 100644
--- a/synapse/rest/client/reporting.py
+++ b/synapse/rest/client/reporting.py
@@ -107,7 +107,15 @@ class ReportEventRestServlet(RestServlet):
class ReportRoomRestServlet(RestServlet):
- # https://github.com/matrix-org/matrix-spec-proposals/pull/4151
+ """This endpoint lets clients report a room for abuse.
+
+ Whilst MSC4151 is not yet merged, this unstable endpoint is enabled on matrix.org
+ for content moderation purposes, and therefore backwards compatibility should be
+ carefully considered when changing anything on this endpoint.
+
+ More details on the MSC: https://github.com/matrix-org/matrix-spec-proposals/pull/4151
+ """
+
PATTERNS = client_patterns(
"/org.matrix.msc4151/rooms/(?P[^/]*)/report$",
releases=[],
From c6eb99c87861c9184be38107dcdf972bad6e1cf0 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 13 Jun 2024 15:50:10 +0100
Subject: [PATCH 168/503] Bump `mypy` from 1.8.0 to 1.9.0 (#17297)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
changelog.d/17297.misc | 1 +
poetry.lock | 56 +++++++++++++++----------------
tests/push/test_email.py | 37 +++++++++++++++-----
tests/rest/client/test_account.py | 28 +++++++++++++---
4 files changed, 82 insertions(+), 40 deletions(-)
create mode 100644 changelog.d/17297.misc
diff --git a/changelog.d/17297.misc b/changelog.d/17297.misc
new file mode 100644
index 0000000000..7ec351d2c1
--- /dev/null
+++ b/changelog.d/17297.misc
@@ -0,0 +1 @@
+Bump `mypy` from 1.8.0 to 1.9.0.
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index 028a11f527..7b169ceb6e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1384,38 +1384,38 @@ files = [
[[package]]
name = "mypy"
-version = "1.8.0"
+version = "1.9.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "mypy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3"},
- {file = "mypy-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4"},
- {file = "mypy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d"},
- {file = "mypy-1.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9"},
- {file = "mypy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410"},
- {file = "mypy-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae"},
- {file = "mypy-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3"},
- {file = "mypy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817"},
- {file = "mypy-1.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d"},
- {file = "mypy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835"},
- {file = "mypy-1.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd"},
- {file = "mypy-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55"},
- {file = "mypy-1.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218"},
- {file = "mypy-1.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3"},
- {file = "mypy-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e"},
- {file = "mypy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6"},
- {file = "mypy-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66"},
- {file = "mypy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6"},
- {file = "mypy-1.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d"},
- {file = "mypy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02"},
- {file = "mypy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8"},
- {file = "mypy-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259"},
- {file = "mypy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b"},
- {file = "mypy-1.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592"},
- {file = "mypy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a"},
- {file = "mypy-1.8.0-py3-none-any.whl", hash = "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d"},
- {file = "mypy-1.8.0.tar.gz", hash = "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07"},
+ {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"},
+ {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"},
+ {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"},
+ {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"},
+ {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"},
+ {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"},
+ {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"},
+ {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"},
+ {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"},
+ {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"},
+ {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"},
+ {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"},
+ {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"},
+ {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"},
+ {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"},
+ {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"},
+ {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"},
+ {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"},
+ {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"},
+ {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"},
+ {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"},
+ {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"},
+ {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"},
+ {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"},
+ {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"},
+ {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"},
+ {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"},
]
[package.dependencies]
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index c927a73fa6..e0aab1c046 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -205,8 +205,24 @@ class EmailPusherTests(HomeserverTestCase):
# Multipart: plain text, base 64 encoded; html, base 64 encoded
multipart_msg = email.message_from_bytes(msg)
- txt = multipart_msg.get_payload()[0].get_payload(decode=True).decode()
- html = multipart_msg.get_payload()[1].get_payload(decode=True).decode()
+
+ # Extract the text (non-HTML) portion of the multipart Message,
+ # as a Message.
+ txt_message = multipart_msg.get_payload(i=0)
+ assert isinstance(txt_message, email.message.Message)
+
+ # Extract the actual bytes from the Message object, and decode them to a `str`.
+ txt_bytes = txt_message.get_payload(decode=True)
+ assert isinstance(txt_bytes, bytes)
+ txt = txt_bytes.decode()
+
+ # Do the same for the HTML portion of the multipart Message.
+ html_message = multipart_msg.get_payload(i=1)
+ assert isinstance(html_message, email.message.Message)
+ html_bytes = html_message.get_payload(decode=True)
+ assert isinstance(html_bytes, bytes)
+ html = html_bytes.decode()
+
self.assertIn("/_synapse/client/unsubscribe", txt)
self.assertIn("/_synapse/client/unsubscribe", html)
@@ -347,12 +363,17 @@ class EmailPusherTests(HomeserverTestCase):
# That email should contain the room's avatar
msg: bytes = args[5]
# Multipart: plain text, base 64 encoded; html, base 64 encoded
- html = (
- email.message_from_bytes(msg)
- .get_payload()[1]
- .get_payload(decode=True)
- .decode()
- )
+
+ # Extract the html Message object from the Multipart Message.
+ # We need the asserts to convince mypy that this is OK.
+ html_message = email.message_from_bytes(msg).get_payload(i=1)
+ assert isinstance(html_message, email.message.Message)
+
+ # Extract the `bytes` from the html Message object, and decode to a `str`.
+ html = html_message.get_payload(decode=True)
+ assert isinstance(html, bytes)
+ html = html.decode()
+
self.assertIn("_matrix/media/v1/thumbnail/DUMMY_MEDIA_ID", html)
def test_empty_room(self) -> None:
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index 992421ffe2..a85ea994de 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -427,13 +427,23 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
text = None
for part in mail.walk():
if part.get_content_type() == "text/plain":
- text = part.get_payload(decode=True).decode("UTF-8")
+ text = part.get_payload(decode=True)
+ if text is not None:
+ # According to the logic table in `get_payload`, we know that
+ # the result of `get_payload` will be `bytes`, but mypy doesn't
+ # know this and complains. Thus, we assert the type.
+ assert isinstance(text, bytes)
+ text = text.decode("UTF-8")
+
break
if not text:
self.fail("Could not find text portion of email to parse")
- assert text is not None
+ # `text` must be a `str`, after being decoded and determined just above
+ # to not be `None` or an empty `str`.
+ assert isinstance(text, str)
+
match = re.search(r"https://example.com\S+", text)
assert match, "Could not find link in email"
@@ -1209,13 +1219,23 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
text = None
for part in mail.walk():
if part.get_content_type() == "text/plain":
- text = part.get_payload(decode=True).decode("UTF-8")
+ text = part.get_payload(decode=True)
+ if text is not None:
+ # According to the logic table in `get_payload`, we know that
+ # the result of `get_payload` will be `bytes`, but mypy doesn't
+ # know this and complains. Thus, we assert the type.
+ assert isinstance(text, bytes)
+ text = text.decode("UTF-8")
+
break
if not text:
self.fail("Could not find text portion of email to parse")
- assert text is not None
+ # `text` must be a `str`, after being decoded and determined just above
+ # to not be `None` or an empty `str`.
+ assert isinstance(text, str)
+
match = re.search(r"https://example.com\S+", text)
assert match, "Could not find link in email"
From ebdce69f6af3863c9db2c00d6f78eae7ec9433f5 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 13 Jun 2024 11:00:52 -0500
Subject: [PATCH 169/503] Fix
`get_last_event_in_room_before_stream_ordering(...)` finding the wrong last
event (#17295)
PR where this was introduced: https://github.com/matrix-org/synapse/pull/14817
### What does this affect?
`get_last_event_in_room_before_stream_ordering(...)` is used in Sync v2 in a lot of different state calculations.
`get_last_event_in_room_before_stream_ordering(...)` is also used in `/rooms/{roomId}/members`
---
changelog.d/17295.bugfix | 1 +
synapse/storage/databases/main/stream.py | 30 ++-
tests/storage/test_stream.py | 269 ++++++++++++++++++++++-
3 files changed, 289 insertions(+), 11 deletions(-)
create mode 100644 changelog.d/17295.bugfix
diff --git a/changelog.d/17295.bugfix b/changelog.d/17295.bugfix
new file mode 100644
index 0000000000..4484253bb8
--- /dev/null
+++ b/changelog.d/17295.bugfix
@@ -0,0 +1 @@
+Fix edge case in `/sync` returning the wrong state when using sharded event persisters.
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 7ab6003f61..61373f0bfb 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -914,12 +914,23 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
def get_last_event_in_room_before_stream_ordering_txn(
txn: LoggingTransaction,
) -> Optional[str]:
- # We need to handle the fact that the stream tokens can be vector
- # clocks. We do this by getting all rows between the minimum and
- # maximum stream ordering in the token, plus one row less than the
- # minimum stream ordering. We then filter the results against the
- # token and return the first row that matches.
+ # We're looking for the closest event at or before the token. We need to
+ # handle the fact that the stream token can be a vector clock (with an
+ # `instance_map`) and events can be persisted on different instances
+ # (sharded event persisters). The first subquery handles the events that
+ # would be within the vector clock and gets all rows between the minimum and
+ # maximum stream ordering in the token which need to be filtered against the
+ # `instance_map`. The second subquery handles the "before" case and finds
+ # the first row before the token. We then filter out any results past the
+ # token's vector clock and return the first row that matches.
+ min_stream = end_token.stream
+ max_stream = end_token.get_max_stream_pos()
+ # We use `union all` because we don't need any of the deduplication logic
+ # (`union` is really a union + distinct). `UNION ALL` does preserve the
+            # ordering of the operand queries but there is no actual guarantee that it
+ # has this behavior in all scenarios so we need the extra `ORDER BY` at the
+ # bottom.
sql = """
SELECT * FROM (
SELECT instance_name, stream_ordering, topological_ordering, event_id
@@ -931,7 +942,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
AND rejections.event_id IS NULL
ORDER BY stream_ordering DESC
) AS a
- UNION
+ UNION ALL
SELECT * FROM (
SELECT instance_name, stream_ordering, topological_ordering, event_id
FROM events
@@ -943,15 +954,16 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
ORDER BY stream_ordering DESC
LIMIT 1
) AS b
+ ORDER BY stream_ordering DESC
"""
txn.execute(
sql,
(
room_id,
- end_token.stream,
- end_token.get_max_stream_pos(),
+ min_stream,
+ max_stream,
room_id,
- end_token.stream,
+ min_stream,
),
)
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index 2029cd9c68..ee34baf46f 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -19,7 +19,10 @@
#
#
-from typing import List
+import logging
+from typing import List, Tuple
+
+from immutabledict import immutabledict
from twisted.test.proto_helpers import MemoryReactor
@@ -28,11 +31,13 @@ from synapse.api.filtering import Filter
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
-from synapse.types import JsonDict
+from synapse.types import JsonDict, PersistedEventPosition, RoomStreamToken
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
+logger = logging.getLogger(__name__)
+
class PaginationTestCase(HomeserverTestCase):
"""
@@ -268,3 +273,263 @@ class PaginationTestCase(HomeserverTestCase):
}
chunk = self._filter_messages(filter)
self.assertEqual(chunk, [self.event_id_1, self.event_id_2, self.event_id_none])
+
+
+class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
+ """
+ Test `get_last_event_in_room_before_stream_ordering(...)`
+ """
+
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+
+ def _update_persisted_instance_name_for_event(
+ self, event_id: str, instance_name: str
+ ) -> None:
+ """
+        Update the `instance_name` that persisted the event in the database.
+ """
+ return self.get_success(
+ self.store.db_pool.simple_update_one(
+ "events",
+ keyvalues={"event_id": event_id},
+ updatevalues={"instance_name": instance_name},
+ )
+ )
+
+ def _send_event_on_instance(
+ self, instance_name: str, room_id: str, access_token: str
+ ) -> Tuple[JsonDict, PersistedEventPosition]:
+ """
+ Send an event in a room and mimic that it was persisted by a specific
+ instance/worker.
+ """
+ event_response = self.helper.send(
+ room_id, f"{instance_name} message", tok=access_token
+ )
+
+ self._update_persisted_instance_name_for_event(
+ event_response["event_id"], instance_name
+ )
+
+ event_pos = self.get_success(
+ self.store.get_position_for_event(event_response["event_id"])
+ )
+
+ return event_response, event_pos
+
+ def test_before_room_created(self) -> None:
+ """
+ Test that no event is returned if we are using a token before the room was even created
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ before_room_token = self.event_sources.get_current_token()
+
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+ last_event = self.get_success(
+ self.store.get_last_event_in_room_before_stream_ordering(
+ room_id=room_id,
+ end_token=before_room_token.room_key,
+ )
+ )
+
+ self.assertIsNone(last_event)
+
+ def test_after_room_created(self) -> None:
+ """
+ Test that an event is returned if we are using a token after the room was created
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+ after_room_token = self.event_sources.get_current_token()
+
+ last_event = self.get_success(
+ self.store.get_last_event_in_room_before_stream_ordering(
+ room_id=room_id,
+ end_token=after_room_token.room_key,
+ )
+ )
+
+ self.assertIsNotNone(last_event)
+
+ def test_activity_in_other_rooms(self) -> None:
+ """
+ Test to make sure that the last event in the room is returned even if the
+ `stream_ordering` has advanced from activity in other rooms.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ event_response = self.helper.send(room_id1, "target!", tok=user1_tok)
+ # Create another room to advance the stream_ordering
+ self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+ after_room_token = self.event_sources.get_current_token()
+
+ last_event = self.get_success(
+ self.store.get_last_event_in_room_before_stream_ordering(
+ room_id=room_id1,
+ end_token=after_room_token.room_key,
+ )
+ )
+
+ # Make sure it's the event we expect (which also means we know it's from the
+ # correct room)
+ self.assertEqual(last_event, event_response["event_id"])
+
+ def test_activity_after_token_has_no_effect(self) -> None:
+ """
+ Test to make sure we return the last event before the token even if there is
+ activity after it.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ event_response = self.helper.send(room_id1, "target!", tok=user1_tok)
+
+ after_room_token = self.event_sources.get_current_token()
+
+ # Send some events after the token
+ self.helper.send(room_id1, "after1", tok=user1_tok)
+ self.helper.send(room_id1, "after2", tok=user1_tok)
+
+ last_event = self.get_success(
+ self.store.get_last_event_in_room_before_stream_ordering(
+ room_id=room_id1,
+ end_token=after_room_token.room_key,
+ )
+ )
+
+ # Make sure it's the last event before the token
+ self.assertEqual(last_event, event_response["event_id"])
+
+ def test_last_event_within_sharded_token(self) -> None:
+ """
+        Test to make sure we can find the last event that is *within* the sharded
+ token (a token that has an `instance_map` and looks like
+ `m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}`). We are specifically testing
+        that we can find an event within the token's minimum and instance
+ `stream_ordering`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ event_response1, event_pos1 = self._send_event_on_instance(
+ "worker1", room_id1, user1_tok
+ )
+ event_response2, event_pos2 = self._send_event_on_instance(
+ "worker1", room_id1, user1_tok
+ )
+ event_response3, event_pos3 = self._send_event_on_instance(
+ "worker1", room_id1, user1_tok
+ )
+
+ # Create another room to advance the `stream_ordering` on the same worker
+ # so we can sandwich event3 in the middle of the token
+ room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ event_response4, event_pos4 = self._send_event_on_instance(
+ "worker1", room_id2, user1_tok
+ )
+
+ # Assemble a token that encompasses event1 -> event4 on worker1
+ end_token = RoomStreamToken(
+ stream=event_pos2.stream,
+ instance_map=immutabledict({"worker1": event_pos4.stream}),
+ )
+
+ # Send some events after the token
+ self.helper.send(room_id1, "after1", tok=user1_tok)
+ self.helper.send(room_id1, "after2", tok=user1_tok)
+
+ last_event = self.get_success(
+ self.store.get_last_event_in_room_before_stream_ordering(
+ room_id=room_id1,
+ end_token=end_token,
+ )
+ )
+
+ # Should find closest event at/before the token in room1
+ self.assertEqual(
+ last_event,
+ event_response3["event_id"],
+ f"We expected {event_response3['event_id']} but saw {last_event} which corresponds to "
+ + str(
+ {
+ "event1": event_response1["event_id"],
+ "event2": event_response2["event_id"],
+ "event3": event_response3["event_id"],
+ }
+ ),
+ )
+
+ def test_last_event_before_sharded_token(self) -> None:
+ """
+ Test to make sure we can find the last event that is *before* the sharded token
+ (a token that has an `instance_map` and looks like
+ `m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}`).
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ event_response1, event_pos1 = self._send_event_on_instance(
+ "worker1", room_id1, user1_tok
+ )
+ event_response2, event_pos2 = self._send_event_on_instance(
+ "worker1", room_id1, user1_tok
+ )
+
+ # Create another room to advance the `stream_ordering` on the same worker
+ room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ event_response3, event_pos3 = self._send_event_on_instance(
+ "worker1", room_id2, user1_tok
+ )
+ event_response4, event_pos4 = self._send_event_on_instance(
+ "worker1", room_id2, user1_tok
+ )
+
+ # Assemble a token that encompasses event3 -> event4 on worker1
+ end_token = RoomStreamToken(
+ stream=event_pos3.stream,
+ instance_map=immutabledict({"worker1": event_pos4.stream}),
+ )
+
+ # Send some events after the token
+ self.helper.send(room_id1, "after1", tok=user1_tok)
+ self.helper.send(room_id1, "after2", tok=user1_tok)
+
+ last_event = self.get_success(
+ self.store.get_last_event_in_room_before_stream_ordering(
+ room_id=room_id1,
+ end_token=end_token,
+ )
+ )
+
+ # Should find closest event at/before the token in room1
+ self.assertEqual(
+ last_event,
+ event_response2["event_id"],
+ f"We expected {event_response2['event_id']} but saw {last_event} which corresponds to "
+ + str(
+ {
+ "event1": event_response1["event_id"],
+ "event2": event_response2["event_id"],
+ }
+ ),
+ )
From 8c58eb7f17bdc697e653c7920edab42ee36f975b Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 13 Jun 2024 11:32:50 -0500
Subject: [PATCH 170/503] Add `event.internal_metadata.instance_name` (#17300)
Add `event.internal_metadata.instance_name` (the worker instance that persisted the event) to go alongside the existing `event.internal_metadata.stream_ordering`.
`instance_name` is useful to properly compare and query for events with a token since you need to compare both the `stream_ordering` and `instance_name` against the vector clock/`instance_map` in the `RoomStreamToken`.
This is pre-requisite work and may be used in https://github.com/element-hq/synapse/pull/17293
Adding `event.internal_metadata.instance_name` was first mentioned in the initial Sliding Sync PR while pairing with @erikjohnston, see https://github.com/element-hq/synapse/pull/17187/commits/09609cb0dbca3a4cfd9fbf90cc962e765ec469c0#diff-5cd773fb307aa754bd3948871ba118b1ef0303f4d72d42a2d21e38242bf4e096R405-R410
---
changelog.d/17300.misc | 1 +
rust/src/events/internal_metadata.rs | 3 +++
synapse/events/utils.py | 2 ++
synapse/handlers/message.py | 1 +
synapse/storage/databases/main/events.py | 1 +
synapse/storage/databases/main/events_worker.py | 16 ++++++++++------
synapse/synapse_rust/events.pyi | 2 ++
tests/events/test_utils.py | 3 +++
tests/replication/storage/test_events.py | 10 +++++++---
tests/storage/test_event_chain.py | 1 +
10 files changed, 31 insertions(+), 9 deletions(-)
create mode 100644 changelog.d/17300.misc
diff --git a/changelog.d/17300.misc b/changelog.d/17300.misc
new file mode 100644
index 0000000000..cdc40bb2e5
--- /dev/null
+++ b/changelog.d/17300.misc
@@ -0,0 +1 @@
+Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`.
diff --git a/rust/src/events/internal_metadata.rs b/rust/src/events/internal_metadata.rs
index 63774fbd54..ad87825f16 100644
--- a/rust/src/events/internal_metadata.rs
+++ b/rust/src/events/internal_metadata.rs
@@ -204,6 +204,8 @@ pub struct EventInternalMetadata {
/// The stream ordering of this event. None, until it has been persisted.
#[pyo3(get, set)]
stream_ordering: Option,
+ #[pyo3(get, set)]
+ instance_name: Option,
/// whether this event is an outlier (ie, whether we have the state at that
/// point in the DAG)
@@ -232,6 +234,7 @@ impl EventInternalMetadata {
Ok(EventInternalMetadata {
data,
stream_ordering: None,
+ instance_name: None,
outlier: false,
})
}
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 0772472312..b997d82d71 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -90,6 +90,7 @@ def prune_event(event: EventBase) -> EventBase:
pruned_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
)
+ pruned_event.internal_metadata.instance_name = event.internal_metadata.instance_name
pruned_event.internal_metadata.outlier = event.internal_metadata.outlier
# Mark the event as redacted
@@ -116,6 +117,7 @@ def clone_event(event: EventBase) -> EventBase:
new_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
)
+ new_event.internal_metadata.instance_name = event.internal_metadata.instance_name
new_event.internal_metadata.outlier = event.internal_metadata.outlier
return new_event
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index de5bd44a5f..721ef04f41 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1551,6 +1551,7 @@ class EventCreationHandler:
# stream_ordering entry manually (as it was persisted on
# another worker).
event.internal_metadata.stream_ordering = stream_id
+ event.internal_metadata.instance_name = writer_instance
return event
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index f1bd85aa27..66428e6c8e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -207,6 +207,7 @@ class PersistEventsStore:
async with stream_ordering_manager as stream_orderings:
for (event, _), stream in zip(events_and_contexts, stream_orderings):
event.internal_metadata.stream_ordering = stream
+ event.internal_metadata.instance_name = self._instance_name
await self.db_pool.runInteraction(
"persist_events",
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index c06c44deb1..e264d36f02 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -156,6 +156,7 @@ class _EventRow:
event_id: str
stream_ordering: int
+ instance_name: str
json: str
internal_metadata: str
format_version: Optional[int]
@@ -1354,6 +1355,7 @@ class EventsWorkerStore(SQLBaseStore):
rejected_reason=rejected_reason,
)
original_ev.internal_metadata.stream_ordering = row.stream_ordering
+ original_ev.internal_metadata.instance_name = row.instance_name
original_ev.internal_metadata.outlier = row.outlier
# Consistency check: if the content of the event has been modified in the
@@ -1439,6 +1441,7 @@ class EventsWorkerStore(SQLBaseStore):
SELECT
e.event_id,
e.stream_ordering,
+ e.instance_name,
ej.internal_metadata,
ej.json,
ej.format_version,
@@ -1462,13 +1465,14 @@ class EventsWorkerStore(SQLBaseStore):
event_dict[event_id] = _EventRow(
event_id=event_id,
stream_ordering=row[1],
- internal_metadata=row[2],
- json=row[3],
- format_version=row[4],
- room_version_id=row[5],
- rejected_reason=row[6],
+ instance_name=row[2],
+ internal_metadata=row[3],
+ json=row[4],
+ format_version=row[5],
+ room_version_id=row[6],
+ rejected_reason=row[7],
redactions=[],
- outlier=bool(row[7]), # This is an int in SQLite3
+ outlier=bool(row[8]), # This is an int in SQLite3
)
# check for redactions
diff --git a/synapse/synapse_rust/events.pyi b/synapse/synapse_rust/events.pyi
index 69837617f5..1682d0d151 100644
--- a/synapse/synapse_rust/events.pyi
+++ b/synapse/synapse_rust/events.pyi
@@ -19,6 +19,8 @@ class EventInternalMetadata:
stream_ordering: Optional[int]
"""the stream ordering of this event. None, until it has been persisted."""
+ instance_name: Optional[str]
+ """the instance name of the server that persisted this event. None, until it has been persisted."""
outlier: bool
"""whether this event is an outlier (ie, whether we have the state at that
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index d5ac66a6ed..30f8787758 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -625,6 +625,8 @@ class CloneEventTestCase(stdlib_unittest.TestCase):
)
original.internal_metadata.stream_ordering = 1234
self.assertEqual(original.internal_metadata.stream_ordering, 1234)
+ original.internal_metadata.instance_name = "worker1"
+ self.assertEqual(original.internal_metadata.instance_name, "worker1")
cloned = clone_event(original)
cloned.unsigned["b"] = 3
@@ -632,6 +634,7 @@ class CloneEventTestCase(stdlib_unittest.TestCase):
self.assertEqual(original.unsigned, {"a": 1, "b": 2})
self.assertEqual(cloned.unsigned, {"a": 1, "b": 3})
self.assertEqual(cloned.internal_metadata.stream_ordering, 1234)
+ self.assertEqual(cloned.internal_metadata.instance_name, "worker1")
self.assertEqual(cloned.internal_metadata.txn_id, "txn")
diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py
index 4e41a1c912..a56f1e2d5d 100644
--- a/tests/replication/storage/test_events.py
+++ b/tests/replication/storage/test_events.py
@@ -141,6 +141,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
self.persist(type="m.room.create", key="", creator=USER_ID)
self.check("get_invited_rooms_for_local_user", [USER_ID_2], [])
event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite")
+ assert event.internal_metadata.instance_name is not None
assert event.internal_metadata.stream_ordering is not None
self.replicate()
@@ -155,7 +156,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
"invite",
event.event_id,
PersistedEventPosition(
- self.hs.get_instance_name(),
+ event.internal_metadata.instance_name,
event.internal_metadata.stream_ordering,
),
RoomVersions.V1.identifier,
@@ -232,11 +233,12 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
j2 = self.persist(
type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
)
+ assert j2.internal_metadata.instance_name is not None
assert j2.internal_metadata.stream_ordering is not None
self.replicate()
expected_pos = PersistedEventPosition(
- "master", j2.internal_metadata.stream_ordering
+ j2.internal_metadata.instance_name, j2.internal_metadata.stream_ordering
)
self.check(
"get_rooms_for_user_with_stream_ordering",
@@ -288,6 +290,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
msg, msgctx = self.build_event()
self.get_success(self.persistance.persist_events([(j2, j2ctx), (msg, msgctx)]))
self.replicate()
+ assert j2.internal_metadata.instance_name is not None
assert j2.internal_metadata.stream_ordering is not None
event_source = RoomEventSource(self.hs)
@@ -329,7 +332,8 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
# joined_rooms list.
if membership_changes:
expected_pos = PersistedEventPosition(
- "master", j2.internal_metadata.stream_ordering
+ j2.internal_metadata.instance_name,
+ j2.internal_metadata.stream_ordering,
)
self.assertEqual(
joined_rooms,
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 27d5b0125f..81feb3ec29 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -431,6 +431,7 @@ class EventChainStoreTestCase(HomeserverTestCase):
for e in events:
e.internal_metadata.stream_ordering = self._next_stream_ordering
+ e.internal_metadata.instance_name = self.hs.get_instance_name()
self._next_stream_ordering += 1
def _persist(txn: LoggingTransaction) -> None:
From 8aaff851b1f1dbf74482282e70194a69d13ea584 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 13 Jun 2024 11:36:57 -0500
Subject: [PATCH 171/503] Fix `newly_left` rooms not appearing if we returned
early (Sliding Sync) (#17301)
Fix `newly_left` rooms not appearing if we returned early when `membership_snapshot_token.is_before_or_eq(to_token.room_key)`.
Introduced in https://github.com/element-hq/synapse/pull/17187 (part of Sliding Sync)
The tests didn't catch it because they had a small typo in it `room_id1` vs `room_id2`.
Found while working on https://github.com/element-hq/synapse/pull/17293
---
changelog.d/17301.bugfix | 1 +
synapse/handlers/sliding_sync.py | 26 +++++++++++++-------------
tests/handlers/test_sliding_sync.py | 2 +-
3 files changed, 15 insertions(+), 14 deletions(-)
create mode 100644 changelog.d/17301.bugfix
diff --git a/changelog.d/17301.bugfix b/changelog.d/17301.bugfix
new file mode 100644
index 0000000000..50383cb4a4
--- /dev/null
+++ b/changelog.d/17301.bugfix
@@ -0,0 +1 @@
+Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 1c37f83a2b..de4f33abb8 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -275,12 +275,6 @@ class SlidingSyncHandler:
instance_map=immutabledict(instance_to_max_stream_ordering_map),
)
- # If our `to_token` is already the same or ahead of the latest room membership
- # for the user, we can just straight-up return the room list (nothing has
- # changed)
- if membership_snapshot_token.is_before_or_eq(to_token.room_key):
- return sync_room_id_set
-
# Since we fetched the users room list at some point in time after the from/to
# tokens, we need to revert/rewind some membership changes to match the point in
# time of the `to_token`. In particular, we need to make these fixups:
@@ -300,14 +294,20 @@ class SlidingSyncHandler:
# 1) Fetch membership changes that fall in the range from `to_token` up to
# `membership_snapshot_token`
- membership_change_events_after_to_token = (
- await self.store.get_membership_changes_for_user(
- user_id,
- from_key=to_token.room_key,
- to_key=membership_snapshot_token,
- excluded_rooms=self.rooms_to_exclude_globally,
+ #
+ # If our `to_token` is already the same or ahead of the latest room membership
+ # for the user, we don't need to do any "2)" fix-ups and can just straight-up
+ # use the room list from the snapshot as a base (nothing has changed)
+ membership_change_events_after_to_token = []
+ if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
+ membership_change_events_after_to_token = (
+ await self.store.get_membership_changes_for_user(
+ user_id,
+ from_key=to_token.room_key,
+ to_key=membership_snapshot_token,
+ excluded_rooms=self.rooms_to_exclude_globally,
+ )
)
- )
# 1) Assemble a list of the last membership events in some given ranges. Someone
# could have left and joined multiple times during the given range but we only
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 5c27474b96..41ceb517f0 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -326,7 +326,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
# Leave during the from_token/to_token range (newly_left)
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
- self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id2, user1_id, tok=user1_tok)
after_room2_token = self.event_sources.get_current_token()
From c12ee0d5ba5da8da8bdc0d2318d8a8bdfc7228aa Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 13 Jun 2024 13:56:58 -0500
Subject: [PATCH 172/503] Add `is_dm` filtering to Sliding Sync `/sync`
(#17277)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
---
changelog.d/17277.feature | 1 +
synapse/handlers/sliding_sync.py | 118 +++++++++++++++++++++--
synapse/types/rest/client/__init__.py | 47 ++++++++++
tests/handlers/test_sliding_sync.py | 130 +++++++++++++++++++++++++-
tests/rest/client/test_sync.py | 127 +++++++++++++++++++++++++
5 files changed, 416 insertions(+), 7 deletions(-)
create mode 100644 changelog.d/17277.feature
diff --git a/changelog.d/17277.feature b/changelog.d/17277.feature
new file mode 100644
index 0000000000..5c16342c11
--- /dev/null
+++ b/changelog.d/17277.feature
@@ -0,0 +1 @@
+Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index de4f33abb8..78fb66d6e2 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -22,7 +22,7 @@ from typing import TYPE_CHECKING, AbstractSet, Dict, List, Optional
from immutabledict import immutabledict
-from synapse.api.constants import Membership
+from synapse.api.constants import AccountDataTypes, Membership
from synapse.events import EventBase
from synapse.types import Requester, RoomStreamToken, StreamToken, UserID
from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
@@ -69,9 +69,19 @@ class SlidingSyncHandler:
from_token: Optional[StreamToken] = None,
timeout_ms: int = 0,
) -> SlidingSyncResult:
- """Get the sync for a client if we have new data for it now. Otherwise
+ """
+ Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then
return an empty sync result.
+
+ Args:
+ requester: The user making the request
+ sync_config: Sync configuration
+ from_token: The point in the stream to sync from. Token of the end of the
+ previous batch. May be `None` if this is the initial sync request.
+ timeout_ms: The time in milliseconds to wait for new data to arrive. If 0,
+            we will return immediately but there might not be any new data so we just return an
+ empty response.
"""
# If the user is not part of the mau group, then check that limits have
# not been exceeded (if not part of the group by this point, almost certain
@@ -143,6 +153,14 @@ class SlidingSyncHandler:
"""
Generates the response body of a Sliding Sync result, represented as a
`SlidingSyncResult`.
+
+ We fetch data according to the token range (> `from_token` and <= `to_token`).
+
+ Args:
+ sync_config: Sync configuration
+ to_token: The point in the stream to sync up to.
+ from_token: The point in the stream to sync from. Token of the end of the
+ previous batch. May be `None` if this is the initial sync request.
"""
user_id = sync_config.user.to_string()
app_service = self.store.get_app_service_by_user_id(user_id)
@@ -163,11 +181,12 @@ class SlidingSyncHandler:
lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
if sync_config.lists:
for list_key, list_config in sync_config.lists.items():
- # TODO: Apply filters
- #
- # TODO: Exclude partially stated rooms unless the `required_state` has
- # `["m.room.member", "$LAZY"]`
+ # Apply filters
filtered_room_ids = room_id_set
+ if list_config.filters is not None:
+ filtered_room_ids = await self.filter_rooms(
+ sync_config.user, room_id_set, list_config.filters, to_token
+ )
# TODO: Apply sorts
sorted_room_ids = sorted(filtered_room_ids)
@@ -217,6 +236,12 @@ class SlidingSyncHandler:
`forgotten` flag to the `room_memberships` table in Synapse. There isn't a way
to tell when a room was forgotten at the moment so we can't factor it into the
from/to range.
+
+
+ Args:
+ user: User to fetch rooms for
+ to_token: The token to fetch rooms up to.
+ from_token: The point in the stream to sync from.
"""
user_id = user.to_string()
@@ -439,3 +464,84 @@ class SlidingSyncHandler:
sync_room_id_set.add(room_id)
return sync_room_id_set
+
+ async def filter_rooms(
+ self,
+ user: UserID,
+ room_id_set: AbstractSet[str],
+ filters: SlidingSyncConfig.SlidingSyncList.Filters,
+ to_token: StreamToken,
+ ) -> AbstractSet[str]:
+ """
+ Filter rooms based on the sync request.
+
+ Args:
+ user: User to filter rooms for
+ room_id_set: Set of room IDs to filter down
+ filters: Filters to apply
+ to_token: We filter based on the state of the room at this token
+ """
+ user_id = user.to_string()
+
+ # TODO: Apply filters
+ #
+ # TODO: Exclude partially stated rooms unless the `required_state` has
+ # `["m.room.member", "$LAZY"]`
+
+ filtered_room_id_set = set(room_id_set)
+
+ # Filter for Direct-Message (DM) rooms
+ if filters.is_dm is not None:
+ # We're using global account data (`m.direct`) instead of checking for
+ # `is_direct` on membership events because that property only appears for
+ # the invitee membership event (doesn't show up for the inviter). Account
+ # data is set by the client so it needs to be scrutinized.
+ #
+ # We're unable to take `to_token` into account for global account data since
+ # we only keep track of the latest account data for the user.
+ dm_map = await self.store.get_global_account_data_by_type_for_user(
+ user_id, AccountDataTypes.DIRECT
+ )
+
+ # Flatten out the map
+ dm_room_id_set = set()
+ if dm_map:
+ for room_ids in dm_map.values():
+ # Account data should be a list of room IDs. Ignore anything else
+ if isinstance(room_ids, list):
+ for room_id in room_ids:
+ if isinstance(room_id, str):
+ dm_room_id_set.add(room_id)
+
+ if filters.is_dm:
+ # Only DM rooms please
+ filtered_room_id_set = filtered_room_id_set.intersection(dm_room_id_set)
+ else:
+ # Only non-DM rooms please
+ filtered_room_id_set = filtered_room_id_set.difference(dm_room_id_set)
+
+ if filters.spaces:
+ raise NotImplementedError()
+
+ if filters.is_encrypted:
+ raise NotImplementedError()
+
+ if filters.is_invite:
+ raise NotImplementedError()
+
+ if filters.room_types:
+ raise NotImplementedError()
+
+ if filters.not_room_types:
+ raise NotImplementedError()
+
+ if filters.room_name_like:
+ raise NotImplementedError()
+
+ if filters.tags:
+ raise NotImplementedError()
+
+ if filters.not_tags:
+ raise NotImplementedError()
+
+ return filtered_room_id_set
diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py
index ef261518a0..ec83d0daa6 100644
--- a/synapse/types/rest/client/__init__.py
+++ b/synapse/types/rest/client/__init__.py
@@ -238,6 +238,53 @@ class SlidingSyncBody(RequestBodyModel):
"""
class Filters(RequestBodyModel):
+ """
+ All fields are applied with AND operators, hence if `is_dm: True` and
+ `is_encrypted: True` then only Encrypted DM rooms will be returned. The
+ absence of fields implies no filter on that criteria: it does NOT imply
+ `False`. These fields may be expanded through use of extensions.
+
+ Attributes:
+ is_dm: Flag which only returns rooms present (or not) in the DM section
+ of account data. If unset, both DM rooms and non-DM rooms are returned.
+ If False, only non-DM rooms are returned. If True, only DM rooms are
+ returned.
+ spaces: Filter the room based on the space they belong to according to
+ `m.space.child` state events. If multiple spaces are present, a room can
+ be part of any one of the listed spaces (OR'd). The server will inspect
+ the `m.space.child` state events for the JOINED space room IDs given.
+ Servers MUST NOT navigate subspaces. It is up to the client to give a
+ complete list of spaces to navigate. Only rooms directly mentioned as
+ `m.space.child` events in these spaces will be returned. Unknown spaces
+ or spaces the user is not joined to will be ignored.
+ is_encrypted: Flag which only returns rooms which have an
+ `m.room.encryption` state event. If unset, both encrypted and
+ unencrypted rooms are returned. If `False`, only unencrypted rooms are
+ returned. If `True`, only encrypted rooms are returned.
+ is_invite: Flag which only returns rooms the user is currently invited
+ to. If unset, both invited and joined rooms are returned. If `False`, no
+ invited rooms are returned. If `True`, only invited rooms are returned.
+ room_types: If specified, only rooms where the `m.room.create` event has
+ a `type` matching one of the strings in this array will be returned. If
+ this field is unset, all rooms are returned regardless of type. This can
+ be used to get the initial set of spaces for an account. For rooms which
+ do not have a room type, use `null`/`None` to include them.
+ not_room_types: Same as `room_types` but inverted. This can be used to
+ filter out spaces from the room list. If a type is in both `room_types`
+ and `not_room_types`, then `not_room_types` wins and they are not included
+ in the result.
+ room_name_like: Filter the room name. Case-insensitive partial matching
+ e.g 'foo' matches 'abFooab'. The term 'like' is inspired by SQL 'LIKE',
+ and the text here is similar to '%foo%'.
+ tags: Filter the room based on its room tags. If multiple tags are
+ present, a room can have any one of the listed tags (OR'd).
+ not_tags: Filter the room based on its room tags. Takes priority over
+ `tags`. For example, a room with tags A and B with filters `tags: [A]`
+ `not_tags: [B]` would NOT be included because `not_tags` takes priority over
+ `tags`. This filter is useful if your rooms list does NOT include the
+ list of favourite rooms again.
+ """
+
is_dm: Optional[StrictBool] = None
spaces: Optional[List[StrictStr]] = None
is_encrypted: Optional[StrictBool] = None
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 41ceb517f0..62fe1214fe 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -22,8 +22,9 @@ from unittest.mock import patch
from twisted.test.proto_helpers import MemoryReactor
-from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules, Membership
from synapse.api.room_versions import RoomVersions
+from synapse.handlers.sliding_sync import SlidingSyncConfig
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
@@ -1116,3 +1117,130 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
room_id3,
},
)
+
+
+class FilterRoomsTestCase(HomeserverTestCase):
+ """
+ Tests Sliding Sync handler `filter_rooms()` to make sure it includes/excludes rooms
+ correctly.
+ """
+
+ servlets = [
+ admin.register_servlets,
+ knock.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ # Enable sliding sync
+ config["experimental_features"] = {"msc3575_enabled": True}
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
+ self.store = self.hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+
+ def _create_dm_room(
+ self,
+ inviter_user_id: str,
+ inviter_tok: str,
+ invitee_user_id: str,
+ invitee_tok: str,
+ ) -> str:
+ """
+ Helper to create a DM room as the "inviter" and invite the "invitee" user to the room. The
+ "invitee" user also will join the room. The `m.direct` account data will be set
+ for both users.
+ """
+
+        # Create a room and send an invite to the other user
+ room_id = self.helper.create_room_as(
+ inviter_user_id,
+ is_public=False,
+ tok=inviter_tok,
+ )
+ self.helper.invite(
+ room_id,
+ src=inviter_user_id,
+ targ=invitee_user_id,
+ tok=inviter_tok,
+ extra_data={"is_direct": True},
+ )
+ # Person that was invited joins the room
+ self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
+
+ # Mimic the client setting the room as a direct message in the global account
+ # data
+ self.get_success(
+ self.store.add_account_data_for_user(
+ invitee_user_id,
+ AccountDataTypes.DIRECT,
+ {inviter_user_id: [room_id]},
+ )
+ )
+ self.get_success(
+ self.store.add_account_data_for_user(
+ inviter_user_id,
+ AccountDataTypes.DIRECT,
+ {invitee_user_id: [room_id]},
+ )
+ )
+
+ return room_id
+
+ def test_filter_dm_rooms(self) -> None:
+ """
+ Test `filter.is_dm` for DM rooms
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Create a normal room
+ room_id = self.helper.create_room_as(
+ user1_id,
+ is_public=False,
+ tok=user1_tok,
+ )
+
+ # Create a DM room
+ dm_room_id = self._create_dm_room(
+ inviter_user_id=user1_id,
+ inviter_tok=user1_tok,
+ invitee_user_id=user2_id,
+ invitee_tok=user2_tok,
+ )
+
+ after_rooms_token = self.event_sources.get_current_token()
+
+ # Try with `is_dm=True`
+ truthy_filtered_room_ids = self.get_success(
+ self.sliding_sync_handler.filter_rooms(
+ UserID.from_string(user1_id),
+ {room_id, dm_room_id},
+ SlidingSyncConfig.SlidingSyncList.Filters(
+ is_dm=True,
+ ),
+ after_rooms_token,
+ )
+ )
+
+ self.assertEqual(truthy_filtered_room_ids, {dm_room_id})
+
+ # Try with `is_dm=False`
+ falsy_filtered_room_ids = self.get_success(
+ self.sliding_sync_handler.filter_rooms(
+ UserID.from_string(user1_id),
+ {room_id, dm_room_id},
+ SlidingSyncConfig.SlidingSyncList.Filters(
+ is_dm=False,
+ ),
+ after_rooms_token,
+ )
+ )
+
+ self.assertEqual(falsy_filtered_room_ids, {room_id})
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index a20a3fb40d..40870b2cfe 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -27,6 +27,7 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import (
+ AccountDataTypes,
EventContentFields,
EventTypes,
ReceiptTypes,
@@ -1226,10 +1227,59 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
return config
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
self.sync_endpoint = "/_matrix/client/unstable/org.matrix.msc3575/sync"
self.store = hs.get_datastores().main
self.event_sources = hs.get_event_sources()
+ def _create_dm_room(
+ self,
+ inviter_user_id: str,
+ inviter_tok: str,
+ invitee_user_id: str,
+ invitee_tok: str,
+ ) -> str:
+ """
+ Helper to create a DM room as the "inviter" and invite the "invitee" user to the
+ room. The "invitee" user also will join the room. The `m.direct` account data
+ will be set for both users.
+ """
+
+        # Create a room and send an invite to the other user
+ room_id = self.helper.create_room_as(
+ inviter_user_id,
+ is_public=False,
+ tok=inviter_tok,
+ )
+ self.helper.invite(
+ room_id,
+ src=inviter_user_id,
+ targ=invitee_user_id,
+ tok=inviter_tok,
+ extra_data={"is_direct": True},
+ )
+ # Person that was invited joins the room
+ self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
+
+ # Mimic the client setting the room as a direct message in the global account
+ # data
+ self.get_success(
+ self.store.add_account_data_for_user(
+ invitee_user_id,
+ AccountDataTypes.DIRECT,
+ {inviter_user_id: [room_id]},
+ )
+ )
+ self.get_success(
+ self.store.add_account_data_for_user(
+ inviter_user_id,
+ AccountDataTypes.DIRECT,
+ {invitee_user_id: [room_id]},
+ )
+ )
+
+ return room_id
+
def test_sync_list(self) -> None:
"""
Test that room IDs show up in the Sliding Sync lists
@@ -1336,3 +1386,80 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
self.assertEqual(
channel.json_body["next_pos"], future_position_token_serialized
)
+
+ def test_filter_list(self) -> None:
+ """
+ Test that filters apply to lists
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Create a DM room
+ dm_room_id = self._create_dm_room(
+ inviter_user_id=user1_id,
+ inviter_tok=user1_tok,
+ invitee_user_id=user2_id,
+ invitee_tok=user2_tok,
+ )
+
+ # Create a normal room
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "dms": {
+ "ranges": [[0, 99]],
+ "sort": ["by_recency"],
+ "required_state": [],
+ "timeline_limit": 1,
+ "filters": {"is_dm": True},
+ },
+ "foo-list": {
+ "ranges": [[0, 99]],
+ "sort": ["by_recency"],
+ "required_state": [],
+ "timeline_limit": 1,
+ "filters": {"is_dm": False},
+ },
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Make sure it has the foo-list we requested
+ self.assertListEqual(
+ list(channel.json_body["lists"].keys()),
+ ["dms", "foo-list"],
+ channel.json_body["lists"].keys(),
+ )
+
+ # Make sure the list includes the room we are joined to
+ self.assertListEqual(
+ list(channel.json_body["lists"]["dms"]["ops"]),
+ [
+ {
+ "op": "SYNC",
+ "range": [0, 99],
+ "room_ids": [dm_room_id],
+ }
+ ],
+ list(channel.json_body["lists"]["dms"]),
+ )
+ self.assertListEqual(
+ list(channel.json_body["lists"]["foo-list"]["ops"]),
+ [
+ {
+ "op": "SYNC",
+ "range": [0, 99],
+ "room_ids": [room_id],
+ }
+ ],
+ list(channel.json_body["lists"]["foo-list"]),
+ )
From 2c36a679ae4a8c35619d706edd5f999d099be12c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 13 Jun 2024 22:45:54 +0100
Subject: [PATCH 173/503] Include user membership on events (#17282)
MSC4115 has now completed FCP, so we can enable it by default and switch
to the stable identifier.
---
changelog.d/17282.feature | 1 +
docker/complement/conf/workers-shared-extra.yaml.j2 | 2 --
scripts-dev/complement.sh | 1 -
synapse/api/constants.py | 2 +-
synapse/config/experimental.py | 4 ----
synapse/handlers/admin.py | 2 --
synapse/handlers/events.py | 2 --
synapse/handlers/initial_sync.py | 3 ---
synapse/handlers/pagination.py | 1 -
synapse/handlers/relations.py | 3 ---
synapse/handlers/room.py | 1 -
synapse/handlers/search.py | 4 ----
synapse/handlers/sync.py | 2 --
synapse/notifier.py | 1 -
synapse/push/mailer.py | 1 -
synapse/visibility.py | 12 +++---------
tests/rest/client/test_retention.py | 1 -
tests/test_visibility.py | 10 +++-------
18 files changed, 8 insertions(+), 45 deletions(-)
create mode 100644 changelog.d/17282.feature
diff --git a/changelog.d/17282.feature b/changelog.d/17282.feature
new file mode 100644
index 0000000000..334709a3a7
--- /dev/null
+++ b/changelog.d/17282.feature
@@ -0,0 +1 @@
+Include user membership in events served to clients, per MSC4115.
\ No newline at end of file
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 4c41ee7709..6588b3ce14 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -105,8 +105,6 @@ experimental_features:
# Expose a room summary for public rooms
msc3266_enabled: true
- msc4115_membership_on_events: true
-
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index b306b80749..4ad547bc7e 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -223,7 +223,6 @@ test_packages=(
./tests/msc3930
./tests/msc3902
./tests/msc3967
- ./tests/msc4115
)
# Enable dirty runs, so tests will reuse the same container where possible.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 542e4faaa1..9265a271d2 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -238,7 +238,7 @@ class EventUnsignedContentFields:
"""Fields found inside the 'unsigned' data on events"""
# Requesting user's membership, per MSC4115
- MSC4115_MEMBERSHIP: Final = "io.element.msc4115.membership"
+ MEMBERSHIP: Final = "membership"
class RoomTypes:
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 5fe5b951dd..d9ad5fc32d 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -436,10 +436,6 @@ class ExperimentalConfig(Config):
("experimental", "msc4108_delegation_endpoint"),
)
- self.msc4115_membership_on_events = experimental.get(
- "msc4115_membership_on_events", False
- )
-
self.msc3916_authenticated_media_enabled = experimental.get(
"msc3916_authenticated_media_enabled", False
)
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 21d3bb37f3..ec35784c5f 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -42,7 +42,6 @@ class AdminHandler:
self._device_handler = hs.get_device_handler()
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
- self._hs_config = hs.config
self._msc3866_enabled = hs.config.experimental.msc3866.enabled
async def get_whois(self, user: UserID) -> JsonMapping:
@@ -215,7 +214,6 @@ class AdminHandler:
self._storage_controllers,
user_id,
events,
- msc4115_membership_on_events=self._hs_config.experimental.msc4115_membership_on_events,
)
writer.write_events(room_id, events)
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 09d553cff1..3f46032a43 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -148,7 +148,6 @@ class EventHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
- self._config = hs.config
async def get_event(
self,
@@ -194,7 +193,6 @@ class EventHandler:
user.to_string(),
[event],
is_peeking=is_peeking,
- msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)
if not filtered:
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 84d6fecf31..bd3c87f5f4 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -224,7 +224,6 @@ class InitialSyncHandler:
self._storage_controllers,
user_id,
messages,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@@ -383,7 +382,6 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
@@ -498,7 +496,6 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index dab3f90e74..872c85fbad 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -623,7 +623,6 @@ class PaginationHandler:
user_id,
events,
is_peeking=(member_event_id is None),
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
# if after the filter applied there are no more events
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index de092f8623..efe31e81f9 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -95,7 +95,6 @@ class RelationsHandler:
self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
self._event_creation_handler = hs.get_event_creation_handler()
- self._config = hs.config
async def get_relations(
self,
@@ -164,7 +163,6 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
- msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)
# The relations returned for the requested event do include their
@@ -610,7 +608,6 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
- msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)
aggregations = await self.get_bundled_aggregations(
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 203209427b..2302d283a7 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -1476,7 +1476,6 @@ class RoomContextHandler:
user.to_string(),
events,
is_peeking=is_peeking,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
event = await self.store.get_event(
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index fdbe98de3b..a7d52fa648 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -483,7 +483,6 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
filtered_events,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
events.sort(key=lambda e: -rank_map[e.event_id])
@@ -585,7 +584,6 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
filtered_events,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
room_events.extend(events)
@@ -673,14 +671,12 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
res.events_before,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
events_after = await filter_events_for_client(
self._storage_controllers,
user.to_string(),
res.events_after,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
context: JsonDict = {
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 39964726c5..f1c69d9893 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -844,7 +844,6 @@ class SyncHandler:
sync_config.user.to_string(),
recents,
always_include_ids=current_state_ids,
- msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
)
log_kv({"recents_after_visibility_filtering": len(recents)})
else:
@@ -930,7 +929,6 @@ class SyncHandler:
sync_config.user.to_string(),
loaded_recents,
always_include_ids=current_state_ids,
- msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
)
loaded_recents = []
diff --git a/synapse/notifier.py b/synapse/notifier.py
index ced9e9ad66..c87eb748c0 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -721,7 +721,6 @@ class Notifier:
user.to_string(),
new_events,
is_peeking=is_peeking,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
elif keyname == StreamKeyType.PRESENCE:
now = self.clock.time_msec()
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 49ce9d6dda..77cc69a71f 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -532,7 +532,6 @@ class Mailer:
self._storage_controllers,
user_id,
results.events_before,
- msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
the_events.append(notif_event)
diff --git a/synapse/visibility.py b/synapse/visibility.py
index c891bd845b..128413c8aa 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -82,7 +82,6 @@ async def filter_events_for_client(
is_peeking: bool = False,
always_include_ids: FrozenSet[str] = frozenset(),
filter_send_to_client: bool = True,
- msc4115_membership_on_events: bool = False,
) -> List[EventBase]:
"""
Check which events a user is allowed to see. If the user can see the event but its
@@ -101,12 +100,10 @@ async def filter_events_for_client(
filter_send_to_client: Whether we're checking an event that's going to be
sent to a client. This might not always be the case since this function can
also be called to check whether a user can see the state at a given point.
- msc4115_membership_on_events: Whether to include the requesting user's
- membership in the "unsigned" data, per MSC4115.
Returns:
- The filtered events. If `msc4115_membership_on_events` is true, the `unsigned`
- data is annotated with the membership state of `user_id` at each event.
+ The filtered events. The `unsigned` data is annotated with the membership state
+ of `user_id` at each event.
"""
# Filter out events that have been soft failed so that we don't relay them
# to clients.
@@ -159,9 +156,6 @@ async def filter_events_for_client(
if filtered is None:
return None
- if not msc4115_membership_on_events:
- return filtered
-
# Annotate the event with the user's membership after the event.
#
# Normally we just look in `state_after_event`, but if the event is an outlier
@@ -186,7 +180,7 @@ async def filter_events_for_client(
# Copy the event before updating the unsigned data: this shouldn't be persisted
# to the cache!
cloned = clone_event(filtered)
- cloned.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP] = user_membership
+ cloned.unsigned[EventUnsignedContentFields.MEMBERSHIP] = user_membership
return cloned
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index ceae40498e..1e5a1b0a4d 100644
--- a/tests/rest/client/test_retention.py
+++ b/tests/rest/client/test_retention.py
@@ -167,7 +167,6 @@ class RetentionTestCase(unittest.HomeserverTestCase):
storage_controllers,
self.user_id,
events,
- msc4115_membership_on_events=True,
)
)
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index 3e2100eab4..89cbe4e54b 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -336,7 +336,6 @@ class FilterEventsForClientTestCase(HomeserverTestCase):
self.hs.get_storage_controllers(),
"@joiner:test",
events_to_filter,
- msc4115_membership_on_events=True,
)
)
resident_filtered_events = self.get_success(
@@ -344,7 +343,6 @@ class FilterEventsForClientTestCase(HomeserverTestCase):
self.hs.get_storage_controllers(),
"@resident:test",
events_to_filter,
- msc4115_membership_on_events=True,
)
)
@@ -357,7 +355,7 @@ class FilterEventsForClientTestCase(HomeserverTestCase):
self.assertEqual(
["join", "join", "leave"],
[
- e.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP]
+ e.unsigned[EventUnsignedContentFields.MEMBERSHIP]
for e in joiner_filtered_events
],
)
@@ -379,7 +377,7 @@ class FilterEventsForClientTestCase(HomeserverTestCase):
self.assertEqual(
["join", "join", "join", "join", "join"],
[
- e.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP]
+ e.unsigned[EventUnsignedContentFields.MEMBERSHIP]
for e in resident_filtered_events
],
)
@@ -441,7 +439,6 @@ class FilterEventsOutOfBandEventsForClientTestCase(
self.hs.get_storage_controllers(),
"@user:test",
[invite_event, reject_event],
- msc4115_membership_on_events=True,
)
)
self.assertEqual(
@@ -451,7 +448,7 @@ class FilterEventsOutOfBandEventsForClientTestCase(
self.assertEqual(
["invite", "leave"],
[
- e.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP]
+ e.unsigned[EventUnsignedContentFields.MEMBERSHIP]
for e in filtered_events
],
)
@@ -463,7 +460,6 @@ class FilterEventsOutOfBandEventsForClientTestCase(
self.hs.get_storage_controllers(),
"@other:test",
[invite_event, reject_event],
- msc4115_membership_on_events=True,
)
),
[],
From 3aae60f17b97078b2fd4bde64be063f9d34c6352 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 14 Jun 2024 11:14:56 +0100
Subject: [PATCH 174/503] Enable cross-signing key upload without UIA (#17284)
Per MSC3967, which is now stable, we should not require UIA when
uploading cross-signing keys for the first time.
Fixes: #17227
---
changelog.d/17284.feature | 1 +
synapse/config/experimental.py | 3 -
synapse/rest/admin/experimental_features.py | 1 -
synapse/rest/client/keys.py | 75 ++++++++-------------
tests/handlers/test_oauth_delegation.py | 2 +
tests/rest/admin/test_admin.py | 4 --
tests/rest/client/test_keys.py | 65 ------------------
7 files changed, 30 insertions(+), 121 deletions(-)
create mode 100644 changelog.d/17284.feature
diff --git a/changelog.d/17284.feature b/changelog.d/17284.feature
new file mode 100644
index 0000000000..015d925e7c
--- /dev/null
+++ b/changelog.d/17284.feature
@@ -0,0 +1 @@
+Do not require user-interactive authentication for uploading cross-signing keys for the first time, per MSC3967.
\ No newline at end of file
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index d9ad5fc32d..24546171e5 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -393,9 +393,6 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False)
- # MSC3967: Do not require UIA when first uploading cross signing keys
- self.msc3967_enabled = experimental.get("msc3967_enabled", False)
-
# MSC3861: Matrix architecture change to delegate authentication via OIDC
try:
self.msc3861 = MSC3861(**experimental.get("msc3861", {}))
diff --git a/synapse/rest/admin/experimental_features.py b/synapse/rest/admin/experimental_features.py
index 52eb9e62db..c5a00c490c 100644
--- a/synapse/rest/admin/experimental_features.py
+++ b/synapse/rest/admin/experimental_features.py
@@ -41,7 +41,6 @@ class ExperimentalFeature(str, Enum):
MSC3026 = "msc3026"
MSC3881 = "msc3881"
- MSC3967 = "msc3967"
class ExperimentalFeaturesRestServlet(RestServlet):
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index 306db07b86..67de634eab 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -382,44 +382,35 @@ class SigningKeyUploadServlet(RestServlet):
master_key_updatable_without_uia,
) = await self.e2e_keys_handler.check_cross_signing_setup(user_id)
- # Before MSC3967 we required UIA both when setting up cross signing for the
- # first time and when resetting the device signing key. With MSC3967 we only
- # require UIA when resetting cross-signing, and not when setting up the first
- # time. Because there is no UIA in MSC3861, for now we throw an error if the
- # user tries to reset the device signing key when MSC3861 is enabled, but allow
- # first-time setup.
- if self.hs.config.experimental.msc3861.enabled:
- # The auth service has to explicitly mark the master key as replaceable
- # without UIA to reset the device signing key with MSC3861.
- if is_cross_signing_setup and not master_key_updatable_without_uia:
- config = self.hs.config.experimental.msc3861
- if config.account_management_url is not None:
- url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset"
- else:
- url = config.issuer
+ # Resending exactly the same keys should just 200 OK without doing a UIA prompt.
+ keys_are_different = await self.e2e_keys_handler.has_different_keys(
+ user_id, body
+ )
+ if not keys_are_different:
+ return 200, {}
- raise SynapseError(
- HTTPStatus.NOT_IMPLEMENTED,
- "To reset your end-to-end encryption cross-signing identity, "
- f"you first need to approve it at {url} and then try again.",
- Codes.UNRECOGNIZED,
- )
- # But first-time setup is fine
+ # The keys are different; is x-signing set up? If no, then this is first-time
+ # setup, and that is allowed without UIA, per MSC3967.
+ # If yes, then we need to authenticate the change.
+ if is_cross_signing_setup:
+ # With MSC3861, UIA is not possible. Instead, the auth service has to
+ # explicitly mark the master key as replaceable.
+ if self.hs.config.experimental.msc3861.enabled:
+ if not master_key_updatable_without_uia:
+ config = self.hs.config.experimental.msc3861
+ if config.account_management_url is not None:
+ url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset"
+ else:
+ url = config.issuer
- elif self.hs.config.experimental.msc3967_enabled:
- # MSC3967 allows this endpoint to 200 OK for idempotency. Resending exactly the same
- # keys should just 200 OK without doing a UIA prompt.
- keys_are_different = await self.e2e_keys_handler.has_different_keys(
- user_id, body
- )
- if not keys_are_different:
- # FIXME: we do not fallthrough to upload_signing_keys_for_user because confusingly
- # if we do, we 500 as it looks like it tries to INSERT the same key twice, causing a
- # unique key constraint violation. This sounds like a bug?
- return 200, {}
- # the keys are different, is x-signing set up? If no, then the keys don't exist which is
- # why they are different. If yes, then we need to UIA to change them.
- if is_cross_signing_setup:
+ raise SynapseError(
+ HTTPStatus.NOT_IMPLEMENTED,
+ "To reset your end-to-end encryption cross-signing identity, "
+ f"you first need to approve it at {url} and then try again.",
+ Codes.UNRECOGNIZED,
+ )
+ else:
+ # Without MSC3861, we require UIA.
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
@@ -428,18 +419,6 @@ class SigningKeyUploadServlet(RestServlet):
# Do not allow skipping of UIA auth.
can_skip_ui_auth=False,
)
- # Otherwise we don't require UIA since we are setting up cross signing for first time
- else:
- # Previous behaviour is to always require UIA but allow it to be skipped
- await self.auth_handler.validate_user_via_ui_auth(
- requester,
- request,
- body,
- "add a device signing key to your account",
- # Allow skipping of UI auth since this is frequently called directly
- # after login and it is silly to ask users to re-auth immediately.
- can_skip_ui_auth=True,
- )
result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body)
return 200, result
diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py
index 9387d07de8..036c539db2 100644
--- a/tests/handlers/test_oauth_delegation.py
+++ b/tests/handlers/test_oauth_delegation.py
@@ -541,6 +541,8 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
self.assertEqual(channel.code, 200, channel.json_body)
+ # Try uploading *different* keys; it should cause a 501 error.
+ keys_upload_body = self.make_device_keys(USER_ID, DEVICE)
channel = self.make_request(
"POST",
"/_matrix/client/v3/keys/device_signing/upload",
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 22106eb786..5f6f7213b3 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -435,10 +435,6 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
True,
channel.json_body["features"]["msc3881"],
)
- self.assertEqual(
- False,
- channel.json_body["features"]["msc3967"],
- )
# test nothing blows up if you try to disable a feature that isn't already enabled
url = f"{self.url}/{self.other_user}"
diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py
index 5f0c005576..8bbd109092 100644
--- a/tests/rest/client/test_keys.py
+++ b/tests/rest/client/test_keys.py
@@ -155,71 +155,6 @@ class KeyQueryTestCase(unittest.HomeserverTestCase):
}
def test_device_signing_with_uia(self) -> None:
- """Device signing key upload requires UIA."""
- password = "wonderland"
- device_id = "ABCDEFGHI"
- alice_id = self.register_user("alice", password)
- alice_token = self.login("alice", password, device_id=device_id)
-
- content = self.make_device_keys(alice_id, device_id)
-
- channel = self.make_request(
- "POST",
- "/_matrix/client/v3/keys/device_signing/upload",
- content,
- alice_token,
- )
-
- self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
- # Grab the session
- session = channel.json_body["session"]
- # Ensure that flows are what is expected.
- self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"])
-
- # add UI auth
- content["auth"] = {
- "type": "m.login.password",
- "identifier": {"type": "m.id.user", "user": alice_id},
- "password": password,
- "session": session,
- }
-
- channel = self.make_request(
- "POST",
- "/_matrix/client/v3/keys/device_signing/upload",
- content,
- alice_token,
- )
-
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
-
- @override_config({"ui_auth": {"session_timeout": "15m"}})
- def test_device_signing_with_uia_session_timeout(self) -> None:
- """Device signing key upload requires UIA buy passes with grace period."""
- password = "wonderland"
- device_id = "ABCDEFGHI"
- alice_id = self.register_user("alice", password)
- alice_token = self.login("alice", password, device_id=device_id)
-
- content = self.make_device_keys(alice_id, device_id)
-
- channel = self.make_request(
- "POST",
- "/_matrix/client/v3/keys/device_signing/upload",
- content,
- alice_token,
- )
-
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
-
- @override_config(
- {
- "experimental_features": {"msc3967_enabled": True},
- "ui_auth": {"session_timeout": "15s"},
- }
- )
- def test_device_signing_with_msc3967(self) -> None:
- """Device signing key follows MSC3967 behaviour when enabled."""
password = "wonderland"
device_id = "ABCDEFGHI"
alice_id = self.register_user("alice", password)
From a3cb24475577c31fa2c16a26fccddb76daf2f6ae Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 14 Jun 2024 16:40:29 +0100
Subject: [PATCH 175/503] Automatically apply SQL for inconsistent sequence
(#17305)
Rather than forcing the server operator to apply the SQL manually.
This should be safe, as there should be only one writer for these
sequences.
---
changelog.d/17305.misc | 1 +
docs/postgres.md | 10 --------
synapse/storage/util/sequence.py | 37 ++++++++++++-----------------
tests/storage/test_id_generators.py | 14 +++++++----
4 files changed, 25 insertions(+), 37 deletions(-)
create mode 100644 changelog.d/17305.misc
diff --git a/changelog.d/17305.misc b/changelog.d/17305.misc
new file mode 100644
index 0000000000..cb6b9504b3
--- /dev/null
+++ b/changelog.d/17305.misc
@@ -0,0 +1 @@
+When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL.
diff --git a/docs/postgres.md b/docs/postgres.md
index 4b2ba38275..d06f0cda10 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -255,13 +255,3 @@ however extreme care must be taken to avoid database corruption.
Note that the above may fail with an error about duplicate rows if corruption
has already occurred, and such duplicate rows will need to be manually removed.
-
-### Fixing inconsistent sequences error
-
-Synapse uses Postgres sequences to generate IDs for various tables. A sequence
-and associated table can get out of sync if, for example, Synapse has been
-downgraded and then upgraded again.
-
-To fix the issue shut down Synapse (including any and all workers) and run the
-SQL command included in the error message. Once done Synapse should start
-successfully.
diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py
index f57e7ec41c..c4c0602b28 100644
--- a/synapse/storage/util/sequence.py
+++ b/synapse/storage/util/sequence.py
@@ -36,21 +36,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-_INCONSISTENT_SEQUENCE_ERROR = """
-Postgres sequence '%(seq)s' is inconsistent with associated
-table '%(table)s'. This can happen if Synapse has been downgraded and
-then upgraded again, or due to a bad migration.
-
-To fix this error, shut down Synapse (including any and all workers)
-and run the following SQL:
-
- SELECT setval('%(seq)s', (
- %(max_id_sql)s
- ));
-
-See docs/postgres.md for more information.
-"""
-
_INCONSISTENT_STREAM_ERROR = """
Postgres sequence '%(seq)s' is inconsistent with associated stream position
of '%(stream_name)s' in the 'stream_positions' table.
@@ -169,25 +154,33 @@ class PostgresSequenceGenerator(SequenceGenerator):
if row:
max_in_stream_positions = row[0]
- txn.close()
-
# If `is_called` is False then `last_value` is actually the value that
# will be generated next, so we decrement to get the true "last value".
if not is_called:
last_value -= 1
if max_stream_id > last_value:
+ # The sequence is lagging behind the tables. This is probably due to
+ # rolling back to a version before the sequence was used and then
+ # forwards again. We resolve this by setting the sequence to the
+ # right value.
logger.warning(
- "Postgres sequence %s is behind table %s: %d < %d",
+ "Postgres sequence %s is behind table %s: %d < %d. Updating sequence.",
self._sequence_name,
table,
last_value,
max_stream_id,
)
- raise IncorrectDatabaseSetup(
- _INCONSISTENT_SEQUENCE_ERROR
- % {"seq": self._sequence_name, "table": table, "max_id_sql": table_sql}
- )
+
+ sql = f"""
+ SELECT setval('{self._sequence_name}', GREATEST(
+ (SELECT last_value FROM {self._sequence_name}),
+ ({table_sql})
+ ));
+ """
+ txn.execute(sql)
+
+ txn.close()
# If we have values in the stream positions table then they have to be
# less than or equal to `last_value`
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index f0307252f3..9be2923e6f 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -28,7 +28,6 @@ from synapse.storage.database import (
LoggingDatabaseConnection,
LoggingTransaction,
)
-from synapse.storage.engines import IncorrectDatabaseSetup
from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.storage.util.sequence import (
@@ -525,7 +524,7 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
self.assertEqual(id_gen_5.get_current_token_for_writer("third"), 6)
def test_sequence_consistency(self) -> None:
- """Test that we error out if the table and sequence diverges."""
+ """Test that we correct the sequence if the table and sequence diverges."""
# Prefill with some rows
self._insert_row_with_id("master", 3)
@@ -536,9 +535,14 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
self.get_success(self.db_pool.runInteraction("_insert", _insert))
- # Creating the ID gen should error
- with self.assertRaises(IncorrectDatabaseSetup):
- self._create_id_generator("first")
+ # Creating the ID gen should now fix the inconsistency
+ id_gen = self._create_id_generator()
+
+ async def _get_next_async() -> None:
+ async with id_gen.get_next() as stream_id:
+ self.assertEqual(stream_id, 27)
+
+ self.get_success(_get_next_async())
def test_minimal_local_token(self) -> None:
self._insert_rows("first", 3)
From 12d7303707583c27c251176246876f263f4e1de2 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Mon, 17 Jun 2024 13:21:51 +0200
Subject: [PATCH 176/503] Use the release branch for sytest in release-branch
PRs (#17306)
---
.github/workflows/tests.yml | 3 +++
changelog.d/17306.misc | 1 +
2 files changed, 4 insertions(+)
create mode 100644 changelog.d/17306.misc
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 20afe311fe..cdd881fbe1 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -479,6 +479,9 @@ jobs:
volumes:
- ${{ github.workspace }}:/src
env:
+ # If this is a pull request to a release branch, use that branch as default branch for sytest, else use develop
+ # This works because the release script always creates a branch on the sytest repo with the same name as the release branch
+ SYTEST_DEFAULT_BRANCH: ${{ startsWith(github.base_ref, 'release-') && github.base_ref || 'develop' }}
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.job.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }}
diff --git a/changelog.d/17306.misc b/changelog.d/17306.misc
new file mode 100644
index 0000000000..88ada5f671
--- /dev/null
+++ b/changelog.d/17306.misc
@@ -0,0 +1 @@
+Use the release branch for sytest in release-branch PRs.
From f983a77ab070eac03f0eafe8dc6b990c43c3e89b Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Mon, 17 Jun 2024 13:50:00 +0200
Subject: [PATCH 177/503] Set our own stream position from the current sequence
value on startup (#17309)
---
changelog.d/17309.misc | 1 +
synapse/storage/util/id_generators.py | 23 +-
tests/storage/test_id_generators.py | 301 +++++++++++---------------
3 files changed, 147 insertions(+), 178 deletions(-)
create mode 100644 changelog.d/17309.misc
diff --git a/changelog.d/17309.misc b/changelog.d/17309.misc
new file mode 100644
index 0000000000..cb6b9504b3
--- /dev/null
+++ b/changelog.d/17309.misc
@@ -0,0 +1 @@
+When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL.
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 59c8e05c39..48f88a6f8a 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -276,9 +276,6 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
# no active writes in progress.
self._max_position_of_local_instance = self._max_seen_allocated_stream_id
- # This goes and fills out the above state from the database.
- self._load_current_ids(db_conn, tables)
-
self._sequence_gen = build_sequence_generator(
db_conn=db_conn,
database_engine=db.engine,
@@ -303,6 +300,13 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
positive=positive,
)
+ # This goes and fills out the above state from the database.
+ # This may read from the PostgreSQL sequence, and
+ # SequenceGenerator.check_consistency might have fixed up the sequence, which
+ # means the SequenceGenerator needs to be set up before we read the value from
+ # the sequence.
+ self._load_current_ids(db_conn, tables, sequence_name)
+
self._max_seen_allocated_stream_id = max(
self._current_positions.values(), default=1
)
@@ -327,6 +331,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
self,
db_conn: LoggingDatabaseConnection,
tables: List[Tuple[str, str, str]],
+ sequence_name: str,
) -> None:
cur = db_conn.cursor(txn_name="_load_current_ids")
@@ -360,6 +365,18 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
if instance in self._writers
}
+ # If we're a writer, we can assume we're at the end of the stream
+ # Usually, we would get that from the stream_positions, but in some cases,
+ # like if we rolled back Synapse, the stream_positions table might not be up to
+ # date. If we're using Postgres for the sequences, we can just use the current
+ # sequence value as our own position.
+ if self._instance_name in self._writers:
+ if isinstance(self._db.engine, PostgresEngine):
+ cur.execute(f"SELECT last_value FROM {sequence_name}")
+ row = cur.fetchone()
+ assert row is not None
+ self._current_positions[self._instance_name] = row[0]
+
# We set the `_persisted_upto_position` to be the minimum of all current
# positions. If empty we use the max stream ID from the DB table.
min_stream_id = min(self._current_positions.values(), default=None)
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index 9be2923e6f..12b89cecb6 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -18,7 +18,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
-from typing import List, Optional
+from typing import Dict, List, Optional
from twisted.test.proto_helpers import MemoryReactor
@@ -42,9 +42,13 @@ from tests.utils import USE_POSTGRES_FOR_TESTS
class MultiWriterIdGeneratorBase(HomeserverTestCase):
+ positive: bool = True
+ tables: List[str] = ["foobar"]
+
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.db_pool: DatabasePool = self.store.db_pool
+ self.instances: Dict[str, MultiWriterIdGenerator] = {}
self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
@@ -57,18 +61,22 @@ class MultiWriterIdGeneratorBase(HomeserverTestCase):
if USE_POSTGRES_FOR_TESTS:
txn.execute("CREATE SEQUENCE foobar_seq")
- txn.execute(
- """
- CREATE TABLE foobar (
- stream_id BIGINT NOT NULL,
- instance_name TEXT NOT NULL,
- data TEXT
- );
- """
- )
+ for table in self.tables:
+ txn.execute(
+ """
+ CREATE TABLE %s (
+ stream_id BIGINT NOT NULL,
+ instance_name TEXT NOT NULL,
+ data TEXT
+ );
+ """
+ % (table,)
+ )
def _create_id_generator(
- self, instance_name: str = "master", writers: Optional[List[str]] = None
+ self,
+ instance_name: str = "master",
+ writers: Optional[List[str]] = None,
) -> MultiWriterIdGenerator:
def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
return MultiWriterIdGenerator(
@@ -77,36 +85,93 @@ class MultiWriterIdGeneratorBase(HomeserverTestCase):
notifier=self.hs.get_replication_notifier(),
stream_name="test_stream",
instance_name=instance_name,
- tables=[("foobar", "instance_name", "stream_id")],
+ tables=[(table, "instance_name", "stream_id") for table in self.tables],
sequence_name="foobar_seq",
writers=writers or ["master"],
+ positive=self.positive,
)
- return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
+ self.instances[instance_name] = self.get_success_or_raise(
+ self.db_pool.runWithConnection(_create)
+ )
+ return self.instances[instance_name]
- def _insert_rows(self, instance_name: str, number: int) -> None:
+ def _replicate(self, instance_name: str) -> None:
+ """Simulate a replication event for the given instance."""
+
+ writer = self.instances[instance_name]
+ token = writer.get_current_token_for_writer(instance_name)
+ for generator in self.instances.values():
+ if writer != generator:
+ generator.advance(instance_name, token)
+
+ def _replicate_all(self) -> None:
+ """Simulate a replication event for all instances."""
+
+ for instance_name in self.instances:
+ self._replicate(instance_name)
+
+ def _insert_row(
+ self, instance_name: str, stream_id: int, table: Optional[str] = None
+ ) -> None:
+ """Insert one row as the given instance with given stream_id."""
+
+ if table is None:
+ table = self.tables[0]
+
+ factor = 1 if self.positive else -1
+
+ def _insert(txn: LoggingTransaction) -> None:
+ txn.execute(
+ "INSERT INTO %s VALUES (?, ?)" % (table,),
+ (
+ stream_id,
+ instance_name,
+ ),
+ )
+ txn.execute(
+ """
+ INSERT INTO stream_positions VALUES ('test_stream', ?, ?)
+ ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ?
+ """,
+ (instance_name, stream_id * factor, stream_id * factor),
+ )
+
+ self.get_success(self.db_pool.runInteraction("_insert_row", _insert))
+
+ def _insert_rows(
+ self,
+ instance_name: str,
+ number: int,
+ table: Optional[str] = None,
+ update_stream_table: bool = True,
+ ) -> None:
"""Insert N rows as the given instance, inserting with stream IDs pulled
from the postgres sequence.
"""
+ if table is None:
+ table = self.tables[0]
+
+ factor = 1 if self.positive else -1
+
def _insert(txn: LoggingTransaction) -> None:
for _ in range(number):
next_val = self.seq_gen.get_next_id_txn(txn)
txn.execute(
- "INSERT INTO foobar (stream_id, instance_name) VALUES (?, ?)",
- (
- next_val,
- instance_name,
- ),
+ "INSERT INTO %s (stream_id, instance_name) VALUES (?, ?)"
+ % (table,),
+ (next_val, instance_name),
)
- txn.execute(
- """
- INSERT INTO stream_positions VALUES ('test_stream', ?, ?)
- ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ?
- """,
- (instance_name, next_val, next_val),
- )
+ if update_stream_table:
+ txn.execute(
+ """
+ INSERT INTO stream_positions VALUES ('test_stream', ?, ?)
+ ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ?
+ """,
+ (instance_name, next_val * factor, next_val * factor),
+ )
self.get_success(self.db_pool.runInteraction("_insert_rows", _insert))
@@ -353,7 +418,9 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
id_gen = self._create_id_generator("first", writers=["first", "second"])
- self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5})
+ # When the writer is created, it assumes its own position is the current head of
+ # the sequence
+ self.assertEqual(id_gen.get_positions(), {"first": 5, "second": 5})
self.assertEqual(id_gen.get_persisted_upto_position(), 5)
@@ -375,11 +442,13 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
correctly.
"""
self._insert_rows("first", 3)
- self._insert_rows("second", 4)
-
first_id_gen = self._create_id_generator("first", writers=["first", "second"])
+
+ self._insert_rows("second", 4)
second_id_gen = self._create_id_generator("second", writers=["first", "second"])
+ self._replicate_all()
+
self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7})
self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7)
self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7)
@@ -398,6 +467,9 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
self.assertEqual(
first_id_gen.get_positions(), {"first": 3, "second": 7}
)
+ self.assertEqual(
+ second_id_gen.get_positions(), {"first": 3, "second": 7}
+ )
self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)
self.get_success(_get_next_async())
@@ -432,11 +504,11 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
"""
# Insert some rows for two out of three of the ID gens.
self._insert_rows("first", 3)
- self._insert_rows("second", 4)
-
first_id_gen = self._create_id_generator(
"first", writers=["first", "second", "third"]
)
+
+ self._insert_rows("second", 4)
second_id_gen = self._create_id_generator(
"second", writers=["first", "second", "third"]
)
@@ -444,6 +516,8 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
"third", writers=["first", "second", "third"]
)
+ self._replicate_all()
+
self.assertEqual(
first_id_gen.get_positions(), {"first": 3, "second": 7, "third": 7}
)
@@ -546,11 +620,13 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
def test_minimal_local_token(self) -> None:
self._insert_rows("first", 3)
- self._insert_rows("second", 4)
-
first_id_gen = self._create_id_generator("first", writers=["first", "second"])
+
+ self._insert_rows("second", 4)
second_id_gen = self._create_id_generator("second", writers=["first", "second"])
+ self._replicate_all()
+
self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7})
self.assertEqual(first_id_gen.get_minimal_local_current_token(), 3)
@@ -562,15 +638,17 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
token when there are no writes.
"""
self._insert_rows("first", 3)
- self._insert_rows("second", 4)
-
first_id_gen = self._create_id_generator(
"first", writers=["first", "second", "third"]
)
+
+ self._insert_rows("second", 4)
second_id_gen = self._create_id_generator(
"second", writers=["first", "second", "third"]
)
+ self._replicate_all()
+
self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 7)
self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7)
self.assertEqual(second_id_gen.get_current_token(), 7)
@@ -609,68 +687,13 @@ class WorkerMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
self.assertEqual(second_id_gen.get_current_token(), 7)
-class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
+class BackwardsMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
"""Tests MultiWriterIdGenerator that produce *negative* stream IDs."""
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- self.store = hs.get_datastores().main
- self.db_pool: DatabasePool = self.store.db_pool
-
- self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
-
- def _setup_db(self, txn: LoggingTransaction) -> None:
- txn.execute("CREATE SEQUENCE foobar_seq")
- txn.execute(
- """
- CREATE TABLE foobar (
- stream_id BIGINT NOT NULL,
- instance_name TEXT NOT NULL,
- data TEXT
- );
- """
- )
-
- def _create_id_generator(
- self, instance_name: str = "master", writers: Optional[List[str]] = None
- ) -> MultiWriterIdGenerator:
- def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
- return MultiWriterIdGenerator(
- conn,
- self.db_pool,
- notifier=self.hs.get_replication_notifier(),
- stream_name="test_stream",
- instance_name=instance_name,
- tables=[("foobar", "instance_name", "stream_id")],
- sequence_name="foobar_seq",
- writers=writers or ["master"],
- positive=False,
- )
-
- return self.get_success(self.db_pool.runWithConnection(_create))
-
- def _insert_row(self, instance_name: str, stream_id: int) -> None:
- """Insert one row as the given instance with given stream_id."""
-
- def _insert(txn: LoggingTransaction) -> None:
- txn.execute(
- "INSERT INTO foobar VALUES (?, ?)",
- (
- stream_id,
- instance_name,
- ),
- )
- txn.execute(
- """
- INSERT INTO stream_positions VALUES ('test_stream', ?, ?)
- ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ?
- """,
- (instance_name, -stream_id, -stream_id),
- )
-
- self.get_success(self.db_pool.runInteraction("_insert_row", _insert))
+ positive = False
def test_single_instance(self) -> None:
"""Test that reads and writes from a single process are handled
@@ -716,7 +739,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
async def _get_next_async() -> None:
async with id_gen_1.get_next() as stream_id:
self._insert_row("first", stream_id)
- id_gen_2.advance("first", stream_id)
+ self._replicate("first")
self.get_success(_get_next_async())
@@ -728,7 +751,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
async def _get_next_async2() -> None:
async with id_gen_2.get_next() as stream_id:
self._insert_row("second", stream_id)
- id_gen_1.advance("second", stream_id)
+ self._replicate("second")
self.get_success(_get_next_async2())
@@ -738,98 +761,26 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(id_gen_2.get_persisted_upto_position(), -2)
-class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
+class MultiTableMultiWriterIdGeneratorTestCase(MultiWriterIdGeneratorBase):
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- self.store = hs.get_datastores().main
- self.db_pool: DatabasePool = self.store.db_pool
-
- self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
-
- def _setup_db(self, txn: LoggingTransaction) -> None:
- txn.execute("CREATE SEQUENCE foobar_seq")
- txn.execute(
- """
- CREATE TABLE foobar1 (
- stream_id BIGINT NOT NULL,
- instance_name TEXT NOT NULL,
- data TEXT
- );
- """
- )
-
- txn.execute(
- """
- CREATE TABLE foobar2 (
- stream_id BIGINT NOT NULL,
- instance_name TEXT NOT NULL,
- data TEXT
- );
- """
- )
-
- def _create_id_generator(
- self, instance_name: str = "master", writers: Optional[List[str]] = None
- ) -> MultiWriterIdGenerator:
- def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
- return MultiWriterIdGenerator(
- conn,
- self.db_pool,
- notifier=self.hs.get_replication_notifier(),
- stream_name="test_stream",
- instance_name=instance_name,
- tables=[
- ("foobar1", "instance_name", "stream_id"),
- ("foobar2", "instance_name", "stream_id"),
- ],
- sequence_name="foobar_seq",
- writers=writers or ["master"],
- )
-
- return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
-
- def _insert_rows(
- self,
- table: str,
- instance_name: str,
- number: int,
- update_stream_table: bool = True,
- ) -> None:
- """Insert N rows as the given instance, inserting with stream IDs pulled
- from the postgres sequence.
- """
-
- def _insert(txn: LoggingTransaction) -> None:
- for _ in range(number):
- txn.execute(
- "INSERT INTO %s VALUES (nextval('foobar_seq'), ?)" % (table,),
- (instance_name,),
- )
- if update_stream_table:
- txn.execute(
- """
- INSERT INTO stream_positions VALUES ('test_stream', ?, lastval())
- ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = lastval()
- """,
- (instance_name,),
- )
-
- self.get_success(self.db_pool.runInteraction("_insert_rows", _insert))
+ tables = ["foobar1", "foobar2"]
def test_load_existing_stream(self) -> None:
"""Test creating ID gens with multiple tables that have rows from after
the position in `stream_positions` table.
"""
- self._insert_rows("foobar1", "first", 3)
- self._insert_rows("foobar2", "second", 3)
- self._insert_rows("foobar2", "second", 1, update_stream_table=False)
-
+ self._insert_rows("first", 3, table="foobar1")
first_id_gen = self._create_id_generator("first", writers=["first", "second"])
+
+ self._insert_rows("second", 3, table="foobar2")
+ self._insert_rows("second", 1, table="foobar2", update_stream_table=False)
second_id_gen = self._create_id_generator("second", writers=["first", "second"])
- self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 6})
+ self._replicate_all()
+
+ self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7})
self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7)
self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7)
self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)
From edfb7aad3ab9aebcd08f6b36707b6bd929247028 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Mon, 17 Jun 2024 14:07:49 +0200
Subject: [PATCH 178/503] 1.109.0rc3
---
CHANGES.md | 13 +++++++++++++
changelog.d/17305.misc | 1 -
changelog.d/17306.misc | 1 -
changelog.d/17309.misc | 1 -
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
6 files changed, 20 insertions(+), 4 deletions(-)
delete mode 100644 changelog.d/17305.misc
delete mode 100644 changelog.d/17306.misc
delete mode 100644 changelog.d/17309.misc
diff --git a/CHANGES.md b/CHANGES.md
index 61c6170c62..94ee661151 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,16 @@
+# Synapse 1.109.0rc3 (2024-06-17)
+
+### Bugfixes
+
+- When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL. ([\#17305](https://github.com/element-hq/synapse/issues/17305), [\#17309](https://github.com/element-hq/synapse/issues/17309))
+
+### Internal Changes
+
+- Use the release branch for sytest in release-branch PRs. ([\#17306](https://github.com/element-hq/synapse/issues/17306))
+
+
+
+
# Synapse 1.109.0rc2 (2024-06-11)
### Bugfixes
diff --git a/changelog.d/17305.misc b/changelog.d/17305.misc
deleted file mode 100644
index cb6b9504b3..0000000000
--- a/changelog.d/17305.misc
+++ /dev/null
@@ -1 +0,0 @@
-When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL.
diff --git a/changelog.d/17306.misc b/changelog.d/17306.misc
deleted file mode 100644
index 88ada5f671..0000000000
--- a/changelog.d/17306.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use the release branch for sytest in release-branch PRs.
diff --git a/changelog.d/17309.misc b/changelog.d/17309.misc
deleted file mode 100644
index cb6b9504b3..0000000000
--- a/changelog.d/17309.misc
+++ /dev/null
@@ -1 +0,0 @@
-When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL.
diff --git a/debian/changelog b/debian/changelog
index ac2536749d..e5b7809bcf 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.109.0~rc3) stable; urgency=medium
+
+ * New synapse release 1.109.0rc3.
+
+ -- Synapse Packaging team Mon, 17 Jun 2024 12:05:24 +0000
+
matrix-synapse-py3 (1.109.0~rc2) stable; urgency=medium
* New synapse release 1.109.0rc2.
diff --git a/pyproject.toml b/pyproject.toml
index f4f7f70603..7567b1de40 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.109.0rc2"
+version = "1.109.0rc3"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From e5b8a3e37f10168953124282c296821b9d9d81ad Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 17 Jun 2024 11:27:14 -0500
Subject: [PATCH 179/503] Add `stream_ordering` sort to Sliding Sync `/sync`
(#17293)
Sort is no longer configurable and we always sort rooms by the `stream_ordering` of the last event in the room or the point where the user can see up to in cases of leave/ban/invite/knock.
---
changelog.d/17293.feature | 1 +
synapse/handlers/message.py | 2 +-
synapse/handlers/sliding_sync.py | 158 +++++++++++++---
synapse/handlers/sync.py | 10 +-
synapse/storage/databases/main/stream.py | 42 ++++-
synapse/types/rest/client/__init__.py | 24 +--
tests/handlers/test_sliding_sync.py | 226 +++++++++++++++++++----
tests/rest/client/test_sync.py | 61 +++++-
tests/storage/test_stream.py | 56 +++---
9 files changed, 459 insertions(+), 121 deletions(-)
create mode 100644 changelog.d/17293.feature
diff --git a/changelog.d/17293.feature b/changelog.d/17293.feature
new file mode 100644
index 0000000000..60ca7721a0
--- /dev/null
+++ b/changelog.d/17293.feature
@@ -0,0 +1 @@
+Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 721ef04f41..16d01efc67 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -201,7 +201,7 @@ class MessageHandler:
if at_token:
last_event_id = (
- await self.store.get_last_event_in_room_before_stream_ordering(
+ await self.store.get_last_event_id_in_room_before_stream_ordering(
room_id,
end_token=at_token.room_key,
)
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 78fb66d6e2..b84cf67f7d 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -18,13 +18,20 @@
#
#
import logging
-from typing import TYPE_CHECKING, AbstractSet, Dict, List, Optional
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from immutabledict import immutabledict
from synapse.api.constants import AccountDataTypes, Membership
from synapse.events import EventBase
-from synapse.types import Requester, RoomStreamToken, StreamToken, UserID
+from synapse.storage.roommember import RoomsForUser
+from synapse.types import (
+ PersistedEventPosition,
+ Requester,
+ RoomStreamToken,
+ StreamToken,
+ UserID,
+)
from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
if TYPE_CHECKING:
@@ -33,6 +40,27 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+def convert_event_to_rooms_for_user(event: EventBase) -> RoomsForUser:
+ """
+ Quick helper to convert an event to a `RoomsForUser` object.
+ """
+ # These fields should be present for all persisted events
+ assert event.internal_metadata.stream_ordering is not None
+ assert event.internal_metadata.instance_name is not None
+
+ return RoomsForUser(
+ room_id=event.room_id,
+ sender=event.sender,
+ membership=event.membership,
+ event_id=event.event_id,
+ event_pos=PersistedEventPosition(
+ event.internal_metadata.instance_name,
+ event.internal_metadata.stream_ordering,
+ ),
+ room_version_id=event.room_version.identifier,
+ )
+
+
def filter_membership_for_sync(*, membership: str, user_id: str, sender: str) -> bool:
"""
Returns True if the membership event should be included in the sync response,
@@ -169,26 +197,28 @@ class SlidingSyncHandler:
# See https://github.com/matrix-org/matrix-doc/issues/1144
raise NotImplementedError()
- # Get all of the room IDs that the user should be able to see in the sync
- # response
- room_id_set = await self.get_sync_room_ids_for_user(
- sync_config.user,
- from_token=from_token,
- to_token=to_token,
- )
-
# Assemble sliding window lists
lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
if sync_config.lists:
+ # Get all of the room IDs that the user should be able to see in the sync
+ # response
+ sync_room_map = await self.get_sync_room_ids_for_user(
+ sync_config.user,
+ from_token=from_token,
+ to_token=to_token,
+ )
+
for list_key, list_config in sync_config.lists.items():
# Apply filters
- filtered_room_ids = room_id_set
+ filtered_sync_room_map = sync_room_map
if list_config.filters is not None:
- filtered_room_ids = await self.filter_rooms(
- sync_config.user, room_id_set, list_config.filters, to_token
+ filtered_sync_room_map = await self.filter_rooms(
+ sync_config.user, sync_room_map, list_config.filters, to_token
)
- # TODO: Apply sorts
- sorted_room_ids = sorted(filtered_room_ids)
+
+ sorted_room_info = await self.sort_rooms(
+ filtered_sync_room_map, to_token
+ )
ops: List[SlidingSyncResult.SlidingWindowList.Operation] = []
if list_config.ranges:
@@ -197,12 +227,17 @@ class SlidingSyncHandler:
SlidingSyncResult.SlidingWindowList.Operation(
op=OperationType.SYNC,
range=range,
- room_ids=sorted_room_ids[range[0] : range[1]],
+ room_ids=[
+ room_id
+ for room_id, _ in sorted_room_info[
+ range[0] : range[1]
+ ]
+ ],
)
)
lists[list_key] = SlidingSyncResult.SlidingWindowList(
- count=len(sorted_room_ids),
+ count=len(sorted_room_info),
ops=ops,
)
@@ -219,7 +254,7 @@ class SlidingSyncHandler:
user: UserID,
to_token: StreamToken,
from_token: Optional[StreamToken] = None,
- ) -> AbstractSet[str]:
+ ) -> Dict[str, RoomsForUser]:
"""
Fetch room IDs that should be listed for this user in the sync response (the
full room list that will be filtered, sorted, and sliced).
@@ -237,11 +272,14 @@ class SlidingSyncHandler:
to tell when a room was forgotten at the moment so we can't factor it into the
from/to range.
-
Args:
user: User to fetch rooms for
to_token: The token to fetch rooms up to.
from_token: The point in the stream to sync from.
+
+ Returns:
+ A dictionary of room IDs that should be listed in the sync response along
+ with membership information in that room at the time of `to_token`.
"""
user_id = user.to_string()
@@ -261,11 +299,11 @@ class SlidingSyncHandler:
# If the user has never joined any rooms before, we can just return an empty list
if not room_for_user_list:
- return set()
+ return {}
# Our working list of rooms that can show up in the sync response
sync_room_id_set = {
- room_for_user.room_id
+ room_for_user.room_id: room_for_user
for room_for_user in room_for_user_list
if filter_membership_for_sync(
membership=room_for_user.membership,
@@ -415,7 +453,9 @@ class SlidingSyncHandler:
not was_last_membership_already_included
and should_prev_membership_be_included
):
- sync_room_id_set.add(room_id)
+ sync_room_id_set[room_id] = convert_event_to_rooms_for_user(
+ last_membership_change_after_to_token
+ )
# 1b) Remove rooms that the user joined (hasn't left) after the `to_token`
#
# For example, if the last membership event after the `to_token` is a "join"
@@ -426,7 +466,7 @@ class SlidingSyncHandler:
was_last_membership_already_included
and not should_prev_membership_be_included
):
- sync_room_id_set.discard(room_id)
+ del sync_room_id_set[room_id]
# 2) -----------------------------------------------------
# We fix-up newly_left rooms after the first fixup because it may have removed
@@ -461,25 +501,32 @@ class SlidingSyncHandler:
# include newly_left rooms because the last event that the user should see
# is their own leave event
if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
- sync_room_id_set.add(room_id)
+ sync_room_id_set[room_id] = convert_event_to_rooms_for_user(
+ last_membership_change_in_from_to_range
+ )
return sync_room_id_set
async def filter_rooms(
self,
user: UserID,
- room_id_set: AbstractSet[str],
+ sync_room_map: Dict[str, RoomsForUser],
filters: SlidingSyncConfig.SlidingSyncList.Filters,
to_token: StreamToken,
- ) -> AbstractSet[str]:
+ ) -> Dict[str, RoomsForUser]:
"""
Filter rooms based on the sync request.
Args:
user: User to filter rooms for
- room_id_set: Set of room IDs to filter down
+ sync_room_map: Dictionary of room IDs to sort along with membership
+ information in the room at the time of `to_token`.
filters: Filters to apply
to_token: We filter based on the state of the room at this token
+
+ Returns:
+ A filtered dictionary of room IDs along with membership information in the
+ room at the time of `to_token`.
"""
user_id = user.to_string()
@@ -488,7 +535,7 @@ class SlidingSyncHandler:
# TODO: Exclude partially stated rooms unless the `required_state` has
# `["m.room.member", "$LAZY"]`
- filtered_room_id_set = set(room_id_set)
+ filtered_room_id_set = set(sync_room_map.keys())
# Filter for Direct-Message (DM) rooms
if filters.is_dm is not None:
@@ -544,4 +591,57 @@ class SlidingSyncHandler:
if filters.not_tags:
raise NotImplementedError()
- return filtered_room_id_set
+ # Assemble a new sync room map but only with the `filtered_room_id_set`
+ return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set}
+
+ async def sort_rooms(
+ self,
+ sync_room_map: Dict[str, RoomsForUser],
+ to_token: StreamToken,
+ ) -> List[Tuple[str, RoomsForUser]]:
+ """
+ Sort by `stream_ordering` of the last event that the user should see in the
+ room. `stream_ordering` is unique so we get a stable sort.
+
+ Args:
+ sync_room_map: Dictionary of room IDs to sort along with membership
+ information in the room at the time of `to_token`.
+ to_token: We sort based on the events in the room at this token (<= `to_token`)
+
+ Returns:
+ A sorted list of room IDs by `stream_ordering` along with membership information.
+ """
+
+ # Assemble a map of room ID to the `stream_ordering` of the last activity that the
+ # user should see in the room (<= `to_token`)
+ last_activity_in_room_map: Dict[str, int] = {}
+ for room_id, room_for_user in sync_room_map.items():
+ # If they are fully-joined to the room, let's find the latest activity
+ # at/before the `to_token`.
+ if room_for_user.membership == Membership.JOIN:
+ last_event_result = (
+ await self.store.get_last_event_pos_in_room_before_stream_ordering(
+ room_id, to_token.room_key
+ )
+ )
+
+ # If the room has no events at/before the `to_token`, this is probably a
+ # mistake in the code that generates the `sync_room_map` since that should
+ # only give us rooms that the user had membership in during the token range.
+ assert last_event_result is not None
+
+ _, event_pos = last_event_result
+
+ last_activity_in_room_map[room_id] = event_pos.stream
+ else:
+ # Otherwise, if the user has left/been invited/knocked/been banned from
+ # a room, they shouldn't see anything past that point.
+ last_activity_in_room_map[room_id] = room_for_user.event_pos.stream
+
+ return sorted(
+ sync_room_map.items(),
+ # Sort by the last activity (stream_ordering) in the room
+ key=lambda room_info: last_activity_in_room_map[room_info[0]],
+ # We want descending order
+ reverse=True,
+ )
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index f1c69d9893..0a40d62c6a 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1036,9 +1036,11 @@ class SyncHandler:
# FIXME: This gets the state at the latest event before the stream ordering,
# which might not be the same as the "current state" of the room at the time
# of the stream token if there were multiple forward extremities at the time.
- last_event_id = await self.store.get_last_event_in_room_before_stream_ordering(
- room_id,
- end_token=stream_position.room_key,
+ last_event_id = (
+ await self.store.get_last_event_id_in_room_before_stream_ordering(
+ room_id,
+ end_token=stream_position.room_key,
+ )
)
if last_event_id:
@@ -1519,7 +1521,7 @@ class SyncHandler:
# We need to make sure the first event in our batch points to the
# last event in the previous batch.
last_event_id_prev_batch = (
- await self.store.get_last_event_in_room_before_stream_ordering(
+ await self.store.get_last_event_id_in_room_before_stream_ordering(
room_id,
end_token=since_token.room_key,
)
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 61373f0bfb..ff0d723684 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -895,7 +895,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
"get_room_event_before_stream_ordering", _f
)
- async def get_last_event_in_room_before_stream_ordering(
+ async def get_last_event_id_in_room_before_stream_ordering(
self,
room_id: str,
end_token: RoomStreamToken,
@@ -910,10 +910,38 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
The ID of the most recent event, or None if there are no events in the room
before this stream ordering.
"""
+ last_event_result = (
+ await self.get_last_event_pos_in_room_before_stream_ordering(
+ room_id, end_token
+ )
+ )
- def get_last_event_in_room_before_stream_ordering_txn(
+ if last_event_result:
+ return last_event_result[0]
+
+ return None
+
+ async def get_last_event_pos_in_room_before_stream_ordering(
+ self,
+ room_id: str,
+ end_token: RoomStreamToken,
+ ) -> Optional[Tuple[str, PersistedEventPosition]]:
+ """
+ Returns the ID and event position of the last event in a room at or before a
+ stream ordering.
+
+ Args:
+ room_id
+ end_token: The token used to stream from
+
+ Returns:
+ The ID of the most recent event and it's position, or None if there are no
+ events in the room before this stream ordering.
+ """
+
+ def get_last_event_pos_in_room_before_stream_ordering_txn(
txn: LoggingTransaction,
- ) -> Optional[str]:
+ ) -> Optional[Tuple[str, PersistedEventPosition]]:
# We're looking for the closest event at or before the token. We need to
# handle the fact that the stream token can be a vector clock (with an
# `instance_map`) and events can be persisted on different instances
@@ -975,13 +1003,15 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
topological_ordering=topological_ordering,
stream_ordering=stream_ordering,
):
- return event_id
+ return event_id, PersistedEventPosition(
+ instance_name, stream_ordering
+ )
return None
return await self.db_pool.runInteraction(
- "get_last_event_in_room_before_stream_ordering",
- get_last_event_in_room_before_stream_ordering_txn,
+ "get_last_event_pos_in_room_before_stream_ordering",
+ get_last_event_pos_in_room_before_stream_ordering_txn,
)
async def get_current_room_stream_token_for_room_id(
diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py
index ec83d0daa6..e2c79c4106 100644
--- a/synapse/types/rest/client/__init__.py
+++ b/synapse/types/rest/client/__init__.py
@@ -175,22 +175,8 @@ class SlidingSyncBody(RequestBodyModel):
ranges: Sliding window ranges. If this field is missing, no sliding window
is used and all rooms are returned in this list. Integers are
*inclusive*.
- sort: How the list should be sorted on the server. The first value is
- applied first, then tiebreaks are performed with each subsequent sort
- listed.
-
- FIXME: Furthermore, it's not currently defined how servers should behave
- if they encounter a filter or sort operation they do not recognise. If
- the server rejects the request with an HTTP 400 then that will break
- backwards compatibility with new clients vs old servers. However, the
- client would be otherwise unaware that only some of the sort/filter
- operations have taken effect. We may need to include a "warnings"
- section to indicate which sort/filter operations are unrecognised,
- allowing for some form of graceful degradation of service.
- -- https://github.com/matrix-org/matrix-spec-proposals/blob/kegan/sync-v3/proposals/3575-sync.md#filter-and-sort-extensions
-
slow_get_all_rooms: Just get all rooms (for clients that don't want to deal with
- sliding windows). When true, the `ranges` and `sort` fields are ignored.
+ sliding windows). When true, the `ranges` field is ignored.
required_state: Required state for each room returned. An array of event
type and state key tuples. Elements in this array are ORd together to
produce the final set of state events to return.
@@ -229,12 +215,6 @@ class SlidingSyncBody(RequestBodyModel):
`user_id` and optionally `avatar_url` and `displayname`) for the users used
to calculate the room name.
filters: Filters to apply to the list before sorting.
- bump_event_types: Allowlist of event types which should be considered recent activity
- when sorting `by_recency`. By omitting event types from this field,
- clients can ensure that uninteresting events (e.g. a profile rename) do
- not cause a room to jump to the top of its list(s). Empty or omitted
- `bump_event_types` have no effect—all events in a room will be
- considered recent activity.
"""
class Filters(RequestBodyModel):
@@ -300,11 +280,9 @@ class SlidingSyncBody(RequestBodyModel):
ranges: Optional[List[Tuple[int, int]]] = None
else:
ranges: Optional[List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]] = None # type: ignore[valid-type]
- sort: Optional[List[StrictStr]] = None
slow_get_all_rooms: Optional[StrictBool] = False
include_heroes: Optional[StrictBool] = False
filters: Optional[Filters] = None
- bump_event_types: Optional[List[StrictStr]] = None
class RoomSubscription(CommonRoomParameters):
pass
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 62fe1214fe..af48041f1f 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -20,6 +20,8 @@
import logging
from unittest.mock import patch
+from parameterized import parameterized
+
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules, Membership
@@ -79,7 +81,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
)
- self.assertEqual(room_id_results, set())
+ self.assertEqual(room_id_results.keys(), set())
def test_get_newly_joined_room(self) -> None:
"""
@@ -103,7 +105,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
)
- self.assertEqual(room_id_results, {room_id})
+ self.assertEqual(room_id_results.keys(), {room_id})
def test_get_already_joined_room(self) -> None:
"""
@@ -124,7 +126,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
)
- self.assertEqual(room_id_results, {room_id})
+ self.assertEqual(room_id_results.keys(), {room_id})
def test_get_invited_banned_knocked_room(self) -> None:
"""
@@ -180,7 +182,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
# Ensure that the invited, ban, and knock rooms show up
self.assertEqual(
- room_id_results,
+ room_id_results.keys(),
{
invited_room_id,
ban_room_id,
@@ -226,7 +228,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# The kicked room should show up
- self.assertEqual(room_id_results, {kick_room_id})
+ self.assertEqual(room_id_results.keys(), {kick_room_id})
def test_forgotten_rooms(self) -> None:
"""
@@ -308,7 +310,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# We shouldn't see the room because it was forgotten
- self.assertEqual(room_id_results, set())
+ self.assertEqual(room_id_results.keys(), set())
def test_only_newly_left_rooms_show_up(self) -> None:
"""
@@ -340,7 +342,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# Only the newly_left room should show up
- self.assertEqual(room_id_results, {room_id2})
+ self.assertEqual(room_id_results.keys(), {room_id2})
def test_no_joins_after_to_token(self) -> None:
"""
@@ -368,7 +370,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
)
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_join_during_range_and_left_room_after_to_token(self) -> None:
"""
@@ -398,7 +400,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
# We should still see the room because we were joined during the
# from_token/to_token time period.
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_join_before_range_and_left_room_after_to_token(self) -> None:
"""
@@ -425,7 +427,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# We should still see the room because we were joined before the `from_token`
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_kicked_before_range_and_left_after_to_token(self) -> None:
"""
@@ -473,7 +475,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# We shouldn't see the room because it was forgotten
- self.assertEqual(room_id_results, {kick_room_id})
+ self.assertEqual(room_id_results.keys(), {kick_room_id})
def test_newly_left_during_range_and_join_leave_after_to_token(self) -> None:
"""
@@ -510,7 +512,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# Room should still show up because it's newly_left during the from/to range
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_newly_left_during_range_and_join_after_to_token(self) -> None:
"""
@@ -546,7 +548,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# Room should still show up because it's newly_left during the from/to range
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_no_from_token(self) -> None:
"""
@@ -587,7 +589,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# Only rooms we were joined to before the `to_token` should show up
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_from_token_ahead_of_to_token(self) -> None:
"""
@@ -648,7 +650,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
#
# There won't be any newly_left rooms because the `from_token` is ahead of the
# `to_token` and that range will give no membership changes to check.
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_leave_before_range_and_join_leave_after_to_token(self) -> None:
"""
@@ -683,7 +685,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# Room shouldn't show up because it was left before the `from_token`
- self.assertEqual(room_id_results, set())
+ self.assertEqual(room_id_results.keys(), set())
def test_leave_before_range_and_join_after_to_token(self) -> None:
"""
@@ -717,7 +719,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# Room shouldn't show up because it was left before the `from_token`
- self.assertEqual(room_id_results, set())
+ self.assertEqual(room_id_results.keys(), set())
def test_join_leave_multiple_times_during_range_and_after_to_token(
self,
@@ -759,7 +761,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# Room should show up because it was newly_left and joined during the from/to range
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_join_leave_multiple_times_before_range_and_after_to_token(
self,
@@ -799,7 +801,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# Room should show up because we were joined before the from/to range
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_invite_before_range_and_join_leave_after_to_token(
self,
@@ -836,7 +838,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
# Room should show up because we were invited before the from/to range
- self.assertEqual(room_id_results, {room_id1})
+ self.assertEqual(room_id_results.keys(), {room_id1})
def test_multiple_rooms_are_not_confused(
self,
@@ -889,7 +891,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
)
self.assertEqual(
- room_id_results,
+ room_id_results.keys(),
{
# `room_id1` shouldn't show up because we left before the from/to range
#
@@ -1048,7 +1050,6 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
# Get a token while things are stuck after our activity
stuck_activity_token = self.event_sources.get_current_token()
- logger.info("stuck_activity_token %s", stuck_activity_token)
# Let's make sure we're working with a token that has an `instance_map`
self.assertNotEqual(len(stuck_activity_token.room_key.instance_map), 0)
@@ -1058,7 +1059,6 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
join_on_worker2_pos = self.get_success(
self.store.get_position_for_event(join_on_worker2_response["event_id"])
)
- logger.info("join_on_worker2_pos %s", join_on_worker2_pos)
# Ensure the join technially came after our token
self.assertGreater(
join_on_worker2_pos.stream,
@@ -1077,7 +1077,6 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
join_on_worker3_pos = self.get_success(
self.store.get_position_for_event(join_on_worker3_response["event_id"])
)
- logger.info("join_on_worker3_pos %s", join_on_worker3_pos)
# Ensure the join came after the min but still encapsulated by the token
self.assertGreaterEqual(
join_on_worker3_pos.stream,
@@ -1103,7 +1102,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
)
self.assertEqual(
- room_id_results,
+ room_id_results.keys(),
{
room_id1,
# room_id2 shouldn't show up because we left before the from/to range
@@ -1217,11 +1216,20 @@ class FilterRoomsTestCase(HomeserverTestCase):
after_rooms_token = self.event_sources.get_current_token()
+ # Get the rooms the user should be syncing with
+ sync_room_map = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=None,
+ to_token=after_rooms_token,
+ )
+ )
+
# Try with `is_dm=True`
- truthy_filtered_room_ids = self.get_success(
+ truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
- {room_id, dm_room_id},
+ sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_dm=True,
),
@@ -1229,13 +1237,13 @@ class FilterRoomsTestCase(HomeserverTestCase):
)
)
- self.assertEqual(truthy_filtered_room_ids, {dm_room_id})
+ self.assertEqual(truthy_filtered_room_map.keys(), {dm_room_id})
# Try with `is_dm=False`
- falsy_filtered_room_ids = self.get_success(
+ falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
- {room_id, dm_room_id},
+ sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_dm=False,
),
@@ -1243,4 +1251,160 @@ class FilterRoomsTestCase(HomeserverTestCase):
)
)
- self.assertEqual(falsy_filtered_room_ids, {room_id})
+ self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
+
+
+class SortRoomsTestCase(HomeserverTestCase):
+ """
+ Tests Sliding Sync handler `sort_rooms()` to make sure it sorts/orders rooms
+ correctly.
+ """
+
+ servlets = [
+ admin.register_servlets,
+ knock.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ # Enable sliding sync
+ config["experimental_features"] = {"msc3575_enabled": True}
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
+ self.store = self.hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+
+ def test_sort_activity_basic(self) -> None:
+ """
+ Rooms with newer activity are sorted first.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id1 = self.helper.create_room_as(
+ user1_id,
+ tok=user1_tok,
+ )
+ room_id2 = self.helper.create_room_as(
+ user1_id,
+ tok=user1_tok,
+ )
+
+ after_rooms_token = self.event_sources.get_current_token()
+
+ # Get the rooms the user should be syncing with
+ sync_room_map = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=None,
+ to_token=after_rooms_token,
+ )
+ )
+
+ # Sort the rooms (what we're testing)
+ sorted_room_info = self.get_success(
+ self.sliding_sync_handler.sort_rooms(
+ sync_room_map=sync_room_map,
+ to_token=after_rooms_token,
+ )
+ )
+
+ self.assertEqual(
+ [room_id for room_id, _ in sorted_room_info],
+ [room_id2, room_id1],
+ )
+
+ @parameterized.expand(
+ [
+ (Membership.LEAVE,),
+ (Membership.INVITE,),
+ (Membership.KNOCK,),
+ (Membership.BAN,),
+ ]
+ )
+ def test_activity_after_xxx(self, room1_membership: str) -> None:
+ """
+ When someone has left/been invited/knocked/been banned from a room, the
+ sort shouldn't take any activity after that membership event into account.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ before_rooms_token = self.event_sources.get_current_token()
+
+ # Create the rooms as user2 so we can have user1 with a clean slate to work from
+ # and join in whatever order we need for the tests.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # If we're testing knocks, set the room to knock
+ if room1_membership == Membership.KNOCK:
+ self.helper.send_state(
+ room_id1,
+ EventTypes.JoinRules,
+ {"join_rule": JoinRules.KNOCK},
+ tok=user2_tok,
+ )
+ room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+
+ # Here is the activity with user1 that will determine the sort of the rooms
+ # (room2, room1, room3)
+ self.helper.join(room_id3, user1_id, tok=user1_tok)
+ if room1_membership == Membership.LEAVE:
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ elif room1_membership == Membership.INVITE:
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ elif room1_membership == Membership.KNOCK:
+ self.helper.knock(room_id1, user1_id, tok=user1_tok)
+ elif room1_membership == Membership.BAN:
+ self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+ # Activity before the token, but the user has only been xxx'd in this room so
+ # it shouldn't be taken into account
+ self.helper.send(room_id1, "activity in room1", tok=user2_tok)
+
+ after_rooms_token = self.event_sources.get_current_token()
+
+ # Activity after the token. Just make it in a different order than what we
+ # expect to make sure we're not taking the activity after the token into
+ # account.
+ self.helper.send(room_id1, "activity in room1", tok=user2_tok)
+ self.helper.send(room_id2, "activity in room2", tok=user2_tok)
+ self.helper.send(room_id3, "activity in room3", tok=user2_tok)
+
+ # Get the rooms the user should be syncing with
+ sync_room_map = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=before_rooms_token,
+ to_token=after_rooms_token,
+ )
+ )
+
+ # Sort the rooms (what we're testing)
+ sorted_room_info = self.get_success(
+ self.sliding_sync_handler.sort_rooms(
+ sync_room_map=sync_room_map,
+ to_token=after_rooms_token,
+ )
+ )
+
+ self.assertEqual(
+ [room_id for room_id, _ in sorted_room_info],
+ [room_id2, room_id1, room_id3],
+ "Corresponding map to disambiguate the opaque room IDs: "
+ + str(
+ {
+ "room_id1": room_id1,
+ "room_id2": room_id2,
+ "room_id3": room_id3,
+ }
+ ),
+ )
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 40870b2cfe..2b06767b8a 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -1299,7 +1299,6 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
"lists": {
"foo-list": {
"ranges": [[0, 99]],
- "sort": ["by_notification_level", "by_recency", "by_name"],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
@@ -1361,7 +1360,6 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
"lists": {
"foo-list": {
"ranges": [[0, 99]],
- "sort": ["by_notification_level", "by_recency", "by_name"],
"required_state": [
["m.room.join_rules", ""],
["m.room.history_visibility", ""],
@@ -1415,14 +1413,12 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
"lists": {
"dms": {
"ranges": [[0, 99]],
- "sort": ["by_recency"],
"required_state": [],
"timeline_limit": 1,
"filters": {"is_dm": True},
},
"foo-list": {
"ranges": [[0, 99]],
- "sort": ["by_recency"],
"required_state": [],
"timeline_limit": 1,
"filters": {"is_dm": False},
@@ -1463,3 +1459,60 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
],
list(channel.json_body["lists"]["foo-list"]),
)
+
+ def test_sort_list(self) -> None:
+ """
+ Test that the lists are sorted by `stream_ordering`
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+ # Activity that will order the rooms
+ self.helper.send(room_id3, "activity in room3", tok=user1_tok)
+ self.helper.send(room_id1, "activity in room1", tok=user1_tok)
+ self.helper.send(room_id2, "activity in room2", tok=user1_tok)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 99]],
+ "required_state": [
+ ["m.room.join_rules", ""],
+ ["m.room.history_visibility", ""],
+ ["m.space.child", "*"],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Make sure it has the foo-list we requested
+ self.assertListEqual(
+ list(channel.json_body["lists"].keys()),
+ ["foo-list"],
+ channel.json_body["lists"].keys(),
+ )
+
+ # Make sure the list is sorted in the way we expect
+ self.assertListEqual(
+ list(channel.json_body["lists"]["foo-list"]["ops"]),
+ [
+ {
+ "op": "SYNC",
+ "range": [0, 99],
+ "room_ids": [room_id2, room_id1, room_id3],
+ }
+ ],
+ channel.json_body["lists"]["foo-list"],
+ )
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index ee34baf46f..fe1e873e15 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -277,7 +277,7 @@ class PaginationTestCase(HomeserverTestCase):
class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
"""
- Test `get_last_event_in_room_before_stream_ordering(...)`
+ Test `get_last_event_pos_in_room_before_stream_ordering(...)`
"""
servlets = [
@@ -336,14 +336,14 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
- last_event = self.get_success(
- self.store.get_last_event_in_room_before_stream_ordering(
+ last_event_result = self.get_success(
+ self.store.get_last_event_pos_in_room_before_stream_ordering(
room_id=room_id,
end_token=before_room_token.room_key,
)
)
- self.assertIsNone(last_event)
+ self.assertIsNone(last_event_result)
def test_after_room_created(self) -> None:
"""
@@ -356,14 +356,16 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
after_room_token = self.event_sources.get_current_token()
- last_event = self.get_success(
- self.store.get_last_event_in_room_before_stream_ordering(
+ last_event_result = self.get_success(
+ self.store.get_last_event_pos_in_room_before_stream_ordering(
room_id=room_id,
end_token=after_room_token.room_key,
)
)
+ assert last_event_result is not None
+ last_event_id, _ = last_event_result
- self.assertIsNotNone(last_event)
+ self.assertIsNotNone(last_event_id)
def test_activity_in_other_rooms(self) -> None:
"""
@@ -380,16 +382,18 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
after_room_token = self.event_sources.get_current_token()
- last_event = self.get_success(
- self.store.get_last_event_in_room_before_stream_ordering(
+ last_event_result = self.get_success(
+ self.store.get_last_event_pos_in_room_before_stream_ordering(
room_id=room_id1,
end_token=after_room_token.room_key,
)
)
+ assert last_event_result is not None
+ last_event_id, _ = last_event_result
# Make sure it's the event we expect (which also means we know it's from the
# correct room)
- self.assertEqual(last_event, event_response["event_id"])
+ self.assertEqual(last_event_id, event_response["event_id"])
def test_activity_after_token_has_no_effect(self) -> None:
"""
@@ -408,15 +412,17 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
self.helper.send(room_id1, "after1", tok=user1_tok)
self.helper.send(room_id1, "after2", tok=user1_tok)
- last_event = self.get_success(
- self.store.get_last_event_in_room_before_stream_ordering(
+ last_event_result = self.get_success(
+ self.store.get_last_event_pos_in_room_before_stream_ordering(
room_id=room_id1,
end_token=after_room_token.room_key,
)
)
+ assert last_event_result is not None
+ last_event_id, _ = last_event_result
# Make sure it's the last event before the token
- self.assertEqual(last_event, event_response["event_id"])
+ self.assertEqual(last_event_id, event_response["event_id"])
def test_last_event_within_sharded_token(self) -> None:
"""
@@ -457,18 +463,20 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
self.helper.send(room_id1, "after1", tok=user1_tok)
self.helper.send(room_id1, "after2", tok=user1_tok)
- last_event = self.get_success(
- self.store.get_last_event_in_room_before_stream_ordering(
+ last_event_result = self.get_success(
+ self.store.get_last_event_pos_in_room_before_stream_ordering(
room_id=room_id1,
end_token=end_token,
)
)
+ assert last_event_result is not None
+ last_event_id, _ = last_event_result
- # Should find closest event at/before the token in room1
+ # Should find closest event before the token in room1
self.assertEqual(
- last_event,
+ last_event_id,
event_response3["event_id"],
- f"We expected {event_response3['event_id']} but saw {last_event} which corresponds to "
+ f"We expected {event_response3['event_id']} but saw {last_event_id} which corresponds to "
+ str(
{
"event1": event_response1["event_id"],
@@ -514,18 +522,20 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
self.helper.send(room_id1, "after1", tok=user1_tok)
self.helper.send(room_id1, "after2", tok=user1_tok)
- last_event = self.get_success(
- self.store.get_last_event_in_room_before_stream_ordering(
+ last_event_result = self.get_success(
+ self.store.get_last_event_pos_in_room_before_stream_ordering(
room_id=room_id1,
end_token=end_token,
)
)
+ assert last_event_result is not None
+ last_event_id, _ = last_event_result
- # Should find closest event at/before the token in room1
+ # Should find closest event before the token in room1
self.assertEqual(
- last_event,
+ last_event_id,
event_response2["event_id"],
- f"We expected {event_response2['event_id']} but saw {last_event} which corresponds to "
+ f"We expected {event_response2['event_id']} but saw {last_event_id} which corresponds to "
+ str(
{
"event1": event_response1["event_id"],
From a5485437cf8006b80345f2e0af6e233881e9de21 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 17 Jun 2024 12:06:18 -0500
Subject: [PATCH 180/503] Add `is_encrypted` filtering to Sliding Sync `/sync`
(#17281)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
---
changelog.d/17281.feature | 1 +
synapse/handlers/sliding_sync.py | 26 ++++++-
synapse/handlers/sync.py | 109 ++++-----------------------
synapse/storage/controllers/state.py | 87 ++++++++++++++++++++-
tests/handlers/test_sliding_sync.py | 66 ++++++++++++++++
5 files changed, 189 insertions(+), 100 deletions(-)
create mode 100644 changelog.d/17281.feature
diff --git a/changelog.d/17281.feature b/changelog.d/17281.feature
new file mode 100644
index 0000000000..fce512692c
--- /dev/null
+++ b/changelog.d/17281.feature
@@ -0,0 +1 @@
+Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index b84cf67f7d..16d94925f5 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -22,7 +22,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from immutabledict import immutabledict
-from synapse.api.constants import AccountDataTypes, Membership
+from synapse.api.constants import AccountDataTypes, EventTypes, Membership
from synapse.events import EventBase
from synapse.storage.roommember import RoomsForUser
from synapse.types import (
@@ -33,6 +33,7 @@ from synapse.types import (
UserID,
)
from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
+from synapse.types.state import StateFilter
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -85,6 +86,7 @@ class SlidingSyncHandler:
def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
+ self.storage_controllers = hs.get_storage_controllers()
self.auth_blocking = hs.get_auth_blocking()
self.notifier = hs.get_notifier()
self.event_sources = hs.get_event_sources()
@@ -570,8 +572,26 @@ class SlidingSyncHandler:
if filters.spaces:
raise NotImplementedError()
- if filters.is_encrypted:
- raise NotImplementedError()
+ # Filter for encrypted rooms
+ if filters.is_encrypted is not None:
+ # Make a copy so we don't run into an error: `Set changed size during
+ # iteration`, when we filter out and remove items
+ for room_id in list(filtered_room_id_set):
+ state_at_to_token = await self.storage_controllers.state.get_state_at(
+ room_id,
+ to_token,
+ state_filter=StateFilter.from_types(
+ [(EventTypes.RoomEncryption, "")]
+ ),
+ )
+ is_encrypted = state_at_to_token.get((EventTypes.RoomEncryption, ""))
+
+ # If we're looking for encrypted rooms, filter out rooms that are not
+ # encrypted and vice versa
+ if (filters.is_encrypted and not is_encrypted) or (
+ not filters.is_encrypted and is_encrypted
+ ):
+ filtered_room_id_set.remove(room_id)
if filters.is_invite:
raise NotImplementedError()
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 0a40d62c6a..e2563428d2 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -979,91 +979,6 @@ class SyncHandler:
bundled_aggregations=bundled_aggregations,
)
- async def get_state_after_event(
- self,
- event_id: str,
- state_filter: Optional[StateFilter] = None,
- await_full_state: bool = True,
- ) -> StateMap[str]:
- """
- Get the room state after the given event
-
- Args:
- event_id: event of interest
- state_filter: The state filter used to fetch state from the database.
- await_full_state: if `True`, will block if we do not yet have complete state
- at the event and `state_filter` is not satisfied by partial state.
- Defaults to `True`.
- """
- state_ids = await self._state_storage_controller.get_state_ids_for_event(
- event_id,
- state_filter=state_filter or StateFilter.all(),
- await_full_state=await_full_state,
- )
-
- # using get_metadata_for_events here (instead of get_event) sidesteps an issue
- # with redactions: if `event_id` is a redaction event, and we don't have the
- # original (possibly because it got purged), get_event will refuse to return
- # the redaction event, which isn't terribly helpful here.
- #
- # (To be fair, in that case we could assume it's *not* a state event, and
- # therefore we don't need to worry about it. But still, it seems cleaner just
- # to pull the metadata.)
- m = (await self.store.get_metadata_for_events([event_id]))[event_id]
- if m.state_key is not None and m.rejection_reason is None:
- state_ids = dict(state_ids)
- state_ids[(m.event_type, m.state_key)] = event_id
-
- return state_ids
-
- async def get_state_at(
- self,
- room_id: str,
- stream_position: StreamToken,
- state_filter: Optional[StateFilter] = None,
- await_full_state: bool = True,
- ) -> StateMap[str]:
- """Get the room state at a particular stream position
-
- Args:
- room_id: room for which to get state
- stream_position: point at which to get state
- state_filter: The state filter used to fetch state from the database.
- await_full_state: if `True`, will block if we do not yet have complete state
- at the last event in the room before `stream_position` and
- `state_filter` is not satisfied by partial state. Defaults to `True`.
- """
- # FIXME: This gets the state at the latest event before the stream ordering,
- # which might not be the same as the "current state" of the room at the time
- # of the stream token if there were multiple forward extremities at the time.
- last_event_id = (
- await self.store.get_last_event_id_in_room_before_stream_ordering(
- room_id,
- end_token=stream_position.room_key,
- )
- )
-
- if last_event_id:
- state = await self.get_state_after_event(
- last_event_id,
- state_filter=state_filter or StateFilter.all(),
- await_full_state=await_full_state,
- )
-
- else:
- # no events in this room - so presumably no state
- state = {}
-
- # (erikj) This should be rarely hit, but we've had some reports that
- # we get more state down gappy syncs than we should, so let's add
- # some logging.
- logger.info(
- "Failed to find any events in room %s at %s",
- room_id,
- stream_position.room_key,
- )
- return state
-
async def compute_summary(
self,
room_id: str,
@@ -1437,7 +1352,7 @@ class SyncHandler:
await_full_state = True
lazy_load_members = False
- state_at_timeline_end = await self.get_state_at(
+ state_at_timeline_end = await self._state_storage_controller.get_state_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
@@ -1565,7 +1480,7 @@ class SyncHandler:
else:
# We can get here if the user has ignored the senders of all
# the recent events.
- state_at_timeline_start = await self.get_state_at(
+ state_at_timeline_start = await self._state_storage_controller.get_state_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
@@ -1587,14 +1502,14 @@ class SyncHandler:
# about them).
state_filter = StateFilter.all()
- state_at_previous_sync = await self.get_state_at(
+ state_at_previous_sync = await self._state_storage_controller.get_state_at(
room_id,
stream_position=since_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
- state_at_timeline_end = await self.get_state_at(
+ state_at_timeline_end = await self._state_storage_controller.get_state_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
@@ -2593,7 +2508,7 @@ class SyncHandler:
continue
if room_id in sync_result_builder.joined_room_ids or has_join:
- old_state_ids = await self.get_state_at(
+ old_state_ids = await self._state_storage_controller.get_state_at(
room_id,
since_token,
state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
@@ -2623,12 +2538,14 @@ class SyncHandler:
newly_left_rooms.append(room_id)
else:
if not old_state_ids:
- old_state_ids = await self.get_state_at(
- room_id,
- since_token,
- state_filter=StateFilter.from_types(
- [(EventTypes.Member, user_id)]
- ),
+ old_state_ids = (
+ await self._state_storage_controller.get_state_at(
+ room_id,
+ since_token,
+ state_filter=StateFilter.from_types(
+ [(EventTypes.Member, user_id)]
+ ),
+ )
)
old_mem_ev_id = old_state_ids.get(
(EventTypes.Member, user_id), None
diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py
index f9eced23bf..cc9b162ae4 100644
--- a/synapse/storage/controllers/state.py
+++ b/synapse/storage/controllers/state.py
@@ -45,7 +45,7 @@ from synapse.storage.util.partial_state_events_tracker import (
PartialStateEventsTracker,
)
from synapse.synapse_rust.acl import ServerAclEvaluator
-from synapse.types import MutableStateMap, StateMap, get_domain_from_id
+from synapse.types import MutableStateMap, StateMap, StreamToken, get_domain_from_id
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.caches import intern_string
@@ -372,6 +372,91 @@ class StateStorageController:
)
return state_map[event_id]
+ async def get_state_after_event(
+ self,
+ event_id: str,
+ state_filter: Optional[StateFilter] = None,
+ await_full_state: bool = True,
+ ) -> StateMap[str]:
+ """
+ Get the room state after the given event
+
+ Args:
+ event_id: event of interest
+ state_filter: The state filter used to fetch state from the database.
+ await_full_state: if `True`, will block if we do not yet have complete state
+ at the event and `state_filter` is not satisfied by partial state.
+ Defaults to `True`.
+ """
+ state_ids = await self.get_state_ids_for_event(
+ event_id,
+ state_filter=state_filter or StateFilter.all(),
+ await_full_state=await_full_state,
+ )
+
+ # using get_metadata_for_events here (instead of get_event) sidesteps an issue
+ # with redactions: if `event_id` is a redaction event, and we don't have the
+ # original (possibly because it got purged), get_event will refuse to return
+ # the redaction event, which isn't terribly helpful here.
+ #
+ # (To be fair, in that case we could assume it's *not* a state event, and
+ # therefore we don't need to worry about it. But still, it seems cleaner just
+ # to pull the metadata.)
+ m = (await self.stores.main.get_metadata_for_events([event_id]))[event_id]
+ if m.state_key is not None and m.rejection_reason is None:
+ state_ids = dict(state_ids)
+ state_ids[(m.event_type, m.state_key)] = event_id
+
+ return state_ids
+
+ async def get_state_at(
+ self,
+ room_id: str,
+ stream_position: StreamToken,
+ state_filter: Optional[StateFilter] = None,
+ await_full_state: bool = True,
+ ) -> StateMap[str]:
+ """Get the room state at a particular stream position
+
+ Args:
+ room_id: room for which to get state
+ stream_position: point at which to get state
+ state_filter: The state filter used to fetch state from the database.
+ await_full_state: if `True`, will block if we do not yet have complete state
+ at the last event in the room before `stream_position` and
+ `state_filter` is not satisfied by partial state. Defaults to `True`.
+ """
+ # FIXME: This gets the state at the latest event before the stream ordering,
+ # which might not be the same as the "current state" of the room at the time
+ # of the stream token if there were multiple forward extremities at the time.
+ last_event_id = (
+ await self.stores.main.get_last_event_id_in_room_before_stream_ordering(
+ room_id,
+ end_token=stream_position.room_key,
+ )
+ )
+
+ if last_event_id:
+ state = await self.get_state_after_event(
+ last_event_id,
+ state_filter=state_filter or StateFilter.all(),
+ await_full_state=await_full_state,
+ )
+
+ else:
+ # no events in this room - so presumably no state
+ state = {}
+
+ # (erikj) This should be rarely hit, but we've had some reports that
+ # we get more state down gappy syncs than we should, so let's add
+ # some logging.
+ logger.info(
+ "Failed to find any events in room %s at %s",
+ room_id,
+ stream_position.room_key,
+ )
+ return state
+
@trace
@tag_args
async def get_state_for_groups(
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index af48041f1f..0358239c7f 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -1253,6 +1253,72 @@ class FilterRoomsTestCase(HomeserverTestCase):
self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
+ def test_filter_encrypted_rooms(self) -> None:
+ """
+ Test `filter.is_encrypted` for encrypted rooms
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Create a normal room
+ room_id = self.helper.create_room_as(
+ user1_id,
+ is_public=False,
+ tok=user1_tok,
+ )
+
+ # Create an encrypted room
+ encrypted_room_id = self.helper.create_room_as(
+ user1_id,
+ is_public=False,
+ tok=user1_tok,
+ )
+ self.helper.send_state(
+ encrypted_room_id,
+ EventTypes.RoomEncryption,
+ {"algorithm": "m.megolm.v1.aes-sha2"},
+ tok=user1_tok,
+ )
+
+ after_rooms_token = self.event_sources.get_current_token()
+
+ # Get the rooms the user should be syncing with
+ sync_room_map = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=None,
+ to_token=after_rooms_token,
+ )
+ )
+
+ # Try with `is_encrypted=True`
+ truthy_filtered_room_map = self.get_success(
+ self.sliding_sync_handler.filter_rooms(
+ UserID.from_string(user1_id),
+ sync_room_map,
+ SlidingSyncConfig.SlidingSyncList.Filters(
+ is_encrypted=True,
+ ),
+ after_rooms_token,
+ )
+ )
+
+ self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id})
+
+ # Try with `is_encrypted=False`
+ falsy_filtered_room_map = self.get_success(
+ self.sliding_sync_handler.filter_rooms(
+ UserID.from_string(user1_id),
+ sync_room_map,
+ SlidingSyncConfig.SlidingSyncList.Filters(
+ is_encrypted=False,
+ ),
+ after_rooms_token,
+ )
+ )
+
+ self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
+
class SortRoomsTestCase(HomeserverTestCase):
"""
From 52813a8d9453d7dfb1dcadf68fcb9f77cdabcf67 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 18 Jun 2024 09:56:56 +0100
Subject: [PATCH 181/503] Bump msgpack from 1.0.7 to 1.0.8 (#17317)
---
poetry.lock | 114 ++++++++++++++++++++++++++--------------------------
1 file changed, 57 insertions(+), 57 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 7b169ceb6e..9db6db1904 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1319,67 +1319,67 @@ files = [
[[package]]
name = "msgpack"
-version = "1.0.7"
+version = "1.0.8"
description = "MessagePack serializer"
optional = false
python-versions = ">=3.8"
files = [
- {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862"},
- {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329"},
- {file = "msgpack-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b"},
- {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6"},
- {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee"},
- {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d"},
- {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d"},
- {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1"},
- {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681"},
- {file = "msgpack-1.0.7-cp310-cp310-win32.whl", hash = "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9"},
- {file = "msgpack-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415"},
- {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84"},
- {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93"},
- {file = "msgpack-1.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8"},
- {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46"},
- {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b"},
- {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e"},
- {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002"},
- {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c"},
- {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e"},
- {file = "msgpack-1.0.7-cp311-cp311-win32.whl", hash = "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1"},
- {file = "msgpack-1.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82"},
- {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b"},
- {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4"},
- {file = "msgpack-1.0.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee"},
- {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5"},
- {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672"},
- {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075"},
- {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba"},
- {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c"},
- {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5"},
- {file = "msgpack-1.0.7-cp312-cp312-win32.whl", hash = "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9"},
- {file = "msgpack-1.0.7-cp312-cp312-win_amd64.whl", hash = "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf"},
- {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95"},
- {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0"},
- {file = "msgpack-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7"},
- {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d"},
- {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524"},
- {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc"},
- {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc"},
- {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf"},
- {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c"},
- {file = "msgpack-1.0.7-cp38-cp38-win32.whl", hash = "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2"},
- {file = "msgpack-1.0.7-cp38-cp38-win_amd64.whl", hash = "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c"},
- {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f"},
- {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81"},
- {file = "msgpack-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc"},
- {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d"},
- {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7"},
- {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61"},
- {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819"},
- {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd"},
- {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f"},
- {file = "msgpack-1.0.7-cp39-cp39-win32.whl", hash = "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad"},
- {file = "msgpack-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3"},
- {file = "msgpack-1.0.7.tar.gz", hash = "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87"},
+ {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:505fe3d03856ac7d215dbe005414bc28505d26f0c128906037e66d98c4e95868"},
+ {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b7842518a63a9f17107eb176320960ec095a8ee3b4420b5f688e24bf50c53c"},
+ {file = "msgpack-1.0.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:376081f471a2ef24828b83a641a02c575d6103a3ad7fd7dade5486cad10ea659"},
+ {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e390971d082dba073c05dbd56322427d3280b7cc8b53484c9377adfbae67dc2"},
+ {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e073efcba9ea99db5acef3959efa45b52bc67b61b00823d2a1a6944bf45982"},
+ {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82d92c773fbc6942a7a8b520d22c11cfc8fd83bba86116bfcf962c2f5c2ecdaa"},
+ {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9ee32dcb8e531adae1f1ca568822e9b3a738369b3b686d1477cbc643c4a9c128"},
+ {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e3aa7e51d738e0ec0afbed661261513b38b3014754c9459508399baf14ae0c9d"},
+ {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69284049d07fce531c17404fcba2bb1df472bc2dcdac642ae71a2d079d950653"},
+ {file = "msgpack-1.0.8-cp310-cp310-win32.whl", hash = "sha256:13577ec9e247f8741c84d06b9ece5f654920d8365a4b636ce0e44f15e07ec693"},
+ {file = "msgpack-1.0.8-cp310-cp310-win_amd64.whl", hash = "sha256:e532dbd6ddfe13946de050d7474e3f5fb6ec774fbb1a188aaf469b08cf04189a"},
+ {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9517004e21664f2b5a5fd6333b0731b9cf0817403a941b393d89a2f1dc2bd836"},
+ {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d16a786905034e7e34098634b184a7d81f91d4c3d246edc6bd7aefb2fd8ea6ad"},
+ {file = "msgpack-1.0.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2872993e209f7ed04d963e4b4fbae72d034844ec66bc4ca403329db2074377b"},
+ {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c330eace3dd100bdb54b5653b966de7f51c26ec4a7d4e87132d9b4f738220ba"},
+ {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b5c044f3eff2a6534768ccfd50425939e7a8b5cf9a7261c385de1e20dcfc85"},
+ {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1876b0b653a808fcd50123b953af170c535027bf1d053b59790eebb0aeb38950"},
+ {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dfe1f0f0ed5785c187144c46a292b8c34c1295c01da12e10ccddfc16def4448a"},
+ {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3528807cbbb7f315bb81959d5961855e7ba52aa60a3097151cb21956fbc7502b"},
+ {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e2f879ab92ce502a1e65fce390eab619774dda6a6ff719718069ac94084098ce"},
+ {file = "msgpack-1.0.8-cp311-cp311-win32.whl", hash = "sha256:26ee97a8261e6e35885c2ecd2fd4a6d38252246f94a2aec23665a4e66d066305"},
+ {file = "msgpack-1.0.8-cp311-cp311-win_amd64.whl", hash = "sha256:eadb9f826c138e6cf3c49d6f8de88225a3c0ab181a9b4ba792e006e5292d150e"},
+ {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee"},
+ {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b"},
+ {file = "msgpack-1.0.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8"},
+ {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3"},
+ {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc"},
+ {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58"},
+ {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f"},
+ {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04"},
+ {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543"},
+ {file = "msgpack-1.0.8-cp312-cp312-win32.whl", hash = "sha256:64d0fcd436c5683fdd7c907eeae5e2cbb5eb872fafbc03a43609d7941840995c"},
+ {file = "msgpack-1.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:74398a4cf19de42e1498368c36eed45d9528f5fd0155241e82c4082b7e16cffd"},
+ {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ceea77719d45c839fd73abcb190b8390412a890df2f83fb8cf49b2a4b5c2f40"},
+ {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ab0bbcd4d1f7b6991ee7c753655b481c50084294218de69365f8f1970d4c151"},
+ {file = "msgpack-1.0.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1cce488457370ffd1f953846f82323cb6b2ad2190987cd4d70b2713e17268d24"},
+ {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3923a1778f7e5ef31865893fdca12a8d7dc03a44b33e2a5f3295416314c09f5d"},
+ {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22e47578b30a3e199ab067a4d43d790249b3c0587d9a771921f86250c8435db"},
+ {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd739c9251d01e0279ce729e37b39d49a08c0420d3fee7f2a4968c0576678f77"},
+ {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3420522057ebab1728b21ad473aa950026d07cb09da41103f8e597dfbfaeb13"},
+ {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5845fdf5e5d5b78a49b826fcdc0eb2e2aa7191980e3d2cfd2a30303a74f212e2"},
+ {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a0e76621f6e1f908ae52860bdcb58e1ca85231a9b0545e64509c931dd34275a"},
+ {file = "msgpack-1.0.8-cp38-cp38-win32.whl", hash = "sha256:374a8e88ddab84b9ada695d255679fb99c53513c0a51778796fcf0944d6c789c"},
+ {file = "msgpack-1.0.8-cp38-cp38-win_amd64.whl", hash = "sha256:f3709997b228685fe53e8c433e2df9f0cdb5f4542bd5114ed17ac3c0129b0480"},
+ {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f51bab98d52739c50c56658cc303f190785f9a2cd97b823357e7aeae54c8f68a"},
+ {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:73ee792784d48aa338bba28063e19a27e8d989344f34aad14ea6e1b9bd83f596"},
+ {file = "msgpack-1.0.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f9904e24646570539a8950400602d66d2b2c492b9010ea7e965025cb71d0c86d"},
+ {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e75753aeda0ddc4c28dce4c32ba2f6ec30b1b02f6c0b14e547841ba5b24f753f"},
+ {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dbf059fb4b7c240c873c1245ee112505be27497e90f7c6591261c7d3c3a8228"},
+ {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4916727e31c28be8beaf11cf117d6f6f188dcc36daae4e851fee88646f5b6b18"},
+ {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7938111ed1358f536daf311be244f34df7bf3cdedb3ed883787aca97778b28d8"},
+ {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:493c5c5e44b06d6c9268ce21b302c9ca055c1fd3484c25ba41d34476c76ee746"},
+ {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"},
+ {file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"},
+ {file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"},
+ {file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"},
]
[[package]]
From 19a3d5b60662429eb77b670d8469ad88ee4ded58 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 18 Jun 2024 09:57:07 +0100
Subject: [PATCH 182/503] Bump phonenumbers from 8.13.37 to 8.13.39 (#17315)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 9db6db1904..0277858935 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1524,13 +1524,13 @@ files = [
[[package]]
name = "phonenumbers"
-version = "8.13.37"
+version = "8.13.39"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
files = [
- {file = "phonenumbers-8.13.37-py2.py3-none-any.whl", hash = "sha256:4ea00ef5012422c08c7955c21131e7ae5baa9a3ef52cf2d561e963f023006b80"},
- {file = "phonenumbers-8.13.37.tar.gz", hash = "sha256:bd315fed159aea0516f7c367231810fe8344d5bec26156b88fa18374c11d1cf2"},
+ {file = "phonenumbers-8.13.39-py2.py3-none-any.whl", hash = "sha256:3ad2d086fa71e7eef409001b9195ac54bebb0c6e3e752209b558ca192c9229a0"},
+ {file = "phonenumbers-8.13.39.tar.gz", hash = "sha256:db7ca4970d206b2056231105300753b1a5b229f43416f8c2b3010e63fbb68d77"},
]
[[package]]
From d8e81f67ebf22fff482cd9b81b719991cfb817e7 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 18 Jun 2024 09:57:24 +0100
Subject: [PATCH 183/503] Bump types-netaddr from 1.2.0.20240219 to
1.3.0.20240530 (#17314)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 0277858935..a2a24e0773 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2822,13 +2822,13 @@ referencing = "*"
[[package]]
name = "types-netaddr"
-version = "1.2.0.20240219"
+version = "1.3.0.20240530"
description = "Typing stubs for netaddr"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-netaddr-1.2.0.20240219.tar.gz", hash = "sha256:984e70ad838218d3032f37f05a7e294f7b007fe274ec9d774265c8c06698395f"},
- {file = "types_netaddr-1.2.0.20240219-py3-none-any.whl", hash = "sha256:b26144e878acb8a1a9008e6997863714db04f8029a0f7f6bfe483c977d21b522"},
+ {file = "types-netaddr-1.3.0.20240530.tar.gz", hash = "sha256:742c2ec1f202b666f544223e2616b34f1f13df80c91e5aeaaa93a72e4d0774ea"},
+ {file = "types_netaddr-1.3.0.20240530-py3-none-any.whl", hash = "sha256:354998d018e326da4f1d9b005fc91137b7c2c473aaf03c4ef64bf83c6861b440"},
]
[[package]]
From 334123f0cd29fb5f1ae84a4cec86eefbfac89278 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 18 Jun 2024 09:57:42 +0100
Subject: [PATCH 184/503] Bump dawidd6/action-download-artifact from 5 to 6
(#17313)
---
.github/workflows/docs-pr-netlify.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
index a724816392..6d184a21e0 100644
--- a/.github/workflows/docs-pr-netlify.yaml
+++ b/.github/workflows/docs-pr-netlify.yaml
@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
- uses: dawidd6/action-download-artifact@deb3bb83256a78589fef6a7b942e5f2573ad7c13 # v5
+ uses: dawidd6/action-download-artifact@bf251b5aa9c2f7eeb574a96ee720e24f801b7c11 # v6
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
From d17d931a53f5b684e1aaac2f30e9764268f6f632 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 18 Jun 2024 09:57:58 +0100
Subject: [PATCH 185/503] Bump docker/build-push-action from 5 to 6 (#17312)
---
.github/workflows/docker.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 391e9c96ff..06aaeb851f 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -72,7 +72,7 @@ jobs:
- name: Build and push all platforms
id: build-and-push
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@v6
with:
push: true
labels: |
From 088992a4840a3af1dc221dee3d40254c1c8a3f16 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 18 Jun 2024 10:01:34 +0100
Subject: [PATCH 186/503] Bump types-pyyaml from 6.0.12.12 to 6.0.12.20240311
(#17316)
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index a2a24e0773..58981ff6e1 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2881,13 +2881,13 @@ types-cffi = "*"
[[package]]
name = "types-pyyaml"
-version = "6.0.12.12"
+version = "6.0.12.20240311"
description = "Typing stubs for PyYAML"
optional = false
-python-versions = "*"
+python-versions = ">=3.8"
files = [
- {file = "types-PyYAML-6.0.12.12.tar.gz", hash = "sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062"},
- {file = "types_PyYAML-6.0.12.12-py3-none-any.whl", hash = "sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24"},
+ {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"},
+ {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"},
]
[[package]]
From 5a65e8a0d164e63b3d4fe4e4753ff686315a5446 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Tue, 18 Jun 2024 11:26:36 +0200
Subject: [PATCH 187/503] Switch to macOS 12 runners to fix building of wheels
for macOS (#17319)
This changes the release artefacts workflow to use `macos-12` runners
instead of `macos-11`, as the latter will be fully deprecated in a few
days.
This also updates `cibuildwheel` to a newer version, as it would not
'repair' the macOS wheels correctly
The difference is that now instead of outputting a macOS 11+ compatible
wheel, we output a macOS 12+ compatible one. This is fine, as macOS 11
is considered EOL since September 2023.
We can also expect that macOS 12 will be considered EOL in September
2024, as Apple usually supports the last 3 macOS version, and macOS 15
is scheduled to be released around that time.
---
.github/workflows/release-artifacts.yml | 8 ++++----
changelog.d/17319.misc | 1 +
2 files changed, 5 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/17319.misc
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 5d4a4fe1d6..9f0feffd94 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -102,7 +102,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-20.04, macos-11]
+ os: [ubuntu-20.04, macos-12]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
@@ -112,9 +112,9 @@ jobs:
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
- os: "macos-11"
+ os: "macos-12"
# Don't build aarch64 wheels on mac.
- - os: "macos-11"
+ - os: "macos-12"
arch: aarch64
# Don't build aarch64 wheels on PR CI.
- is_pr: true
@@ -130,7 +130,7 @@ jobs:
python-version: "3.x"
- name: Install cibuildwheel
- run: python -m pip install cibuildwheel==2.16.2
+ run: python -m pip install cibuildwheel==2.19.1
- name: Set up QEMU to emulate aarch64
if: matrix.arch == 'aarch64'
diff --git a/changelog.d/17319.misc b/changelog.d/17319.misc
new file mode 100644
index 0000000000..6bb0eb6ade
--- /dev/null
+++ b/changelog.d/17319.misc
@@ -0,0 +1 @@
+Switch to macOS 12 runners to fix building of wheels for macOS.
From 1c7d85fdfea386c1d9631ca10bacb77ce66dbf37 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Tue, 18 Jun 2024 11:37:02 +0200
Subject: [PATCH 188/503] fix missing quotes for exclude_rooms_from_sync
(#17308)
We tried to configure rooms `exclude_rooms_from_sync`. If we do not
quote we get an error.
The example should be valid.
---
changelog.d/17308.doc | 1 +
docs/usage/configuration/config_documentation.md | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17308.doc
diff --git a/changelog.d/17308.doc b/changelog.d/17308.doc
new file mode 100644
index 0000000000..7ae080a684
--- /dev/null
+++ b/changelog.d/17308.doc
@@ -0,0 +1 @@
+Add missing quotes for example for `exclude_rooms_from_sync`.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index d23f8c4c4f..22c545359d 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -4150,7 +4150,7 @@ By default, no room is excluded.
Example configuration:
```yaml
exclude_rooms_from_sync:
- - !foo:example.com
+ - "!foo:example.com"
```
---
From 4af654f0da31072bf0e0cac33202c621369ee823 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Tue, 18 Jun 2024 11:46:34 +0200
Subject: [PATCH 189/503] 1.109.0
---
CHANGES.md | 9 +++++++++
changelog.d/17319.misc | 1 -
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
4 files changed, 16 insertions(+), 2 deletions(-)
delete mode 100644 changelog.d/17319.misc
diff --git a/CHANGES.md b/CHANGES.md
index 94ee661151..9060b84853 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,12 @@
+# Synapse 1.109.0 (2024-06-18)
+
+### Internal Changes
+
+- Fix the building of binary wheels for macOS by switching to macOS 12 CI runners. ([\#17319](https://github.com/element-hq/synapse/issues/17319))
+
+
+
+
# Synapse 1.109.0rc3 (2024-06-17)
### Bugfixes
diff --git a/changelog.d/17319.misc b/changelog.d/17319.misc
deleted file mode 100644
index 6bb0eb6ade..0000000000
--- a/changelog.d/17319.misc
+++ /dev/null
@@ -1 +0,0 @@
-Switch to macOS 12 runners to fix building of wheels for macOS.
diff --git a/debian/changelog b/debian/changelog
index e5b7809bcf..e9b05f8553 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.109.0) stable; urgency=medium
+
+ * New synapse release 1.109.0.
+
+ -- Synapse Packaging team Tue, 18 Jun 2024 09:45:15 +0000
+
matrix-synapse-py3 (1.109.0~rc3) stable; urgency=medium
* New synapse release 1.109.0rc3.
diff --git a/pyproject.toml b/pyproject.toml
index 7567b1de40..1485016a5a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.109.0rc3"
+version = "1.109.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From 79767a1108a9a126b18e063eb7e8b063cac94e66 Mon Sep 17 00:00:00 2001
From: Johannes Marbach
Date: Tue, 18 Jun 2024 12:03:39 +0200
Subject: [PATCH 190/503] Add support for via query parameter from MSC4156
(#17322)
This adds support for the `via` query parameter from
https://github.com/matrix-org/matrix-spec-proposals/pull/4156.
---
changelog.d/17322.feature | 1 +
synapse/config/experimental.py | 3 +++
synapse/rest/client/knock.py | 8 ++++++++
synapse/rest/client/room.py | 8 ++++++++
4 files changed, 20 insertions(+)
create mode 100644 changelog.d/17322.feature
diff --git a/changelog.d/17322.feature b/changelog.d/17322.feature
new file mode 100644
index 0000000000..85386c2df7
--- /dev/null
+++ b/changelog.d/17322.feature
@@ -0,0 +1 @@
+Add support for via query parameter from MSC4156.
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 24546171e5..23e96da6a3 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -439,3 +439,6 @@ class ExperimentalConfig(Config):
# MSC4151: Report room API (Client-Server API)
self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
+
+ # MSC4156: Migrate server_name to via
+ self.msc4156_enabled: bool = experimental.get("msc4156_enabled", False)
diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py
index ff52a9bf8c..e31687fc13 100644
--- a/synapse/rest/client/knock.py
+++ b/synapse/rest/client/knock.py
@@ -53,6 +53,7 @@ class KnockRoomAliasServlet(RestServlet):
super().__init__()
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
+ self._support_via = hs.config.experimental.msc4156_enabled
async def on_POST(
self,
@@ -74,6 +75,13 @@ class KnockRoomAliasServlet(RestServlet):
remote_room_hosts = parse_strings_from_args(
args, "server_name", required=False
)
+ if self._support_via:
+ remote_room_hosts = parse_strings_from_args(
+ args,
+ "org.matrix.msc4156.via",
+ default=remote_room_hosts,
+ required=False,
+ )
elif RoomAlias.is_valid(room_identifier):
handler = self.room_member_handler
room_alias = RoomAlias.from_string(room_identifier)
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 61fdf71a27..c98241f6ce 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -417,6 +417,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
super().__init__(hs)
super(ResolveRoomIdMixin, self).__init__(hs) # ensure the Mixin is set up
self.auth = hs.get_auth()
+ self._support_via = hs.config.experimental.msc4156_enabled
def register(self, http_server: HttpServer) -> None:
# /join/$room_identifier[/$txn_id]
@@ -435,6 +436,13 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
args: Dict[bytes, List[bytes]] = request.args # type: ignore
remote_room_hosts = parse_strings_from_args(args, "server_name", required=False)
+ if self._support_via:
+ remote_room_hosts = parse_strings_from_args(
+ args,
+ "org.matrix.msc4156.via",
+ default=remote_room_hosts,
+ required=False,
+ )
room_id, remote_room_hosts = await self.resolve_room_id(
room_identifier,
remote_room_hosts,
From 97c3d988161f69821f00b722aafaea4fcb31759f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Tue, 18 Jun 2024 17:21:51 +0200
Subject: [PATCH 191/503] register_new_matrix_user: add password-file flag
(#17294)
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Co-authored-by: Andrew Morgan
---
changelog.d/17294.feature | 2 ++
debian/changelog | 6 ++++++
debian/register_new_matrix_user.ronn | 8 ++++++--
synapse/_scripts/register_new_matrix_user.py | 20 +++++++++++++++-----
4 files changed, 29 insertions(+), 7 deletions(-)
create mode 100644 changelog.d/17294.feature
diff --git a/changelog.d/17294.feature b/changelog.d/17294.feature
new file mode 100644
index 0000000000..33aac7b0bc
--- /dev/null
+++ b/changelog.d/17294.feature
@@ -0,0 +1,2 @@
+`register_new_matrix_user` now supports a --password-file flag, which
+is useful for scripting.
diff --git a/debian/changelog b/debian/changelog
index e9b05f8553..55e17bd868 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.109.0+nmu1) UNRELEASED; urgency=medium
+
+ * `register_new_matrix_user` now supports a --password-file flag.
+
+ -- Synapse Packaging team Tue, 18 Jun 2024 13:29:36 +0100
+
matrix-synapse-py3 (1.109.0) stable; urgency=medium
* New synapse release 1.109.0.
diff --git a/debian/register_new_matrix_user.ronn b/debian/register_new_matrix_user.ronn
index 0410b1f4cd..963e67c004 100644
--- a/debian/register_new_matrix_user.ronn
+++ b/debian/register_new_matrix_user.ronn
@@ -31,8 +31,12 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
Local part of the new user. Will prompt if omitted.
* `-p`, `--password`:
- New password for user. Will prompt if omitted. Supplying the password
- on the command line is not recommended. Use the STDIN instead.
+ New password for user. Will prompt if this option and `--password-file` are omitted.
+ Supplying the password on the command line is not recommended.
+
+ * `--password-file`:
+ File containing the new password for user. If set, overrides `--password`.
+ This is a more secure alternative to specifying the password on the command line.
* `-a`, `--admin`:
Register new user as an admin. Will prompt if omitted.
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
index 77a7129ee2..972b35e2dc 100644
--- a/synapse/_scripts/register_new_matrix_user.py
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -173,11 +173,18 @@ def main() -> None:
default=None,
help="Local part of the new user. Will prompt if omitted.",
)
- parser.add_argument(
+ password_group = parser.add_mutually_exclusive_group()
+ password_group.add_argument(
"-p",
"--password",
default=None,
- help="New password for user. Will prompt if omitted.",
+ help="New password for user. Will prompt for a password if "
+ "this flag and `--password-file` are both omitted.",
+ )
+ password_group.add_argument(
+ "--password-file",
+ default=None,
+ help="File containing the new password for user. If set, will override `--password`.",
)
parser.add_argument(
"-t",
@@ -247,6 +254,11 @@ def main() -> None:
print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
sys.exit(1)
+ if args.password_file:
+ password = _read_file(args.password_file, "password-file").strip()
+ else:
+ password = args.password
+
if args.server_url:
server_url = args.server_url
elif config is not None:
@@ -269,9 +281,7 @@ def main() -> None:
if args.admin or args.no_admin:
admin = args.admin
- register_new_user(
- args.user, args.password, server_url, secret, admin, args.user_type
- )
+ register_new_user(args.user, password, server_url, secret, admin, args.user_type)
def _read_file(file_path: Any, config_path: str) -> str:
From 199223062aff38936aee50910418ddc81451dc9e Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 18 Jun 2024 16:54:19 +0100
Subject: [PATCH 192/503] Revert "Support MSC3916 by adding a federation
`/download` endpoint" (#17325)
---
changelog.d/17172.feature | 2 -
changelog.d/17325.misc | 1 +
.../federation/transport/server/__init__.py | 24 --
synapse/federation/transport/server/_base.py | 24 +-
.../federation/transport/server/federation.py | 41 ---
synapse/media/_base.py | 63 +----
synapse/media/media_repository.py | 18 +-
synapse/media/media_storage.py | 223 +----------------
synapse/media/storage_provider.py | 40 +--
tests/federation/test_federation_media.py | 234 ------------------
tests/media/test_media_storage.py | 14 +-
11 files changed, 25 insertions(+), 659 deletions(-)
delete mode 100644 changelog.d/17172.feature
create mode 100644 changelog.d/17325.misc
delete mode 100644 tests/federation/test_federation_media.py
diff --git a/changelog.d/17172.feature b/changelog.d/17172.feature
deleted file mode 100644
index 245dea815c..0000000000
--- a/changelog.d/17172.feature
+++ /dev/null
@@ -1,2 +0,0 @@
-Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
-by adding a federation /download endpoint (#17172).
\ No newline at end of file
diff --git a/changelog.d/17325.misc b/changelog.d/17325.misc
new file mode 100644
index 0000000000..1a4ce7ceec
--- /dev/null
+++ b/changelog.d/17325.misc
@@ -0,0 +1 @@
+This is a changelog so tests will run.
\ No newline at end of file
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index 266675c9b8..bac569e977 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -19,7 +19,6 @@
# [This file includes modifications made by New Vector Limited]
#
#
-import inspect
import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Type
@@ -34,7 +33,6 @@ from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet,
FederationUnstableClientKeysClaimServlet,
- FederationUnstableMediaDownloadServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
@@ -317,28 +315,6 @@ def register_servlets(
):
continue
- if servletclass == FederationUnstableMediaDownloadServlet:
- if (
- not hs.config.server.enable_media_repo
- or not hs.config.experimental.msc3916_authenticated_media_enabled
- ):
- continue
-
- # don't load the endpoint if the storage provider is incompatible
- media_repo = hs.get_media_repository()
- load_download_endpoint = True
- for provider in media_repo.media_storage.storage_providers:
- signature = inspect.signature(provider.backend.fetch)
- if "federation" not in signature.parameters:
- logger.warning(
- f"Federation media `/download` endpoint will not be enabled as storage provider {provider.backend} is not compatible with this endpoint."
- )
- load_download_endpoint = False
- break
-
- if not load_download_endpoint:
- continue
-
servletclass(
hs=hs,
authenticator=authenticator,
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index 4e2717b565..db0f5076a9 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -360,29 +360,13 @@ class BaseFederationServlet:
"request"
)
return None
- if (
- func.__self__.__class__.__name__ # type: ignore
- == "FederationUnstableMediaDownloadServlet"
- ):
- response = await func(
- origin, content, request, *args, **kwargs
- )
- else:
- response = await func(
- origin, content, request.args, *args, **kwargs
- )
- else:
- if (
- func.__self__.__class__.__name__ # type: ignore
- == "FederationUnstableMediaDownloadServlet"
- ):
- response = await func(
- origin, content, request, *args, **kwargs
- )
- else:
response = await func(
origin, content, request.args, *args, **kwargs
)
+ else:
+ response = await func(
+ origin, content, request.args, *args, **kwargs
+ )
finally:
# if we used the origin's context as the parent, add a new span using
# the servlet span as a parent, so that we have a link
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index 1f02451efa..a59734785f 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -44,13 +44,10 @@ from synapse.federation.transport.server._base import (
)
from synapse.http.servlet import (
parse_boolean_from_args,
- parse_integer,
parse_integer_from_args,
parse_string_from_args,
parse_strings_from_args,
)
-from synapse.http.site import SynapseRequest
-from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION
from synapse.util.ratelimitutils import FederationRateLimiter
@@ -790,43 +787,6 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
return 200, {"account_statuses": statuses, "failures": failures}
-class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
- """
- Implementation of new federation media `/download` endpoint outlined in MSC3916. Returns
- a multipart/form-data response consisting of a JSON object and the requested media
- item. This endpoint only returns local media.
- """
-
- PATH = "/media/download/(?P[^/]*)"
- PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3916"
- RATELIMIT = True
-
- def __init__(
- self,
- hs: "HomeServer",
- ratelimiter: FederationRateLimiter,
- authenticator: Authenticator,
- server_name: str,
- ):
- super().__init__(hs, authenticator, ratelimiter, server_name)
- self.media_repo = self.hs.get_media_repository()
-
- async def on_GET(
- self,
- origin: Optional[str],
- content: Literal[None],
- request: SynapseRequest,
- media_id: str,
- ) -> None:
- max_timeout_ms = parse_integer(
- request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
- )
- max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
- await self.media_repo.get_local_media(
- request, media_id, None, max_timeout_ms, federation=True
- )
-
-
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationSendServlet,
FederationEventServlet,
@@ -858,5 +818,4 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationV1SendKnockServlet,
FederationMakeKnockServlet,
FederationAccountStatusServlet,
- FederationUnstableMediaDownloadServlet,
)
diff --git a/synapse/media/_base.py b/synapse/media/_base.py
index 19bca94170..3fbed6062f 100644
--- a/synapse/media/_base.py
+++ b/synapse/media/_base.py
@@ -25,16 +25,7 @@ import os
import urllib
from abc import ABC, abstractmethod
from types import TracebackType
-from typing import (
- TYPE_CHECKING,
- Awaitable,
- Dict,
- Generator,
- List,
- Optional,
- Tuple,
- Type,
-)
+from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
import attr
@@ -48,11 +39,6 @@ from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.util.stringutils import is_ascii
-if TYPE_CHECKING:
- from synapse.media.media_storage import MultipartResponder
- from synapse.storage.databases.main.media_repository import LocalMedia
-
-
logger = logging.getLogger(__name__)
# list all text content types that will have the charset default to UTF-8 when
@@ -274,53 +260,6 @@ def _can_encode_filename_as_token(x: str) -> bool:
return True
-async def respond_with_multipart_responder(
- request: SynapseRequest,
- responder: "Optional[MultipartResponder]",
- media_info: "LocalMedia",
-) -> None:
- """
- Responds via a Multipart responder for the federation media `/download` requests
-
- Args:
- request: the federation request to respond to
- responder: the Multipart responder which will send the response
- media_info: metadata about the media item
- """
- if not responder:
- respond_404(request)
- return
-
- # If we have a responder we *must* use it as a context manager.
- with responder:
- if request._disconnected:
- logger.warning(
- "Not sending response to request %s, already disconnected.", request
- )
- return
-
- logger.debug("Responding to media request with responder %s", responder)
- if media_info.media_length is not None:
- request.setHeader(b"Content-Length", b"%d" % (media_info.media_length,))
- request.setHeader(
- b"Content-Type", b"multipart/mixed; boundary=%s" % responder.boundary
- )
-
- try:
- await responder.write_to_consumer(request)
- except Exception as e:
- # The majority of the time this will be due to the client having gone
- # away. Unfortunately, Twisted simply throws a generic exception at us
- # in that case.
- logger.warning("Failed to write to consumer: %s %s", type(e), e)
-
- # Unregister the producer, if it has one, so Twisted doesn't complain
- if request.producer:
- request.unregisterProducer()
-
- finish_request(request)
-
-
async def respond_with_responder(
request: SynapseRequest,
responder: "Optional[Responder]",
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index c335e518a0..6ed56099ca 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -54,11 +54,10 @@ from synapse.media._base import (
ThumbnailInfo,
get_filename_from_headers,
respond_404,
- respond_with_multipart_responder,
respond_with_responder,
)
from synapse.media.filepath import MediaFilePaths
-from synapse.media.media_storage import MediaStorage, MultipartResponder
+from synapse.media.media_storage import MediaStorage
from synapse.media.storage_provider import StorageProviderWrapper
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
from synapse.media.url_previewer import UrlPreviewer
@@ -430,7 +429,6 @@ class MediaRepository:
media_id: str,
name: Optional[str],
max_timeout_ms: int,
- federation: bool = False,
) -> None:
"""Responds to requests for local media, if exists, or returns 404.
@@ -442,7 +440,6 @@ class MediaRepository:
the filename in the Content-Disposition header of the response.
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
- federation: whether the local media being fetched is for a federation request
Returns:
Resolves once a response has successfully been written to request
@@ -462,17 +459,10 @@ class MediaRepository:
file_info = FileInfo(None, media_id, url_cache=bool(url_cache))
- responder = await self.media_storage.fetch_media(
- file_info, media_info, federation
+ responder = await self.media_storage.fetch_media(file_info)
+ await respond_with_responder(
+ request, responder, media_type, media_length, upload_name
)
- if federation:
- # this really should be a Multipart responder but just in case
- assert isinstance(responder, MultipartResponder)
- await respond_with_multipart_responder(request, responder, media_info)
- else:
- await respond_with_responder(
- request, responder, media_type, media_length, upload_name
- )
async def get_remote_media(
self,
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
index 2f55d12b6b..b3cd3fd8f4 100644
--- a/synapse/media/media_storage.py
+++ b/synapse/media/media_storage.py
@@ -19,12 +19,9 @@
#
#
import contextlib
-import json
import logging
import os
import shutil
-from contextlib import closing
-from io import BytesIO
from types import TracebackType
from typing import (
IO,
@@ -33,19 +30,14 @@ from typing import (
AsyncIterator,
BinaryIO,
Callable,
- List,
Optional,
Sequence,
Tuple,
Type,
- Union,
)
-from uuid import uuid4
import attr
-from zope.interface import implementer
-from twisted.internet import defer, interfaces
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender
@@ -56,19 +48,15 @@ from synapse.logging.opentracing import start_active_span, trace, trace_with_opn
from synapse.util import Clock
from synapse.util.file_consumer import BackgroundFileConsumer
-from ..storage.databases.main.media_repository import LocalMedia
-from ..types import JsonDict
from ._base import FileInfo, Responder
from .filepath import MediaFilePaths
if TYPE_CHECKING:
- from synapse.media.storage_provider import StorageProviderWrapper
+ from synapse.media.storage_provider import StorageProvider
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-CRLF = b"\r\n"
-
class MediaStorage:
"""Responsible for storing/fetching files from local sources.
@@ -85,7 +73,7 @@ class MediaStorage:
hs: "HomeServer",
local_media_directory: str,
filepaths: MediaFilePaths,
- storage_providers: Sequence["StorageProviderWrapper"],
+ storage_providers: Sequence["StorageProvider"],
):
self.hs = hs
self.reactor = hs.get_reactor()
@@ -181,23 +169,15 @@ class MediaStorage:
raise e from None
- async def fetch_media(
- self,
- file_info: FileInfo,
- media_info: Optional[LocalMedia] = None,
- federation: bool = False,
- ) -> Optional[Responder]:
+ async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
"""Attempts to fetch media described by file_info from the local cache
and configured storage providers.
Args:
- file_info: Metadata about the media file
- media_info: Metadata about the media item
- federation: Whether this file is being fetched for a federation request
+ file_info
Returns:
- If the file was found returns a Responder (a Multipart Responder if the requested
- file is for the federation /download endpoint), otherwise None.
+ Returns a Responder if the file was found, otherwise None.
"""
paths = [self._file_info_to_path(file_info)]
@@ -217,19 +197,12 @@ class MediaStorage:
local_path = os.path.join(self.local_media_directory, path)
if os.path.exists(local_path):
logger.debug("responding with local file %s", local_path)
- if federation:
- assert media_info is not None
- boundary = uuid4().hex.encode("ascii")
- return MultipartResponder(
- open(local_path, "rb"), media_info, boundary
- )
- else:
- return FileResponder(open(local_path, "rb"))
+ return FileResponder(open(local_path, "rb"))
logger.debug("local file %s did not exist", local_path)
for provider in self.storage_providers:
for path in paths:
- res: Any = await provider.fetch(path, file_info, media_info, federation)
+ res: Any = await provider.fetch(path, file_info)
if res:
logger.debug("Streaming %s from %s", path, provider)
return res
@@ -343,7 +316,7 @@ class FileResponder(Responder):
"""Wraps an open file that can be sent to a request.
Args:
- open_file: A file like object to be streamed to the client,
+ open_file: A file like object to be streamed ot the client,
is closed when finished streaming.
"""
@@ -364,38 +337,6 @@ class FileResponder(Responder):
self.open_file.close()
-class MultipartResponder(Responder):
- """Wraps an open file, formats the response according to MSC3916 and sends it to a
- federation request.
-
- Args:
- open_file: A file like object to be streamed to the client,
- is closed when finished streaming.
- media_info: metadata about the media item
- boundary: bytes to use for the multipart response boundary
- """
-
- def __init__(self, open_file: IO, media_info: LocalMedia, boundary: bytes) -> None:
- self.open_file = open_file
- self.media_info = media_info
- self.boundary = boundary
-
- def write_to_consumer(self, consumer: IConsumer) -> Deferred:
- return make_deferred_yieldable(
- MultipartFileSender().beginFileTransfer(
- self.open_file, consumer, self.media_info.media_type, {}, self.boundary
- )
- )
-
- def __exit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_val: Optional[BaseException],
- exc_tb: Optional[TracebackType],
- ) -> None:
- self.open_file.close()
-
-
class SpamMediaException(NotFoundError):
"""The media was blocked by a spam checker, so we simply 404 the request (in
the same way as if it was quarantined).
@@ -429,151 +370,3 @@ class ReadableFileWrapper:
# We yield to the reactor by sleeping for 0 seconds.
await self.clock.sleep(0)
-
-
-@implementer(interfaces.IProducer)
-class MultipartFileSender:
- """
- A producer that sends the contents of a file to a federation request in the format
- outlined in MSC3916 - a multipart/format-data response where the first field is a
- JSON object and the second is the requested file.
-
- This is a slight re-writing of twisted.protocols.basic.FileSender to achieve the format
- outlined above.
- """
-
- CHUNK_SIZE = 2**14
-
- lastSent = ""
- deferred: Optional[defer.Deferred] = None
-
- def beginFileTransfer(
- self,
- file: IO,
- consumer: IConsumer,
- file_content_type: str,
- json_object: JsonDict,
- boundary: bytes,
- ) -> Deferred:
- """
- Begin transferring a file
-
- Args:
- file: The file object to read data from
- consumer: The synapse request to write the data to
- file_content_type: The content-type of the file
- json_object: The JSON object to write to the first field of the response
- boundary: bytes to be used as the multipart/form-data boundary
-
- Returns: A deferred whose callback will be invoked when the file has
- been completely written to the consumer. The last byte written to the
- consumer is passed to the callback.
- """
- self.file: Optional[IO] = file
- self.consumer = consumer
- self.json_field = json_object
- self.json_field_written = False
- self.content_type_written = False
- self.file_content_type = file_content_type
- self.boundary = boundary
- self.deferred: Deferred = defer.Deferred()
- self.consumer.registerProducer(self, False)
- # while it's not entirely clear why this assignment is necessary, it mirrors
- # the behavior in FileSender.beginFileTransfer and thus is preserved here
- deferred = self.deferred
- return deferred
-
- def resumeProducing(self) -> None:
- # write the first field, which will always be a json field
- if not self.json_field_written:
- self.consumer.write(CRLF + b"--" + self.boundary + CRLF)
-
- content_type = Header(b"Content-Type", b"application/json")
- self.consumer.write(bytes(content_type) + CRLF)
-
- json_field = json.dumps(self.json_field)
- json_bytes = json_field.encode("utf-8")
- self.consumer.write(json_bytes)
- self.consumer.write(CRLF + b"--" + self.boundary + CRLF)
-
- self.json_field_written = True
-
- chunk: Any = ""
- if self.file:
- # if we haven't written the content type yet, do so
- if not self.content_type_written:
- type = self.file_content_type.encode("utf-8")
- content_type = Header(b"Content-Type", type)
- self.consumer.write(bytes(content_type) + CRLF)
- self.content_type_written = True
-
- chunk = self.file.read(self.CHUNK_SIZE)
-
- if not chunk:
- # we've reached the end of the file
- self.consumer.write(CRLF + b"--" + self.boundary + b"--" + CRLF)
- self.file = None
- self.consumer.unregisterProducer()
-
- if self.deferred:
- self.deferred.callback(self.lastSent)
- self.deferred = None
- return
-
- self.consumer.write(chunk)
- self.lastSent = chunk[-1:]
-
- def pauseProducing(self) -> None:
- pass
-
- def stopProducing(self) -> None:
- if self.deferred:
- self.deferred.errback(Exception("Consumer asked us to stop producing"))
- self.deferred = None
-
-
-class Header:
- """
- `Header` This class is a tiny wrapper that produces
- request headers. We can't use standard python header
- class because it encodes unicode fields using =? bla bla ?=
- encoding, which is correct, but no one in HTTP world expects
- that, everyone wants utf-8 raw bytes. (stolen from treq.multipart)
-
- """
-
- def __init__(
- self,
- name: bytes,
- value: Any,
- params: Optional[List[Tuple[Any, Any]]] = None,
- ):
- self.name = name
- self.value = value
- self.params = params or []
-
- def add_param(self, name: Any, value: Any) -> None:
- self.params.append((name, value))
-
- def __bytes__(self) -> bytes:
- with closing(BytesIO()) as h:
- h.write(self.name + b": " + escape(self.value).encode("us-ascii"))
- if self.params:
- for name, val in self.params:
- h.write(b"; ")
- h.write(escape(name).encode("us-ascii"))
- h.write(b"=")
- h.write(b'"' + escape(val).encode("utf-8") + b'"')
- h.seek(0)
- return h.read()
-
-
-def escape(value: Union[str, bytes]) -> str:
- """
- This function prevents header values from corrupting the request,
- a newline in the file name parameter makes form-data request unreadable
- for a majority of parsers. (stolen from treq.multipart)
- """
- if isinstance(value, bytes):
- value = value.decode("utf-8")
- return value.replace("\r", "").replace("\n", "").replace('"', '\\"')
diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py
index a2d50adf65..06e5d27a53 100644
--- a/synapse/media/storage_provider.py
+++ b/synapse/media/storage_provider.py
@@ -24,16 +24,14 @@ import logging
import os
import shutil
from typing import TYPE_CHECKING, Callable, Optional
-from uuid import uuid4
from synapse.config._base import Config
from synapse.logging.context import defer_to_thread, run_in_background
from synapse.logging.opentracing import start_active_span, trace_with_opname
from synapse.util.async_helpers import maybe_awaitable
-from ..storage.databases.main.media_repository import LocalMedia
from ._base import FileInfo, Responder
-from .media_storage import FileResponder, MultipartResponder
+from .media_storage import FileResponder
logger = logging.getLogger(__name__)
@@ -57,21 +55,13 @@ class StorageProvider(metaclass=abc.ABCMeta):
"""
@abc.abstractmethod
- async def fetch(
- self,
- path: str,
- file_info: FileInfo,
- media_info: Optional[LocalMedia] = None,
- federation: bool = False,
- ) -> Optional[Responder]:
+ async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
"""Attempt to fetch the file described by file_info and stream it
into writer.
Args:
path: Relative path of file in local cache
file_info: The metadata of the file.
- media_info: metadata of the media item
- federation: Whether the requested media is for a federation request
Returns:
Returns a Responder if the provider has the file, otherwise returns None.
@@ -134,13 +124,7 @@ class StorageProviderWrapper(StorageProvider):
run_in_background(store)
@trace_with_opname("StorageProviderWrapper.fetch")
- async def fetch(
- self,
- path: str,
- file_info: FileInfo,
- media_info: Optional[LocalMedia] = None,
- federation: bool = False,
- ) -> Optional[Responder]:
+ async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
if file_info.url_cache:
# Files in the URL preview cache definitely aren't stored here,
# so avoid any potentially slow I/O or network access.
@@ -148,9 +132,7 @@ class StorageProviderWrapper(StorageProvider):
# store_file is supposed to return an Awaitable, but guard
# against improper implementations.
- return await maybe_awaitable(
- self.backend.fetch(path, file_info, media_info, federation)
- )
+ return await maybe_awaitable(self.backend.fetch(path, file_info))
class FileStorageProviderBackend(StorageProvider):
@@ -190,23 +172,11 @@ class FileStorageProviderBackend(StorageProvider):
)
@trace_with_opname("FileStorageProviderBackend.fetch")
- async def fetch(
- self,
- path: str,
- file_info: FileInfo,
- media_info: Optional[LocalMedia] = None,
- federation: bool = False,
- ) -> Optional[Responder]:
+ async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
"""See StorageProvider.fetch"""
backup_fname = os.path.join(self.base_directory, path)
if os.path.isfile(backup_fname):
- if federation:
- assert media_info is not None
- boundary = uuid4().hex.encode("ascii")
- return MultipartResponder(
- open(backup_fname, "rb"), media_info, boundary
- )
return FileResponder(open(backup_fname, "rb"))
return None
diff --git a/tests/federation/test_federation_media.py b/tests/federation/test_federation_media.py
deleted file mode 100644
index 1c89d19e99..0000000000
--- a/tests/federation/test_federation_media.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#
-# This file is licensed under the Affero General Public License (AGPL) version 3.
-#
-# Copyright (C) 2024 New Vector, Ltd
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# See the GNU Affero General Public License for more details:
-# .
-#
-# Originally licensed under the Apache License, Version 2.0:
-# .
-#
-# [This file includes modifications made by New Vector Limited]
-#
-#
-import io
-import os
-import shutil
-import tempfile
-from typing import Optional
-
-from twisted.test.proto_helpers import MemoryReactor
-
-from synapse.media._base import FileInfo, Responder
-from synapse.media.filepath import MediaFilePaths
-from synapse.media.media_storage import MediaStorage
-from synapse.media.storage_provider import (
- FileStorageProviderBackend,
- StorageProviderWrapper,
-)
-from synapse.server import HomeServer
-from synapse.storage.databases.main.media_repository import LocalMedia
-from synapse.types import JsonDict, UserID
-from synapse.util import Clock
-
-from tests import unittest
-from tests.test_utils import SMALL_PNG
-from tests.unittest import override_config
-
-
-class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase):
-
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- super().prepare(reactor, clock, hs)
- self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
- self.addCleanup(shutil.rmtree, self.test_dir)
- self.primary_base_path = os.path.join(self.test_dir, "primary")
- self.secondary_base_path = os.path.join(self.test_dir, "secondary")
-
- hs.config.media.media_store_path = self.primary_base_path
-
- storage_providers = [
- StorageProviderWrapper(
- FileStorageProviderBackend(hs, self.secondary_base_path),
- store_local=True,
- store_remote=False,
- store_synchronous=True,
- )
- ]
-
- self.filepaths = MediaFilePaths(self.primary_base_path)
- self.media_storage = MediaStorage(
- hs, self.primary_base_path, self.filepaths, storage_providers
- )
- self.media_repo = hs.get_media_repository()
-
- @override_config(
- {"experimental_features": {"msc3916_authenticated_media_enabled": True}}
- )
- def test_file_download(self) -> None:
- content = io.BytesIO(b"file_to_stream")
- content_uri = self.get_success(
- self.media_repo.create_content(
- "text/plain",
- "test_upload",
- content,
- 46,
- UserID.from_string("@user_id:whatever.org"),
- )
- )
- # test with a text file
- channel = self.make_signed_federation_request(
- "GET",
- f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
- )
- self.pump()
- self.assertEqual(200, channel.code)
-
- content_type = channel.headers.getRawHeaders("content-type")
- assert content_type is not None
- assert "multipart/mixed" in content_type[0]
- assert "boundary" in content_type[0]
-
- # extract boundary
- boundary = content_type[0].split("boundary=")[1]
- # split on boundary and check that json field and expected value exist
- stripped = channel.text_body.split("\r\n" + "--" + boundary)
- # TODO: the json object expected will change once MSC3911 is implemented, currently
- # {} is returned for all requests as a placeholder (per MSC3196)
- found_json = any(
- "\r\nContent-Type: application/json\r\n{}" in field for field in stripped
- )
- self.assertTrue(found_json)
-
- # check that text file and expected value exist
- found_file = any(
- "\r\nContent-Type: text/plain\r\nfile_to_stream" in field
- for field in stripped
- )
- self.assertTrue(found_file)
-
- content = io.BytesIO(SMALL_PNG)
- content_uri = self.get_success(
- self.media_repo.create_content(
- "image/png",
- "test_png_upload",
- content,
- 67,
- UserID.from_string("@user_id:whatever.org"),
- )
- )
- # test with an image file
- channel = self.make_signed_federation_request(
- "GET",
- f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
- )
- self.pump()
- self.assertEqual(200, channel.code)
-
- content_type = channel.headers.getRawHeaders("content-type")
- assert content_type is not None
- assert "multipart/mixed" in content_type[0]
- assert "boundary" in content_type[0]
-
- # extract boundary
- boundary = content_type[0].split("boundary=")[1]
- # split on boundary and check that json field and expected value exist
- body = channel.result.get("body")
- assert body is not None
- stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8"))
- found_json = any(
- b"\r\nContent-Type: application/json\r\n{}" in field
- for field in stripped_bytes
- )
- self.assertTrue(found_json)
-
- # check that png file exists and matches what was uploaded
- found_file = any(SMALL_PNG in field for field in stripped_bytes)
- self.assertTrue(found_file)
-
- @override_config(
- {"experimental_features": {"msc3916_authenticated_media_enabled": False}}
- )
- def test_disable_config(self) -> None:
- content = io.BytesIO(b"file_to_stream")
- content_uri = self.get_success(
- self.media_repo.create_content(
- "text/plain",
- "test_upload",
- content,
- 46,
- UserID.from_string("@user_id:whatever.org"),
- )
- )
- channel = self.make_signed_federation_request(
- "GET",
- f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
- )
- self.pump()
- self.assertEqual(404, channel.code)
- self.assertEqual(channel.json_body.get("errcode"), "M_UNRECOGNIZED")
-
-
-class FakeFileStorageProviderBackend:
- """
- Fake storage provider stub with incompatible `fetch` signature for testing
- """
-
- def __init__(self, hs: "HomeServer", config: str):
- self.hs = hs
- self.cache_directory = hs.config.media.media_store_path
- self.base_directory = config
-
- def __str__(self) -> str:
- return "FakeFileStorageProviderBackend[%s]" % (self.base_directory,)
-
- async def fetch(
- self, path: str, file_info: FileInfo, media_info: Optional[LocalMedia] = None
- ) -> Optional[Responder]:
- pass
-
-
-TEST_DIR = tempfile.mkdtemp(prefix="synapse-tests-")
-
-
-class FederationUnstableMediaEndpointCompatibilityTest(
- unittest.FederatingHomeserverTestCase
-):
-
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- super().prepare(reactor, clock, hs)
- self.test_dir = TEST_DIR
- self.addCleanup(shutil.rmtree, self.test_dir)
- self.media_repo = hs.get_media_repository()
-
- def default_config(self) -> JsonDict:
- config = super().default_config()
- primary_base_path = os.path.join(TEST_DIR, "primary")
- config["media_storage_providers"] = [
- {
- "module": "tests.federation.test_federation_media.FakeFileStorageProviderBackend",
- "store_local": "True",
- "store_remote": "False",
- "store_synchronous": "False",
- "config": {"directory": primary_base_path},
- }
- ]
- return config
-
- @override_config(
- {"experimental_features": {"msc3916_authenticated_media_enabled": True}}
- )
- def test_incompatible_storage_provider_fails_to_load_endpoint(self) -> None:
- channel = self.make_signed_federation_request(
- "GET",
- "/_matrix/federation/unstable/org.matrix.msc3916/media/download/xyz",
- )
- self.pump()
- self.assertEqual(404, channel.code)
- self.assertEqual(channel.json_body.get("errcode"), "M_UNRECOGNIZED")
diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
index 47a89e9c66..46d20ce775 100644
--- a/tests/media/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -49,10 +49,7 @@ from synapse.logging.context import make_deferred_yieldable
from synapse.media._base import FileInfo, ThumbnailInfo
from synapse.media.filepath import MediaFilePaths
from synapse.media.media_storage import MediaStorage, ReadableFileWrapper
-from synapse.media.storage_provider import (
- FileStorageProviderBackend,
- StorageProviderWrapper,
-)
+from synapse.media.storage_provider import FileStorageProviderBackend
from synapse.media.thumbnailer import ThumbnailProvider
from synapse.module_api import ModuleApi
from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
@@ -81,14 +78,7 @@ class MediaStorageTests(unittest.HomeserverTestCase):
hs.config.media.media_store_path = self.primary_base_path
- storage_providers = [
- StorageProviderWrapper(
- FileStorageProviderBackend(hs, self.secondary_base_path),
- store_local=True,
- store_remote=False,
- store_synchronous=True,
- )
- ]
+ storage_providers = [FileStorageProviderBackend(hs, self.secondary_base_path)]
self.filepaths = MediaFilePaths(self.primary_base_path)
self.media_storage = MediaStorage(
From afaf2d9388f7012d0500932dad0af4bdb8d40d20 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 19 Jun 2024 10:05:39 +0100
Subject: [PATCH 193/503] Require the 'from' parameter for `/notifications` be
an integer (#17283)
Co-authored-by: Erik Johnston
---
changelog.d/17283.bugfix | 1 +
synapse/rest/client/notifications.py | 18 +-
.../databases/main/event_push_actions.py | 2 +-
tests/module_api/test_api.py | 2 +-
tests/rest/client/test_notifications.py | 171 ++++++++++++++++--
5 files changed, 173 insertions(+), 21 deletions(-)
create mode 100644 changelog.d/17283.bugfix
diff --git a/changelog.d/17283.bugfix b/changelog.d/17283.bugfix
new file mode 100644
index 0000000000..98c1f05cc2
--- /dev/null
+++ b/changelog.d/17283.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error.
\ No newline at end of file
diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py
index be9b584748..168ce50d3f 100644
--- a/synapse/rest/client/notifications.py
+++ b/synapse/rest/client/notifications.py
@@ -32,6 +32,7 @@ from synapse.http.servlet import RestServlet, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.types import JsonDict
+from ...api.errors import SynapseError
from ._base import client_patterns
if TYPE_CHECKING:
@@ -56,7 +57,22 @@ class NotificationsServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
- from_token = parse_string(request, "from", required=False)
+ # While this is intended to be "string" to clients, the 'from' token
+ # is actually based on a numeric ID. So it must parse to an int.
+ from_token_str = parse_string(request, "from", required=False)
+ if from_token_str is not None:
+ # Parse to an integer.
+ try:
+ from_token = int(from_token_str)
+ except ValueError:
+ # If it doesn't parse to an integer, then this cannot possibly be a valid
+ # pagination token, as we only hand out integers.
+ raise SynapseError(
+ 400, 'Query parameter "from" contains unrecognised token'
+ )
+ else:
+ from_token = None
+
limit = parse_integer(request, "limit", default=50)
only = parse_string(request, "only", required=False)
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index bdd0781c48..0ebf5b53d5 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -1829,7 +1829,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
async def get_push_actions_for_user(
self,
user_id: str,
- before: Optional[str] = None,
+ before: Optional[int] = None,
limit: int = 50,
only_highlight: bool = False,
) -> List[UserPushAction]:
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 5eb1406a06..b6ba472d7d 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -688,7 +688,7 @@ class ModuleApiTestCase(BaseModuleApiTestCase):
channel = self.make_request(
"GET",
- "/notifications?from=",
+ "/notifications",
access_token=tok,
)
self.assertEqual(channel.code, 200, channel.result)
diff --git a/tests/rest/client/test_notifications.py b/tests/rest/client/test_notifications.py
index e9aa2e450e..e4b0455ce8 100644
--- a/tests/rest/client/test_notifications.py
+++ b/tests/rest/client/test_notifications.py
@@ -18,6 +18,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
+from typing import List, Optional, Tuple
from unittest.mock import AsyncMock, Mock
from twisted.test.proto_helpers import MemoryReactor
@@ -48,6 +49,14 @@ class HTTPPusherTests(HomeserverTestCase):
self.sync_handler = homeserver.get_sync_handler()
self.auth_handler = homeserver.get_auth_handler()
+ self.user_id = self.register_user("user", "pass")
+ self.access_token = self.login("user", "pass")
+ self.other_user_id = self.register_user("otheruser", "pass")
+ self.other_access_token = self.login("otheruser", "pass")
+
+ # Create a room
+ self.room_id = self.helper.create_room_as(self.user_id, tok=self.access_token)
+
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
# Mock out the calls over federation.
fed_transport_client = Mock(spec=["send_transaction"])
@@ -61,32 +70,22 @@ class HTTPPusherTests(HomeserverTestCase):
"""
Local users will get notified for invites
"""
-
- user_id = self.register_user("user", "pass")
- access_token = self.login("user", "pass")
- other_user_id = self.register_user("otheruser", "pass")
- other_access_token = self.login("otheruser", "pass")
-
- # Create a room
- room = self.helper.create_room_as(user_id, tok=access_token)
-
# Check we start with no pushes
- channel = self.make_request(
- "GET",
- "/notifications",
- access_token=other_access_token,
- )
- self.assertEqual(channel.code, 200, channel.result)
- self.assertEqual(len(channel.json_body["notifications"]), 0, channel.json_body)
+ self._request_notifications(from_token=None, limit=1, expected_count=0)
# Send an invite
- self.helper.invite(room=room, src=user_id, targ=other_user_id, tok=access_token)
+ self.helper.invite(
+ room=self.room_id,
+ src=self.user_id,
+ targ=self.other_user_id,
+ tok=self.access_token,
+ )
# We should have a notification now
channel = self.make_request(
"GET",
"/notifications",
- access_token=other_access_token,
+ access_token=self.other_access_token,
)
self.assertEqual(channel.code, 200)
self.assertEqual(len(channel.json_body["notifications"]), 1, channel.json_body)
@@ -95,3 +94,139 @@ class HTTPPusherTests(HomeserverTestCase):
"invite",
channel.json_body,
)
+
+ def test_pagination_of_notifications(self) -> None:
+ """
+ Check that pagination of notifications works.
+ """
+ # Check we start with no pushes
+ self._request_notifications(from_token=None, limit=1, expected_count=0)
+
+ # Send an invite and have the other user join the room.
+ self.helper.invite(
+ room=self.room_id,
+ src=self.user_id,
+ targ=self.other_user_id,
+ tok=self.access_token,
+ )
+ self.helper.join(self.room_id, self.other_user_id, tok=self.other_access_token)
+
+ # Send 5 messages in the room and note down their event IDs.
+ sent_event_ids = []
+ for _ in range(5):
+ resp = self.helper.send_event(
+ self.room_id,
+ "m.room.message",
+ {"body": "honk", "msgtype": "m.text"},
+ tok=self.access_token,
+ )
+ sent_event_ids.append(resp["event_id"])
+
+ # We expect to get notifications for messages in reverse order.
+ # So reverse this list of event IDs to make it easier to compare
+ # against later.
+ sent_event_ids.reverse()
+
+ # We should have a few notifications now. Let's try and fetch the first 2.
+ notification_event_ids, _ = self._request_notifications(
+ from_token=None, limit=2, expected_count=2
+ )
+
+ # Check we got the expected event IDs back.
+ self.assertEqual(notification_event_ids, sent_event_ids[:2])
+
+ # Try requesting again without a 'from' query parameter. We should get the
+ # same two notifications back.
+ notification_event_ids, next_token = self._request_notifications(
+ from_token=None, limit=2, expected_count=2
+ )
+ self.assertEqual(notification_event_ids, sent_event_ids[:2])
+
+ # Ask for the next 5 notifications, though there should only be
+ # 4 remaining; the next 3 messages and the invite.
+ #
+ # We need to use the "next_token" from the response as the "from"
+ # query parameter in the next request in order to paginate.
+ notification_event_ids, next_token = self._request_notifications(
+ from_token=next_token, limit=5, expected_count=4
+ )
+ # Ensure we chop off the invite on the end.
+ notification_event_ids = notification_event_ids[:-1]
+ self.assertEqual(notification_event_ids, sent_event_ids[2:])
+
+ def _request_notifications(
+ self, from_token: Optional[str], limit: int, expected_count: int
+ ) -> Tuple[List[str], str]:
+ """
+ Make a request to /notifications to get the latest events to be notified about.
+
+ Only the event IDs are returned. The request is made by the "other user".
+
+ Args:
+ from_token: An optional starting parameter.
+ limit: The maximum number of results to return.
+ expected_count: The number of events to expect in the response.
+
+ Returns:
+ A list of event IDs that the client should be notified about.
+ Events are returned newest-first.
+ """
+ # Construct the request path.
+ path = f"/notifications?limit={limit}"
+ if from_token is not None:
+ path += f"&from={from_token}"
+
+ channel = self.make_request(
+ "GET",
+ path,
+ access_token=self.other_access_token,
+ )
+
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ len(channel.json_body["notifications"]), expected_count, channel.json_body
+ )
+
+ # Extract the necessary data from the response.
+ next_token = channel.json_body["next_token"]
+ event_ids = [
+ event["event"]["event_id"] for event in channel.json_body["notifications"]
+ ]
+
+ return event_ids, next_token
+
+ def test_parameters(self) -> None:
+ """
+ Test that appropriate errors are returned when query parameters are malformed.
+ """
+ # Test that no parameters are required.
+ channel = self.make_request(
+ "GET",
+ "/notifications",
+ access_token=self.other_access_token,
+ )
+ self.assertEqual(channel.code, 200)
+
+ # Test that limit cannot be negative
+ channel = self.make_request(
+ "GET",
+ "/notifications?limit=-1",
+ access_token=self.other_access_token,
+ )
+ self.assertEqual(channel.code, 400)
+
+ # Test that the 'limit' parameter must be an integer.
+ channel = self.make_request(
+ "GET",
+ "/notifications?limit=foobar",
+ access_token=self.other_access_token,
+ )
+ self.assertEqual(channel.code, 400)
+
+ # Test that the 'from' parameter must be an integer.
+ channel = self.make_request(
+ "GET",
+ "/notifications?from=osborne",
+ access_token=self.other_access_token,
+ )
+ self.assertEqual(channel.code, 400)
From bdf82efea505c488953b46eb681b5a63c4e9655d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 19 Jun 2024 10:33:53 +0100
Subject: [PATCH 194/503] Handle large chain calc better (#17291)
We calculate the auth chain links outside of the main persist event
transaction to ensure that we do not block other event sending during
the calculation.
---
changelog.d/17291.misc | 1 +
synapse/storage/controllers/persist_events.py | 12 +
synapse/storage/databases/main/events.py | 261 +++++++++++++-----
tests/storage/test_event_chain.py | 9 +-
tests/storage/test_event_federation.py | 41 ++-
5 files changed, 236 insertions(+), 88 deletions(-)
create mode 100644 changelog.d/17291.misc
diff --git a/changelog.d/17291.misc b/changelog.d/17291.misc
new file mode 100644
index 0000000000..b1f89a324d
--- /dev/null
+++ b/changelog.d/17291.misc
@@ -0,0 +1 @@
+Do not block event sending/receiving while calculating large event auth chains.
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index 84699a2ee1..d0e015bf19 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -617,6 +617,17 @@ class EventsPersistenceStorageController:
room_id, chunk
)
+ with Measure(self._clock, "calculate_chain_cover_index_for_events"):
+ # We now calculate chain ID/sequence numbers for any state events we're
+ # persisting. We ignore out of band memberships as we're not in the room
+ # and won't have their auth chain (we'll fix it up later if we join the
+ # room).
+ #
+ # See: docs/auth_chain_difference_algorithm.md
+ new_event_links = await self.persist_events_store.calculate_chain_cover_index_for_events(
+ room_id, [e for e, _ in chunk]
+ )
+
await self.persist_events_store._persist_events_and_state_updates(
room_id,
chunk,
@@ -624,6 +635,7 @@ class EventsPersistenceStorageController:
new_forward_extremities=new_forward_extremities,
use_negative_stream_ordering=backfilled,
inhibit_local_membership_updates=backfilled,
+ new_event_links=new_event_links,
)
return replaced_events
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 66428e6c8e..c6df13c064 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -34,7 +34,6 @@ from typing import (
Optional,
Set,
Tuple,
- Union,
cast,
)
@@ -100,6 +99,23 @@ class DeltaState:
return not self.to_delete and not self.to_insert and not self.no_longer_in_room
+@attr.s(slots=True, auto_attribs=True)
+class NewEventChainLinks:
+ """Information about new auth chain links that need to be added to the DB.
+
+ Attributes:
+ chain_id, sequence_number: the IDs corresponding to the event being
+ inserted, and the starting point of the links
+ links: Lists the links that need to be added, 2-tuple of the chain
+ ID/sequence number of the end point of the link.
+ """
+
+ chain_id: int
+ sequence_number: int
+
+ links: List[Tuple[int, int]] = attr.Factory(list)
+
+
class PersistEventsStore:
"""Contains all the functions for writing events to the database.
@@ -148,6 +164,7 @@ class PersistEventsStore:
*,
state_delta_for_room: Optional[DeltaState],
new_forward_extremities: Optional[Set[str]],
+ new_event_links: Dict[str, NewEventChainLinks],
use_negative_stream_ordering: bool = False,
inhibit_local_membership_updates: bool = False,
) -> None:
@@ -217,6 +234,7 @@ class PersistEventsStore:
inhibit_local_membership_updates=inhibit_local_membership_updates,
state_delta_for_room=state_delta_for_room,
new_forward_extremities=new_forward_extremities,
+ new_event_links=new_event_links,
)
persist_event_counter.inc(len(events_and_contexts))
@@ -243,6 +261,87 @@ class PersistEventsStore:
(room_id,), frozenset(new_forward_extremities)
)
+ async def calculate_chain_cover_index_for_events(
+ self, room_id: str, events: Collection[EventBase]
+ ) -> Dict[str, NewEventChainLinks]:
+ # Filter to state events, and ensure there are no duplicates.
+ state_events = []
+ seen_events = set()
+ for event in events:
+ if not event.is_state() or event.event_id in seen_events:
+ continue
+
+ state_events.append(event)
+ seen_events.add(event.event_id)
+
+ if not state_events:
+ return {}
+
+ return await self.db_pool.runInteraction(
+ "_calculate_chain_cover_index_for_events",
+ self.calculate_chain_cover_index_for_events_txn,
+ room_id,
+ state_events,
+ )
+
+ def calculate_chain_cover_index_for_events_txn(
+ self, txn: LoggingTransaction, room_id: str, state_events: Collection[EventBase]
+ ) -> Dict[str, NewEventChainLinks]:
+ # We now calculate chain ID/sequence numbers for any state events we're
+ # persisting. We ignore out of band memberships as we're not in the room
+ # and won't have their auth chain (we'll fix it up later if we join the
+ # room).
+ #
+ # See: docs/auth_chain_difference_algorithm.md
+
+ # We ignore legacy rooms that we aren't filling the chain cover index
+ # for.
+ row = self.db_pool.simple_select_one_txn(
+ txn,
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ retcols=("room_id", "has_auth_chain_index"),
+ allow_none=True,
+ )
+ if row is None:
+ return {}
+
+ # Filter out already persisted events.
+ rows = self.db_pool.simple_select_many_txn(
+ txn,
+ table="events",
+ column="event_id",
+ iterable=[e.event_id for e in state_events],
+ keyvalues={},
+ retcols=("event_id",),
+ )
+ already_persisted_events = {event_id for event_id, in rows}
+ state_events = [
+ event
+ for event in state_events
+ if event.event_id in already_persisted_events
+ ]
+
+ if not state_events:
+ return {}
+
+ # We need to know the type/state_key and auth events of the events we're
+ # calculating chain IDs for. We don't rely on having the full Event
+ # instances as we'll potentially be pulling more events from the DB and
+ # we don't need the overhead of fetching/parsing the full event JSON.
+ event_to_types = {e.event_id: (e.type, e.state_key) for e in state_events}
+ event_to_auth_chain = {e.event_id: e.auth_event_ids() for e in state_events}
+ event_to_room_id = {e.event_id: e.room_id for e in state_events}
+
+ return self._calculate_chain_cover_index(
+ txn,
+ self.db_pool,
+ self.store.event_chain_id_gen,
+ event_to_room_id,
+ event_to_types,
+ event_to_auth_chain,
+ )
+
async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]:
"""Filter the supplied list of event_ids to get those which are prev_events of
existing (non-outlier/rejected) events.
@@ -358,6 +457,7 @@ class PersistEventsStore:
inhibit_local_membership_updates: bool,
state_delta_for_room: Optional[DeltaState],
new_forward_extremities: Optional[Set[str]],
+ new_event_links: Dict[str, NewEventChainLinks],
) -> None:
"""Insert some number of room events into the necessary database tables.
@@ -466,7 +566,9 @@ class PersistEventsStore:
# Insert into event_to_state_groups.
self._store_event_state_mappings_txn(txn, events_and_contexts)
- self._persist_event_auth_chain_txn(txn, [e for e, _ in events_and_contexts])
+ self._persist_event_auth_chain_txn(
+ txn, [e for e, _ in events_and_contexts], new_event_links
+ )
# _store_rejected_events_txn filters out any events which were
# rejected, and returns the filtered list.
@@ -496,6 +598,7 @@ class PersistEventsStore:
self,
txn: LoggingTransaction,
events: List[EventBase],
+ new_event_links: Dict[str, NewEventChainLinks],
) -> None:
# We only care about state events, so this if there are no state events.
if not any(e.is_state() for e in events):
@@ -519,59 +622,8 @@ class PersistEventsStore:
],
)
- # We now calculate chain ID/sequence numbers for any state events we're
- # persisting. We ignore out of band memberships as we're not in the room
- # and won't have their auth chain (we'll fix it up later if we join the
- # room).
- #
- # See: docs/auth_chain_difference_algorithm.md
-
- # We ignore legacy rooms that we aren't filling the chain cover index
- # for.
- rows = cast(
- List[Tuple[str, Optional[Union[int, bool]]]],
- self.db_pool.simple_select_many_txn(
- txn,
- table="rooms",
- column="room_id",
- iterable={event.room_id for event in events if event.is_state()},
- keyvalues={},
- retcols=("room_id", "has_auth_chain_index"),
- ),
- )
- rooms_using_chain_index = {
- room_id for room_id, has_auth_chain_index in rows if has_auth_chain_index
- }
-
- state_events = {
- event.event_id: event
- for event in events
- if event.is_state() and event.room_id in rooms_using_chain_index
- }
-
- if not state_events:
- return
-
- # We need to know the type/state_key and auth events of the events we're
- # calculating chain IDs for. We don't rely on having the full Event
- # instances as we'll potentially be pulling more events from the DB and
- # we don't need the overhead of fetching/parsing the full event JSON.
- event_to_types = {
- e.event_id: (e.type, e.state_key) for e in state_events.values()
- }
- event_to_auth_chain = {
- e.event_id: e.auth_event_ids() for e in state_events.values()
- }
- event_to_room_id = {e.event_id: e.room_id for e in state_events.values()}
-
- self._add_chain_cover_index(
- txn,
- self.db_pool,
- self.store.event_chain_id_gen,
- event_to_room_id,
- event_to_types,
- event_to_auth_chain,
- )
+ if new_event_links:
+ self._persist_chain_cover_index(txn, self.db_pool, new_event_links)
@classmethod
def _add_chain_cover_index(
@@ -583,6 +635,35 @@ class PersistEventsStore:
event_to_types: Dict[str, Tuple[str, str]],
event_to_auth_chain: Dict[str, StrCollection],
) -> None:
+ """Calculate and persist the chain cover index for the given events.
+
+ Args:
+ event_to_room_id: Event ID to the room ID of the event
+ event_to_types: Event ID to type and state_key of the event
+ event_to_auth_chain: Event ID to list of auth event IDs of the
+ event (events with no auth events can be excluded).
+ """
+
+ new_event_links = cls._calculate_chain_cover_index(
+ txn,
+ db_pool,
+ event_chain_id_gen,
+ event_to_room_id,
+ event_to_types,
+ event_to_auth_chain,
+ )
+ cls._persist_chain_cover_index(txn, db_pool, new_event_links)
+
+ @classmethod
+ def _calculate_chain_cover_index(
+ cls,
+ txn: LoggingTransaction,
+ db_pool: DatabasePool,
+ event_chain_id_gen: SequenceGenerator,
+ event_to_room_id: Dict[str, str],
+ event_to_types: Dict[str, Tuple[str, str]],
+ event_to_auth_chain: Dict[str, StrCollection],
+ ) -> Dict[str, NewEventChainLinks]:
"""Calculate the chain cover index for the given events.
Args:
@@ -590,6 +671,10 @@ class PersistEventsStore:
event_to_types: Event ID to type and state_key of the event
event_to_auth_chain: Event ID to list of auth event IDs of the
event (events with no auth events can be excluded).
+
+ Returns:
+ A mapping with any new auth chain links we need to add, keyed by
+ event ID.
"""
# Map from event ID to chain ID/sequence number.
@@ -708,11 +793,11 @@ class PersistEventsStore:
room_id = event_to_room_id.get(event_id)
if room_id:
e_type, state_key = event_to_types[event_id]
- db_pool.simple_insert_txn(
+ db_pool.simple_upsert_txn(
txn,
table="event_auth_chain_to_calculate",
+ keyvalues={"event_id": event_id},
values={
- "event_id": event_id,
"room_id": room_id,
"type": e_type,
"state_key": state_key,
@@ -724,7 +809,7 @@ class PersistEventsStore:
break
if not events_to_calc_chain_id_for:
- return
+ return {}
# Allocate chain ID/sequence numbers to each new event.
new_chain_tuples = cls._allocate_chain_ids(
@@ -739,23 +824,10 @@ class PersistEventsStore:
)
chain_map.update(new_chain_tuples)
- db_pool.simple_insert_many_txn(
- txn,
- table="event_auth_chains",
- keys=("event_id", "chain_id", "sequence_number"),
- values=[
- (event_id, c_id, seq)
- for event_id, (c_id, seq) in new_chain_tuples.items()
- ],
- )
-
- db_pool.simple_delete_many_txn(
- txn,
- table="event_auth_chain_to_calculate",
- keyvalues={},
- column="event_id",
- values=new_chain_tuples,
- )
+ to_return = {
+ event_id: NewEventChainLinks(chain_id, sequence_number)
+ for event_id, (chain_id, sequence_number) in new_chain_tuples.items()
+ }
# Now we need to calculate any new links between chains caused by
# the new events.
@@ -825,10 +897,38 @@ class PersistEventsStore:
auth_chain_id, auth_sequence_number = chain_map[auth_id]
# Step 2a, add link between the event and auth event
+ to_return[event_id].links.append((auth_chain_id, auth_sequence_number))
chain_links.add_link(
(chain_id, sequence_number), (auth_chain_id, auth_sequence_number)
)
+ return to_return
+
+ @classmethod
+ def _persist_chain_cover_index(
+ cls,
+ txn: LoggingTransaction,
+ db_pool: DatabasePool,
+ new_event_links: Dict[str, NewEventChainLinks],
+ ) -> None:
+ db_pool.simple_insert_many_txn(
+ txn,
+ table="event_auth_chains",
+ keys=("event_id", "chain_id", "sequence_number"),
+ values=[
+ (event_id, new_links.chain_id, new_links.sequence_number)
+ for event_id, new_links in new_event_links.items()
+ ],
+ )
+
+ db_pool.simple_delete_many_txn(
+ txn,
+ table="event_auth_chain_to_calculate",
+ keyvalues={},
+ column="event_id",
+ values=new_event_links,
+ )
+
db_pool.simple_insert_many_txn(
txn,
table="event_auth_chain_links",
@@ -838,7 +938,16 @@ class PersistEventsStore:
"target_chain_id",
"target_sequence_number",
),
- values=list(chain_links.get_additions()),
+ values=[
+ (
+ new_links.chain_id,
+ new_links.sequence_number,
+ target_chain_id,
+ target_sequence_number,
+ )
+ for new_links in new_event_links.values()
+ for (target_chain_id, target_sequence_number) in new_links.links
+ ],
)
@staticmethod
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 81feb3ec29..c4e216c308 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -447,7 +447,14 @@ class EventChainStoreTestCase(HomeserverTestCase):
)
# Actually call the function that calculates the auth chain stuff.
- persist_events_store._persist_event_auth_chain_txn(txn, events)
+ new_event_links = (
+ persist_events_store.calculate_chain_cover_index_for_events_txn(
+ txn, events[0].room_id, [e for e in events if e.is_state()]
+ )
+ )
+ persist_events_store._persist_event_auth_chain_txn(
+ txn, events, new_event_links
+ )
self.get_success(
persist_events_store.db_pool.runInteraction(
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 0a6253e22c..1832a23714 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -365,12 +365,19 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
},
)
+ events = [
+ cast(EventBase, FakeEvent(event_id, room_id, AUTH_GRAPH[event_id]))
+ for event_id in AUTH_GRAPH
+ ]
+ new_event_links = (
+ self.persist_events.calculate_chain_cover_index_for_events_txn(
+ txn, room_id, [e for e in events if e.is_state()]
+ )
+ )
self.persist_events._persist_event_auth_chain_txn(
txn,
- [
- cast(EventBase, FakeEvent(event_id, room_id, AUTH_GRAPH[event_id]))
- for event_id in AUTH_GRAPH
- ],
+ events,
+ new_event_links,
)
self.get_success(
@@ -628,13 +635,20 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
)
# Insert all events apart from 'B'
+ events = [
+ cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
+ for event_id in auth_graph
+ if event_id != "b"
+ ]
+ new_event_links = (
+ self.persist_events.calculate_chain_cover_index_for_events_txn(
+ txn, room_id, [e for e in events if e.is_state()]
+ )
+ )
self.persist_events._persist_event_auth_chain_txn(
txn,
- [
- cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
- for event_id in auth_graph
- if event_id != "b"
- ],
+ events,
+ new_event_links,
)
# Now we insert the event 'B' without a chain cover, by temporarily
@@ -647,9 +661,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
updatevalues={"has_auth_chain_index": False},
)
+ events = [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))]
+ new_event_links = (
+ self.persist_events.calculate_chain_cover_index_for_events_txn(
+ txn, room_id, [e for e in events if e.is_state()]
+ )
+ )
self.persist_events._persist_event_auth_chain_txn(
- txn,
- [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))],
+ txn, events, new_event_links
)
self.store.db_pool.simple_update_txn(
From 7ef89b985d2feedb9f76e0524cdf5117bc6a96c1 Mon Sep 17 00:00:00 2001
From: Aaron Dewes
Date: Wed, 19 Jun 2024 11:58:06 +0200
Subject: [PATCH 195/503] Remove `expire_access_token` from Docker
configuration (#17198)
Co-authored-by: Andrew Morgan
---
changelog.d/17198.misc | 1 +
docker/conf/homeserver.yaml | 1 -
2 files changed, 1 insertion(+), 1 deletion(-)
create mode 100644 changelog.d/17198.misc
diff --git a/changelog.d/17198.misc b/changelog.d/17198.misc
new file mode 100644
index 0000000000..8973eb2bac
--- /dev/null
+++ b/changelog.d/17198.misc
@@ -0,0 +1 @@
+Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes.
\ No newline at end of file
diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index c412ba2e87..2890990705 100644
--- a/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
@@ -176,7 +176,6 @@ app_service_config_files:
{% endif %}
macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
-expire_access_token: False
## Signing Keys ##
From a412a5829d3c5daa876f45f82c5018b13a1a2fc4 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 19 Jun 2024 10:58:22 +0100
Subject: [PATCH 196/503] Update the README with Element branding and a few
fixes (#17324)
Co-authored-by: Hugh Nimmo-Smith
---
README.rst | 71 ++++++++++++++++++++++++++++--------------
changelog.d/17324.misc | 1 +
2 files changed, 49 insertions(+), 23 deletions(-)
create mode 100644 changelog.d/17324.misc
diff --git a/README.rst b/README.rst
index d13dc0cb78..db9b79a237 100644
--- a/README.rst
+++ b/README.rst
@@ -1,21 +1,34 @@
-=========================================================================
-Synapse |support| |development| |documentation| |license| |pypi| |python|
-=========================================================================
+.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7
+ :height: 60px
-Synapse is an open-source `Matrix `_ homeserver written and
-maintained by the Matrix.org Foundation. We began rapid development in 2014,
-reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
-in earnest today.
+===========================================================================================================
+Element Synapse - Matrix homeserver implementation |support| |development| |documentation| |license| |pypi| |python|
+===========================================================================================================
-Briefly, Matrix is an open standard for communications on the internet, supporting
-federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
-Matrix project `_, and the `formal specification
- `_ describes the technical details.
+Synapse is an open source `Matrix `_ homeserver
+implementation, written and maintained by `Element `_.
+`Matrix `_ is the open standard for
+secure and interoperable real time communications. You can directly run
+and manage the source code in this repository, available under an AGPL
+license. There is no support provided from Element unless you have a
+subscription.
+
+Subscription alternative
+------------------------
+
+Alternatively, for those that need an enterprise-ready solution, Element
+Server Suite (ESS) is `available as a subscription `_.
+ESS builds on Synapse to offer a complete Matrix-based backend including the full
+`Admin Console product `_,
+giving admins the power to easily manage an organization-wide
+deployment. It includes advanced identity management, auditing,
+moderation and data retention options as well as Long Term Support and
+SLAs. ESS can be used to support any Matrix-based frontend client.
.. contents::
-Installing and configuration
-============================
+🛠️ Installing and configuration
+===============================
The Synapse documentation describes `how to install Synapse `_. We recommend using
`Docker images `_ or `Debian packages from Matrix.org
@@ -105,8 +118,8 @@ Following this advice ensures that even if an XSS is found in Synapse, the
impact to other applications will be minimal.
-Testing a new installation
-==========================
+🧪 Testing a new installation
+============================
The easiest way to try out your new Synapse installation is by connecting to it
from a web client.
@@ -159,8 +172,20 @@ the form of::
As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.
-Troubleshooting and support
-===========================
+🎯 Troubleshooting and support
+=============================
+
+🚀 Professional support
+----------------------
+
+Enterprise quality support for Synapse including SLAs is available as part of an
+`Element Server Suite (ESS) `_ subscription.
+
+If you are an existing ESS subscriber then you can raise a `support request `_
+and access the `knowledge base `_.
+
+🤝 Community support
+-------------------
The `Admin FAQ `_
includes tips on dealing with some common problems. For more details, see
@@ -176,8 +201,8 @@ issues for support requests, only for bug reports and feature requests.
.. |docs| replace:: ``docs``
.. _docs: docs
-Identity Servers
-================
+🪪 Identity Servers
+==================
Identity servers have the job of mapping email addresses and other 3rd Party
IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
@@ -206,8 +231,8 @@ an email address with your account, or send an invite to another user via their
email address.
-Development
-===========
+🛠️ Development
+==============
We welcome contributions to Synapse from the community!
The best place to get started is our
@@ -225,8 +250,8 @@ Alongside all that, join our developer community on Matrix:
`#synapse-dev:matrix.org `_, featuring real humans!
-.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
- :alt: (get support on #synapse:matrix.org)
+.. |support| image:: https://img.shields.io/badge/matrix-community%20support-success
+ :alt: (get community support in #synapse:matrix.org)
:target: https://matrix.to/#/#synapse:matrix.org
.. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
diff --git a/changelog.d/17324.misc b/changelog.d/17324.misc
new file mode 100644
index 0000000000..c0d7196ee0
--- /dev/null
+++ b/changelog.d/17324.misc
@@ -0,0 +1 @@
+Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering.
\ No newline at end of file
From 9104a9f0d05fef9718ae4611b164b6dd1d85243c Mon Sep 17 00:00:00 2001
From: Alexander Fechler <141915399+afechler@users.noreply.github.com>
Date: Wed, 19 Jun 2024 12:45:48 +0200
Subject: [PATCH 197/503] Filter added to Admin-API GET /rooms (#17276)
---
changelog.d/17276.feature | 1 +
docs/admin_api/rooms.md | 4 ++
synapse/rest/admin/rooms.py | 13 ++++-
synapse/storage/databases/main/room.py | 51 ++++++++++++-----
tests/rest/admin/test_room.py | 77 ++++++++++++++++++++++++++
5 files changed, 131 insertions(+), 15 deletions(-)
create mode 100644 changelog.d/17276.feature
diff --git a/changelog.d/17276.feature b/changelog.d/17276.feature
new file mode 100644
index 0000000000..a1edfae0aa
--- /dev/null
+++ b/changelog.d/17276.feature
@@ -0,0 +1 @@
+Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api).
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 6935ec4a45..8e3a367e90 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -36,6 +36,10 @@ The following query parameters are available:
- the room's name,
- the local part of the room's canonical alias, or
- the complete (local and server part) room's id (case sensitive).
+* `public_rooms` - Optional flag to filter public rooms. If `true`, only public rooms are queried. If `false`, public rooms are excluded from
+ the query. When the flag is absent (the default), **both** public and non-public rooms are included in the search results.
+* `empty_rooms` - Optional flag to filter empty rooms. A room is empty if joined_members is zero. If `true`, only empty rooms are queried. If `false`, empty rooms are excluded from
+ the query. When the flag is absent (the default), **both** empty and non-empty rooms are included in the search results.
Defaults to no filtering.
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index 0d86a4e15f..01f9de9ffa 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -35,6 +35,7 @@ from synapse.http.servlet import (
ResolveRoomIdMixin,
RestServlet,
assert_params_in_dict,
+ parse_boolean,
parse_enum,
parse_integer,
parse_json,
@@ -242,13 +243,23 @@ class ListRoomRestServlet(RestServlet):
errcode=Codes.INVALID_PARAM,
)
+ public_rooms = parse_boolean(request, "public_rooms")
+ empty_rooms = parse_boolean(request, "empty_rooms")
+
direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
reverse_order = True if direction == Direction.BACKWARDS else False
# Return list of rooms according to parameters
rooms, total_rooms = await self.store.get_rooms_paginate(
- start, limit, order_by, reverse_order, search_term
+ start,
+ limit,
+ order_by,
+ reverse_order,
+ search_term,
+ public_rooms,
+ empty_rooms,
)
+
response = {
# next_token should be opaque, so return a value the client can parse
"offset": start,
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index b8a71c803e..d5627b1d6e 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -606,6 +606,8 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
order_by: str,
reverse_order: bool,
search_term: Optional[str],
+ public_rooms: Optional[bool],
+ empty_rooms: Optional[bool],
) -> Tuple[List[Dict[str, Any]], int]:
"""Function to retrieve a paginated list of rooms as json.
@@ -617,30 +619,49 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
search_term: a string to filter room names,
canonical alias and room ids by.
Room ID must match exactly. Canonical alias must match a substring of the local part.
+ public_rooms: Optional flag to filter public and non-public rooms. If true, only public rooms are queried.
+ If false, public rooms are excluded from the query. When it is
+ None (the default), both public and non-public rooms are queried.
+ empty_rooms: Optional flag to filter empty and non-empty rooms.
+ A room is empty if joined_members is zero.
+ If true, only empty rooms are queried.
+ If false, empty rooms are excluded from the query. When it is
+ None (the default), both empty and non-empty rooms are queried.
Returns:
A list of room dicts and an integer representing the total number of
rooms that exist given this query
"""
# Filter room names by a string
- where_statement = ""
- search_pattern: List[object] = []
+ filter_ = []
+ where_args = []
if search_term:
- where_statement = """
- WHERE LOWER(state.name) LIKE ?
- OR LOWER(state.canonical_alias) LIKE ?
- OR state.room_id = ?
- """
+ filter_ = [
+ "LOWER(state.name) LIKE ? OR "
+ "LOWER(state.canonical_alias) LIKE ? OR "
+ "state.room_id = ?"
+ ]
# Our postgres db driver converts ? -> %s in SQL strings as that's the
# placeholder for postgres.
# HOWEVER, if you put a % into your SQL then everything goes wibbly.
# To get around this, we're going to surround search_term with %'s
# before giving it to the database in python instead
- search_pattern = [
- "%" + search_term.lower() + "%",
- "#%" + search_term.lower() + "%:%",
+ where_args = [
+ f"%{search_term.lower()}%",
+ f"#%{search_term.lower()}%:%",
search_term,
]
+ if public_rooms is not None:
+ filter_arg = "1" if public_rooms else "0"
+ filter_.append(f"rooms.is_public = '{filter_arg}'")
+
+ if empty_rooms is not None:
+ if empty_rooms:
+ filter_.append("curr.joined_members = 0")
+ else:
+ filter_.append("curr.joined_members <> 0")
+
+ where_clause = "WHERE " + " AND ".join(filter_) if len(filter_) > 0 else ""
# Set ordering
if RoomSortOrder(order_by) == RoomSortOrder.SIZE:
@@ -717,7 +738,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
LIMIT ?
OFFSET ?
""".format(
- where=where_statement,
+ where=where_clause,
order_by=order_by_column,
direction="ASC" if order_by_asc else "DESC",
)
@@ -726,10 +747,12 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
count_sql = """
SELECT count(*) FROM (
SELECT room_id FROM room_stats_state state
+ INNER JOIN room_stats_current curr USING (room_id)
+ INNER JOIN rooms USING (room_id)
{where}
) AS get_room_ids
""".format(
- where=where_statement,
+ where=where_clause,
)
def _get_rooms_paginate_txn(
@@ -737,7 +760,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
) -> Tuple[List[Dict[str, Any]], int]:
# Add the search term into the WHERE clause
# and execute the data query
- txn.execute(info_sql, search_pattern + [limit, start])
+ txn.execute(info_sql, where_args + [limit, start])
# Refactor room query data into a structured dictionary
rooms = []
@@ -767,7 +790,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
# Execute the count query
# Add the search term into the WHERE clause if present
- txn.execute(count_sql, search_pattern)
+ txn.execute(count_sql, where_args)
room_count = cast(Tuple[int], txn.fetchone())
return rooms, room_count[0]
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 7562747260..95ed736451 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -1795,6 +1795,83 @@ class RoomTestCase(unittest.HomeserverTestCase):
self.assertEqual(room_id, channel.json_body["rooms"][0].get("room_id"))
self.assertEqual("ж", channel.json_body["rooms"][0].get("name"))
+ def test_filter_public_rooms(self) -> None:
+ self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok, is_public=True
+ )
+ self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok, is_public=True
+ )
+ self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok, is_public=False
+ )
+
+ response = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, response.code, msg=response.json_body)
+ self.assertEqual(3, response.json_body["total_rooms"])
+ self.assertEqual(3, len(response.json_body["rooms"]))
+
+ response = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms?public_rooms=true",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, response.code, msg=response.json_body)
+ self.assertEqual(2, response.json_body["total_rooms"])
+ self.assertEqual(2, len(response.json_body["rooms"]))
+
+ response = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms?public_rooms=false",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, response.code, msg=response.json_body)
+ self.assertEqual(1, response.json_body["total_rooms"])
+ self.assertEqual(1, len(response.json_body["rooms"]))
+
+ def test_filter_empty_rooms(self) -> None:
+ self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok, is_public=True
+ )
+ self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok, is_public=True
+ )
+ room_id = self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok, is_public=False
+ )
+ self.helper.leave(room_id, self.admin_user, tok=self.admin_user_tok)
+
+ response = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, response.code, msg=response.json_body)
+ self.assertEqual(3, response.json_body["total_rooms"])
+ self.assertEqual(3, len(response.json_body["rooms"]))
+
+ response = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms?empty_rooms=false",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, response.code, msg=response.json_body)
+ self.assertEqual(2, response.json_body["total_rooms"])
+ self.assertEqual(2, len(response.json_body["rooms"]))
+
+ response = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms?empty_rooms=true",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, response.code, msg=response.json_body)
+ self.assertEqual(1, response.json_body["total_rooms"])
+ self.assertEqual(1, len(response.json_body["rooms"]))
+
def test_single_room(self) -> None:
"""Test that a single room can be requested correctly"""
# Create two test rooms
From c99203d98c823c4bae07e144280df29ebf3ee668 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Wed, 19 Jun 2024 13:03:08 +0200
Subject: [PATCH 198/503] register-new-matrix-user: add a flag to ignore
already existing users (#17304)
Co-authored-by: Andrew Morgan
---
changelog.d/17304.feature | 2 ++
debian/changelog | 2 +-
debian/register_new_matrix_user.ronn | 3 ++
synapse/_scripts/register_new_matrix_user.py | 30 ++++++++++++++++++--
4 files changed, 34 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/17304.feature
diff --git a/changelog.d/17304.feature b/changelog.d/17304.feature
new file mode 100644
index 0000000000..a969d8bf58
--- /dev/null
+++ b/changelog.d/17304.feature
@@ -0,0 +1,2 @@
+`register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database.
+This is useful for scripts that bootstrap user accounts with initial passwords.
diff --git a/debian/changelog b/debian/changelog
index 55e17bd868..731eacf20f 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,6 +1,6 @@
matrix-synapse-py3 (1.109.0+nmu1) UNRELEASED; urgency=medium
- * `register_new_matrix_user` now supports a --password-file flag.
+ * `register_new_matrix_user` now supports a --password-file and a --exists-ok flag.
-- Synapse Packaging team Tue, 18 Jun 2024 13:29:36 +0100
diff --git a/debian/register_new_matrix_user.ronn b/debian/register_new_matrix_user.ronn
index 963e67c004..aa305ec671 100644
--- a/debian/register_new_matrix_user.ronn
+++ b/debian/register_new_matrix_user.ronn
@@ -48,6 +48,9 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
Shared secret as defined in server config file. This is an optional
parameter as it can be also supplied via the YAML file.
+ * `--exists-ok`:
+ Do not fail if the user already exists. The user account will not be updated in this case.
+
* `server_url`:
URL of the home server. Defaults to 'https://localhost:8448'.
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
index 972b35e2dc..14cb21c7fb 100644
--- a/synapse/_scripts/register_new_matrix_user.py
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -52,6 +52,7 @@ def request_registration(
user_type: Optional[str] = None,
_print: Callable[[str], None] = print,
exit: Callable[[int], None] = sys.exit,
+ exists_ok: bool = False,
) -> None:
url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)
@@ -97,6 +98,10 @@ def request_registration(
r = requests.post(url, json=data)
if r.status_code != 200:
+ response = r.json()
+ if exists_ok and response["errcode"] == "M_USER_IN_USE":
+ _print("User already exists. Skipping.")
+ return
_print("ERROR! Received %d %s" % (r.status_code, r.reason))
if 400 <= r.status_code < 500:
try:
@@ -115,6 +120,7 @@ def register_new_user(
shared_secret: str,
admin: Optional[bool],
user_type: Optional[str],
+ exists_ok: bool = False,
) -> None:
if not user:
try:
@@ -154,7 +160,13 @@ def register_new_user(
admin = False
request_registration(
- user, password, server_location, shared_secret, bool(admin), user_type
+ user,
+ password,
+ server_location,
+ shared_secret,
+ bool(admin),
+ user_type,
+ exists_ok=exists_ok,
)
@@ -173,6 +185,11 @@ def main() -> None:
default=None,
help="Local part of the new user. Will prompt if omitted.",
)
+ parser.add_argument(
+ "--exists-ok",
+ action="store_true",
+ help="Do not fail if user already exists.",
+ )
password_group = parser.add_mutually_exclusive_group()
password_group.add_argument(
"-p",
@@ -192,6 +209,7 @@ def main() -> None:
default=None,
help="User type as specified in synapse.api.constants.UserTypes",
)
+
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
"-a",
@@ -281,7 +299,15 @@ def main() -> None:
if args.admin or args.no_admin:
admin = args.admin
- register_new_user(args.user, password, server_url, secret, admin, args.user_type)
+ register_new_user(
+ args.user,
+ password,
+ server_url,
+ secret,
+ admin,
+ args.user_type,
+ exists_ok=args.exists_ok,
+ )
def _read_file(file_path: Any, config_path: str) -> str:
From 3239b7459c88d02be33975addcddfc39126575e7 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 19 Jun 2024 17:18:45 +0100
Subject: [PATCH 199/503] Register sliding sync under a different path (#17331)
As the API is slightly incompatible.
---
changelog.d/17331.misc | 1 +
synapse/rest/client/sync.py | 2 +-
tests/rest/client/test_sync.py | 4 +++-
3 files changed, 5 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/17331.misc
diff --git a/changelog.d/17331.misc b/changelog.d/17331.misc
new file mode 100644
index 0000000000..79d3f33996
--- /dev/null
+++ b/changelog.d/17331.misc
@@ -0,0 +1 @@
+Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC.
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 1b0ac20d94..b5ab0d8534 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -864,7 +864,7 @@ class SlidingSyncRestServlet(RestServlet):
"""
PATTERNS = client_patterns(
- "/org.matrix.msc3575/sync$", releases=[], v1=False, unstable=True
+ "/org.matrix.simplified_msc3575/sync$", releases=[], v1=False, unstable=True
)
def __init__(self, hs: "HomeServer"):
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 2b06767b8a..5195659ec2 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -1228,7 +1228,9 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
- self.sync_endpoint = "/_matrix/client/unstable/org.matrix.msc3575/sync"
+ self.sync_endpoint = (
+ "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
+ )
self.store = hs.get_datastores().main
self.event_sources = hs.get_event_sources()
From 4243c1f074c919367dbbcf733df3015f6ad96549 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 19 Jun 2024 17:39:33 +0100
Subject: [PATCH 200/503] Revert "Handle large chain calc better (#17291)"
(#17334)
This reverts commit bdf82efea505c488953b46eb681b5a63c4e9655d (#17291)
This seems to have stopped persisting auth chains for new events, and so
is causing state res to fall back to the slow methods
---
changelog.d/17291.misc | 1 -
synapse/storage/controllers/persist_events.py | 12 -
synapse/storage/databases/main/events.py | 261 +++++-------------
tests/storage/test_event_chain.py | 9 +-
tests/storage/test_event_federation.py | 41 +--
5 files changed, 88 insertions(+), 236 deletions(-)
delete mode 100644 changelog.d/17291.misc
diff --git a/changelog.d/17291.misc b/changelog.d/17291.misc
deleted file mode 100644
index b1f89a324d..0000000000
--- a/changelog.d/17291.misc
+++ /dev/null
@@ -1 +0,0 @@
-Do not block event sending/receiving while calulating large event auth chains.
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index d0e015bf19..84699a2ee1 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -617,17 +617,6 @@ class EventsPersistenceStorageController:
room_id, chunk
)
- with Measure(self._clock, "calculate_chain_cover_index_for_events"):
- # We now calculate chain ID/sequence numbers for any state events we're
- # persisting. We ignore out of band memberships as we're not in the room
- # and won't have their auth chain (we'll fix it up later if we join the
- # room).
- #
- # See: docs/auth_chain_difference_algorithm.md
- new_event_links = await self.persist_events_store.calculate_chain_cover_index_for_events(
- room_id, [e for e, _ in chunk]
- )
-
await self.persist_events_store._persist_events_and_state_updates(
room_id,
chunk,
@@ -635,7 +624,6 @@ class EventsPersistenceStorageController:
new_forward_extremities=new_forward_extremities,
use_negative_stream_ordering=backfilled,
inhibit_local_membership_updates=backfilled,
- new_event_links=new_event_links,
)
return replaced_events
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index c6df13c064..66428e6c8e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -34,6 +34,7 @@ from typing import (
Optional,
Set,
Tuple,
+ Union,
cast,
)
@@ -99,23 +100,6 @@ class DeltaState:
return not self.to_delete and not self.to_insert and not self.no_longer_in_room
-@attr.s(slots=True, auto_attribs=True)
-class NewEventChainLinks:
- """Information about new auth chain links that need to be added to the DB.
-
- Attributes:
- chain_id, sequence_number: the IDs corresponding to the event being
- inserted, and the starting point of the links
- links: Lists the links that need to be added, 2-tuple of the chain
- ID/sequence number of the end point of the link.
- """
-
- chain_id: int
- sequence_number: int
-
- links: List[Tuple[int, int]] = attr.Factory(list)
-
-
class PersistEventsStore:
"""Contains all the functions for writing events to the database.
@@ -164,7 +148,6 @@ class PersistEventsStore:
*,
state_delta_for_room: Optional[DeltaState],
new_forward_extremities: Optional[Set[str]],
- new_event_links: Dict[str, NewEventChainLinks],
use_negative_stream_ordering: bool = False,
inhibit_local_membership_updates: bool = False,
) -> None:
@@ -234,7 +217,6 @@ class PersistEventsStore:
inhibit_local_membership_updates=inhibit_local_membership_updates,
state_delta_for_room=state_delta_for_room,
new_forward_extremities=new_forward_extremities,
- new_event_links=new_event_links,
)
persist_event_counter.inc(len(events_and_contexts))
@@ -261,87 +243,6 @@ class PersistEventsStore:
(room_id,), frozenset(new_forward_extremities)
)
- async def calculate_chain_cover_index_for_events(
- self, room_id: str, events: Collection[EventBase]
- ) -> Dict[str, NewEventChainLinks]:
- # Filter to state events, and ensure there are no duplicates.
- state_events = []
- seen_events = set()
- for event in events:
- if not event.is_state() or event.event_id in seen_events:
- continue
-
- state_events.append(event)
- seen_events.add(event.event_id)
-
- if not state_events:
- return {}
-
- return await self.db_pool.runInteraction(
- "_calculate_chain_cover_index_for_events",
- self.calculate_chain_cover_index_for_events_txn,
- room_id,
- state_events,
- )
-
- def calculate_chain_cover_index_for_events_txn(
- self, txn: LoggingTransaction, room_id: str, state_events: Collection[EventBase]
- ) -> Dict[str, NewEventChainLinks]:
- # We now calculate chain ID/sequence numbers for any state events we're
- # persisting. We ignore out of band memberships as we're not in the room
- # and won't have their auth chain (we'll fix it up later if we join the
- # room).
- #
- # See: docs/auth_chain_difference_algorithm.md
-
- # We ignore legacy rooms that we aren't filling the chain cover index
- # for.
- row = self.db_pool.simple_select_one_txn(
- txn,
- table="rooms",
- keyvalues={"room_id": room_id},
- retcols=("room_id", "has_auth_chain_index"),
- allow_none=True,
- )
- if row is None:
- return {}
-
- # Filter out already persisted events.
- rows = self.db_pool.simple_select_many_txn(
- txn,
- table="events",
- column="event_id",
- iterable=[e.event_id for e in state_events],
- keyvalues={},
- retcols=("event_id",),
- )
- already_persisted_events = {event_id for event_id, in rows}
- state_events = [
- event
- for event in state_events
- if event.event_id in already_persisted_events
- ]
-
- if not state_events:
- return {}
-
- # We need to know the type/state_key and auth events of the events we're
- # calculating chain IDs for. We don't rely on having the full Event
- # instances as we'll potentially be pulling more events from the DB and
- # we don't need the overhead of fetching/parsing the full event JSON.
- event_to_types = {e.event_id: (e.type, e.state_key) for e in state_events}
- event_to_auth_chain = {e.event_id: e.auth_event_ids() for e in state_events}
- event_to_room_id = {e.event_id: e.room_id for e in state_events}
-
- return self._calculate_chain_cover_index(
- txn,
- self.db_pool,
- self.store.event_chain_id_gen,
- event_to_room_id,
- event_to_types,
- event_to_auth_chain,
- )
-
async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]:
"""Filter the supplied list of event_ids to get those which are prev_events of
existing (non-outlier/rejected) events.
@@ -457,7 +358,6 @@ class PersistEventsStore:
inhibit_local_membership_updates: bool,
state_delta_for_room: Optional[DeltaState],
new_forward_extremities: Optional[Set[str]],
- new_event_links: Dict[str, NewEventChainLinks],
) -> None:
"""Insert some number of room events into the necessary database tables.
@@ -566,9 +466,7 @@ class PersistEventsStore:
# Insert into event_to_state_groups.
self._store_event_state_mappings_txn(txn, events_and_contexts)
- self._persist_event_auth_chain_txn(
- txn, [e for e, _ in events_and_contexts], new_event_links
- )
+ self._persist_event_auth_chain_txn(txn, [e for e, _ in events_and_contexts])
# _store_rejected_events_txn filters out any events which were
# rejected, and returns the filtered list.
@@ -598,7 +496,6 @@ class PersistEventsStore:
self,
txn: LoggingTransaction,
events: List[EventBase],
- new_event_links: Dict[str, NewEventChainLinks],
) -> None:
# We only care about state events, so this if there are no state events.
if not any(e.is_state() for e in events):
@@ -622,8 +519,59 @@ class PersistEventsStore:
],
)
- if new_event_links:
- self._persist_chain_cover_index(txn, self.db_pool, new_event_links)
+ # We now calculate chain ID/sequence numbers for any state events we're
+ # persisting. We ignore out of band memberships as we're not in the room
+ # and won't have their auth chain (we'll fix it up later if we join the
+ # room).
+ #
+ # See: docs/auth_chain_difference_algorithm.md
+
+ # We ignore legacy rooms that we aren't filling the chain cover index
+ # for.
+ rows = cast(
+ List[Tuple[str, Optional[Union[int, bool]]]],
+ self.db_pool.simple_select_many_txn(
+ txn,
+ table="rooms",
+ column="room_id",
+ iterable={event.room_id for event in events if event.is_state()},
+ keyvalues={},
+ retcols=("room_id", "has_auth_chain_index"),
+ ),
+ )
+ rooms_using_chain_index = {
+ room_id for room_id, has_auth_chain_index in rows if has_auth_chain_index
+ }
+
+ state_events = {
+ event.event_id: event
+ for event in events
+ if event.is_state() and event.room_id in rooms_using_chain_index
+ }
+
+ if not state_events:
+ return
+
+ # We need to know the type/state_key and auth events of the events we're
+ # calculating chain IDs for. We don't rely on having the full Event
+ # instances as we'll potentially be pulling more events from the DB and
+ # we don't need the overhead of fetching/parsing the full event JSON.
+ event_to_types = {
+ e.event_id: (e.type, e.state_key) for e in state_events.values()
+ }
+ event_to_auth_chain = {
+ e.event_id: e.auth_event_ids() for e in state_events.values()
+ }
+ event_to_room_id = {e.event_id: e.room_id for e in state_events.values()}
+
+ self._add_chain_cover_index(
+ txn,
+ self.db_pool,
+ self.store.event_chain_id_gen,
+ event_to_room_id,
+ event_to_types,
+ event_to_auth_chain,
+ )
@classmethod
def _add_chain_cover_index(
@@ -635,35 +583,6 @@ class PersistEventsStore:
event_to_types: Dict[str, Tuple[str, str]],
event_to_auth_chain: Dict[str, StrCollection],
) -> None:
- """Calculate and persist the chain cover index for the given events.
-
- Args:
- event_to_room_id: Event ID to the room ID of the event
- event_to_types: Event ID to type and state_key of the event
- event_to_auth_chain: Event ID to list of auth event IDs of the
- event (events with no auth events can be excluded).
- """
-
- new_event_links = cls._calculate_chain_cover_index(
- txn,
- db_pool,
- event_chain_id_gen,
- event_to_room_id,
- event_to_types,
- event_to_auth_chain,
- )
- cls._persist_chain_cover_index(txn, db_pool, new_event_links)
-
- @classmethod
- def _calculate_chain_cover_index(
- cls,
- txn: LoggingTransaction,
- db_pool: DatabasePool,
- event_chain_id_gen: SequenceGenerator,
- event_to_room_id: Dict[str, str],
- event_to_types: Dict[str, Tuple[str, str]],
- event_to_auth_chain: Dict[str, StrCollection],
- ) -> Dict[str, NewEventChainLinks]:
"""Calculate the chain cover index for the given events.
Args:
@@ -671,10 +590,6 @@ class PersistEventsStore:
event_to_types: Event ID to type and state_key of the event
event_to_auth_chain: Event ID to list of auth event IDs of the
event (events with no auth events can be excluded).
-
- Returns:
- A mapping with any new auth chain links we need to add, keyed by
- event ID.
"""
# Map from event ID to chain ID/sequence number.
@@ -793,11 +708,11 @@ class PersistEventsStore:
room_id = event_to_room_id.get(event_id)
if room_id:
e_type, state_key = event_to_types[event_id]
- db_pool.simple_upsert_txn(
+ db_pool.simple_insert_txn(
txn,
table="event_auth_chain_to_calculate",
- keyvalues={"event_id": event_id},
values={
+ "event_id": event_id,
"room_id": room_id,
"type": e_type,
"state_key": state_key,
@@ -809,7 +724,7 @@ class PersistEventsStore:
break
if not events_to_calc_chain_id_for:
- return {}
+ return
# Allocate chain ID/sequence numbers to each new event.
new_chain_tuples = cls._allocate_chain_ids(
@@ -824,10 +739,23 @@ class PersistEventsStore:
)
chain_map.update(new_chain_tuples)
- to_return = {
- event_id: NewEventChainLinks(chain_id, sequence_number)
- for event_id, (chain_id, sequence_number) in new_chain_tuples.items()
- }
+ db_pool.simple_insert_many_txn(
+ txn,
+ table="event_auth_chains",
+ keys=("event_id", "chain_id", "sequence_number"),
+ values=[
+ (event_id, c_id, seq)
+ for event_id, (c_id, seq) in new_chain_tuples.items()
+ ],
+ )
+
+ db_pool.simple_delete_many_txn(
+ txn,
+ table="event_auth_chain_to_calculate",
+ keyvalues={},
+ column="event_id",
+ values=new_chain_tuples,
+ )
# Now we need to calculate any new links between chains caused by
# the new events.
@@ -897,38 +825,10 @@ class PersistEventsStore:
auth_chain_id, auth_sequence_number = chain_map[auth_id]
# Step 2a, add link between the event and auth event
- to_return[event_id].links.append((auth_chain_id, auth_sequence_number))
chain_links.add_link(
(chain_id, sequence_number), (auth_chain_id, auth_sequence_number)
)
- return to_return
-
- @classmethod
- def _persist_chain_cover_index(
- cls,
- txn: LoggingTransaction,
- db_pool: DatabasePool,
- new_event_links: Dict[str, NewEventChainLinks],
- ) -> None:
- db_pool.simple_insert_many_txn(
- txn,
- table="event_auth_chains",
- keys=("event_id", "chain_id", "sequence_number"),
- values=[
- (event_id, new_links.chain_id, new_links.sequence_number)
- for event_id, new_links in new_event_links.items()
- ],
- )
-
- db_pool.simple_delete_many_txn(
- txn,
- table="event_auth_chain_to_calculate",
- keyvalues={},
- column="event_id",
- values=new_event_links,
- )
-
db_pool.simple_insert_many_txn(
txn,
table="event_auth_chain_links",
@@ -938,16 +838,7 @@ class PersistEventsStore:
"target_chain_id",
"target_sequence_number",
),
- values=[
- (
- new_links.chain_id,
- new_links.sequence_number,
- target_chain_id,
- target_sequence_number,
- )
- for new_links in new_event_links.values()
- for (target_chain_id, target_sequence_number) in new_links.links
- ],
+ values=list(chain_links.get_additions()),
)
@staticmethod
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index c4e216c308..81feb3ec29 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -447,14 +447,7 @@ class EventChainStoreTestCase(HomeserverTestCase):
)
# Actually call the function that calculates the auth chain stuff.
- new_event_links = (
- persist_events_store.calculate_chain_cover_index_for_events_txn(
- txn, events[0].room_id, [e for e in events if e.is_state()]
- )
- )
- persist_events_store._persist_event_auth_chain_txn(
- txn, events, new_event_links
- )
+ persist_events_store._persist_event_auth_chain_txn(txn, events)
self.get_success(
persist_events_store.db_pool.runInteraction(
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 1832a23714..0a6253e22c 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -365,19 +365,12 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
},
)
- events = [
- cast(EventBase, FakeEvent(event_id, room_id, AUTH_GRAPH[event_id]))
- for event_id in AUTH_GRAPH
- ]
- new_event_links = (
- self.persist_events.calculate_chain_cover_index_for_events_txn(
- txn, room_id, [e for e in events if e.is_state()]
- )
- )
self.persist_events._persist_event_auth_chain_txn(
txn,
- events,
- new_event_links,
+ [
+ cast(EventBase, FakeEvent(event_id, room_id, AUTH_GRAPH[event_id]))
+ for event_id in AUTH_GRAPH
+ ],
)
self.get_success(
@@ -635,20 +628,13 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
)
# Insert all events apart from 'B'
- events = [
- cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
- for event_id in auth_graph
- if event_id != "b"
- ]
- new_event_links = (
- self.persist_events.calculate_chain_cover_index_for_events_txn(
- txn, room_id, [e for e in events if e.is_state()]
- )
- )
self.persist_events._persist_event_auth_chain_txn(
txn,
- events,
- new_event_links,
+ [
+ cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
+ for event_id in auth_graph
+ if event_id != "b"
+ ],
)
# Now we insert the event 'B' without a chain cover, by temporarily
@@ -661,14 +647,9 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
updatevalues={"has_auth_chain_index": False},
)
- events = [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))]
- new_event_links = (
- self.persist_events.calculate_chain_cover_index_for_events_txn(
- txn, room_id, [e for e in events if e.is_state()]
- )
- )
self.persist_events._persist_event_auth_chain_txn(
- txn, events, new_event_links
+ txn,
+ [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))],
)
self.store.db_pool.simple_update_txn(
From 13ed84c5738c3a4b25866df64e48b9266b6507fb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 21 Jun 2024 13:41:44 +0100
Subject: [PATCH 201/503] Bump authlib from 1.3.0 to 1.3.1 (#17343)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 58981ff6e1..dc26846f3d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -35,13 +35,13 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p
[[package]]
name = "authlib"
-version = "1.3.0"
+version = "1.3.1"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
optional = true
python-versions = ">=3.8"
files = [
- {file = "Authlib-1.3.0-py2.py3-none-any.whl", hash = "sha256:9637e4de1fb498310a56900b3e2043a206b03cb11c05422014b0302cbc814be3"},
- {file = "Authlib-1.3.0.tar.gz", hash = "sha256:959ea62a5b7b5123c5059758296122b57cd2585ae2ed1c0622c21b371ffdae06"},
+ {file = "Authlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:d35800b973099bbadc49b42b256ecb80041ad56b7fe1216a362c7943c088f377"},
+ {file = "authlib-1.3.1.tar.gz", hash = "sha256:7ae843f03c06c5c0debd63c9db91f9fda64fa62a42a77419fa15fbb7e7a58917"},
]
[package.dependencies]
From f8d57ce656a7f6f3a6629cf17339ebcfbe3f2dba Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 21 Jun 2024 13:41:53 +0100
Subject: [PATCH 202/503] Bump tornado from 6.4 to 6.4.1 (#17344)
---
poetry.lock | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index dc26846f3d..d3a37944b5 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2598,22 +2598,22 @@ files = [
[[package]]
name = "tornado"
-version = "6.4"
+version = "6.4.1"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
-optional = true
-python-versions = ">= 3.8"
+optional = false
+python-versions = ">=3.8"
files = [
- {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"},
- {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"},
- {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"},
- {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"},
- {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"},
- {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"},
- {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"},
- {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"},
- {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"},
- {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"},
- {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"},
+ {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
+ {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"},
+ {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"},
+ {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"},
+ {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"},
+ {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"},
+ {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"},
+ {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"},
+ {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"},
+ {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"},
+ {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"},
]
[[package]]
From 7c5fb13f7b0776e20eccede75827e515fdaa1146 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 21 Jun 2024 13:42:01 +0100
Subject: [PATCH 203/503] Bump requests from 2.31.0 to 2.32.2 (#17345)
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index d3a37944b5..0e5195bf34 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2157,13 +2157,13 @@ rpds-py = ">=0.7.0"
[[package]]
name = "requests"
-version = "2.31.0"
+version = "2.32.2"
description = "Python HTTP for Humans."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
- {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
+ {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"},
+ {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"},
]
[package.dependencies]
From adeedb7b7c80842665a0b7d46c9188a2c49076fb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 21 Jun 2024 13:42:09 +0100
Subject: [PATCH 204/503] Bump urllib3 from 2.0.7 to 2.2.2 (#17346)
---
poetry.lock | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 0e5195bf34..27e9fe5f4e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2939,18 +2939,18 @@ files = [
[[package]]
name = "urllib3"
-version = "2.0.7"
+version = "2.2.2"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"},
- {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"},
+ {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
+ {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
-secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"]
+h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
From 452a59f887dd306df01359676ca8efe7d107a106 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Jun 2024 13:31:54 +0100
Subject: [PATCH 205/503] Bump sentry-sdk from 2.3.1 to 2.6.0 (#17351)
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 27e9fe5f4e..ebb3efcf88 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2387,13 +2387,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "2.3.1"
+version = "2.6.0"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
files = [
- {file = "sentry_sdk-2.3.1-py2.py3-none-any.whl", hash = "sha256:c5aeb095ba226391d337dd42a6f9470d86c9fc236ecc71cfc7cd1942b45010c6"},
- {file = "sentry_sdk-2.3.1.tar.gz", hash = "sha256:139a71a19f5e9eb5d3623942491ce03cf8ebc14ea2e39ba3e6fe79560d8a5b1f"},
+ {file = "sentry_sdk-2.6.0-py2.py3-none-any.whl", hash = "sha256:422b91cb49378b97e7e8d0e8d5a1069df23689d45262b86f54988a7db264e874"},
+ {file = "sentry_sdk-2.6.0.tar.gz", hash = "sha256:65cc07e9c6995c5e316109f138570b32da3bd7ff8d0d0ee4aaf2628c3dd8127d"},
]
[package.dependencies]
@@ -2600,7 +2600,7 @@ files = [
name = "tornado"
version = "6.4.1"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
-optional = false
+optional = true
python-versions = ">=3.8"
files = [
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
From 7a6186b8880a37a6f891659ea70110c2b8ad0139 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Jun 2024 13:32:05 +0100
Subject: [PATCH 206/503] Bump packaging from 24.0 to 24.1 (#17352)
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index ebb3efcf88..e1eba225cd 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1488,13 +1488,13 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
[[package]]
name = "packaging"
-version = "24.0"
+version = "24.1"
description = "Core utilities for Python packages"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"},
- {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"},
+ {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
+ {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
]
[[package]]
From 118b734081af78e89fdb8d2212ff651a9666b343 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Jun 2024 13:32:14 +0100
Subject: [PATCH 207/503] Bump netaddr from 1.2.1 to 1.3.0 (#17353)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index e1eba225cd..7690ea2b26 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1461,13 +1461,13 @@ test = ["lxml", "pytest (>=4.6)", "pytest-cov"]
[[package]]
name = "netaddr"
-version = "1.2.1"
+version = "1.3.0"
description = "A network address manipulation library for Python"
optional = false
python-versions = ">=3.7"
files = [
- {file = "netaddr-1.2.1-py3-none-any.whl", hash = "sha256:bd9e9534b0d46af328cf64f0e5a23a5a43fca292df221c85580b27394793496e"},
- {file = "netaddr-1.2.1.tar.gz", hash = "sha256:6eb8fedf0412c6d294d06885c110de945cf4d22d2b510d0404f4e06950857987"},
+ {file = "netaddr-1.3.0-py3-none-any.whl", hash = "sha256:c2c6a8ebe5554ce33b7d5b3a306b71bbb373e000bbbf2350dd5213cc56e3dbbe"},
+ {file = "netaddr-1.3.0.tar.gz", hash = "sha256:5c3c3d9895b551b763779ba7db7a03487dc1f8e3b385af819af341ae9ef6e48a"},
]
[package.extras]
From 7c2d8f1f0171b89a0e280bf18d522acd8bdf610e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Jun 2024 13:32:44 +0100
Subject: [PATCH 208/503] Bump typing-extensions from 4.11.0 to 4.12.2 (#17354)
---
poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 7690ea2b26..1bae0ea388 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2917,13 +2917,13 @@ files = [
[[package]]
name = "typing-extensions"
-version = "4.11.0"
+version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
- {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"},
- {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"},
+ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
+ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
From 1e74b50dc63d79887168b19a9f3ad240bec96590 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Jun 2024 13:34:56 +0100
Subject: [PATCH 209/503] Bump lazy_static from 1.4.0 to 1.5.0 (#17355)
---
Cargo.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 7472e16291..1955c1a4e7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -212,9 +212,9 @@ dependencies = [
[[package]]
name = "lazy_static"
-version = "1.4.0"
+version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
From 700d2cc4a0d457642edb43bc3714d212f15d797f Mon Sep 17 00:00:00 2001
From: Denis Kasak
Date: Mon, 24 Jun 2024 15:12:14 +0200
Subject: [PATCH 210/503] Tidy up integer parsing (#17339)
The parse_integer function was previously made to reject negative values by
default in https://github.com/element-hq/synapse/pull/16920, but the
documentation stated otherwise. This fixes the documentation and also:
- Removes explicit negative=False parameters from call sites.
- Brings the negative default of parse_integer_from_args in alignment with
parse_integer.
---
changelog.d/17339.misc | 1 +
synapse/http/servlet.py | 12 +++++++-----
synapse/rest/admin/federation.py | 8 ++++----
synapse/rest/admin/media.py | 12 ++++++------
synapse/rest/admin/statistics.py | 8 ++++----
synapse/rest/admin/users.py | 4 ++--
synapse/rest/client/room.py | 11 +----------
synapse/streams/config.py | 3 ---
8 files changed, 25 insertions(+), 34 deletions(-)
create mode 100644 changelog.d/17339.misc
diff --git a/changelog.d/17339.misc b/changelog.d/17339.misc
new file mode 100644
index 0000000000..1d7cb96c8b
--- /dev/null
+++ b/changelog.d/17339.misc
@@ -0,0 +1 @@
+Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring `parse_integer_from_args` default in alignment. Contributed by Denis Kasak (@dkasak).
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index ab12951da8..08b8ff7afd 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -119,14 +119,15 @@ def parse_integer(
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
- negative: whether to allow negative integers, defaults to True.
+ negative: whether to allow negative integers, defaults to False (disallowing
+ negatives).
Returns:
An int value or the default.
Raises:
SynapseError: if the parameter is absent and required, if the
parameter is present and not an integer, or if the
- parameter is illegitimate negative.
+ parameter is illegitimately negative.
"""
args: Mapping[bytes, Sequence[bytes]] = request.args # type: ignore
return parse_integer_from_args(args, name, default, required, negative)
@@ -164,7 +165,7 @@ def parse_integer_from_args(
name: str,
default: Optional[int] = None,
required: bool = False,
- negative: bool = True,
+ negative: bool = False,
) -> Optional[int]:
"""Parse an integer parameter from the request string
@@ -174,7 +175,8 @@ def parse_integer_from_args(
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
- negative: whether to allow negative integers, defaults to True.
+ negative: whether to allow negative integers, defaults to False (disallowing
+ negatives).
Returns:
An int value or the default.
@@ -182,7 +184,7 @@ def parse_integer_from_args(
Raises:
SynapseError: if the parameter is absent and required, if the
parameter is present and not an integer, or if the
- parameter is illegitimate negative.
+ parameter is illegitimately negative.
"""
name_bytes = name.encode("ascii")
diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py
index 14ab4644cb..d85a04b825 100644
--- a/synapse/rest/admin/federation.py
+++ b/synapse/rest/admin/federation.py
@@ -61,8 +61,8 @@ class ListDestinationsRestServlet(RestServlet):
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self._auth, request)
- start = parse_integer(request, "from", default=0, negative=False)
- limit = parse_integer(request, "limit", default=100, negative=False)
+ start = parse_integer(request, "from", default=0)
+ limit = parse_integer(request, "limit", default=100)
destination = parse_string(request, "destination")
@@ -181,8 +181,8 @@ class DestinationMembershipRestServlet(RestServlet):
if not await self._store.is_destination_known(destination):
raise NotFoundError("Unknown destination")
- start = parse_integer(request, "from", default=0, negative=False)
- limit = parse_integer(request, "limit", default=100, negative=False)
+ start = parse_integer(request, "from", default=0)
+ limit = parse_integer(request, "limit", default=100)
direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index a05b7252ec..ee6a681285 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -311,8 +311,8 @@ class DeleteMediaByDateSize(RestServlet):
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
- before_ts = parse_integer(request, "before_ts", required=True, negative=False)
- size_gt = parse_integer(request, "size_gt", default=0, negative=False)
+ before_ts = parse_integer(request, "before_ts", required=True)
+ size_gt = parse_integer(request, "size_gt", default=0)
keep_profiles = parse_boolean(request, "keep_profiles", default=True)
if before_ts < 30000000000: # Dec 1970 in milliseconds, Aug 2920 in seconds
@@ -377,8 +377,8 @@ class UserMediaRestServlet(RestServlet):
if user is None:
raise NotFoundError("Unknown user")
- start = parse_integer(request, "from", default=0, negative=False)
- limit = parse_integer(request, "limit", default=100, negative=False)
+ start = parse_integer(request, "from", default=0)
+ limit = parse_integer(request, "limit", default=100)
# If neither `order_by` nor `dir` is set, set the default order
# to newest media is on top for backward compatibility.
@@ -421,8 +421,8 @@ class UserMediaRestServlet(RestServlet):
if user is None:
raise NotFoundError("Unknown user")
- start = parse_integer(request, "from", default=0, negative=False)
- limit = parse_integer(request, "limit", default=100, negative=False)
+ start = parse_integer(request, "from", default=0)
+ limit = parse_integer(request, "limit", default=100)
# If neither `order_by` nor `dir` is set, set the default order
# to newest media is on top for backward compatibility.
diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py
index dc27a41dd9..0adc5b7005 100644
--- a/synapse/rest/admin/statistics.py
+++ b/synapse/rest/admin/statistics.py
@@ -63,10 +63,10 @@ class UserMediaStatisticsRestServlet(RestServlet):
),
)
- start = parse_integer(request, "from", default=0, negative=False)
- limit = parse_integer(request, "limit", default=100, negative=False)
- from_ts = parse_integer(request, "from_ts", default=0, negative=False)
- until_ts = parse_integer(request, "until_ts", negative=False)
+ start = parse_integer(request, "from", default=0)
+ limit = parse_integer(request, "limit", default=100)
+ from_ts = parse_integer(request, "from_ts", default=0)
+ until_ts = parse_integer(request, "until_ts")
if until_ts is not None:
if until_ts <= from_ts:
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 5bf12c4979..f7cb9e02cc 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -90,8 +90,8 @@ class UsersRestServletV2(RestServlet):
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
- start = parse_integer(request, "from", default=0, negative=False)
- limit = parse_integer(request, "limit", default=100, negative=False)
+ start = parse_integer(request, "from", default=0)
+ limit = parse_integer(request, "limit", default=100)
user_id = parse_string(request, "user_id")
name = parse_string(request, "name", encoding="utf-8")
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index c98241f6ce..bd65cf4b83 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -510,7 +510,7 @@ class PublicRoomListRestServlet(RestServlet):
if server:
raise e
- limit: Optional[int] = parse_integer(request, "limit", 0, negative=False)
+ limit: Optional[int] = parse_integer(request, "limit", 0)
since_token = parse_string(request, "since")
if limit == 0:
@@ -1430,16 +1430,7 @@ class RoomHierarchyRestServlet(RestServlet):
requester = await self._auth.get_user_by_req(request, allow_guest=True)
max_depth = parse_integer(request, "max_depth")
- if max_depth is not None and max_depth < 0:
- raise SynapseError(
- 400, "'max_depth' must be a non-negative integer", Codes.BAD_JSON
- )
-
limit = parse_integer(request, "limit")
- if limit is not None and limit <= 0:
- raise SynapseError(
- 400, "'limit' must be a positive integer", Codes.BAD_JSON
- )
return 200, await self._room_summary_handler.get_room_hierarchy(
requester,
diff --git a/synapse/streams/config.py b/synapse/streams/config.py
index eeafe889de..9fee5bfb92 100644
--- a/synapse/streams/config.py
+++ b/synapse/streams/config.py
@@ -75,9 +75,6 @@ class PaginationConfig:
raise SynapseError(400, "'to' parameter is invalid")
limit = parse_integer(request, "limit", default=default_limit)
- if limit < 0:
- raise SynapseError(400, "Limit must be 0 or above")
-
limit = min(limit, MAX_LIMIT)
try:
From cf711ac03cd88b70568b3ac9df4aed4de5b33523 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 24 Jun 2024 14:15:13 +0100
Subject: [PATCH 211/503] Reduce device lists replication traffic. (#17333)
Reduce the replication traffic of device lists, by not sending every
destination that needs to be sent the device list update over
replication. Instead a "hosts to send to have been calculated"
notification over replication, and then federation senders read the
destinations from the DB.
For non federation senders this should heavily reduce the impact of a
user in many large rooms changing a device.
---
changelog.d/17333.misc | 1 +
synapse/replication/tcp/client.py | 19 ++--
synapse/replication/tcp/streams/_base.py | 12 ++-
synapse/storage/databases/main/devices.py | 93 ++++++++++++-------
.../storage/databases/main/end_to_end_keys.py | 4 +-
tests/storage/test_devices.py | 8 ++
6 files changed, 89 insertions(+), 48 deletions(-)
create mode 100644 changelog.d/17333.misc
diff --git a/changelog.d/17333.misc b/changelog.d/17333.misc
new file mode 100644
index 0000000000..d3ef0b3777
--- /dev/null
+++ b/changelog.d/17333.misc
@@ -0,0 +1 @@
+Handle device lists notifications for large accounts more efficiently in worker mode.
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 2d6d49eed7..3dddbb70b4 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -114,13 +114,19 @@ class ReplicationDataHandler:
"""
all_room_ids: Set[str] = set()
if stream_name == DeviceListsStream.NAME:
- if any(row.entity.startswith("@") and not row.is_signature for row in rows):
+ if any(not row.is_signature and not row.hosts_calculated for row in rows):
prev_token = self.store.get_device_stream_token()
all_room_ids = await self.store.get_all_device_list_changes(
prev_token, token
)
self.store.device_lists_in_rooms_have_changed(all_room_ids, token)
+ # If we're sending federation we need to update the device lists
+ # outbound pokes stream change cache with updated hosts.
+ if self.send_handler and any(row.hosts_calculated for row in rows):
+ hosts = await self.store.get_destinations_for_device(token)
+ self.store.device_lists_outbound_pokes_have_changed(hosts, token)
+
self.store.process_replication_rows(stream_name, instance_name, token, rows)
# NOTE: this must be called after process_replication_rows to ensure any
# cache invalidations are first handled before any stream ID advances.
@@ -433,12 +439,11 @@ class FederationSenderHandler:
# The entities are either user IDs (starting with '@') whose devices
# have changed, or remote servers that we need to tell about
# changes.
- hosts = {
- row.entity
- for row in rows
- if not row.entity.startswith("@") and not row.is_signature
- }
- await self.federation_sender.send_device_messages(hosts, immediate=False)
+ if any(row.hosts_calculated for row in rows):
+ hosts = await self.store.get_destinations_for_device(token)
+ await self.federation_sender.send_device_messages(
+ hosts, immediate=False
+ )
elif stream_name == ToDeviceStream.NAME:
# The to_device stream includes stuff to be pushed to both local
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 661206c841..d021904de7 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -549,10 +549,14 @@ class DeviceListsStream(_StreamFromIdGen):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class DeviceListsStreamRow:
- entity: str
+ user_id: str
# Indicates that a user has signed their own device with their user-signing key
is_signature: bool
+ # Indicates if this is a notification that we've calculated the hosts we
+ # need to send the update to.
+ hosts_calculated: bool
+
NAME = "device_lists"
ROW_TYPE = DeviceListsStreamRow
@@ -594,13 +598,13 @@ class DeviceListsStream(_StreamFromIdGen):
upper_limit_token = min(upper_limit_token, signatures_to_token)
device_updates = [
- (stream_id, (entity, False))
- for stream_id, (entity,) in device_updates
+ (stream_id, (entity, False, hosts))
+ for stream_id, (entity, hosts) in device_updates
if stream_id <= upper_limit_token
]
signatures_updates = [
- (stream_id, (entity, True))
+ (stream_id, (entity, True, False))
for stream_id, (entity,) in signatures_updates
if stream_id <= upper_limit_token
]
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 40187496e2..5eeca6165d 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -164,22 +164,24 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
prefilled_cache=user_signature_stream_prefill,
)
- (
- device_list_federation_prefill,
- device_list_federation_list_id,
- ) = self.db_pool.get_cache_dict(
- db_conn,
- "device_lists_outbound_pokes",
- entity_column="destination",
- stream_column="stream_id",
- max_value=device_list_max,
- limit=10000,
- )
- self._device_list_federation_stream_cache = StreamChangeCache(
- "DeviceListFederationStreamChangeCache",
- device_list_federation_list_id,
- prefilled_cache=device_list_federation_prefill,
- )
+ self._device_list_federation_stream_cache = None
+ if hs.should_send_federation():
+ (
+ device_list_federation_prefill,
+ device_list_federation_list_id,
+ ) = self.db_pool.get_cache_dict(
+ db_conn,
+ "device_lists_outbound_pokes",
+ entity_column="destination",
+ stream_column="stream_id",
+ max_value=device_list_max,
+ limit=10000,
+ )
+ self._device_list_federation_stream_cache = StreamChangeCache(
+ "DeviceListFederationStreamChangeCache",
+ device_list_federation_list_id,
+ prefilled_cache=device_list_federation_prefill,
+ )
if hs.config.worker.run_background_tasks:
self._clock.looping_call(
@@ -207,23 +209,30 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
) -> None:
for row in rows:
if row.is_signature:
- self._user_signature_stream_cache.entity_has_changed(row.entity, token)
+ self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
continue
# The entities are either user IDs (starting with '@') whose devices
# have changed, or remote servers that we need to tell about
# changes.
- if row.entity.startswith("@"):
- self._device_list_stream_cache.entity_has_changed(row.entity, token)
- self.get_cached_devices_for_user.invalidate((row.entity,))
- self._get_cached_user_device.invalidate((row.entity,))
- self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
-
- else:
- self._device_list_federation_stream_cache.entity_has_changed(
- row.entity, token
+ if not row.hosts_calculated:
+ self._device_list_stream_cache.entity_has_changed(row.user_id, token)
+ self.get_cached_devices_for_user.invalidate((row.user_id,))
+ self._get_cached_user_device.invalidate((row.user_id,))
+ self.get_device_list_last_stream_id_for_remote.invalidate(
+ (row.user_id,)
)
+ def device_lists_outbound_pokes_have_changed(
+ self, destinations: StrCollection, token: int
+ ) -> None:
+ assert self._device_list_federation_stream_cache is not None
+
+ for destination in destinations:
+ self._device_list_federation_stream_cache.entity_has_changed(
+ destination, token
+ )
+
def device_lists_in_rooms_have_changed(
self, room_ids: StrCollection, token: int
) -> None:
@@ -363,6 +372,11 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
EDU contents.
"""
now_stream_id = self.get_device_stream_token()
+ if from_stream_id == now_stream_id:
+ return now_stream_id, []
+
+ if self._device_list_federation_stream_cache is None:
+ raise Exception("Func can only be used on federation senders")
has_changed = self._device_list_federation_stream_cache.has_entity_changed(
destination, int(from_stream_id)
@@ -1018,10 +1032,10 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
# This query Does The Right Thing where it'll correctly apply the
# bounds to the inner queries.
sql = """
- SELECT stream_id, entity FROM (
- SELECT stream_id, user_id AS entity FROM device_lists_stream
+ SELECT stream_id, user_id, hosts FROM (
+ SELECT stream_id, user_id, false AS hosts FROM device_lists_stream
UNION ALL
- SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes
+ SELECT DISTINCT stream_id, user_id, true AS hosts FROM device_lists_outbound_pokes
) AS e
WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
@@ -1577,6 +1591,14 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
get_device_list_changes_in_room_txn,
)
+ async def get_destinations_for_device(self, stream_id: int) -> StrCollection:
+ return await self.db_pool.simple_select_onecol(
+ table="device_lists_outbound_pokes",
+ keyvalues={"stream_id": stream_id},
+ retcol="destination",
+ desc="get_destinations_for_device",
+ )
+
class DeviceBackgroundUpdateStore(SQLBaseStore):
def __init__(
@@ -2112,12 +2134,13 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
stream_ids: List[int],
context: Optional[Dict[str, str]],
) -> None:
- for host in hosts:
- txn.call_after(
- self._device_list_federation_stream_cache.entity_has_changed,
- host,
- stream_ids[-1],
- )
+ if self._device_list_federation_stream_cache:
+ for host in hosts:
+ txn.call_after(
+ self._device_list_federation_stream_cache.entity_has_changed,
+ host,
+ stream_ids[-1],
+ )
now = self._clock.time_msec()
stream_id_iterator = iter(stream_ids)
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 38d8785faa..9e6c9561ae 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -123,9 +123,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
if stream_name == DeviceListsStream.NAME:
for row in rows:
assert isinstance(row, DeviceListsStream.DeviceListsStreamRow)
- if row.entity.startswith("@"):
+ if not row.hosts_calculated:
self._get_e2e_device_keys_for_federation_query_inner.invalidate(
- (row.entity,)
+ (row.user_id,)
)
super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index 7f975d04ff..ba01b038ab 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -36,6 +36,14 @@ class DeviceStoreTestCase(HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+
+ # We 'enable' federation otherwise `get_device_updates_by_remote` will
+ # throw an exception.
+ config["federation_sender_instances"] = ["master"]
+ return config
+
def add_device_change(self, user_id: str, device_ids: List[str], host: str) -> None:
"""Add a device list change for the given device to
`device_lists_outbound_pokes` table.
From 7a11c0ac4fd3bab42d6edb17593c9d7ed8371001 Mon Sep 17 00:00:00 2001
From: Shay
Date: Mon, 24 Jun 2024 06:21:22 -0700
Subject: [PATCH 212/503] Add support for MSC3823 - Account Suspension Part 2
(#17255)
---
changelog.d/17255.feature | 1 +
synapse/config/experimental.py | 4 ++
synapse/handlers/message.py | 11 ++++
synapse/rest/admin/__init__.py | 3 +
synapse/rest/admin/users.py | 39 ++++++++++++
synapse/rest/client/profile.py | 26 ++++++++
synapse/rest/client/room.py | 14 +++++
tests/rest/admin/test_user.py | 84 +++++++++++++++++++++++++
tests/rest/client/test_rooms.py | 105 ++++++++++++++++++++++++++++++++
9 files changed, 287 insertions(+)
create mode 100644 changelog.d/17255.feature
diff --git a/changelog.d/17255.feature b/changelog.d/17255.feature
new file mode 100644
index 0000000000..4093de1146
--- /dev/null
+++ b/changelog.d/17255.feature
@@ -0,0 +1 @@
+Add support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension.
\ No newline at end of file
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 23e96da6a3..1b72727b75 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -433,6 +433,10 @@ class ExperimentalConfig(Config):
("experimental", "msc4108_delegation_endpoint"),
)
+ self.msc3823_account_suspension = experimental.get(
+ "msc3823_account_suspension", False
+ )
+
self.msc3916_authenticated_media_enabled = experimental.get(
"msc3916_authenticated_media_enabled", False
)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 16d01efc67..5aa48230ec 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -642,6 +642,17 @@ class EventCreationHandler:
"""
await self.auth_blocking.check_auth_blocking(requester=requester)
+ if event_dict["type"] == EventTypes.Message:
+ requester_suspended = await self.store.get_user_suspended_status(
+ requester.user.to_string()
+ )
+ if requester_suspended:
+ raise SynapseError(
+ 403,
+ "Sending messages while account is suspended is not allowed.",
+ Codes.USER_ACCOUNT_SUSPENDED,
+ )
+
if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
room_version_id = event_dict["content"]["room_version"]
maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 6da1d79168..cdaee17451 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -101,6 +101,7 @@ from synapse.rest.admin.users import (
ResetPasswordRestServlet,
SearchUsersRestServlet,
ShadowBanRestServlet,
+ SuspendAccountRestServlet,
UserAdminServlet,
UserByExternalId,
UserByThreePid,
@@ -327,6 +328,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
BackgroundUpdateRestServlet(hs).register(http_server)
BackgroundUpdateStartJobRestServlet(hs).register(http_server)
ExperimentalFeaturesRestServlet(hs).register(http_server)
+ if hs.config.experimental.msc3823_account_suspension:
+ SuspendAccountRestServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource(
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index f7cb9e02cc..ad515bd5a3 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -27,11 +27,13 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import attr
+from synapse._pydantic_compat import HAS_PYDANTIC_V2
from synapse.api.constants import Direction, UserTypes
from synapse.api.errors import Codes, NotFoundError, SynapseError
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
+ parse_and_validate_json_object_from_request,
parse_boolean,
parse_enum,
parse_integer,
@@ -49,10 +51,17 @@ from synapse.rest.client._base import client_patterns
from synapse.storage.databases.main.registration import ExternalIDReuseException
from synapse.storage.databases.main.stats import UserSortOrder
from synapse.types import JsonDict, JsonMapping, UserID
+from synapse.types.rest import RequestBodyModel
if TYPE_CHECKING:
from synapse.server import HomeServer
+if TYPE_CHECKING or HAS_PYDANTIC_V2:
+ from pydantic.v1 import StrictBool
+else:
+ from pydantic import StrictBool
+
+
logger = logging.getLogger(__name__)
@@ -732,6 +741,36 @@ class DeactivateAccountRestServlet(RestServlet):
return HTTPStatus.OK, {"id_server_unbind_result": id_server_unbind_result}
+class SuspendAccountRestServlet(RestServlet):
+    PATTERNS = admin_patterns("/suspend/(?P<target_user_id>[^/]*)$")
+
+ def __init__(self, hs: "HomeServer"):
+ self.auth = hs.get_auth()
+ self.is_mine = hs.is_mine
+ self.store = hs.get_datastores().main
+
+ class PutBody(RequestBodyModel):
+ suspend: StrictBool
+
+ async def on_PUT(
+ self, request: SynapseRequest, target_user_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+ await assert_user_is_admin(self.auth, requester)
+
+ if not self.is_mine(UserID.from_string(target_user_id)):
+ raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only suspend local users")
+
+ if not await self.store.get_user_by_id(target_user_id):
+ raise NotFoundError("User not found")
+
+ body = parse_and_validate_json_object_from_request(request, self.PutBody)
+ suspend = body.suspend
+ await self.store.set_user_suspended_status(target_user_id, suspend)
+
+ return HTTPStatus.OK, {f"user_{target_user_id}_suspended": suspend}
+
+
class AccountValidityRenewServlet(RestServlet):
PATTERNS = admin_patterns("/account_validity/validity$")
diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py
index 0323f6afa1..c1a80c5c3d 100644
--- a/synapse/rest/client/profile.py
+++ b/synapse/rest/client/profile.py
@@ -108,6 +108,19 @@ class ProfileDisplaynameRestServlet(RestServlet):
propagate = _read_propagate(self.hs, request)
+ requester_suspended = (
+ await self.hs.get_datastores().main.get_user_suspended_status(
+ requester.user.to_string()
+ )
+ )
+
+ if requester_suspended:
+ raise SynapseError(
+ 403,
+ "Updating displayname while account is suspended is not allowed.",
+ Codes.USER_ACCOUNT_SUSPENDED,
+ )
+
await self.profile_handler.set_displayname(
user, requester, new_name, is_admin, propagate=propagate
)
@@ -167,6 +180,19 @@ class ProfileAvatarURLRestServlet(RestServlet):
propagate = _read_propagate(self.hs, request)
+ requester_suspended = (
+ await self.hs.get_datastores().main.get_user_suspended_status(
+ requester.user.to_string()
+ )
+ )
+
+ if requester_suspended:
+ raise SynapseError(
+ 403,
+ "Updating avatar URL while account is suspended is not allowed.",
+ Codes.USER_ACCOUNT_SUSPENDED,
+ )
+
await self.profile_handler.set_avatar_url(
user, requester, new_avatar_url, is_admin, propagate=propagate
)
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index bd65cf4b83..903c74f6d8 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -1120,6 +1120,20 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
) -> Tuple[int, JsonDict]:
content = parse_json_object_from_request(request)
+ requester_suspended = await self._store.get_user_suspended_status(
+ requester.user.to_string()
+ )
+
+ if requester_suspended:
+ event = await self._store.get_event(event_id, allow_none=True)
+ if event:
+ if event.sender != requester.user.to_string():
+ raise SynapseError(
+ 403,
+ "You can only redact your own events while account is suspended.",
+ Codes.USER_ACCOUNT_SUSPENDED,
+ )
+
# Ensure the redacts property in the content matches the one provided in
# the URL.
room_version = await self._store.get_room_version(room_id)
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index c5da1e9686..16bb4349f5 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -37,6 +37,7 @@ from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes
from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
from synapse.api.room_versions import RoomVersions
from synapse.media.filepath import MediaFilePaths
+from synapse.rest import admin
from synapse.rest.client import (
devices,
login,
@@ -5005,3 +5006,86 @@ class AllowCrossSigningReplacementTestCase(unittest.HomeserverTestCase):
)
assert timestamp is not None
self.assertGreater(timestamp, self.clock.time_msec())
+
+
+class UserSuspensionTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ admin.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.admin = self.register_user("thomas", "hackme", True)
+ self.admin_tok = self.login("thomas", "hackme")
+
+ self.bad_user = self.register_user("teresa", "hackme")
+ self.bad_user_tok = self.login("teresa", "hackme")
+
+ self.store = hs.get_datastores().main
+
+ @override_config({"experimental_features": {"msc3823_account_suspension": True}})
+ def test_suspend_user(self) -> None:
+ # test that suspending user works
+ channel = self.make_request(
+ "PUT",
+ f"/_synapse/admin/v1/suspend/{self.bad_user}",
+ {"suspend": True},
+ access_token=self.admin_tok,
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body, {f"user_{self.bad_user}_suspended": True})
+
+ res = self.get_success(self.store.get_user_suspended_status(self.bad_user))
+ self.assertEqual(True, res)
+
+ # test that un-suspending user works
+ channel2 = self.make_request(
+ "PUT",
+ f"/_synapse/admin/v1/suspend/{self.bad_user}",
+ {"suspend": False},
+ access_token=self.admin_tok,
+ )
+ self.assertEqual(channel2.code, 200)
+ self.assertEqual(channel2.json_body, {f"user_{self.bad_user}_suspended": False})
+
+ res2 = self.get_success(self.store.get_user_suspended_status(self.bad_user))
+ self.assertEqual(False, res2)
+
+ # test that trying to un-suspend user who isn't suspended doesn't cause problems
+ channel3 = self.make_request(
+ "PUT",
+ f"/_synapse/admin/v1/suspend/{self.bad_user}",
+ {"suspend": False},
+ access_token=self.admin_tok,
+ )
+ self.assertEqual(channel3.code, 200)
+ self.assertEqual(channel3.json_body, {f"user_{self.bad_user}_suspended": False})
+
+ res3 = self.get_success(self.store.get_user_suspended_status(self.bad_user))
+ self.assertEqual(False, res3)
+
+ # test that trying to suspend user who is already suspended doesn't cause problems
+ channel4 = self.make_request(
+ "PUT",
+ f"/_synapse/admin/v1/suspend/{self.bad_user}",
+ {"suspend": True},
+ access_token=self.admin_tok,
+ )
+ self.assertEqual(channel4.code, 200)
+ self.assertEqual(channel4.json_body, {f"user_{self.bad_user}_suspended": True})
+
+ res4 = self.get_success(self.store.get_user_suspended_status(self.bad_user))
+ self.assertEqual(True, res4)
+
+ channel5 = self.make_request(
+ "PUT",
+ f"/_synapse/admin/v1/suspend/{self.bad_user}",
+ {"suspend": True},
+ access_token=self.admin_tok,
+ )
+ self.assertEqual(channel5.code, 200)
+ self.assertEqual(channel5.json_body, {f"user_{self.bad_user}_suspended": True})
+
+ res5 = self.get_success(self.store.get_user_suspended_status(self.bad_user))
+ self.assertEqual(True, res5)
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index d398cead1c..c559dfda83 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -3819,3 +3819,108 @@ class TimestampLookupTestCase(unittest.HomeserverTestCase):
# Make sure the outlier event is not returned
self.assertNotEqual(channel.json_body["event_id"], outlier_event.event_id)
+
+
+class UserSuspensionTests(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ profile.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.user1 = self.register_user("thomas", "hackme")
+ self.tok1 = self.login("thomas", "hackme")
+
+ self.user2 = self.register_user("teresa", "hackme")
+ self.tok2 = self.login("teresa", "hackme")
+
+ self.room1 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1)
+ self.store = hs.get_datastores().main
+
+ def test_suspended_user_cannot_send_message_to_room(self) -> None:
+ # set the user as suspended
+ self.get_success(self.store.set_user_suspended_status(self.user1, True))
+
+ channel = self.make_request(
+ "PUT",
+ f"/rooms/{self.room1}/send/m.room.message/1",
+ access_token=self.tok1,
+ content={"body": "hello", "msgtype": "m.text"},
+ )
+ self.assertEqual(
+ channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+ )
+
+ def test_suspended_user_cannot_change_profile_data(self) -> None:
+ # set the user as suspended
+ self.get_success(self.store.set_user_suspended_status(self.user1, True))
+
+ channel = self.make_request(
+ "PUT",
+ f"/_matrix/client/v3/profile/{self.user1}/avatar_url",
+ access_token=self.tok1,
+ content={"avatar_url": "mxc://matrix.org/wefh34uihSDRGhw34"},
+ shorthand=False,
+ )
+ self.assertEqual(
+ channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+ )
+
+ channel2 = self.make_request(
+ "PUT",
+ f"/_matrix/client/v3/profile/{self.user1}/displayname",
+ access_token=self.tok1,
+ content={"displayname": "something offensive"},
+ shorthand=False,
+ )
+ self.assertEqual(
+ channel2.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+ )
+
+ def test_suspended_user_cannot_redact_messages_other_than_their_own(self) -> None:
+ # first user sends message
+ self.make_request("POST", f"/rooms/{self.room1}/join", access_token=self.tok2)
+ res = self.helper.send_event(
+ self.room1,
+ "m.room.message",
+ {"body": "hello", "msgtype": "m.text"},
+ tok=self.tok2,
+ )
+ event_id = res["event_id"]
+
+ # second user sends message
+ self.make_request("POST", f"/rooms/{self.room1}/join", access_token=self.tok1)
+ res2 = self.helper.send_event(
+ self.room1,
+ "m.room.message",
+ {"body": "bad_message", "msgtype": "m.text"},
+ tok=self.tok1,
+ )
+ event_id2 = res2["event_id"]
+
+ # set the second user as suspended
+ self.get_success(self.store.set_user_suspended_status(self.user1, True))
+
+ # second user can't redact first user's message
+ channel = self.make_request(
+ "PUT",
+ f"/_matrix/client/v3/rooms/{self.room1}/redact/{event_id}/1",
+ access_token=self.tok1,
+ content={"reason": "bogus"},
+ shorthand=False,
+ )
+ self.assertEqual(
+ channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+ )
+
+ # but can redact their own
+ channel = self.make_request(
+ "PUT",
+ f"/_matrix/client/v3/rooms/{self.room1}/redact/{event_id2}/1",
+ access_token=self.tok1,
+ content={"reason": "bogus"},
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
From 930a64b6c1a4fe096d541bf9c5f0279fb636ed16 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 24 Jun 2024 15:40:28 +0100
Subject: [PATCH 213/503] Reintroduce #17291. (#17338)
This is #17291 (which got reverted), with some added fixups, and change
so that tests actually pick up the error.
The problem was that we were not calculating any new chain IDs due to a
missing `not` in a condition.
---
changelog.d/17338.misc | 1 +
synapse/storage/controllers/persist_events.py | 12 +
.../databases/main/event_federation.py | 20 +-
synapse/storage/databases/main/events.py | 263 +++++++++++++-----
tests/storage/test_event_chain.py | 9 +-
tests/storage/test_event_federation.py | 44 ++-
tests/unittest.py | 2 +
7 files changed, 256 insertions(+), 95 deletions(-)
create mode 100644 changelog.d/17338.misc
diff --git a/changelog.d/17338.misc b/changelog.d/17338.misc
new file mode 100644
index 0000000000..1a81bdef85
--- /dev/null
+++ b/changelog.d/17338.misc
@@ -0,0 +1 @@
+Do not block event sending/receiving while calculating large event auth chains.
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index 84699a2ee1..d0e015bf19 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -617,6 +617,17 @@ class EventsPersistenceStorageController:
room_id, chunk
)
+ with Measure(self._clock, "calculate_chain_cover_index_for_events"):
+ # We now calculate chain ID/sequence numbers for any state events we're
+ # persisting. We ignore out of band memberships as we're not in the room
+ # and won't have their auth chain (we'll fix it up later if we join the
+ # room).
+ #
+ # See: docs/auth_chain_difference_algorithm.md
+ new_event_links = await self.persist_events_store.calculate_chain_cover_index_for_events(
+ room_id, [e for e, _ in chunk]
+ )
+
await self.persist_events_store._persist_events_and_state_updates(
room_id,
chunk,
@@ -624,6 +635,7 @@ class EventsPersistenceStorageController:
new_forward_extremities=new_forward_extremities,
use_negative_stream_ordering=backfilled,
inhibit_local_membership_updates=backfilled,
+ new_event_links=new_event_links,
)
return replaced_events
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index fb132ef090..24abab4a23 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -148,6 +148,10 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
500000, "_event_auth_cache", size_callback=len
)
+ # Flag used by unit tests to disable fallback when there is no chain cover
+ # index.
+ self.tests_allow_no_chain_cover_index = True
+
self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)
if isinstance(self.database_engine, PostgresEngine):
@@ -220,8 +224,10 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
)
except _NoChainCoverIndex:
# For whatever reason we don't actually have a chain cover index
- # for the events in question, so we fall back to the old method.
- pass
+ # for the events in question, so we fall back to the old method
+ # (except in tests)
+ if not self.tests_allow_no_chain_cover_index:
+ raise
return await self.db_pool.runInteraction(
"get_auth_chain_ids",
@@ -271,7 +277,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
if events_missing_chain_info:
# This can happen due to e.g. downgrade/upgrade of the server. We
# raise an exception and fall back to the previous algorithm.
- logger.info(
+ logger.error(
"Unexpectedly found that events don't have chain IDs in room %s: %s",
room_id,
events_missing_chain_info,
@@ -482,8 +488,10 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
)
except _NoChainCoverIndex:
# For whatever reason we don't actually have a chain cover index
- # for the events in question, so we fall back to the old method.
- pass
+ # for the events in question, so we fall back to the old method
+ # (except in tests)
+ if not self.tests_allow_no_chain_cover_index:
+ raise
return await self.db_pool.runInteraction(
"get_auth_chain_difference",
@@ -710,7 +718,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
if events_missing_chain_info - event_to_auth_ids.keys():
# Uh oh, we somehow haven't correctly done the chain cover index,
# bail and fall back to the old method.
- logger.info(
+ logger.error(
"Unexpectedly found that events don't have chain IDs in room %s: %s",
room_id,
events_missing_chain_info - event_to_auth_ids.keys(),
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 66428e6c8e..1f7acdb859 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -34,7 +34,6 @@ from typing import (
Optional,
Set,
Tuple,
- Union,
cast,
)
@@ -100,6 +99,23 @@ class DeltaState:
return not self.to_delete and not self.to_insert and not self.no_longer_in_room
+@attr.s(slots=True, auto_attribs=True)
+class NewEventChainLinks:
+ """Information about new auth chain links that need to be added to the DB.
+
+ Attributes:
+ chain_id, sequence_number: the IDs corresponding to the event being
+ inserted, and the starting point of the links
+ links: Lists the links that need to be added, 2-tuple of the chain
+ ID/sequence number of the end point of the link.
+ """
+
+ chain_id: int
+ sequence_number: int
+
+ links: List[Tuple[int, int]] = attr.Factory(list)
+
+
class PersistEventsStore:
"""Contains all the functions for writing events to the database.
@@ -148,6 +164,7 @@ class PersistEventsStore:
*,
state_delta_for_room: Optional[DeltaState],
new_forward_extremities: Optional[Set[str]],
+ new_event_links: Dict[str, NewEventChainLinks],
use_negative_stream_ordering: bool = False,
inhibit_local_membership_updates: bool = False,
) -> None:
@@ -217,6 +234,7 @@ class PersistEventsStore:
inhibit_local_membership_updates=inhibit_local_membership_updates,
state_delta_for_room=state_delta_for_room,
new_forward_extremities=new_forward_extremities,
+ new_event_links=new_event_links,
)
persist_event_counter.inc(len(events_and_contexts))
@@ -243,6 +261,87 @@ class PersistEventsStore:
(room_id,), frozenset(new_forward_extremities)
)
+ async def calculate_chain_cover_index_for_events(
+ self, room_id: str, events: Collection[EventBase]
+ ) -> Dict[str, NewEventChainLinks]:
+ # Filter to state events, and ensure there are no duplicates.
+ state_events = []
+ seen_events = set()
+ for event in events:
+ if not event.is_state() or event.event_id in seen_events:
+ continue
+
+ state_events.append(event)
+ seen_events.add(event.event_id)
+
+ if not state_events:
+ return {}
+
+ return await self.db_pool.runInteraction(
+ "_calculate_chain_cover_index_for_events",
+ self.calculate_chain_cover_index_for_events_txn,
+ room_id,
+ state_events,
+ )
+
+ def calculate_chain_cover_index_for_events_txn(
+ self, txn: LoggingTransaction, room_id: str, state_events: Collection[EventBase]
+ ) -> Dict[str, NewEventChainLinks]:
+ # We now calculate chain ID/sequence numbers for any state events we're
+ # persisting. We ignore out of band memberships as we're not in the room
+ # and won't have their auth chain (we'll fix it up later if we join the
+ # room).
+ #
+ # See: docs/auth_chain_difference_algorithm.md
+
+ # We ignore legacy rooms that we aren't filling the chain cover index
+ # for.
+ row = self.db_pool.simple_select_one_txn(
+ txn,
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ retcols=("room_id", "has_auth_chain_index"),
+ allow_none=True,
+ )
+ if row is None or row[1] is False:
+ return {}
+
+ # Filter out events that we've already calculated.
+ rows = self.db_pool.simple_select_many_txn(
+ txn,
+ table="event_auth_chains",
+ column="event_id",
+ iterable=[e.event_id for e in state_events],
+ keyvalues={},
+ retcols=("event_id",),
+ )
+ already_persisted_events = {event_id for event_id, in rows}
+ state_events = [
+ event
+ for event in state_events
+ if event.event_id not in already_persisted_events
+ ]
+
+ if not state_events:
+ return {}
+
+ # We need to know the type/state_key and auth events of the events we're
+ # calculating chain IDs for. We don't rely on having the full Event
+ # instances as we'll potentially be pulling more events from the DB and
+ # we don't need the overhead of fetching/parsing the full event JSON.
+ event_to_types = {e.event_id: (e.type, e.state_key) for e in state_events}
+ event_to_auth_chain = {e.event_id: e.auth_event_ids() for e in state_events}
+ event_to_room_id = {e.event_id: e.room_id for e in state_events}
+
+ return self._calculate_chain_cover_index(
+ txn,
+ self.db_pool,
+ self.store.event_chain_id_gen,
+ event_to_room_id,
+ event_to_types,
+ event_to_auth_chain,
+ )
+
async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]:
"""Filter the supplied list of event_ids to get those which are prev_events of
existing (non-outlier/rejected) events.
@@ -358,6 +457,7 @@ class PersistEventsStore:
inhibit_local_membership_updates: bool,
state_delta_for_room: Optional[DeltaState],
new_forward_extremities: Optional[Set[str]],
+ new_event_links: Dict[str, NewEventChainLinks],
) -> None:
"""Insert some number of room events into the necessary database tables.
@@ -466,7 +566,9 @@ class PersistEventsStore:
# Insert into event_to_state_groups.
self._store_event_state_mappings_txn(txn, events_and_contexts)
- self._persist_event_auth_chain_txn(txn, [e for e, _ in events_and_contexts])
+ self._persist_event_auth_chain_txn(
+ txn, [e for e, _ in events_and_contexts], new_event_links
+ )
# _store_rejected_events_txn filters out any events which were
# rejected, and returns the filtered list.
@@ -496,7 +598,11 @@ class PersistEventsStore:
self,
txn: LoggingTransaction,
events: List[EventBase],
+ new_event_links: Dict[str, NewEventChainLinks],
) -> None:
+ if new_event_links:
+ self._persist_chain_cover_index(txn, self.db_pool, new_event_links)
+
# We only care about state events, so this if there are no state events.
if not any(e.is_state() for e in events):
return
@@ -519,60 +625,6 @@ class PersistEventsStore:
],
)
- # We now calculate chain ID/sequence numbers for any state events we're
- # persisting. We ignore out of band memberships as we're not in the room
- # and won't have their auth chain (we'll fix it up later if we join the
- # room).
- #
- # See: docs/auth_chain_difference_algorithm.md
-
- # We ignore legacy rooms that we aren't filling the chain cover index
- # for.
- rows = cast(
- List[Tuple[str, Optional[Union[int, bool]]]],
- self.db_pool.simple_select_many_txn(
- txn,
- table="rooms",
- column="room_id",
- iterable={event.room_id for event in events if event.is_state()},
- keyvalues={},
- retcols=("room_id", "has_auth_chain_index"),
- ),
- )
- rooms_using_chain_index = {
- room_id for room_id, has_auth_chain_index in rows if has_auth_chain_index
- }
-
- state_events = {
- event.event_id: event
- for event in events
- if event.is_state() and event.room_id in rooms_using_chain_index
- }
-
- if not state_events:
- return
-
- # We need to know the type/state_key and auth events of the events we're
- # calculating chain IDs for. We don't rely on having the full Event
- # instances as we'll potentially be pulling more events from the DB and
- # we don't need the overhead of fetching/parsing the full event JSON.
- event_to_types = {
- e.event_id: (e.type, e.state_key) for e in state_events.values()
- }
- event_to_auth_chain = {
- e.event_id: e.auth_event_ids() for e in state_events.values()
- }
- event_to_room_id = {e.event_id: e.room_id for e in state_events.values()}
-
- self._add_chain_cover_index(
- txn,
- self.db_pool,
- self.store.event_chain_id_gen,
- event_to_room_id,
- event_to_types,
- event_to_auth_chain,
- )
-
@classmethod
def _add_chain_cover_index(
cls,
@@ -583,6 +635,35 @@ class PersistEventsStore:
event_to_types: Dict[str, Tuple[str, str]],
event_to_auth_chain: Dict[str, StrCollection],
) -> None:
+ """Calculate and persist the chain cover index for the given events.
+
+ Args:
+ event_to_room_id: Event ID to the room ID of the event
+ event_to_types: Event ID to type and state_key of the event
+ event_to_auth_chain: Event ID to list of auth event IDs of the
+ event (events with no auth events can be excluded).
+ """
+
+ new_event_links = cls._calculate_chain_cover_index(
+ txn,
+ db_pool,
+ event_chain_id_gen,
+ event_to_room_id,
+ event_to_types,
+ event_to_auth_chain,
+ )
+ cls._persist_chain_cover_index(txn, db_pool, new_event_links)
+
+ @classmethod
+ def _calculate_chain_cover_index(
+ cls,
+ txn: LoggingTransaction,
+ db_pool: DatabasePool,
+ event_chain_id_gen: SequenceGenerator,
+ event_to_room_id: Dict[str, str],
+ event_to_types: Dict[str, Tuple[str, str]],
+ event_to_auth_chain: Dict[str, StrCollection],
+ ) -> Dict[str, NewEventChainLinks]:
"""Calculate the chain cover index for the given events.
Args:
@@ -590,6 +671,10 @@ class PersistEventsStore:
event_to_types: Event ID to type and state_key of the event
event_to_auth_chain: Event ID to list of auth event IDs of the
event (events with no auth events can be excluded).
+
+ Returns:
+ A mapping with any new auth chain links we need to add, keyed by
+ event ID.
"""
# Map from event ID to chain ID/sequence number.
@@ -708,11 +793,11 @@ class PersistEventsStore:
room_id = event_to_room_id.get(event_id)
if room_id:
e_type, state_key = event_to_types[event_id]
- db_pool.simple_insert_txn(
+ db_pool.simple_upsert_txn(
txn,
table="event_auth_chain_to_calculate",
+ keyvalues={"event_id": event_id},
values={
- "event_id": event_id,
"room_id": room_id,
"type": e_type,
"state_key": state_key,
@@ -724,7 +809,7 @@ class PersistEventsStore:
break
if not events_to_calc_chain_id_for:
- return
+ return {}
# Allocate chain ID/sequence numbers to each new event.
new_chain_tuples = cls._allocate_chain_ids(
@@ -739,23 +824,10 @@ class PersistEventsStore:
)
chain_map.update(new_chain_tuples)
- db_pool.simple_insert_many_txn(
- txn,
- table="event_auth_chains",
- keys=("event_id", "chain_id", "sequence_number"),
- values=[
- (event_id, c_id, seq)
- for event_id, (c_id, seq) in new_chain_tuples.items()
- ],
- )
-
- db_pool.simple_delete_many_txn(
- txn,
- table="event_auth_chain_to_calculate",
- keyvalues={},
- column="event_id",
- values=new_chain_tuples,
- )
+ to_return = {
+ event_id: NewEventChainLinks(chain_id, sequence_number)
+ for event_id, (chain_id, sequence_number) in new_chain_tuples.items()
+ }
# Now we need to calculate any new links between chains caused by
# the new events.
@@ -825,10 +897,38 @@ class PersistEventsStore:
auth_chain_id, auth_sequence_number = chain_map[auth_id]
# Step 2a, add link between the event and auth event
+ to_return[event_id].links.append((auth_chain_id, auth_sequence_number))
chain_links.add_link(
(chain_id, sequence_number), (auth_chain_id, auth_sequence_number)
)
+ return to_return
+
+ @classmethod
+ def _persist_chain_cover_index(
+ cls,
+ txn: LoggingTransaction,
+ db_pool: DatabasePool,
+ new_event_links: Dict[str, NewEventChainLinks],
+ ) -> None:
+ db_pool.simple_insert_many_txn(
+ txn,
+ table="event_auth_chains",
+ keys=("event_id", "chain_id", "sequence_number"),
+ values=[
+ (event_id, new_links.chain_id, new_links.sequence_number)
+ for event_id, new_links in new_event_links.items()
+ ],
+ )
+
+ db_pool.simple_delete_many_txn(
+ txn,
+ table="event_auth_chain_to_calculate",
+ keyvalues={},
+ column="event_id",
+ values=new_event_links,
+ )
+
db_pool.simple_insert_many_txn(
txn,
table="event_auth_chain_links",
@@ -838,7 +938,16 @@ class PersistEventsStore:
"target_chain_id",
"target_sequence_number",
),
- values=list(chain_links.get_additions()),
+ values=[
+ (
+ new_links.chain_id,
+ new_links.sequence_number,
+ target_chain_id,
+ target_sequence_number,
+ )
+ for new_links in new_event_links.values()
+ for (target_chain_id, target_sequence_number) in new_links.links
+ ],
)
@staticmethod
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 81feb3ec29..c4e216c308 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -447,7 +447,14 @@ class EventChainStoreTestCase(HomeserverTestCase):
)
# Actually call the function that calculates the auth chain stuff.
- persist_events_store._persist_event_auth_chain_txn(txn, events)
+ new_event_links = (
+ persist_events_store.calculate_chain_cover_index_for_events_txn(
+ txn, events[0].room_id, [e for e in events if e.is_state()]
+ )
+ )
+ persist_events_store._persist_event_auth_chain_txn(
+ txn, events, new_event_links
+ )
self.get_success(
persist_events_store.db_pool.runInteraction(
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 0a6253e22c..088f0d24f9 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -365,12 +365,19 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
},
)
+ events = [
+ cast(EventBase, FakeEvent(event_id, room_id, AUTH_GRAPH[event_id]))
+ for event_id in AUTH_GRAPH
+ ]
+ new_event_links = (
+ self.persist_events.calculate_chain_cover_index_for_events_txn(
+ txn, room_id, [e for e in events if e.is_state()]
+ )
+ )
self.persist_events._persist_event_auth_chain_txn(
txn,
- [
- cast(EventBase, FakeEvent(event_id, room_id, AUTH_GRAPH[event_id]))
- for event_id in AUTH_GRAPH
- ],
+ events,
+ new_event_links,
)
self.get_success(
@@ -544,6 +551,9 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
rooms.
"""
+ # We allow partial covers for this test
+ self.hs.get_datastores().main.tests_allow_no_chain_cover_index = True
+
room_id = "@ROOM:local"
# The silly auth graph we use to test the auth difference algorithm,
@@ -628,13 +638,20 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
)
# Insert all events apart from 'B'
+ events = [
+ cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
+ for event_id in auth_graph
+ if event_id != "b"
+ ]
+ new_event_links = (
+ self.persist_events.calculate_chain_cover_index_for_events_txn(
+ txn, room_id, [e for e in events if e.is_state()]
+ )
+ )
self.persist_events._persist_event_auth_chain_txn(
txn,
- [
- cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
- for event_id in auth_graph
- if event_id != "b"
- ],
+ events,
+ new_event_links,
)
# Now we insert the event 'B' without a chain cover, by temporarily
@@ -647,9 +664,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
updatevalues={"has_auth_chain_index": False},
)
+ events = [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))]
+ new_event_links = (
+ self.persist_events.calculate_chain_cover_index_for_events_txn(
+ txn, room_id, [e for e in events if e.is_state()]
+ )
+ )
self.persist_events._persist_event_auth_chain_txn(
- txn,
- [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))],
+ txn, events, new_event_links
)
self.store.db_pool.simple_update_txn(
diff --git a/tests/unittest.py b/tests/unittest.py
index 18963b9e32..a7c20556a0 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -344,6 +344,8 @@ class HomeserverTestCase(TestCase):
self._hs_args = {"clock": self.clock, "reactor": self.reactor}
self.hs = self.make_homeserver(self.reactor, self.clock)
+ self.hs.get_datastores().main.tests_allow_no_chain_cover_index = False
+
# Honour the `use_frozen_dicts` config option. We have to do this
# manually because this is taken care of in the app `start` code, which
# we don't run. Plus we want to reset it on tearDown.
From ae4c236a6d6ef76565240ac964c5f540b9c1e1ed Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 24 Jun 2024 10:02:16 -0500
Subject: [PATCH 214/503] Fix room `type` typo in mailer (#17336)
Correct event content field is `EventContentFields.ROOM_TYPE` (`type`) :white_check_mark: , not `room_type` :x:
Spec: https://spec.matrix.org/v1.10/client-server-api/#mroomcreate
---
changelog.d/17336.bugfix | 1 +
synapse/push/mailer.py | 5 +++--
2 files changed, 4 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/17336.bugfix
diff --git a/changelog.d/17336.bugfix b/changelog.d/17336.bugfix
new file mode 100644
index 0000000000..618834302e
--- /dev/null
+++ b/changelog.d/17336.bugfix
@@ -0,0 +1 @@
+Fix email notification subject when invited to a space.
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 77cc69a71f..cf611bd90b 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -28,7 +28,7 @@ import jinja2
from markupsafe import Markup
from prometheus_client import Counter
-from synapse.api.constants import EventTypes, Membership, RoomTypes
+from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes
from synapse.api.errors import StoreError
from synapse.config.emailconfig import EmailSubjectConfig
from synapse.events import EventBase
@@ -716,7 +716,8 @@ class Mailer:
)
if (
create_event
- and create_event.content.get("room_type") == RoomTypes.SPACE
+ and create_event.content.get(EventContentFields.ROOM_TYPE)
+ == RoomTypes.SPACE
):
return self.email_subjects.invite_from_person_to_space % {
"person": inviter_name,
From 3c61ddbbc9ee313447c16fa7f19bdc29ce647a32 Mon Sep 17 00:00:00 2001
From: devonh
Date: Mon, 24 Jun 2024 17:16:09 +0000
Subject: [PATCH 215/503] Add default values for rc_invites per_issuer to docs
(#17347)
A simple change to update the docs where default values were missing.
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [X] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct
(run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---------
Co-authored-by: Kim Brose <2803622+HarHarLinks@users.noreply.github.com>
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/17347.doc | 1 +
docs/usage/configuration/config_documentation.md | 5 +++--
2 files changed, 4 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/17347.doc
diff --git a/changelog.d/17347.doc b/changelog.d/17347.doc
new file mode 100644
index 0000000000..6cd41be60f
--- /dev/null
+++ b/changelog.d/17347.doc
@@ -0,0 +1 @@
+Add default values for `rc_invites.per_issuer` to docs.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 22c545359d..b3db078703 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -1759,8 +1759,9 @@ rc_3pid_validation:
### `rc_invites`
This option sets ratelimiting how often invites can be sent in a room or to a
-specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
-`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.
+specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`,
+`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer`
+defaults to `per_second: 0.3`, `burst_count: 10`.
Client requests that invite user(s) when [creating a
room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
From 805e6c9a8f703a0a774321bd0755be63dcdcc807 Mon Sep 17 00:00:00 2001
From: devonh
Date: Mon, 24 Jun 2024 17:18:58 +0000
Subject: [PATCH 216/503] Correct error in user_directory docs (#17348)
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [X] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct
(run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---------
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Co-authored-by: reivilibre
---
changelog.d/17348.doc | 1 +
docs/usage/configuration/config_documentation.md | 3 ++-
2 files changed, 3 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/17348.doc
diff --git a/changelog.d/17348.doc b/changelog.d/17348.doc
new file mode 100644
index 0000000000..4ce42bbadb
--- /dev/null
+++ b/changelog.d/17348.doc
@@ -0,0 +1 @@
+Fix an error in the docs for `search_all_users` parameter under `user_directory`.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index b3db078703..ba9f21cdee 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -3807,7 +3807,8 @@ This setting defines options related to the user directory.
This option has the following sub-options:
* `enabled`: Defines whether users can search the user directory. If false then
empty responses are returned to all queries. Defaults to true.
-* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver.
+* `search_all_users`: Defines whether to search all users visible to your homeserver at the time the search is performed.
+ If set to true, will return all users known to the homeserver matching the search query.
If false, search results will only contain users
visible in public rooms and users sharing a room with the requester.
Defaults to false.
From 6e8af8319373e1ab470f1d8eee0420f3be84184f Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 24 Jun 2024 19:07:56 -0500
Subject: [PATCH 217/503] Add `is_invite` filtering to Sliding Sync `/sync`
(#17335)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
---
changelog.d/17335.feature | 1 +
synapse/handlers/sliding_sync.py | 19 +++-
tests/handlers/test_sliding_sync.py | 74 +++++++++++---
tests/rest/client/test_sync.py | 148 +++++++++++++++++++++++-----
4 files changed, 199 insertions(+), 43 deletions(-)
create mode 100644 changelog.d/17335.feature
diff --git a/changelog.d/17335.feature b/changelog.d/17335.feature
new file mode 100644
index 0000000000..c6beed42ed
--- /dev/null
+++ b/changelog.d/17335.feature
@@ -0,0 +1 @@
+Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 16d94925f5..847a638bba 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -554,7 +554,7 @@ class SlidingSyncHandler:
# Flatten out the map
dm_room_id_set = set()
- if dm_map:
+ if isinstance(dm_map, dict):
for room_ids in dm_map.values():
# Account data should be a list of room IDs. Ignore anything else
if isinstance(room_ids, list):
@@ -593,8 +593,21 @@ class SlidingSyncHandler:
):
filtered_room_id_set.remove(room_id)
- if filters.is_invite:
- raise NotImplementedError()
+ # Filter for rooms that the user has been invited to
+ if filters.is_invite is not None:
+ # Make a copy so we don't run into an error: `Set changed size during
+ # iteration`, when we filter out and remove items
+ for room_id in list(filtered_room_id_set):
+ room_for_user = sync_room_map[room_id]
+ # If we're looking for invite rooms, filter out rooms that the user is
+ # not invited to and vice versa
+ if (
+ filters.is_invite and room_for_user.membership != Membership.INVITE
+ ) or (
+ not filters.is_invite
+ and room_for_user.membership == Membership.INVITE
+ ):
+ filtered_room_id_set.remove(room_id)
if filters.room_types:
raise NotImplementedError()
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 0358239c7f..8dd4521b18 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -1200,11 +1200,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
user2_tok = self.login(user2_id, "pass")
# Create a normal room
- room_id = self.helper.create_room_as(
- user1_id,
- is_public=False,
- tok=user1_tok,
- )
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create a DM room
dm_room_id = self._create_dm_room(
@@ -1261,18 +1257,10 @@ class FilterRoomsTestCase(HomeserverTestCase):
user1_tok = self.login(user1_id, "pass")
# Create a normal room
- room_id = self.helper.create_room_as(
- user1_id,
- is_public=False,
- tok=user1_tok,
- )
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create an encrypted room
- encrypted_room_id = self.helper.create_room_as(
- user1_id,
- is_public=False,
- tok=user1_tok,
- )
+ encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.send_state(
encrypted_room_id,
EventTypes.RoomEncryption,
@@ -1319,6 +1307,62 @@ class FilterRoomsTestCase(HomeserverTestCase):
self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
+ def test_filter_invite_rooms(self) -> None:
+ """
+ Test `filter.is_invite` for rooms that the user has been invited to
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Create a normal room
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+
+ # Create a room that user1 is invited to
+ invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+
+ after_rooms_token = self.event_sources.get_current_token()
+
+ # Get the rooms the user should be syncing with
+ sync_room_map = self.get_success(
+ self.sliding_sync_handler.get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=None,
+ to_token=after_rooms_token,
+ )
+ )
+
+ # Try with `is_invite=True`
+ truthy_filtered_room_map = self.get_success(
+ self.sliding_sync_handler.filter_rooms(
+ UserID.from_string(user1_id),
+ sync_room_map,
+ SlidingSyncConfig.SlidingSyncList.Filters(
+ is_invite=True,
+ ),
+ after_rooms_token,
+ )
+ )
+
+ self.assertEqual(truthy_filtered_room_map.keys(), {invite_room_id})
+
+ # Try with `is_invite=False`
+ falsy_filtered_room_map = self.get_success(
+ self.sliding_sync_handler.filter_rooms(
+ UserID.from_string(user1_id),
+ sync_room_map,
+ SlidingSyncConfig.SlidingSyncList.Filters(
+ is_invite=False,
+ ),
+ after_rooms_token,
+ )
+ )
+
+ self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
+
class SortRoomsTestCase(HomeserverTestCase):
"""
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 5195659ec2..bfb26139d3 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -19,7 +19,8 @@
#
#
import json
-from typing import List
+import logging
+from typing import Dict, List
from parameterized import parameterized, parameterized_class
@@ -44,6 +45,8 @@ from tests.federation.transport.test_knocking import (
)
from tests.server import TimedOutException
+logger = logging.getLogger(__name__)
+
class FilterTestCase(unittest.HomeserverTestCase):
user_id = "@apple:test"
@@ -1234,12 +1237,58 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
self.store = hs.get_datastores().main
self.event_sources = hs.get_event_sources()
+ def _add_new_dm_to_global_account_data(
+ self, source_user_id: str, target_user_id: str, target_room_id: str
+ ) -> None:
+ """
+ Helper to handle inserting a new DM for the source user into global account data
+ (handles all of the list merging).
+
+ Args:
+ source_user_id: The user ID of the DM mapping we're going to update
+ target_user_id: User ID of the person the DM is with
+ target_room_id: Room ID of the DM
+ """
+
+ # Get the current DM map
+ existing_dm_map = self.get_success(
+ self.store.get_global_account_data_by_type_for_user(
+ source_user_id, AccountDataTypes.DIRECT
+ )
+ )
+ # Scrutinize the account data since it has no concrete type. We're just copying
+ # everything into a known type. It should be a mapping from user ID to a list of
+ # room IDs. Ignore anything else.
+ new_dm_map: Dict[str, List[str]] = {}
+ if isinstance(existing_dm_map, dict):
+ for user_id, room_ids in existing_dm_map.items():
+ if isinstance(user_id, str) and isinstance(room_ids, list):
+ for room_id in room_ids:
+ if isinstance(room_id, str):
+ new_dm_map[user_id] = new_dm_map.get(user_id, []) + [
+ room_id
+ ]
+
+ # Add the new DM to the map
+ new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [
+ target_room_id
+ ]
+ # Save the DM map to global account data
+ self.get_success(
+ self.store.add_account_data_for_user(
+ source_user_id,
+ AccountDataTypes.DIRECT,
+ new_dm_map,
+ )
+ )
+
def _create_dm_room(
self,
inviter_user_id: str,
inviter_tok: str,
invitee_user_id: str,
invitee_tok: str,
+ should_join_room: bool = True,
) -> str:
"""
Helper to create a DM room as the "inviter" and invite the "invitee" user to the
@@ -1260,24 +1309,17 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
tok=inviter_tok,
extra_data={"is_direct": True},
)
- # Person that was invited joins the room
- self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
+ if should_join_room:
+ # Person that was invited joins the room
+ self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
# Mimic the client setting the room as a direct message in the global account
- # data
- self.get_success(
- self.store.add_account_data_for_user(
- invitee_user_id,
- AccountDataTypes.DIRECT,
- {inviter_user_id: [room_id]},
- )
+ # data for both users.
+ self._add_new_dm_to_global_account_data(
+ invitee_user_id, inviter_user_id, room_id
)
- self.get_success(
- self.store.add_account_data_for_user(
- inviter_user_id,
- AccountDataTypes.DIRECT,
- {invitee_user_id: [room_id]},
- )
+ self._add_new_dm_to_global_account_data(
+ inviter_user_id, invitee_user_id, room_id
)
return room_id
@@ -1397,15 +1439,28 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
user2_tok = self.login(user2_id, "pass")
# Create a DM room
- dm_room_id = self._create_dm_room(
+ joined_dm_room_id = self._create_dm_room(
inviter_user_id=user1_id,
inviter_tok=user1_tok,
invitee_user_id=user2_id,
invitee_tok=user2_tok,
+ should_join_room=True,
+ )
+ invited_dm_room_id = self._create_dm_room(
+ inviter_user_id=user1_id,
+ inviter_tok=user1_tok,
+ invitee_user_id=user2_id,
+ invitee_tok=user2_tok,
+ should_join_room=False,
)
# Create a normal room
- room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ room_id = self.helper.create_room_as(user1_id, tok=user2_tok)
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+
+ # Create a room that user1 is invited to
+ invite_room_id = self.helper.create_room_as(user1_id, tok=user2_tok)
+ self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
# Make the Sliding Sync request
channel = self.make_request(
@@ -1413,18 +1468,34 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
self.sync_endpoint,
{
"lists": {
+ # Absence of filters does not imply "False" values
+ "all": {
+ "ranges": [[0, 99]],
+ "required_state": [],
+ "timeline_limit": 1,
+ "filters": {},
+ },
+ # Test single truthy filter
"dms": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
"filters": {"is_dm": True},
},
- "foo-list": {
+ # Test single falsy filter
+ "non-dms": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
"filters": {"is_dm": False},
},
+ # Test how multiple filters should stack (AND'd together)
+ "room-invites": {
+ "ranges": [[0, 99]],
+ "required_state": [],
+ "timeline_limit": 1,
+ "filters": {"is_dm": False, "is_invite": True},
+ },
}
},
access_token=user1_tok,
@@ -1434,32 +1505,59 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
# Make sure it has the foo-list we requested
self.assertListEqual(
list(channel.json_body["lists"].keys()),
- ["dms", "foo-list"],
+ ["all", "dms", "non-dms", "room-invites"],
channel.json_body["lists"].keys(),
)
- # Make sure the list includes the room we are joined to
+ # Make sure the lists have the correct rooms
+ self.assertListEqual(
+ list(channel.json_body["lists"]["all"]["ops"]),
+ [
+ {
+ "op": "SYNC",
+ "range": [0, 99],
+ "room_ids": [
+ invite_room_id,
+ room_id,
+ invited_dm_room_id,
+ joined_dm_room_id,
+ ],
+ }
+ ],
+ list(channel.json_body["lists"]["all"]),
+ )
self.assertListEqual(
list(channel.json_body["lists"]["dms"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
- "room_ids": [dm_room_id],
+ "room_ids": [invited_dm_room_id, joined_dm_room_id],
}
],
list(channel.json_body["lists"]["dms"]),
)
self.assertListEqual(
- list(channel.json_body["lists"]["foo-list"]["ops"]),
+ list(channel.json_body["lists"]["non-dms"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
- "room_ids": [room_id],
+ "room_ids": [invite_room_id, room_id],
}
],
- list(channel.json_body["lists"]["foo-list"]),
+ list(channel.json_body["lists"]["non-dms"]),
+ )
+ self.assertListEqual(
+ list(channel.json_body["lists"]["room-invites"]["ops"]),
+ [
+ {
+ "op": "SYNC",
+ "range": [0, 99],
+ "room_ids": [invite_room_id],
+ }
+ ],
+ list(channel.json_body["lists"]["room-invites"]),
)
def test_sort_list(self) -> None:
From a98cb87bee18c9028d03676ce544860239e1ff34 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 25 Jun 2024 09:57:34 +0100
Subject: [PATCH 218/503] Revert "Reduce device lists replication traffic."
(#17360)
Reverts element-hq/synapse#17333
It looks like master was still sending out replication RDATA with the
old format... somehow
---
changelog.d/17333.misc | 1 -
synapse/replication/tcp/client.py | 19 ++--
synapse/replication/tcp/streams/_base.py | 12 +--
synapse/storage/databases/main/devices.py | 93 +++++++------------
.../storage/databases/main/end_to_end_keys.py | 4 +-
tests/storage/test_devices.py | 8 --
6 files changed, 48 insertions(+), 89 deletions(-)
delete mode 100644 changelog.d/17333.misc
diff --git a/changelog.d/17333.misc b/changelog.d/17333.misc
deleted file mode 100644
index d3ef0b3777..0000000000
--- a/changelog.d/17333.misc
+++ /dev/null
@@ -1 +0,0 @@
-Handle device lists notifications for large accounts more efficiently in worker mode.
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 3dddbb70b4..2d6d49eed7 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -114,19 +114,13 @@ class ReplicationDataHandler:
"""
all_room_ids: Set[str] = set()
if stream_name == DeviceListsStream.NAME:
- if any(not row.is_signature and not row.hosts_calculated for row in rows):
+ if any(row.entity.startswith("@") and not row.is_signature for row in rows):
prev_token = self.store.get_device_stream_token()
all_room_ids = await self.store.get_all_device_list_changes(
prev_token, token
)
self.store.device_lists_in_rooms_have_changed(all_room_ids, token)
- # If we're sending federation we need to update the device lists
- # outbound pokes stream change cache with updated hosts.
- if self.send_handler and any(row.hosts_calculated for row in rows):
- hosts = await self.store.get_destinations_for_device(token)
- self.store.device_lists_outbound_pokes_have_changed(hosts, token)
-
self.store.process_replication_rows(stream_name, instance_name, token, rows)
# NOTE: this must be called after process_replication_rows to ensure any
# cache invalidations are first handled before any stream ID advances.
@@ -439,11 +433,12 @@ class FederationSenderHandler:
# The entities are either user IDs (starting with '@') whose devices
# have changed, or remote servers that we need to tell about
# changes.
- if any(row.hosts_calculated for row in rows):
- hosts = await self.store.get_destinations_for_device(token)
- await self.federation_sender.send_device_messages(
- hosts, immediate=False
- )
+ hosts = {
+ row.entity
+ for row in rows
+ if not row.entity.startswith("@") and not row.is_signature
+ }
+ await self.federation_sender.send_device_messages(hosts, immediate=False)
elif stream_name == ToDeviceStream.NAME:
# The to_device stream includes stuff to be pushed to both local
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index d021904de7..661206c841 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -549,14 +549,10 @@ class DeviceListsStream(_StreamFromIdGen):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class DeviceListsStreamRow:
- user_id: str
+ entity: str
# Indicates that a user has signed their own device with their user-signing key
is_signature: bool
- # Indicates if this is a notification that we've calculated the hosts we
- # need to send the update to.
- hosts_calculated: bool
-
NAME = "device_lists"
ROW_TYPE = DeviceListsStreamRow
@@ -598,13 +594,13 @@ class DeviceListsStream(_StreamFromIdGen):
upper_limit_token = min(upper_limit_token, signatures_to_token)
device_updates = [
- (stream_id, (entity, False, hosts))
- for stream_id, (entity, hosts) in device_updates
+ (stream_id, (entity, False))
+ for stream_id, (entity,) in device_updates
if stream_id <= upper_limit_token
]
signatures_updates = [
- (stream_id, (entity, True, False))
+ (stream_id, (entity, True))
for stream_id, (entity,) in signatures_updates
if stream_id <= upper_limit_token
]
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 5eeca6165d..40187496e2 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -164,24 +164,22 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
prefilled_cache=user_signature_stream_prefill,
)
- self._device_list_federation_stream_cache = None
- if hs.should_send_federation():
- (
- device_list_federation_prefill,
- device_list_federation_list_id,
- ) = self.db_pool.get_cache_dict(
- db_conn,
- "device_lists_outbound_pokes",
- entity_column="destination",
- stream_column="stream_id",
- max_value=device_list_max,
- limit=10000,
- )
- self._device_list_federation_stream_cache = StreamChangeCache(
- "DeviceListFederationStreamChangeCache",
- device_list_federation_list_id,
- prefilled_cache=device_list_federation_prefill,
- )
+ (
+ device_list_federation_prefill,
+ device_list_federation_list_id,
+ ) = self.db_pool.get_cache_dict(
+ db_conn,
+ "device_lists_outbound_pokes",
+ entity_column="destination",
+ stream_column="stream_id",
+ max_value=device_list_max,
+ limit=10000,
+ )
+ self._device_list_federation_stream_cache = StreamChangeCache(
+ "DeviceListFederationStreamChangeCache",
+ device_list_federation_list_id,
+ prefilled_cache=device_list_federation_prefill,
+ )
if hs.config.worker.run_background_tasks:
self._clock.looping_call(
@@ -209,30 +207,23 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
) -> None:
for row in rows:
if row.is_signature:
- self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
+ self._user_signature_stream_cache.entity_has_changed(row.entity, token)
continue
# The entities are either user IDs (starting with '@') whose devices
# have changed, or remote servers that we need to tell about
# changes.
- if not row.hosts_calculated:
- self._device_list_stream_cache.entity_has_changed(row.user_id, token)
- self.get_cached_devices_for_user.invalidate((row.user_id,))
- self._get_cached_user_device.invalidate((row.user_id,))
- self.get_device_list_last_stream_id_for_remote.invalidate(
- (row.user_id,)
+ if row.entity.startswith("@"):
+ self._device_list_stream_cache.entity_has_changed(row.entity, token)
+ self.get_cached_devices_for_user.invalidate((row.entity,))
+ self._get_cached_user_device.invalidate((row.entity,))
+ self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
+
+ else:
+ self._device_list_federation_stream_cache.entity_has_changed(
+ row.entity, token
)
- def device_lists_outbound_pokes_have_changed(
- self, destinations: StrCollection, token: int
- ) -> None:
- assert self._device_list_federation_stream_cache is not None
-
- for destination in destinations:
- self._device_list_federation_stream_cache.entity_has_changed(
- destination, token
- )
-
def device_lists_in_rooms_have_changed(
self, room_ids: StrCollection, token: int
) -> None:
@@ -372,11 +363,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
EDU contents.
"""
now_stream_id = self.get_device_stream_token()
- if from_stream_id == now_stream_id:
- return now_stream_id, []
-
- if self._device_list_federation_stream_cache is None:
- raise Exception("Func can only be used on federation senders")
has_changed = self._device_list_federation_stream_cache.has_entity_changed(
destination, int(from_stream_id)
@@ -1032,10 +1018,10 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
# This query Does The Right Thing where it'll correctly apply the
# bounds to the inner queries.
sql = """
- SELECT stream_id, user_id, hosts FROM (
- SELECT stream_id, user_id, false AS hosts FROM device_lists_stream
+ SELECT stream_id, entity FROM (
+ SELECT stream_id, user_id AS entity FROM device_lists_stream
UNION ALL
- SELECT DISTINCT stream_id, user_id, true AS hosts FROM device_lists_outbound_pokes
+ SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes
) AS e
WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
@@ -1591,14 +1577,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
get_device_list_changes_in_room_txn,
)
- async def get_destinations_for_device(self, stream_id: int) -> StrCollection:
- return await self.db_pool.simple_select_onecol(
- table="device_lists_outbound_pokes",
- keyvalues={"stream_id": stream_id},
- retcol="destination",
- desc="get_destinations_for_device",
- )
-
class DeviceBackgroundUpdateStore(SQLBaseStore):
def __init__(
@@ -2134,13 +2112,12 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
stream_ids: List[int],
context: Optional[Dict[str, str]],
) -> None:
- if self._device_list_federation_stream_cache:
- for host in hosts:
- txn.call_after(
- self._device_list_federation_stream_cache.entity_has_changed,
- host,
- stream_ids[-1],
- )
+ for host in hosts:
+ txn.call_after(
+ self._device_list_federation_stream_cache.entity_has_changed,
+ host,
+ stream_ids[-1],
+ )
now = self._clock.time_msec()
stream_id_iterator = iter(stream_ids)
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 9e6c9561ae..38d8785faa 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -123,9 +123,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
if stream_name == DeviceListsStream.NAME:
for row in rows:
assert isinstance(row, DeviceListsStream.DeviceListsStreamRow)
- if not row.hosts_calculated:
+ if row.entity.startswith("@"):
self._get_e2e_device_keys_for_federation_query_inner.invalidate(
- (row.user_id,)
+ (row.entity,)
)
super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index ba01b038ab..7f975d04ff 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -36,14 +36,6 @@ class DeviceStoreTestCase(HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
- def default_config(self) -> JsonDict:
- config = super().default_config()
-
- # We 'enable' federation otherwise `get_device_updates_by_remote` will
- # throw an exception.
- config["federation_sender_instances"] = ["master"]
- return config
-
def add_device_change(self, user_id: str, device_ids: List[str], host: str) -> None:
"""Add a device list change for the given device to
`device_lists_outbound_pokes` table.
From 554a92601a4bf61f9076adfffb613a2c19871446 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 25 Jun 2024 10:34:34 +0100
Subject: [PATCH 219/503] Reintroduce "Reduce device lists replication
 traffic." (#17361)
Reintroduces https://github.com/element-hq/synapse/pull/17333
Turns out the reason for the revert was down to two master instances running
---
changelog.d/17333.misc | 1 +
synapse/replication/tcp/client.py | 19 ++--
synapse/replication/tcp/streams/_base.py | 12 ++-
synapse/storage/databases/main/devices.py | 93 ++++++++++++-------
.../storage/databases/main/end_to_end_keys.py | 4 +-
tests/storage/test_devices.py | 8 ++
6 files changed, 89 insertions(+), 48 deletions(-)
create mode 100644 changelog.d/17333.misc
diff --git a/changelog.d/17333.misc b/changelog.d/17333.misc
new file mode 100644
index 0000000000..d3ef0b3777
--- /dev/null
+++ b/changelog.d/17333.misc
@@ -0,0 +1 @@
+Handle device lists notifications for large accounts more efficiently in worker mode.
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 2d6d49eed7..3dddbb70b4 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -114,13 +114,19 @@ class ReplicationDataHandler:
"""
all_room_ids: Set[str] = set()
if stream_name == DeviceListsStream.NAME:
- if any(row.entity.startswith("@") and not row.is_signature for row in rows):
+ if any(not row.is_signature and not row.hosts_calculated for row in rows):
prev_token = self.store.get_device_stream_token()
all_room_ids = await self.store.get_all_device_list_changes(
prev_token, token
)
self.store.device_lists_in_rooms_have_changed(all_room_ids, token)
+ # If we're sending federation we need to update the device lists
+ # outbound pokes stream change cache with updated hosts.
+ if self.send_handler and any(row.hosts_calculated for row in rows):
+ hosts = await self.store.get_destinations_for_device(token)
+ self.store.device_lists_outbound_pokes_have_changed(hosts, token)
+
self.store.process_replication_rows(stream_name, instance_name, token, rows)
# NOTE: this must be called after process_replication_rows to ensure any
# cache invalidations are first handled before any stream ID advances.
@@ -433,12 +439,11 @@ class FederationSenderHandler:
# The entities are either user IDs (starting with '@') whose devices
# have changed, or remote servers that we need to tell about
# changes.
- hosts = {
- row.entity
- for row in rows
- if not row.entity.startswith("@") and not row.is_signature
- }
- await self.federation_sender.send_device_messages(hosts, immediate=False)
+ if any(row.hosts_calculated for row in rows):
+ hosts = await self.store.get_destinations_for_device(token)
+ await self.federation_sender.send_device_messages(
+ hosts, immediate=False
+ )
elif stream_name == ToDeviceStream.NAME:
# The to_device stream includes stuff to be pushed to both local
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 661206c841..d021904de7 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -549,10 +549,14 @@ class DeviceListsStream(_StreamFromIdGen):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class DeviceListsStreamRow:
- entity: str
+ user_id: str
# Indicates that a user has signed their own device with their user-signing key
is_signature: bool
+ # Indicates if this is a notification that we've calculated the hosts we
+ # need to send the update to.
+ hosts_calculated: bool
+
NAME = "device_lists"
ROW_TYPE = DeviceListsStreamRow
@@ -594,13 +598,13 @@ class DeviceListsStream(_StreamFromIdGen):
upper_limit_token = min(upper_limit_token, signatures_to_token)
device_updates = [
- (stream_id, (entity, False))
- for stream_id, (entity,) in device_updates
+ (stream_id, (entity, False, hosts))
+ for stream_id, (entity, hosts) in device_updates
if stream_id <= upper_limit_token
]
signatures_updates = [
- (stream_id, (entity, True))
+ (stream_id, (entity, True, False))
for stream_id, (entity,) in signatures_updates
if stream_id <= upper_limit_token
]
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 40187496e2..5eeca6165d 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -164,22 +164,24 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
prefilled_cache=user_signature_stream_prefill,
)
- (
- device_list_federation_prefill,
- device_list_federation_list_id,
- ) = self.db_pool.get_cache_dict(
- db_conn,
- "device_lists_outbound_pokes",
- entity_column="destination",
- stream_column="stream_id",
- max_value=device_list_max,
- limit=10000,
- )
- self._device_list_federation_stream_cache = StreamChangeCache(
- "DeviceListFederationStreamChangeCache",
- device_list_federation_list_id,
- prefilled_cache=device_list_federation_prefill,
- )
+ self._device_list_federation_stream_cache = None
+ if hs.should_send_federation():
+ (
+ device_list_federation_prefill,
+ device_list_federation_list_id,
+ ) = self.db_pool.get_cache_dict(
+ db_conn,
+ "device_lists_outbound_pokes",
+ entity_column="destination",
+ stream_column="stream_id",
+ max_value=device_list_max,
+ limit=10000,
+ )
+ self._device_list_federation_stream_cache = StreamChangeCache(
+ "DeviceListFederationStreamChangeCache",
+ device_list_federation_list_id,
+ prefilled_cache=device_list_federation_prefill,
+ )
if hs.config.worker.run_background_tasks:
self._clock.looping_call(
@@ -207,23 +209,30 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
) -> None:
for row in rows:
if row.is_signature:
- self._user_signature_stream_cache.entity_has_changed(row.entity, token)
+ self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
continue
# The entities are either user IDs (starting with '@') whose devices
# have changed, or remote servers that we need to tell about
# changes.
- if row.entity.startswith("@"):
- self._device_list_stream_cache.entity_has_changed(row.entity, token)
- self.get_cached_devices_for_user.invalidate((row.entity,))
- self._get_cached_user_device.invalidate((row.entity,))
- self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
-
- else:
- self._device_list_federation_stream_cache.entity_has_changed(
- row.entity, token
+ if not row.hosts_calculated:
+ self._device_list_stream_cache.entity_has_changed(row.user_id, token)
+ self.get_cached_devices_for_user.invalidate((row.user_id,))
+ self._get_cached_user_device.invalidate((row.user_id,))
+ self.get_device_list_last_stream_id_for_remote.invalidate(
+ (row.user_id,)
)
+ def device_lists_outbound_pokes_have_changed(
+ self, destinations: StrCollection, token: int
+ ) -> None:
+ assert self._device_list_federation_stream_cache is not None
+
+ for destination in destinations:
+ self._device_list_federation_stream_cache.entity_has_changed(
+ destination, token
+ )
+
def device_lists_in_rooms_have_changed(
self, room_ids: StrCollection, token: int
) -> None:
@@ -363,6 +372,11 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
EDU contents.
"""
now_stream_id = self.get_device_stream_token()
+ if from_stream_id == now_stream_id:
+ return now_stream_id, []
+
+ if self._device_list_federation_stream_cache is None:
+ raise Exception("Func can only be used on federation senders")
has_changed = self._device_list_federation_stream_cache.has_entity_changed(
destination, int(from_stream_id)
@@ -1018,10 +1032,10 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
# This query Does The Right Thing where it'll correctly apply the
# bounds to the inner queries.
sql = """
- SELECT stream_id, entity FROM (
- SELECT stream_id, user_id AS entity FROM device_lists_stream
+ SELECT stream_id, user_id, hosts FROM (
+ SELECT stream_id, user_id, false AS hosts FROM device_lists_stream
UNION ALL
- SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes
+ SELECT DISTINCT stream_id, user_id, true AS hosts FROM device_lists_outbound_pokes
) AS e
WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
@@ -1577,6 +1591,14 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
get_device_list_changes_in_room_txn,
)
+ async def get_destinations_for_device(self, stream_id: int) -> StrCollection:
+ return await self.db_pool.simple_select_onecol(
+ table="device_lists_outbound_pokes",
+ keyvalues={"stream_id": stream_id},
+ retcol="destination",
+ desc="get_destinations_for_device",
+ )
+
class DeviceBackgroundUpdateStore(SQLBaseStore):
def __init__(
@@ -2112,12 +2134,13 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
stream_ids: List[int],
context: Optional[Dict[str, str]],
) -> None:
- for host in hosts:
- txn.call_after(
- self._device_list_federation_stream_cache.entity_has_changed,
- host,
- stream_ids[-1],
- )
+ if self._device_list_federation_stream_cache:
+ for host in hosts:
+ txn.call_after(
+ self._device_list_federation_stream_cache.entity_has_changed,
+ host,
+ stream_ids[-1],
+ )
now = self._clock.time_msec()
stream_id_iterator = iter(stream_ids)
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 38d8785faa..9e6c9561ae 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -123,9 +123,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
if stream_name == DeviceListsStream.NAME:
for row in rows:
assert isinstance(row, DeviceListsStream.DeviceListsStreamRow)
- if row.entity.startswith("@"):
+ if not row.hosts_calculated:
self._get_e2e_device_keys_for_federation_query_inner.invalidate(
- (row.entity,)
+ (row.user_id,)
)
super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index 7f975d04ff..ba01b038ab 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -36,6 +36,14 @@ class DeviceStoreTestCase(HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+
+ # We 'enable' federation otherwise `get_device_updates_by_remote` will
+ # throw an exception.
+ config["federation_sender_instances"] = ["master"]
+ return config
+
def add_device_change(self, user_id: str, device_ids: List[str], host: str) -> None:
"""Add a device list change for the given device to
`device_lists_outbound_pokes` table.
From c89fea3fd1f47b43c4d500dd7d024b2f9b24d2ad Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 25 Jun 2024 11:17:39 +0100
Subject: [PATCH 220/503] Limit amount of replication we send (#17358)
Fixes up #17333, where we failed to actually send less data (the
`DISTINCT` didn't work due to `stream_id` being different).
We fix this by making it so that every device list outbound poke for a
given user ID has the same stream ID. We can't change the query to only
return e.g. max stream ID as the receivers look up the destinations to
send to by doing `SELECT WHERE stream_id = ?`
---
changelog.d/17358.misc | 1 +
synapse/storage/databases/main/devices.py | 15 +++++++--------
2 files changed, 8 insertions(+), 8 deletions(-)
create mode 100644 changelog.d/17358.misc
diff --git a/changelog.d/17358.misc b/changelog.d/17358.misc
new file mode 100644
index 0000000000..d3ef0b3777
--- /dev/null
+++ b/changelog.d/17358.misc
@@ -0,0 +1 @@
+Handle device lists notifications for large accounts more efficiently in worker mode.
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 5eeca6165d..59a035dd62 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -2131,7 +2131,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
user_id: str,
device_id: str,
hosts: Collection[str],
- stream_ids: List[int],
+ stream_id: int,
context: Optional[Dict[str, str]],
) -> None:
if self._device_list_federation_stream_cache:
@@ -2139,11 +2139,10 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
txn.call_after(
self._device_list_federation_stream_cache.entity_has_changed,
host,
- stream_ids[-1],
+ stream_id,
)
now = self._clock.time_msec()
- stream_id_iterator = iter(stream_ids)
encoded_context = json_encoder.encode(context)
mark_sent = not self.hs.is_mine_id(user_id)
@@ -2152,7 +2151,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
(
destination,
self._instance_name,
- next(stream_id_iterator),
+ stream_id,
user_id,
device_id,
mark_sent,
@@ -2337,22 +2336,22 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
return
def add_device_list_outbound_pokes_txn(
- txn: LoggingTransaction, stream_ids: List[int]
+ txn: LoggingTransaction, stream_id: int
) -> None:
self._add_device_outbound_poke_to_stream_txn(
txn,
user_id=user_id,
device_id=device_id,
hosts=hosts,
- stream_ids=stream_ids,
+ stream_id=stream_id,
context=context,
)
- async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids:
+ async with self._device_list_id_gen.get_next() as stream_id:
return await self.db_pool.runInteraction(
"add_device_list_outbound_pokes",
add_device_list_outbound_pokes_txn,
- stream_ids,
+ stream_id,
)
async def add_remote_device_list_to_pending(
From f79dbd0f61194929585d7010a3ec1b9ee208f033 Mon Sep 17 00:00:00 2001
From: douglaz
Date: Tue, 25 Jun 2024 11:07:13 +0000
Subject: [PATCH 221/503] Fix refreshable_access_token_lifetime typo (#17357)
Simple typo in the docs
---
docs/usage/configuration/config_documentation.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index ba9f21cdee..80a7bf9d21 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -2719,7 +2719,7 @@ Example configuration:
session_lifetime: 24h
```
---
-### `refresh_access_token_lifetime`
+### `refreshable_access_token_lifetime`
Time that an access token remains valid for, if the session is using refresh tokens.
From a023538822c8e241cdd3180c9cbbcb0f4eb84844 Mon Sep 17 00:00:00 2001
From: Shay
Date: Tue, 25 Jun 2024 07:35:37 -0700
Subject: [PATCH 222/503] Re-introduce federation /download endpoint (#17350)
---
changelog.d/17350.feature | 2 +
.../federation/transport/server/__init__.py | 8 +
synapse/federation/transport/server/_base.py | 24 +-
.../federation/transport/server/federation.py | 41 +++
synapse/media/_base.py | 78 +++++-
synapse/media/media_repository.py | 14 +-
synapse/media/media_storage.py | 259 +++++++++++++++++-
tests/federation/test_federation_media.py | 173 ++++++++++++
8 files changed, 588 insertions(+), 11 deletions(-)
create mode 100644 changelog.d/17350.feature
create mode 100644 tests/federation/test_federation_media.py
diff --git a/changelog.d/17350.feature b/changelog.d/17350.feature
new file mode 100644
index 0000000000..709366f5b8
--- /dev/null
+++ b/changelog.d/17350.feature
@@ -0,0 +1,2 @@
+Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
+by adding a federation /download endpoint.
\ No newline at end of file
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index bac569e977..edaf0196d6 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -33,6 +33,7 @@ from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet,
FederationUnstableClientKeysClaimServlet,
+ FederationUnstableMediaDownloadServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
@@ -315,6 +316,13 @@ def register_servlets(
):
continue
+ if servletclass == FederationUnstableMediaDownloadServlet:
+ if (
+ not hs.config.server.enable_media_repo
+ or not hs.config.experimental.msc3916_authenticated_media_enabled
+ ):
+ continue
+
servletclass(
hs=hs,
authenticator=authenticator,
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index db0f5076a9..4e2717b565 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -360,13 +360,29 @@ class BaseFederationServlet:
"request"
)
return None
+ if (
+ func.__self__.__class__.__name__ # type: ignore
+ == "FederationUnstableMediaDownloadServlet"
+ ):
+ response = await func(
+ origin, content, request, *args, **kwargs
+ )
+ else:
+ response = await func(
+ origin, content, request.args, *args, **kwargs
+ )
+ else:
+ if (
+ func.__self__.__class__.__name__ # type: ignore
+ == "FederationUnstableMediaDownloadServlet"
+ ):
+ response = await func(
+ origin, content, request, *args, **kwargs
+ )
+ else:
response = await func(
origin, content, request.args, *args, **kwargs
)
- else:
- response = await func(
- origin, content, request.args, *args, **kwargs
- )
finally:
# if we used the origin's context as the parent, add a new span using
# the servlet span as a parent, so that we have a link
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index a59734785f..67bb907050 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -44,10 +44,13 @@ from synapse.federation.transport.server._base import (
)
from synapse.http.servlet import (
parse_boolean_from_args,
+ parse_integer,
parse_integer_from_args,
parse_string_from_args,
parse_strings_from_args,
)
+from synapse.http.site import SynapseRequest
+from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION
from synapse.util.ratelimitutils import FederationRateLimiter
@@ -787,6 +790,43 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
return 200, {"account_statuses": statuses, "failures": failures}
+class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
+ """
+ Implementation of new federation media `/download` endpoint outlined in MSC3916. Returns
+ a multipart/mixed response consisting of a JSON object and the requested media
+ item. This endpoint only returns local media.
+ """
+
+ PATH = "/media/download/(?P[^/]*)"
+ PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3916"
+ RATELIMIT = True
+
+ def __init__(
+ self,
+ hs: "HomeServer",
+ ratelimiter: FederationRateLimiter,
+ authenticator: Authenticator,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.media_repo = self.hs.get_media_repository()
+
+ async def on_GET(
+ self,
+ origin: Optional[str],
+ content: Literal[None],
+ request: SynapseRequest,
+ media_id: str,
+ ) -> None:
+ max_timeout_ms = parse_integer(
+ request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
+ )
+ max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
+ await self.media_repo.get_local_media(
+ request, media_id, None, max_timeout_ms, federation=True
+ )
+
+
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationSendServlet,
FederationEventServlet,
@@ -818,4 +858,5 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationV1SendKnockServlet,
FederationMakeKnockServlet,
FederationAccountStatusServlet,
+ FederationUnstableMediaDownloadServlet,
)
diff --git a/synapse/media/_base.py b/synapse/media/_base.py
index 3fbed6062f..7ad0b7c3cf 100644
--- a/synapse/media/_base.py
+++ b/synapse/media/_base.py
@@ -25,7 +25,16 @@ import os
import urllib
from abc import ABC, abstractmethod
from types import TracebackType
-from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
+from typing import (
+ TYPE_CHECKING,
+ Awaitable,
+ Dict,
+ Generator,
+ List,
+ Optional,
+ Tuple,
+ Type,
+)
import attr
@@ -37,8 +46,13 @@ from synapse.api.errors import Codes, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
+from synapse.util import Clock
from synapse.util.stringutils import is_ascii
+if TYPE_CHECKING:
+ from synapse.storage.databases.main.media_repository import LocalMedia
+
+
logger = logging.getLogger(__name__)
# list all text content types that will have the charset default to UTF-8 when
@@ -260,6 +274,68 @@ def _can_encode_filename_as_token(x: str) -> bool:
return True
+async def respond_with_multipart_responder(
+ clock: Clock,
+ request: SynapseRequest,
+ responder: "Optional[Responder]",
+ media_info: "LocalMedia",
+) -> None:
+ """
+ Responds to requests originating from the federation media `/download` endpoint by
+ streaming a multipart/mixed response
+
+ Args:
+ clock:
+ request: the federation request to respond to
+ responder: the responder which will send the response
+ media_info: metadata about the media item
+ """
+ if not responder:
+ respond_404(request)
+ return
+
+ # If we have a responder we *must* use it as a context manager.
+ with responder:
+ if request._disconnected:
+ logger.warning(
+ "Not sending response to request %s, already disconnected.", request
+ )
+ return
+
+ from synapse.media.media_storage import MultipartFileConsumer
+
+ # note that currently the json_object is just {}, this will change when linked media
+ # is implemented
+ multipart_consumer = MultipartFileConsumer(
+ clock, request, media_info.media_type, {}, media_info.media_length
+ )
+
+ logger.debug("Responding to media request with responder %s", responder)
+ if media_info.media_length is not None:
+ content_length = multipart_consumer.content_length()
+ assert content_length is not None
+ request.setHeader(b"Content-Length", b"%d" % (content_length,))
+
+ request.setHeader(
+ b"Content-Type",
+ b"multipart/mixed; boundary=%s" % multipart_consumer.boundary,
+ )
+
+ try:
+ await responder.write_to_consumer(multipart_consumer)
+ except Exception as e:
+ # The majority of the time this will be due to the client having gone
+ # away. Unfortunately, Twisted simply throws a generic exception at us
+ # in that case.
+ logger.warning("Failed to write to consumer: %s %s", type(e), e)
+
+ # Unregister the producer, if it has one, so Twisted doesn't complain
+ if request.producer:
+ request.unregisterProducer()
+
+ finish_request(request)
+
+
async def respond_with_responder(
request: SynapseRequest,
responder: "Optional[Responder]",
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 6ed56099ca..1436329fad 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -54,6 +54,7 @@ from synapse.media._base import (
ThumbnailInfo,
get_filename_from_headers,
respond_404,
+ respond_with_multipart_responder,
respond_with_responder,
)
from synapse.media.filepath import MediaFilePaths
@@ -429,6 +430,7 @@ class MediaRepository:
media_id: str,
name: Optional[str],
max_timeout_ms: int,
+ federation: bool = False,
) -> None:
"""Responds to requests for local media, if exists, or returns 404.
@@ -440,6 +442,7 @@ class MediaRepository:
the filename in the Content-Disposition header of the response.
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
+ federation: whether the local media being fetched is for a federation request
Returns:
Resolves once a response has successfully been written to request
@@ -460,9 +463,14 @@ class MediaRepository:
file_info = FileInfo(None, media_id, url_cache=bool(url_cache))
responder = await self.media_storage.fetch_media(file_info)
- await respond_with_responder(
- request, responder, media_type, media_length, upload_name
- )
+ if federation:
+ await respond_with_multipart_responder(
+ self.clock, request, responder, media_info
+ )
+ else:
+ await respond_with_responder(
+ request, responder, media_type, media_length, upload_name
+ )
async def get_remote_media(
self,
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
index b3cd3fd8f4..1be2c9b5f5 100644
--- a/synapse/media/media_storage.py
+++ b/synapse/media/media_storage.py
@@ -19,9 +19,12 @@
#
#
import contextlib
+import json
import logging
import os
import shutil
+from contextlib import closing
+from io import BytesIO
from types import TracebackType
from typing import (
IO,
@@ -30,24 +33,35 @@ from typing import (
AsyncIterator,
BinaryIO,
Callable,
+ List,
Optional,
Sequence,
Tuple,
Type,
+ Union,
+ cast,
)
+from uuid import uuid4
import attr
+from zope.interface import implementer
+from twisted.internet import interfaces
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender
from synapse.api.errors import NotFoundError
-from synapse.logging.context import defer_to_thread, make_deferred_yieldable
+from synapse.logging.context import (
+ defer_to_thread,
+ make_deferred_yieldable,
+ run_in_background,
+)
from synapse.logging.opentracing import start_active_span, trace, trace_with_opname
from synapse.util import Clock
from synapse.util.file_consumer import BackgroundFileConsumer
+from ..types import JsonDict
from ._base import FileInfo, Responder
from .filepath import MediaFilePaths
@@ -57,6 +71,8 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+CRLF = b"\r\n"
+
class MediaStorage:
"""Responsible for storing/fetching files from local sources.
@@ -174,7 +190,7 @@ class MediaStorage:
and configured storage providers.
Args:
- file_info
+ file_info: Metadata about the media file
Returns:
Returns a Responder if the file was found, otherwise None.
@@ -316,7 +332,7 @@ class FileResponder(Responder):
"""Wraps an open file that can be sent to a request.
Args:
- open_file: A file like object to be streamed ot the client,
+ open_file: A file like object to be streamed to the client,
is closed when finished streaming.
"""
@@ -370,3 +386,240 @@ class ReadableFileWrapper:
# We yield to the reactor by sleeping for 0 seconds.
await self.clock.sleep(0)
+
+
+@implementer(interfaces.IConsumer)
+@implementer(interfaces.IPushProducer)
+class MultipartFileConsumer:
+ """Wraps a given consumer so that any data that gets written to it gets
+ converted to a multipart format.
+ """
+
+ def __init__(
+ self,
+ clock: Clock,
+ wrapped_consumer: interfaces.IConsumer,
+ file_content_type: str,
+ json_object: JsonDict,
+ content_length: Optional[int] = None,
+ ) -> None:
+ self.clock = clock
+ self.wrapped_consumer = wrapped_consumer
+ self.json_field = json_object
+ self.json_field_written = False
+ self.content_type_written = False
+ self.file_content_type = file_content_type
+ self.boundary = uuid4().hex.encode("ascii")
+
+ # The producer that registered with us, and if it's a push or pull
+ # producer.
+ self.producer: Optional["interfaces.IProducer"] = None
+ self.streaming: Optional[bool] = None
+
+ # Whether the wrapped consumer has asked us to pause.
+ self.paused = False
+
+ self.length = content_length
+
+ ### IConsumer APIs ###
+
+ def registerProducer(
+ self, producer: "interfaces.IProducer", streaming: bool
+ ) -> None:
+ """
+ Register to receive data from a producer.
+
+ This sets self to be a consumer for a producer. When this object runs
+ out of data (as when a send(2) call on a socket succeeds in moving the
+ last data from a userspace buffer into a kernelspace buffer), it will
+ ask the producer to resumeProducing().
+
+ For L{IPullProducer} providers, C{resumeProducing} will be called once
+ each time data is required.
+
+ For L{IPushProducer} providers, C{pauseProducing} will be called
+ whenever the write buffer fills up and C{resumeProducing} will only be
+ called when it empties. The consumer will only call C{resumeProducing}
+ to balance a previous C{pauseProducing} call; the producer is assumed
+ to start in an un-paused state.
+
+ @param streaming: C{True} if C{producer} provides L{IPushProducer},
+ C{False} if C{producer} provides L{IPullProducer}.
+
+ @raise RuntimeError: If a producer is already registered.
+ """
+ self.producer = producer
+ self.streaming = streaming
+
+ self.wrapped_consumer.registerProducer(self, True)
+
+ # kick off producing if `self.producer` is not a streaming producer
+ if not streaming:
+ self.resumeProducing()
+
+ def unregisterProducer(self) -> None:
+ """
+ Stop consuming data from a producer, without disconnecting.
+ """
+ self.wrapped_consumer.write(CRLF + b"--" + self.boundary + b"--" + CRLF)
+ self.wrapped_consumer.unregisterProducer()
+ self.paused = True
+
+ def write(self, data: bytes) -> None:
+ """
+ The producer will write data by calling this method.
+
+ The implementation must be non-blocking and perform whatever
+ buffering is necessary. If the producer has provided enough data
+ for now and it is a L{IPushProducer}, the consumer may call its
+ C{pauseProducing} method.
+ """
+ if not self.json_field_written:
+ self.wrapped_consumer.write(CRLF + b"--" + self.boundary + CRLF)
+
+ content_type = Header(b"Content-Type", b"application/json")
+ self.wrapped_consumer.write(bytes(content_type) + CRLF)
+
+ json_field = json.dumps(self.json_field)
+ json_bytes = json_field.encode("utf-8")
+ self.wrapped_consumer.write(CRLF + json_bytes)
+ self.wrapped_consumer.write(CRLF + b"--" + self.boundary + CRLF)
+
+ self.json_field_written = True
+
+ # if we haven't written the content type yet, do so
+ if not self.content_type_written:
+ type = self.file_content_type.encode("utf-8")
+ content_type = Header(b"Content-Type", type)
+ self.wrapped_consumer.write(bytes(content_type) + CRLF + CRLF)
+ self.content_type_written = True
+
+ self.wrapped_consumer.write(data)
+
+ ### IPushProducer APIs ###
+
+ def stopProducing(self) -> None:
+ """
+ Stop producing data.
+
+ This tells a producer that its consumer has died, so it must stop
+ producing data for good.
+ """
+ assert self.producer is not None
+
+ self.paused = True
+ self.producer.stopProducing()
+
+ def pauseProducing(self) -> None:
+ """
+ Pause producing data.
+
+ Tells a producer that it has produced too much data to process for
+ the time being, and to stop until C{resumeProducing()} is called.
+ """
+ assert self.producer is not None
+
+ self.paused = True
+
+ if self.streaming:
+ cast("interfaces.IPushProducer", self.producer).pauseProducing()
+ else:
+ self.paused = True
+
+ def resumeProducing(self) -> None:
+ """
+ Resume producing data.
+
+ This tells a producer to re-add itself to the main loop and produce
+ more data for its consumer.
+ """
+ assert self.producer is not None
+
+ if self.streaming:
+ cast("interfaces.IPushProducer", self.producer).resumeProducing()
+ else:
+ # If the producer is not a streaming producer we need to start
+ # repeatedly calling `resumeProducing` in a loop.
+ run_in_background(self._resumeProducingRepeatedly)
+
+ def content_length(self) -> Optional[int]:
+ """
+ Calculate the content length of the multipart response
+ in bytes.
+ """
+ if not self.length:
+ return None
+ # calculate length of json field and content-type header
+ json_field = json.dumps(self.json_field)
+ json_bytes = json_field.encode("utf-8")
+ json_length = len(json_bytes)
+
+ type = self.file_content_type.encode("utf-8")
+ content_type = Header(b"Content-Type", type)
+ type_length = len(bytes(content_type))
+
+ # 154 is the length of the elements that aren't variable, ie
+ # CRLFs and boundary strings, etc
+ self.length += json_length + type_length + 154
+
+ return self.length
+
+ ### Internal APIs. ###
+
+ async def _resumeProducingRepeatedly(self) -> None:
+ assert self.producer is not None
+ assert not self.streaming
+
+ producer = cast("interfaces.IPullProducer", self.producer)
+
+ self.paused = False
+ while not self.paused:
+ producer.resumeProducing()
+ await self.clock.sleep(0)
+
+
+class Header:
+ """
+ `Header` This class is a tiny wrapper that produces
+ request headers. We can't use standard python header
+ class because it encodes unicode fields using =? bla bla ?=
+ encoding, which is correct, but no one in HTTP world expects
+ that, everyone wants utf-8 raw bytes. (stolen from treq.multipart)
+
+ """
+
+ def __init__(
+ self,
+ name: bytes,
+ value: Any,
+ params: Optional[List[Tuple[Any, Any]]] = None,
+ ):
+ self.name = name
+ self.value = value
+ self.params = params or []
+
+ def add_param(self, name: Any, value: Any) -> None:
+ self.params.append((name, value))
+
+ def __bytes__(self) -> bytes:
+ with closing(BytesIO()) as h:
+ h.write(self.name + b": " + escape(self.value).encode("us-ascii"))
+ if self.params:
+ for name, val in self.params:
+ h.write(b"; ")
+ h.write(escape(name).encode("us-ascii"))
+ h.write(b"=")
+ h.write(b'"' + escape(val).encode("utf-8") + b'"')
+ h.seek(0)
+ return h.read()
+
+
+def escape(value: Union[str, bytes]) -> str:
+ """
+ This function prevents header values from corrupting the request,
+ a newline in the file name parameter makes form-data request unreadable
+ for a majority of parsers. (stolen from treq.multipart)
+ """
+ if isinstance(value, bytes):
+ value = value.decode("utf-8")
+ return value.replace("\r", "").replace("\n", "").replace('"', '\\"')
diff --git a/tests/federation/test_federation_media.py b/tests/federation/test_federation_media.py
new file mode 100644
index 0000000000..2c396adbe3
--- /dev/null
+++ b/tests/federation/test_federation_media.py
@@ -0,0 +1,173 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# .
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import io
+import os
+import shutil
+import tempfile
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.media.filepath import MediaFilePaths
+from synapse.media.media_storage import MediaStorage
+from synapse.media.storage_provider import (
+ FileStorageProviderBackend,
+ StorageProviderWrapper,
+)
+from synapse.server import HomeServer
+from synapse.types import UserID
+from synapse.util import Clock
+
+from tests import unittest
+from tests.test_utils import SMALL_PNG
+from tests.unittest import override_config
+
+
+class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase):
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ super().prepare(reactor, clock, hs)
+ self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
+ self.addCleanup(shutil.rmtree, self.test_dir)
+ self.primary_base_path = os.path.join(self.test_dir, "primary")
+ self.secondary_base_path = os.path.join(self.test_dir, "secondary")
+
+ hs.config.media.media_store_path = self.primary_base_path
+
+ storage_providers = [
+ StorageProviderWrapper(
+ FileStorageProviderBackend(hs, self.secondary_base_path),
+ store_local=True,
+ store_remote=False,
+ store_synchronous=True,
+ )
+ ]
+
+ self.filepaths = MediaFilePaths(self.primary_base_path)
+ self.media_storage = MediaStorage(
+ hs, self.primary_base_path, self.filepaths, storage_providers
+ )
+ self.media_repo = hs.get_media_repository()
+
+ @override_config(
+ {"experimental_features": {"msc3916_authenticated_media_enabled": True}}
+ )
+ def test_file_download(self) -> None:
+ content = io.BytesIO(b"file_to_stream")
+ content_uri = self.get_success(
+ self.media_repo.create_content(
+ "text/plain",
+ "test_upload",
+ content,
+ 46,
+ UserID.from_string("@user_id:whatever.org"),
+ )
+ )
+ # test with a text file
+ channel = self.make_signed_federation_request(
+ "GET",
+ f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
+ )
+ self.pump()
+ self.assertEqual(200, channel.code)
+
+ content_type = channel.headers.getRawHeaders("content-type")
+ assert content_type is not None
+ assert "multipart/mixed" in content_type[0]
+ assert "boundary" in content_type[0]
+
+ # extract boundary
+ boundary = content_type[0].split("boundary=")[1]
+ # split on boundary and check that json field and expected value exist
+ stripped = channel.text_body.split("\r\n" + "--" + boundary)
+ # TODO: the json object expected will change once MSC3911 is implemented, currently
+ # {} is returned for all requests as a placeholder (per MSC3916)
+ found_json = any(
+ "\r\nContent-Type: application/json\r\n\r\n{}" in field
+ for field in stripped
+ )
+ self.assertTrue(found_json)
+
+ # check that the text file and expected value exist
+ found_file = any(
+ "\r\nContent-Type: text/plain\r\n\r\nfile_to_stream" in field
+ for field in stripped
+ )
+ self.assertTrue(found_file)
+
+ content = io.BytesIO(SMALL_PNG)
+ content_uri = self.get_success(
+ self.media_repo.create_content(
+ "image/png",
+ "test_png_upload",
+ content,
+ 67,
+ UserID.from_string("@user_id:whatever.org"),
+ )
+ )
+ # test with an image file
+ channel = self.make_signed_federation_request(
+ "GET",
+ f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
+ )
+ self.pump()
+ self.assertEqual(200, channel.code)
+
+ content_type = channel.headers.getRawHeaders("content-type")
+ assert content_type is not None
+ assert "multipart/mixed" in content_type[0]
+ assert "boundary" in content_type[0]
+
+ # extract boundary
+ boundary = content_type[0].split("boundary=")[1]
+ # split on boundary and check that json field and expected value exist
+ body = channel.result.get("body")
+ assert body is not None
+ stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8"))
+ found_json = any(
+ b"\r\nContent-Type: application/json\r\n\r\n{}" in field
+ for field in stripped_bytes
+ )
+ self.assertTrue(found_json)
+
+ # check that the png file exists and matches what was uploaded
+ found_file = any(SMALL_PNG in field for field in stripped_bytes)
+ self.assertTrue(found_file)
+
+ @override_config(
+ {"experimental_features": {"msc3916_authenticated_media_enabled": False}}
+ )
+ def test_disable_config(self) -> None:
+ content = io.BytesIO(b"file_to_stream")
+ content_uri = self.get_success(
+ self.media_repo.create_content(
+ "text/plain",
+ "test_upload",
+ content,
+ 46,
+ UserID.from_string("@user_id:whatever.org"),
+ )
+ )
+ channel = self.make_signed_federation_request(
+ "GET",
+ f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
+ )
+ self.pump()
+ self.assertEqual(404, channel.code)
+ self.assertEqual(channel.json_body.get("errcode"), "M_UNRECOGNIZED")
From 9cf0ef9c70c0f2b93f4056d6273f130e7a75a201 Mon Sep 17 00:00:00 2001
From: Denis Kasak
Date: Tue, 25 Jun 2024 16:58:30 +0200
Subject: [PATCH 223/503] Fix outdated Security Disclosure Policy references
(#17341)
---
.github/ISSUE_TEMPLATE.md | 2 +-
.github/ISSUE_TEMPLATE/BUG_REPORT.yml | 2 +-
changelog.d/17341.doc | 1 +
docs/welcome_and_overview.md | 6 +++---
4 files changed, 6 insertions(+), 5 deletions(-)
create mode 100644 changelog.d/17341.doc
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 1632170c9d..d3114882d7 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -2,4 +2,4 @@
(using a matrix.org account if necessary). We do not use GitHub issues for
support.
-**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/
+**If you want to report a security issue** please see https://element.io/security/security-disclosure-policy
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
index 77a04109dd..ebd36a9398 100644
--- a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
@@ -7,7 +7,7 @@ body:
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
- If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
+ If you want to report a security issue, please see https://element.io/security/security-disclosure-policy
This is a bug report form. By following the instructions below and completing the sections with your information, you will help the us to get all the necessary data to fix your issue.
diff --git a/changelog.d/17341.doc b/changelog.d/17341.doc
new file mode 100644
index 0000000000..353c8adbe8
--- /dev/null
+++ b/changelog.d/17341.doc
@@ -0,0 +1 @@
+Fix stale references to the Foundation's Security Disclosure Policy.
diff --git a/docs/welcome_and_overview.md b/docs/welcome_and_overview.md
index ae5d0f5d90..b88fed7e44 100644
--- a/docs/welcome_and_overview.md
+++ b/docs/welcome_and_overview.md
@@ -62,6 +62,6 @@ following documentation:
## Reporting a security vulnerability
-If you've found a security issue in Synapse or any other Matrix.org Foundation
-project, please report it to us in accordance with our [Security Disclosure
-Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!
+If you've found a security issue in Synapse or any other Element project,
+please report it to us in accordance with our [Security Disclosure
+Policy](https://element.io/security/security-disclosure-policy). Thank you!
From ef7fbdfebd009d70dbf3e2dddfea8d6edb8ea94c Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 25 Jun 2024 16:20:59 +0100
Subject: [PATCH 224/503] Fixes to the table of contents in the README (#17329)
---
README.rst | 8 ++++----
changelog.d/17329.doc | 1 +
2 files changed, 5 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/17329.doc
diff --git a/README.rst b/README.rst
index db9b79a237..145315a7fe 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7
:height: 60px
-===========================================================================================================
-Element Synapse - Matrix homeserver implementation |support| |development| |documentation| |license| |pypi| |python|
-===========================================================================================================
+**Element Synapse - Matrix homeserver implementation**
+
+|support| |development| |documentation| |license| |pypi| |python|
Synapse is an open source `Matrix `_ homeserver
implementation, written and maintained by `Element `_.
@@ -14,7 +14,7 @@ license. There is no support provided from Element unless you have a
subscription.
Subscription alternative
-------------------------
+========================
Alternatively, for those that need an enterprise-ready solution, Element
Server Suite (ESS) is `available as a subscription `_.
diff --git a/changelog.d/17329.doc b/changelog.d/17329.doc
new file mode 100644
index 0000000000..2486256dad
--- /dev/null
+++ b/changelog.d/17329.doc
@@ -0,0 +1 @@
+Update header in the README to visually fix the auto-generated table of contents.
\ No newline at end of file
From 9f47513458e064be0b7579c42d672464a4200ffe Mon Sep 17 00:00:00 2001
From: Till Faelligen <2353100+S7evinK@users.noreply.github.com>
Date: Wed, 26 Jun 2024 14:09:13 +0200
Subject: [PATCH 225/503] 1.110.0rc1
---
CHANGES.md | 84 +++++++++++++++++++++++++++++++++++++++
changelog.d/17187.feature | 1 -
changelog.d/17198.misc | 1 -
changelog.d/17254.bugfix | 1 -
changelog.d/17255.feature | 1 -
changelog.d/17256.feature | 1 -
changelog.d/17265.misc | 1 -
changelog.d/17266.misc | 1 -
changelog.d/17270.feature | 1 -
changelog.d/17271.misc | 1 -
changelog.d/17272.bugfix | 1 -
changelog.d/17273.misc | 1 -
changelog.d/17275.bugfix | 1 -
changelog.d/17276.feature | 1 -
changelog.d/17277.feature | 1 -
changelog.d/17279.misc | 1 -
changelog.d/17281.feature | 1 -
changelog.d/17282.feature | 1 -
changelog.d/17283.bugfix | 1 -
changelog.d/17284.feature | 1 -
changelog.d/17293.feature | 1 -
changelog.d/17294.feature | 2 -
changelog.d/17295.bugfix | 1 -
changelog.d/17296.feature | 1 -
changelog.d/17297.misc | 1 -
changelog.d/17300.misc | 1 -
changelog.d/17301.bugfix | 1 -
changelog.d/17304.feature | 2 -
changelog.d/17308.doc | 1 -
changelog.d/17322.feature | 1 -
changelog.d/17324.misc | 1 -
changelog.d/17325.misc | 1 -
changelog.d/17329.doc | 1 -
changelog.d/17331.misc | 1 -
changelog.d/17333.misc | 1 -
changelog.d/17335.feature | 1 -
changelog.d/17336.bugfix | 1 -
changelog.d/17338.misc | 1 -
changelog.d/17339.misc | 1 -
changelog.d/17341.doc | 1 -
changelog.d/17347.doc | 1 -
changelog.d/17348.doc | 1 -
changelog.d/17350.feature | 2 -
changelog.d/17358.misc | 1 -
debian/changelog | 5 ++-
pyproject.toml | 2 +-
46 files changed, 88 insertions(+), 49 deletions(-)
delete mode 100644 changelog.d/17187.feature
delete mode 100644 changelog.d/17198.misc
delete mode 100644 changelog.d/17254.bugfix
delete mode 100644 changelog.d/17255.feature
delete mode 100644 changelog.d/17256.feature
delete mode 100644 changelog.d/17265.misc
delete mode 100644 changelog.d/17266.misc
delete mode 100644 changelog.d/17270.feature
delete mode 100644 changelog.d/17271.misc
delete mode 100644 changelog.d/17272.bugfix
delete mode 100644 changelog.d/17273.misc
delete mode 100644 changelog.d/17275.bugfix
delete mode 100644 changelog.d/17276.feature
delete mode 100644 changelog.d/17277.feature
delete mode 100644 changelog.d/17279.misc
delete mode 100644 changelog.d/17281.feature
delete mode 100644 changelog.d/17282.feature
delete mode 100644 changelog.d/17283.bugfix
delete mode 100644 changelog.d/17284.feature
delete mode 100644 changelog.d/17293.feature
delete mode 100644 changelog.d/17294.feature
delete mode 100644 changelog.d/17295.bugfix
delete mode 100644 changelog.d/17296.feature
delete mode 100644 changelog.d/17297.misc
delete mode 100644 changelog.d/17300.misc
delete mode 100644 changelog.d/17301.bugfix
delete mode 100644 changelog.d/17304.feature
delete mode 100644 changelog.d/17308.doc
delete mode 100644 changelog.d/17322.feature
delete mode 100644 changelog.d/17324.misc
delete mode 100644 changelog.d/17325.misc
delete mode 100644 changelog.d/17329.doc
delete mode 100644 changelog.d/17331.misc
delete mode 100644 changelog.d/17333.misc
delete mode 100644 changelog.d/17335.feature
delete mode 100644 changelog.d/17336.bugfix
delete mode 100644 changelog.d/17338.misc
delete mode 100644 changelog.d/17339.misc
delete mode 100644 changelog.d/17341.doc
delete mode 100644 changelog.d/17347.doc
delete mode 100644 changelog.d/17348.doc
delete mode 100644 changelog.d/17350.feature
delete mode 100644 changelog.d/17358.misc
diff --git a/CHANGES.md b/CHANGES.md
index 9060b84853..e2c8cc937c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,87 @@
+# Synapse 1.110.0rc1 (2024-06-26)
+
+### Features
+
+- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187))
+- Add support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255))
+- Improve ratelimiting in Synapse (#17256). ([\#17256](https://github.com/element-hq/synapse/issues/17256))
+- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296))
+- Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276))
+- Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17277](https://github.com/element-hq/synapse/issues/17277))
+- Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17281](https://github.com/element-hq/synapse/issues/17281))
+- Include user membership in events served to clients, per MSC4115. ([\#17282](https://github.com/element-hq/synapse/issues/17282))
+- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per MSC3967. ([\#17284](https://github.com/element-hq/synapse/issues/17284))
+- Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17293](https://github.com/element-hq/synapse/issues/17293))
+- `register_new_matrix_user` now supports a --password-file flag, which
+ is useful for scripting. ([\#17294](https://github.com/element-hq/synapse/issues/17294))
+- `register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database.
+ This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304))
+- Add support for via query parameter from MSC4156. ([\#17322](https://github.com/element-hq/synapse/issues/17322))
+- Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
+ by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
+
+### Bugfixes
+
+- Fix searching for users with their exact localpart whose ID includes a hyphen. ([\#17254](https://github.com/element-hq/synapse/issues/17254))
+- Fix wrong retention policy being used when filtering events. ([\#17272](https://github.com/element-hq/synapse/issues/17272))
+- Fix bug where OTKs were not always included in `/sync` response when using workers. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
+- Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error. ([\#17283](https://github.com/element-hq/synapse/issues/17283))
+- Fix edge case in `/sync` returning the wrong state when using sharded event persisters. ([\#17295](https://github.com/element-hq/synapse/issues/17295))
+- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17301](https://github.com/element-hq/synapse/issues/17301))
+- Fix email notification subject when invited to a space. ([\#17336](https://github.com/element-hq/synapse/issues/17336))
+
+### Improved Documentation
+
+- Add missing quotes for example for `exclude_rooms_from_sync`. ([\#17308](https://github.com/element-hq/synapse/issues/17308))
+- Update header in the README to visually fix the auto-generated table of contents. ([\#17329](https://github.com/element-hq/synapse/issues/17329))
+- Fix stale references to the Foundation's Security Disclosure Policy. ([\#17341](https://github.com/element-hq/synapse/issues/17341))
+- Add default values for `rc_invites.per_issuer` to docs. ([\#17347](https://github.com/element-hq/synapse/issues/17347))
+- Fix an error in the docs for `search_all_users` parameter under `user_directory`. ([\#17348](https://github.com/element-hq/synapse/issues/17348))
+
+### Internal Changes
+
+- Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes. ([\#17198](https://github.com/element-hq/synapse/issues/17198))
+- Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation. ([\#17265](https://github.com/element-hq/synapse/issues/17265))
+- Add debug logging for when room keys are uploaded, including whether they are replacing other room keys. ([\#17266](https://github.com/element-hq/synapse/issues/17266))
+- Handle OTK uploads off master. ([\#17271](https://github.com/element-hq/synapse/issues/17271))
+- Don't try and resync devices for remote users whose servers are marked as down. ([\#17273](https://github.com/element-hq/synapse/issues/17273))
+- Re-organize Pydantic models and types used in handlers. ([\#17279](https://github.com/element-hq/synapse/issues/17279))
+- Bump `mypy` from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297))
+- Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. ([\#17300](https://github.com/element-hq/synapse/issues/17300))
+- Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering. ([\#17324](https://github.com/element-hq/synapse/issues/17324))
+- This is a changelog so tests will run. ([\#17325](https://github.com/element-hq/synapse/issues/17325))
+- Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC. ([\#17331](https://github.com/element-hq/synapse/issues/17331))
+- Handle device lists notifications for large accounts more efficiently in worker mode. ([\#17333](https://github.com/element-hq/synapse/issues/17333), [\#17358](https://github.com/element-hq/synapse/issues/17358))
+- Do not block event sending/receiving while calculating large event auth chains. ([\#17338](https://github.com/element-hq/synapse/issues/17338))
+- Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring `parse_integer_from_args` default in alignment. Contributed by Denis Kasak (@dkasak). ([\#17339](https://github.com/element-hq/synapse/issues/17339))
+
+
+
+### Updates to locked dependencies
+
+* Bump authlib from 1.3.0 to 1.3.1. ([\#17343](https://github.com/element-hq/synapse/issues/17343))
+* Bump dawidd6/action-download-artifact from 3.1.4 to 5. ([\#17289](https://github.com/element-hq/synapse/issues/17289))
+* Bump dawidd6/action-download-artifact from 5 to 6. ([\#17313](https://github.com/element-hq/synapse/issues/17313))
+* Bump docker/build-push-action from 5 to 6. ([\#17312](https://github.com/element-hq/synapse/issues/17312))
+* Bump jinja2 from 3.1.3 to 3.1.4. ([\#17287](https://github.com/element-hq/synapse/issues/17287))
+* Bump lazy_static from 1.4.0 to 1.5.0. ([\#17355](https://github.com/element-hq/synapse/issues/17355))
+* Bump msgpack from 1.0.7 to 1.0.8. ([\#17317](https://github.com/element-hq/synapse/issues/17317))
+* Bump netaddr from 1.2.1 to 1.3.0. ([\#17353](https://github.com/element-hq/synapse/issues/17353))
+* Bump packaging from 24.0 to 24.1. ([\#17352](https://github.com/element-hq/synapse/issues/17352))
+* Bump phonenumbers from 8.13.37 to 8.13.39. ([\#17315](https://github.com/element-hq/synapse/issues/17315))
+* Bump regex from 1.10.4 to 1.10.5. ([\#17290](https://github.com/element-hq/synapse/issues/17290))
+* Bump requests from 2.31.0 to 2.32.2. ([\#17345](https://github.com/element-hq/synapse/issues/17345))
+* Bump sentry-sdk from 2.1.1 to 2.3.1. ([\#17263](https://github.com/element-hq/synapse/issues/17263))
+* Bump sentry-sdk from 2.3.1 to 2.6.0. ([\#17351](https://github.com/element-hq/synapse/issues/17351))
+* Bump tornado from 6.4 to 6.4.1. ([\#17344](https://github.com/element-hq/synapse/issues/17344))
+* Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610. ([\#17288](https://github.com/element-hq/synapse/issues/17288))
+* Bump types-netaddr from 1.2.0.20240219 to 1.3.0.20240530. ([\#17314](https://github.com/element-hq/synapse/issues/17314))
+* Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520. ([\#17285](https://github.com/element-hq/synapse/issues/17285))
+* Bump types-pyyaml from 6.0.12.12 to 6.0.12.20240311. ([\#17316](https://github.com/element-hq/synapse/issues/17316))
+* Bump typing-extensions from 4.11.0 to 4.12.2. ([\#17354](https://github.com/element-hq/synapse/issues/17354))
+* Bump urllib3 from 2.0.7 to 2.2.2. ([\#17346](https://github.com/element-hq/synapse/issues/17346))
+
# Synapse 1.109.0 (2024-06-18)
### Internal Changes
diff --git a/changelog.d/17187.feature b/changelog.d/17187.feature
deleted file mode 100644
index 50383cb4a4..0000000000
--- a/changelog.d/17187.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17198.misc b/changelog.d/17198.misc
deleted file mode 100644
index 8973eb2bac..0000000000
--- a/changelog.d/17198.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes.
\ No newline at end of file
diff --git a/changelog.d/17254.bugfix b/changelog.d/17254.bugfix
deleted file mode 100644
index b0d61309e2..0000000000
--- a/changelog.d/17254.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix searching for users with their exact localpart whose ID includes a hyphen.
diff --git a/changelog.d/17255.feature b/changelog.d/17255.feature
deleted file mode 100644
index 4093de1146..0000000000
--- a/changelog.d/17255.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for [MSC823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension.
\ No newline at end of file
diff --git a/changelog.d/17256.feature b/changelog.d/17256.feature
deleted file mode 100644
index 6ec4cb7a31..0000000000
--- a/changelog.d/17256.feature
+++ /dev/null
@@ -1 +0,0 @@
- Improve ratelimiting in Synapse (#17256).
\ No newline at end of file
diff --git a/changelog.d/17265.misc b/changelog.d/17265.misc
deleted file mode 100644
index e6d4d8b4ee..0000000000
--- a/changelog.d/17265.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation.
diff --git a/changelog.d/17266.misc b/changelog.d/17266.misc
deleted file mode 100644
index ce8c4ab086..0000000000
--- a/changelog.d/17266.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add debug logging for when room keys are uploaded, including whether they are replacing other room keys.
\ No newline at end of file
diff --git a/changelog.d/17270.feature b/changelog.d/17270.feature
deleted file mode 100644
index 4ea5e7be85..0000000000
--- a/changelog.d/17270.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.
diff --git a/changelog.d/17271.misc b/changelog.d/17271.misc
deleted file mode 100644
index 915d717ad7..0000000000
--- a/changelog.d/17271.misc
+++ /dev/null
@@ -1 +0,0 @@
-Handle OTK uploads off master.
diff --git a/changelog.d/17272.bugfix b/changelog.d/17272.bugfix
deleted file mode 100644
index 83e7ca426a..0000000000
--- a/changelog.d/17272.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix wrong retention policy being used when filtering events.
diff --git a/changelog.d/17273.misc b/changelog.d/17273.misc
deleted file mode 100644
index 2c1c6bc0d5..0000000000
--- a/changelog.d/17273.misc
+++ /dev/null
@@ -1 +0,0 @@
-Don't try and resync devices for remote users whose servers are marked as down.
diff --git a/changelog.d/17275.bugfix b/changelog.d/17275.bugfix
deleted file mode 100644
index eb522bb997..0000000000
--- a/changelog.d/17275.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where OTKs were not always included in `/sync` response when using workers.
diff --git a/changelog.d/17276.feature b/changelog.d/17276.feature
deleted file mode 100644
index a1edfae0aa..0000000000
--- a/changelog.d/17276.feature
+++ /dev/null
@@ -1 +0,0 @@
-Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api).
diff --git a/changelog.d/17277.feature b/changelog.d/17277.feature
deleted file mode 100644
index 5c16342c11..0000000000
--- a/changelog.d/17277.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17279.misc b/changelog.d/17279.misc
deleted file mode 100644
index 2090b11d7f..0000000000
--- a/changelog.d/17279.misc
+++ /dev/null
@@ -1 +0,0 @@
-Re-organize Pydantic models and types used in handlers.
diff --git a/changelog.d/17281.feature b/changelog.d/17281.feature
deleted file mode 100644
index fce512692c..0000000000
--- a/changelog.d/17281.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17282.feature b/changelog.d/17282.feature
deleted file mode 100644
index 334709a3a7..0000000000
--- a/changelog.d/17282.feature
+++ /dev/null
@@ -1 +0,0 @@
-Include user membership in events served to clients, per MSC4115.
\ No newline at end of file
diff --git a/changelog.d/17283.bugfix b/changelog.d/17283.bugfix
deleted file mode 100644
index 98c1f05cc2..0000000000
--- a/changelog.d/17283.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error.
\ No newline at end of file
diff --git a/changelog.d/17284.feature b/changelog.d/17284.feature
deleted file mode 100644
index 015d925e7c..0000000000
--- a/changelog.d/17284.feature
+++ /dev/null
@@ -1 +0,0 @@
-Do not require user-interactive authentication for uploading cross-signing keys for the first time, per MSC3967.
\ No newline at end of file
diff --git a/changelog.d/17293.feature b/changelog.d/17293.feature
deleted file mode 100644
index 60ca7721a0..0000000000
--- a/changelog.d/17293.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17294.feature b/changelog.d/17294.feature
deleted file mode 100644
index 33aac7b0bc..0000000000
--- a/changelog.d/17294.feature
+++ /dev/null
@@ -1,2 +0,0 @@
-`register_new_matrix_user` now supports a --password-file flag, which
-is useful for scripting.
diff --git a/changelog.d/17295.bugfix b/changelog.d/17295.bugfix
deleted file mode 100644
index 4484253bb8..0000000000
--- a/changelog.d/17295.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix edge case in `/sync` returning the wrong the state when using sharded event persisters.
diff --git a/changelog.d/17296.feature b/changelog.d/17296.feature
deleted file mode 100644
index 4ea5e7be85..0000000000
--- a/changelog.d/17296.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.
diff --git a/changelog.d/17297.misc b/changelog.d/17297.misc
deleted file mode 100644
index 7ec351d2c1..0000000000
--- a/changelog.d/17297.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump `mypy` from 1.8.0 to 1.9.0.
\ No newline at end of file
diff --git a/changelog.d/17300.misc b/changelog.d/17300.misc
deleted file mode 100644
index cdc40bb2e5..0000000000
--- a/changelog.d/17300.misc
+++ /dev/null
@@ -1 +0,0 @@
-Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`.
diff --git a/changelog.d/17301.bugfix b/changelog.d/17301.bugfix
deleted file mode 100644
index 50383cb4a4..0000000000
--- a/changelog.d/17301.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17304.feature b/changelog.d/17304.feature
deleted file mode 100644
index a969d8bf58..0000000000
--- a/changelog.d/17304.feature
+++ /dev/null
@@ -1,2 +0,0 @@
-`register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database.
-This is useful for scripts that bootstrap user accounts with initial passwords.
diff --git a/changelog.d/17308.doc b/changelog.d/17308.doc
deleted file mode 100644
index 7ae080a684..0000000000
--- a/changelog.d/17308.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add missing quotes for example for `exclude_rooms_from_sync`.
diff --git a/changelog.d/17322.feature b/changelog.d/17322.feature
deleted file mode 100644
index 85386c2df7..0000000000
--- a/changelog.d/17322.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for via query parameter from MSC415.
diff --git a/changelog.d/17324.misc b/changelog.d/17324.misc
deleted file mode 100644
index c0d7196ee0..0000000000
--- a/changelog.d/17324.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering.
\ No newline at end of file
diff --git a/changelog.d/17325.misc b/changelog.d/17325.misc
deleted file mode 100644
index 1a4ce7ceec..0000000000
--- a/changelog.d/17325.misc
+++ /dev/null
@@ -1 +0,0 @@
-This is a changelog so tests will run.
\ No newline at end of file
diff --git a/changelog.d/17329.doc b/changelog.d/17329.doc
deleted file mode 100644
index 2486256dad..0000000000
--- a/changelog.d/17329.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update header in the README to visually fix the the auto-generated table of contents.
\ No newline at end of file
diff --git a/changelog.d/17331.misc b/changelog.d/17331.misc
deleted file mode 100644
index 79d3f33996..0000000000
--- a/changelog.d/17331.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC.
diff --git a/changelog.d/17333.misc b/changelog.d/17333.misc
deleted file mode 100644
index d3ef0b3777..0000000000
--- a/changelog.d/17333.misc
+++ /dev/null
@@ -1 +0,0 @@
-Handle device lists notifications for large accounts more efficiently in worker mode.
diff --git a/changelog.d/17335.feature b/changelog.d/17335.feature
deleted file mode 100644
index c6beed42ed..0000000000
--- a/changelog.d/17335.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17336.bugfix b/changelog.d/17336.bugfix
deleted file mode 100644
index 618834302e..0000000000
--- a/changelog.d/17336.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix email notification subject when invited to a space.
diff --git a/changelog.d/17338.misc b/changelog.d/17338.misc
deleted file mode 100644
index 1a81bdef85..0000000000
--- a/changelog.d/17338.misc
+++ /dev/null
@@ -1 +0,0 @@
-Do not block event sending/receiving while calculating large event auth chains.
diff --git a/changelog.d/17339.misc b/changelog.d/17339.misc
deleted file mode 100644
index 1d7cb96c8b..0000000000
--- a/changelog.d/17339.misc
+++ /dev/null
@@ -1 +0,0 @@
-Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring `parse_integer_from_args` default in alignment. Contributed by Denis Kasak (@dkasak).
diff --git a/changelog.d/17341.doc b/changelog.d/17341.doc
deleted file mode 100644
index 353c8adbe8..0000000000
--- a/changelog.d/17341.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix stale references to the Foundation's Security Disclosure Policy.
diff --git a/changelog.d/17347.doc b/changelog.d/17347.doc
deleted file mode 100644
index 6cd41be60f..0000000000
--- a/changelog.d/17347.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add default values for `rc_invites.per_issuer` to docs.
diff --git a/changelog.d/17348.doc b/changelog.d/17348.doc
deleted file mode 100644
index 4ce42bbadb..0000000000
--- a/changelog.d/17348.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix an error in the docs for `search_all_users` parameter under `user_directory`.
diff --git a/changelog.d/17350.feature b/changelog.d/17350.feature
deleted file mode 100644
index 709366f5b8..0000000000
--- a/changelog.d/17350.feature
+++ /dev/null
@@ -1,2 +0,0 @@
-Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
-by adding a federation /download endpoint.
\ No newline at end of file
diff --git a/changelog.d/17358.misc b/changelog.d/17358.misc
deleted file mode 100644
index d3ef0b3777..0000000000
--- a/changelog.d/17358.misc
+++ /dev/null
@@ -1 +0,0 @@
-Handle device lists notifications for large accounts more efficiently in worker mode.
diff --git a/debian/changelog b/debian/changelog
index 731eacf20f..ceef366a3f 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,8 +1,9 @@
-matrix-synapse-py3 (1.109.0+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.110.0~rc1) stable; urgency=medium
* `register_new_matrix_user` now supports a --password-file and a --exists-ok flag.
+ * New Synapse release 1.110.0rc1.
- -- Synapse Packaging team Tue, 18 Jun 2024 13:29:36 +0100
+ -- Synapse Packaging team Wed, 26 Jun 2024 14:07:56 +0200
matrix-synapse-py3 (1.109.0) stable; urgency=medium
diff --git a/pyproject.toml b/pyproject.toml
index 1485016a5a..19998c1acf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.109.0"
+version = "1.110.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "AGPL-3.0-or-later"
From 315b8d20324065f645b38fb0808f9f82109b3127 Mon Sep 17 00:00:00 2001
From: Till Faelligen <2353100+S7evinK@users.noreply.github.com>
Date: Wed, 26 Jun 2024 14:39:57 +0200
Subject: [PATCH 226/503] Update changelog
---
CHANGES.md | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index e2c8cc937c..3171ac32ff 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -4,22 +4,21 @@
- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187))
- Add support for [MSC823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255))
-- Improve ratelimiting in Synapse (#17256). ([\#17256](https://github.com/element-hq/synapse/issues/17256))
+- Improve ratelimiting in Synapse. ([\#17256](https://github.com/element-hq/synapse/issues/17256))
- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296))
- Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276))
- Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17277](https://github.com/element-hq/synapse/issues/17277))
- Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17281](https://github.com/element-hq/synapse/issues/17281))
-- Include user membership in events served to clients, per MSC4115. ([\#17282](https://github.com/element-hq/synapse/issues/17282))
-- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per MSC3967. ([\#17284](https://github.com/element-hq/synapse/issues/17284))
+- Include user membership in events served to clients, per [MSC4115](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17282](https://github.com/element-hq/synapse/issues/17282))
+- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967). ([\#17284](https://github.com/element-hq/synapse/issues/17284))
- Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17293](https://github.com/element-hq/synapse/issues/17293))
- `register_new_matrix_user` now supports a --password-file flag, which
is useful for scripting. ([\#17294](https://github.com/element-hq/synapse/issues/17294))
- `register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database.
This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304))
-- Add support for via query parameter from MSC415. ([\#17322](https://github.com/element-hq/synapse/issues/17322))
+- Add support for the `via` query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322))
- Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335))
-- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
- by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
### Bugfixes
@@ -47,10 +46,8 @@
- Handle OTK uploads off master. ([\#17271](https://github.com/element-hq/synapse/issues/17271))
- Don't try and resync devices for remote users whose servers are marked as down. ([\#17273](https://github.com/element-hq/synapse/issues/17273))
- Re-organize Pydantic models and types used in handlers. ([\#17279](https://github.com/element-hq/synapse/issues/17279))
-- Bump `mypy` from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297))
- Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. ([\#17300](https://github.com/element-hq/synapse/issues/17300))
- Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering. ([\#17324](https://github.com/element-hq/synapse/issues/17324))
-- This is a changelog so tests will run. ([\#17325](https://github.com/element-hq/synapse/issues/17325))
- Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC. ([\#17331](https://github.com/element-hq/synapse/issues/17331))
- Handle device lists notifications for large accounts more efficiently in worker mode. ([\#17333](https://github.com/element-hq/synapse/issues/17333), [\#17358](https://github.com/element-hq/synapse/issues/17358))
- Do not block event sending/receiving while calculating large event auth chains. ([\#17338](https://github.com/element-hq/synapse/issues/17338))
@@ -75,6 +72,7 @@
* Bump sentry-sdk from 2.1.1 to 2.3.1. ([\#17263](https://github.com/element-hq/synapse/issues/17263))
* Bump sentry-sdk from 2.3.1 to 2.6.0. ([\#17351](https://github.com/element-hq/synapse/issues/17351))
* Bump tornado from 6.4 to 6.4.1. ([\#17344](https://github.com/element-hq/synapse/issues/17344))
+* Bump mypy from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297))
* Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610. ([\#17288](https://github.com/element-hq/synapse/issues/17288))
* Bump types-netaddr from 1.2.0.20240219 to 1.3.0.20240530. ([\#17314](https://github.com/element-hq/synapse/issues/17314))
* Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520. ([\#17285](https://github.com/element-hq/synapse/issues/17285))
From a8dcd686fb7d4744b3364f35f07c9b5ce2b895b9 Mon Sep 17 00:00:00 2001
From: Till Faelligen <2353100+S7evinK@users.noreply.github.com>
Date: Wed, 26 Jun 2024 15:10:49 +0200
Subject: [PATCH 227/503] Fix typo
---
CHANGES.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 3171ac32ff..09722ca3db 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -3,7 +3,7 @@
### Features
- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187))
-- Add support for [MSC823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255))
+- Add experimental support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255))
- Improve ratelimiting in Synapse. ([\#17256](https://github.com/element-hq/synapse/issues/17256))
- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296))
- Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276))
From b924a8e1a9c3772abf1d19602cfc52591bc6bc62 Mon Sep 17 00:00:00 2001
From: Till <2353100+S7evinK@users.noreply.github.com>
Date: Wed, 26 Jun 2024 18:01:39 +0200
Subject: [PATCH 228/503] Fix uploading packages to PyPi (#17363)
As per
https://github.com/sphinx-doc/sphinx/issues/3921#issuecomment-315581557,
we need double underscores.
Running `rst2html README.rst > /dev/null` found some more warnings.
---
README.rst | 14 +++++++-------
changelog.d/17363.misc | 1 +
2 files changed, 8 insertions(+), 7 deletions(-)
create mode 100644 changelog.d/17363.misc
diff --git a/README.rst b/README.rst
index 145315a7fe..9ecb6b5816 100644
--- a/README.rst
+++ b/README.rst
@@ -5,9 +5,9 @@
|support| |development| |documentation| |license| |pypi| |python|
-Synapse is an open source `Matrix `_ homeserver
+Synapse is an open source `Matrix