From 72020f3f2c1890e6b262001bcdd6f642b729b9a9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 30 Sep 2025 11:58:59 +0100 Subject: [PATCH 001/149] 1.139.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index d9a95f8e72..9984efc99e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.139.0 (2025-09-30) + +No significant changes since 1.139.0rc3. + + + + # Synapse 1.139.0rc3 (2025-09-25) ## Bugfixes diff --git a/debian/changelog b/debian/changelog index 8f8877638a..f3a2314dca 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.139.0) stable; urgency=medium + + * New Synapse release 1.139.0. + + -- Synapse Packaging team Tue, 30 Sep 2025 11:58:55 +0100 + matrix-synapse-py3 (1.139.0~rc3) stable; urgency=medium * New Synapse release 1.139.0rc3. diff --git a/pyproject.toml b/pyproject.toml index 7f58386087..0f886a6b6a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.139.0rc3" +version = "1.139.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 0aeb95fb07066636362bf109e8da98969e6667a3 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 30 Sep 2025 12:05:28 +0100 Subject: [PATCH 002/149] Add MAS note to 1.139.0 changelog --- CHANGES.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 9984efc99e..e8b04c419c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,15 @@ # Synapse 1.139.0 (2025-09-30) +### `/register` requests from old application service implementations may break when using MAS + +If you are using Matrix Authentication Service (MAS), as of this release any +Application Services that do not set `inhibit_login=true` when calling `POST +/_matrix/client/v3/register` will receive the error +`IO.ELEMENT.MSC4190.M_APPSERVICE_LOGIN_UNSUPPORTED` in response. Please see [the +upgrade +notes](https://element-hq.github.io/synapse/develop/upgrade.html#register-requests-from-old-application-service-implementations-may-break-when-using-mas) +for more information. + No significant changes since 1.139.0rc3. From 2aab171042346c4c94f3248b513319762b7c2f7c Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 30 Sep 2025 17:10:32 +0100 Subject: [PATCH 003/149] Remove unstable prefixes for MSC2732 This MSC was accepted in 2022. We shouldn't need to continue supporting the unstable field names. 
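For illustration, here is a minimal sketch of what a keys upload and the corresponding `/sync` field look like once only the stable names are supported. The variable names and key values below are made up for the example and are not taken from the change itself:

```python
# Body for `POST /_matrix/client/v3/keys/upload` using only the stable
# MSC2732 field name; the unstable "org.matrix.msc2732.fallback_keys"
# alias is no longer read after this change.
upload_body = {
    "fallback_keys": {
        "signed_curve25519:AAAAHg": {
            "key": "fallbackPublicKeyBase64",  # illustrative value
            "fallback": True,
            # "signatures": {...} omitted for brevity
        }
    }
}

# Similarly, `/sync` responses now only advertise the stable field:
sync_extract = {"device_unused_fallback_key_types": ["signed_curve25519"]}
```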
--- synapse/handlers/e2e_keys.py | 4 +--- synapse/rest/client/sync.py | 3 --- tests/handlers/test_e2e_keys.py | 23 ----------------------- 3 files changed, 1 insertion(+), 29 deletions(-) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 2774417c0b..791a0fa684 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -872,9 +872,7 @@ class E2eKeysHandler: log_kv( {"message": "Did not update one_time_keys", "reason": "no keys given"} ) - fallback_keys = keys.get("fallback_keys") or keys.get( - "org.matrix.msc2732.fallback_keys" - ) + fallback_keys = keys.get("fallback_keys") if fallback_keys and isinstance(fallback_keys, dict): log_kv( { diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index bb63b51599..2ddb319809 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -363,9 +363,6 @@ class SyncRestServlet(RestServlet): # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md # states that this field should always be included, as long as the server supports the feature. - response["org.matrix.msc2732.device_unused_fallback_key_types"] = ( - sync_result.device_unused_fallback_key_types - ) response["device_unused_fallback_key_types"] = ( sync_result.device_unused_fallback_key_types ) diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 4f0b1574b3..fca1f2cc44 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -410,7 +410,6 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): device_id = "xyz" fallback_key = {"alg1:k1": "fallback_key1"} fallback_key2 = {"alg1:k2": "fallback_key2"} - fallback_key3 = {"alg1:k2": "fallback_key3"} otk = {"alg1:k2": "key2"} # we shouldn't have any unused fallback keys yet @@ -531,28 +530,6 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key2}}}, ) - # using the unstable prefix should also set the fallback key - self.get_success( - self.handler.upload_keys_for_user( - local_user, - device_id, - {"org.matrix.msc2732.fallback_keys": fallback_key3}, - ) - ) - - claim_res = self.get_success( - self.handler.claim_one_time_keys( - {local_user: {device_id: {"alg1": 1}}}, - self.requester, - timeout=None, - always_include_fallback_keys=False, - ) - ) - self.assertEqual( - claim_res, - {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, - ) - def test_fallback_key_bulk(self) -> None: """Like test_fallback_key, but claims multiple keys in one handler call.""" alice = f"@alice:{self.hs.hostname}" From 5adb08f3c9a7736ee21fdbff2806de15c4b35aaf Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 30 Sep 2025 11:27:29 -0500 Subject: [PATCH 004/149] Remove `MockClock()` (#18992) Spawning from adding some logcontext debug logs in https://github.com/element-hq/synapse/pull/18966 and since we're not logging at the `set_current_context(...)` level (see reasoning there), this removes some usage of `set_current_context(...)`. Specifically, `MockClock.call_later(...)` doesn't handle logcontexts correctly. It uses the calling logcontext as the callback context (wrong, as the logcontext could finish before the callback finishes) and it didn't reset back to the sentinel context before handing back to the reactor. 
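For context, the sketch below shows the logcontext-safe pattern that `synapse.util.Clock.call_later` follows, simplified for illustration (the helper name is made up and the real implementation carries extra bookkeeping; outside of the `Clock` itself, Synapse code is not expected to call `reactor.callLater` directly):

```python
from synapse.logging.context import PreserveLoggingContext


def call_later_with_sentinel(reactor, delay, callback, *args, **kwargs):
    # Simplified sketch of the pattern used by `Clock.call_later`: the
    # callback is fired by the reactor, so it must not run in the caller's
    # logcontext (which may have finished by then) and must leave the
    # sentinel context in place when it hands control back to the reactor.
    def wrapped_callback(*cb_args, **cb_kwargs):
        # Switch to the sentinel context for the duration of the callback
        # and restore the previous context afterwards.
        with PreserveLoggingContext():
            callback(*cb_args, **cb_kwargs)

    return reactor.callLater(delay, wrapped_callback, *args, **kwargs)
```

`MockClock.call_later(...)` did neither of these things.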
It was like this since it was [introduced 10+ years ago](https://github.com/element-hq/synapse/commit/38da9884e70e8e44bde14c67a7a8a9d49a8b87ac). Instead of fixing the implementation which would just be a copy of our normal `Clock`, we can just remove `MockClock` --- changelog.d/18992.misc | 1 + synapse/app/phone_stats_home.py | 8 +- synapse/util/constants.py | 2 + tests/appservice/test_scheduler.py | 23 ++-- tests/config/test_oauth_delegation.py | 10 +- tests/handlers/test_appservice.py | 6 +- tests/handlers/test_e2e_room_keys.py | 3 +- tests/http/federation/test_srv_resolver.py | 4 +- tests/http/test_matrixfederationclient.py | 4 - tests/media/test_media_retention.py | 7 -- tests/rest/client/test_transactions.py | 15 ++- tests/rest/key/v2/test_remote_key_resource.py | 2 +- tests/server.py | 57 +++++---- tests/storage/test_base.py | 2 +- tests/test_server.py | 8 +- tests/test_state.py | 8 +- tests/test_test_utils.py | 79 ------------ tests/unittest.py | 45 ++++--- tests/util/test_expiring_cache.py | 27 ++-- tests/utils.py | 117 ++---------------- 20 files changed, 139 insertions(+), 289 deletions(-) create mode 100644 changelog.d/18992.misc delete mode 100644 tests/test_test_utils.py diff --git a/changelog.d/18992.misc b/changelog.d/18992.misc new file mode 100644 index 0000000000..ba4470bff1 --- /dev/null +++ b/changelog.d/18992.misc @@ -0,0 +1 @@ +Remove `MockClock()` in tests. diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 69d3ac78fd..7b8e7fe700 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -33,15 +33,17 @@ from synapse.metrics.background_process_metrics import ( run_as_background_process, ) from synapse.types import JsonDict -from synapse.util.constants import ONE_HOUR_SECONDS, ONE_MINUTE_SECONDS +from synapse.util.constants import ( + MILLISECONDS_PER_SECOND, + ONE_HOUR_SECONDS, + ONE_MINUTE_SECONDS, +) if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger("synapse.app.homeserver") -MILLISECONDS_PER_SECOND = 1000 - INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS """ We wait 5 minutes to send the first set of stats as the server can be quite busy the diff --git a/synapse/util/constants.py b/synapse/util/constants.py index 9986017147..7a3d073df5 100644 --- a/synapse/util/constants.py +++ b/synapse/util/constants.py @@ -18,3 +18,5 @@ # readability and catching bugs. 
ONE_MINUTE_SECONDS = 60 ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS + +MILLISECONDS_PER_SECOND = 1000 diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 9498ea1279..0385190f34 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Optional, Sequence, Tuple, cast +from typing import List, Optional, Sequence, Tuple from unittest.mock import AsyncMock, Mock from typing_extensions import TypeAlias @@ -44,13 +44,12 @@ from synapse.types import DeviceListUpdates, JsonDict from synapse.util.clock import Clock from tests import unittest - -from ..utils import MockClock +from tests.server import get_clock class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): def setUp(self) -> None: - self.clock = MockClock() + self.reactor, self.clock = get_clock() self.store = Mock() self.as_api = Mock() @@ -170,14 +169,14 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): def setUp(self) -> None: - self.clock = MockClock() + self.reactor, self.clock = get_clock() self.as_api = Mock() self.store = Mock() self.service = Mock() self.callback = AsyncMock() self.recoverer = _Recoverer( server_name="test_server", - clock=cast(Clock, self.clock), + clock=self.clock, as_api=self.as_api, store=self.store, service=self.service, @@ -202,7 +201,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): txn.send = AsyncMock(return_value=True) txn.complete = AsyncMock(return_value=None) # wait for exp backoff - self.clock.advance_time(2) + self.reactor.advance(2) self.assertEqual(1, txn.send.call_count) self.assertEqual(1, txn.complete.call_count) # 2 because it needs to get None to know there are no more txns @@ -229,21 +228,21 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): self.assertEqual(0, self.store.get_oldest_unsent_txn.call_count) txn.send = AsyncMock(return_value=False) txn.complete = AsyncMock(return_value=None) - self.clock.advance_time(2) + self.reactor.advance(2) self.assertEqual(1, txn.send.call_count) self.assertEqual(0, txn.complete.call_count) self.assertEqual(0, self.callback.call_count) - self.clock.advance_time(4) + self.reactor.advance(4) self.assertEqual(2, txn.send.call_count) self.assertEqual(0, txn.complete.call_count) self.assertEqual(0, self.callback.call_count) - self.clock.advance_time(8) + self.reactor.advance(8) self.assertEqual(3, txn.send.call_count) self.assertEqual(0, txn.complete.call_count) self.assertEqual(0, self.callback.call_count) txn.send = AsyncMock(return_value=True) # successfully send the txn pop_txn = True # returns the txn the first time, then no more. 
- self.clock.advance_time(16) + self.reactor.advance(16) self.assertEqual(1, txn.send.call_count) # new mock reset call count self.assertEqual(1, txn.complete.call_count) self.callback.assert_called_once_with(self.recoverer) @@ -268,7 +267,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): self.assertEqual(0, self.store.get_oldest_unsent_txn.call_count) txn.send = AsyncMock(return_value=False) txn.complete = AsyncMock(return_value=None) - self.clock.advance_time(2) + self.reactor.advance(2) self.assertEqual(1, txn.send.call_count) self.assertEqual(0, txn.complete.call_count) self.assertEqual(0, self.callback.call_count) diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py index 833cfe628b..85e0a3b6b6 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py @@ -231,7 +231,10 @@ class MSC3861OAuthDelegation(TestCase): reactor, clock = get_clock() with self.assertRaises(ConfigError): setup_test_homeserver( - self.addCleanup, reactor=reactor, clock=clock, config=config + cleanup_func=self.addCleanup, + config=config, + reactor=reactor, + clock=clock, ) def test_jwt_auth_cannot_be_enabled(self) -> None: @@ -395,7 +398,10 @@ class MasAuthDelegation(TestCase): reactor, clock = get_clock() with self.assertRaises(ConfigError): setup_test_homeserver( - self.addCleanup, reactor=reactor, clock=clock, config=config + cleanup_func=self.addCleanup, + config=config, + reactor=reactor, + clock=clock, ) @skip_unless(HAS_AUTHLIB, "requires authlib") diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 999d7f5e6c..6516b7db17 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -49,9 +49,9 @@ from synapse.util.clock import Clock from synapse.util.stringutils import random_string from tests import unittest +from tests.server import get_clock from tests.test_utils import event_injection from tests.unittest import override_config -from tests.utils import MockClock class AppServiceHandlerTestCase(unittest.TestCase): @@ -61,6 +61,8 @@ class AppServiceHandlerTestCase(unittest.TestCase): self.mock_store = Mock() self.mock_as_api = AsyncMock() self.mock_scheduler = Mock() + self.reactor, self.clock = get_clock() + hs = Mock() hs.get_datastores.return_value = Mock(main=self.mock_store) self.mock_store.get_appservice_last_pos = AsyncMock(return_value=None) @@ -68,7 +70,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): self.mock_store.set_appservice_stream_type_pos = AsyncMock(return_value=None) hs.get_application_service_api.return_value = self.mock_as_api hs.get_application_service_scheduler.return_value = self.mock_scheduler - hs.get_clock.return_value = MockClock() + hs.get_clock.return_value = self.clock self.handler = ApplicationServicesHandler(hs) self.event_source = hs.get_event_sources() diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 910c24c167..5085a0309b 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -21,7 +21,6 @@ # import copy -from unittest import mock from twisted.internet.testing import MemoryReactor @@ -50,7 +49,7 @@ room_keys = { class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - return self.setup_test_homeserver(replication_layer=mock.Mock()) + return self.setup_test_homeserver() def prepare(self, reactor: MemoryReactor, clock: Clock, hs: 
HomeServer) -> None: self.handler = hs.get_e2e_room_keys_handler() diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index 4fd0fb9226..a359b0a141 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -30,7 +30,7 @@ from synapse.http.federation.srv_resolver import Server, SrvResolver from synapse.logging.context import LoggingContext, current_context from tests import unittest -from tests.utils import MockClock +from tests.server import get_clock class SrvResolverTestCase(unittest.TestCase): @@ -105,7 +105,7 @@ class SrvResolverTestCase(unittest.TestCase): @defer.inlineCallbacks def test_from_cache(self) -> Generator["Deferred[object]", object, None]: - clock = MockClock() + reactor, clock = get_clock() dns_client_mock = Mock(spec_set=["lookupService"]) dns_client_mock.lookupService = Mock(spec_set=[]) diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index d06ea8c3ab..6d87541888 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -63,10 +63,6 @@ def check_logcontext(context: LoggingContextOrSentinel) -> None: class FederationClientTests(HomeserverTestCase): - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver(reactor=reactor, clock=clock) - return hs - def prepare( self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer ) -> None: diff --git a/tests/media/test_media_retention.py b/tests/media/test_media_retention.py index aec1adb040..6dba214514 100644 --- a/tests/media/test_media_retention.py +++ b/tests/media/test_media_retention.py @@ -37,7 +37,6 @@ from synapse.util.stringutils import ( from tests import unittest from tests.unittest import override_config -from tests.utils import MockClock class MediaRetentionTestCase(unittest.HomeserverTestCase): @@ -51,12 +50,6 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase): admin.register_servlets_for_client_rest_resource, ] - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - # We need to be able to test advancing time in the homeserver, so we - # replace the test homeserver's default clock with a MockClock, which - # supports advancing time. 
- return self.setup_test_homeserver(clock=MockClock()) - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.remote_server_name = "remote.homeserver" self.store = hs.get_datastores().main diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 9c9eca5415..c22c1a6612 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -29,16 +29,19 @@ from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_co from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache from synapse.types import ISynapseReactor, JsonDict from synapse.util.clock import Clock +from synapse.util.constants import ( + MILLISECONDS_PER_SECOND, +) from tests import unittest -from tests.utils import MockClock +from tests.server import get_clock reactor = cast(ISynapseReactor, _reactor) class HttpTransactionCacheTestCase(unittest.TestCase): def setUp(self) -> None: - self.clock = MockClock() + self.reactor, self.clock = get_clock() self.hs = Mock() self.hs.get_clock = Mock(return_value=self.clock) self.hs.get_auth = Mock() @@ -180,8 +183,9 @@ class HttpTransactionCacheTestCase(unittest.TestCase): yield self.cache.fetch_or_execute_request( self.mock_request, self.mock_requester, cb, "an arg" ) - # should NOT have cleaned up yet - self.clock.advance_time_msec(CLEANUP_PERIOD_MS / 2) + # Advance time just under the cleanup period. + # Should NOT have cleaned up yet + self.reactor.advance((CLEANUP_PERIOD_MS - 1) / MILLISECONDS_PER_SECOND) yield self.cache.fetch_or_execute_request( self.mock_request, self.mock_requester, cb, "an arg" @@ -189,7 +193,8 @@ class HttpTransactionCacheTestCase(unittest.TestCase): # still using cache cb.assert_called_once_with("an arg") - self.clock.advance_time_msec(CLEANUP_PERIOD_MS) + # Advance time just after the cleanup period. 
+ self.reactor.advance(2 / MILLISECONDS_PER_SECOND) yield self.cache.fetch_or_execute_request( self.mock_request, self.mock_requester, cb, "an arg" diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index cf8241438c..8d2489f718 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -170,7 +170,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # make a second homeserver, configured to use the first one as a key notary self.http_client2 = Mock() - config = default_config(name="keyclient") + config = default_config(server_name="keyclient") config["trusted_key_servers"] = [ { "server_name": self.hs.hostname, diff --git a/tests/server.py b/tests/server.py index 858b41d56d..226bdf4bbe 100644 --- a/tests/server.py +++ b/tests/server.py @@ -114,7 +114,6 @@ from tests.utils import ( POSTGRES_USER, SQLITE_PERSIST_DB, USE_POSTGRES_FOR_TESTS, - MockClock, default_config, ) @@ -786,9 +785,9 @@ class ThreadPool: def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]: - clock = ThreadedMemoryReactorClock() - hs_clock = Clock(clock, server_name="test_server") - return clock, hs_clock + reactor = ThreadedMemoryReactorClock() + hs_clock = Clock(reactor, server_name="test_server") + return reactor, hs_clock @implementer(ITCPTransport) @@ -1020,12 +1019,14 @@ class TestHomeServer(HomeServer): def setup_test_homeserver( + *, cleanup_func: Callable[[Callable[[], None]], None], - name: str = "test", + server_name: str = "test", config: Optional[HomeServerConfig] = None, reactor: Optional[ISynapseReactor] = None, homeserver_to_use: Type[HomeServer] = TestHomeServer, - **kwargs: Any, + db_txn_limit: Optional[int] = None, + **extra_homeserver_attributes: Any, ) -> HomeServer: """ Setup a homeserver suitable for running tests against. Keyword arguments @@ -1034,29 +1035,41 @@ def setup_test_homeserver( If no datastore is supplied, one is created and given to the homeserver. Args: - cleanup_func : The function used to register a cleanup routine for - after the test. + cleanup_func: The function used to register a cleanup routine for after the + test. + server_name: Homeserver name + config: Homeserver config + reactor: Twisted reactor + homeserver_to_use: Homeserver class to instantiate. + db_txn_limit: Gives the maximum number of database transactions to run per + connection before reconnecting. 0 means no limit. If unset, defaults to None + here which will default upstream to `0`. + **extra_homeserver_attributes: Additional keyword arguments to install as + `@cache_in_self` attributes on the homeserver. For example, `clock` will be + installed as `hs._clock`. Calling this method directly is deprecated: you should instead derive from HomeserverTestCase. 
""" if reactor is None: - from twisted.internet import reactor as _reactor - - reactor = cast(ISynapseReactor, _reactor) + reactor = ThreadedMemoryReactorClock() if config is None: - config = default_config(name, parse=True) + config = default_config(server_name, parse=True) + + server_name = config.server.server_name + if not isinstance(server_name, str): + raise ConfigError("Must be a string", ("server_name",)) + + if "clock" not in extra_homeserver_attributes: + extra_homeserver_attributes["clock"] = Clock(reactor, server_name=server_name) config.caches.resize_all_caches() - if "clock" not in kwargs: - kwargs["clock"] = MockClock() - if USE_POSTGRES_FOR_TESTS: test_db = "synapse_test_%s" % uuid.uuid4().hex - database_config = { + database_config: JsonDict = { "name": "psycopg2", "args": { "dbname": test_db, @@ -1088,10 +1101,6 @@ def setup_test_homeserver( "args": {"database": test_db_location, "cp_min": 1, "cp_max": 1}, } - server_name = config.server.server_name - if not isinstance(server_name, str): - raise ConfigError("Must be a string", ("server_name",)) - # Check if we have set up a DB that we can use as a template. global PREPPED_SQLITE_DB_CONN if PREPPED_SQLITE_DB_CONN is None: @@ -1111,8 +1120,8 @@ def setup_test_homeserver( database_config["_TEST_PREPPED_CONN"] = PREPPED_SQLITE_DB_CONN - if "db_txn_limit" in kwargs: - database_config["txn_limit"] = kwargs["db_txn_limit"] + if db_txn_limit is not None: + database_config["txn_limit"] = db_txn_limit database = DatabaseConnectionConfig("master", database_config) config.database.databases = [database] @@ -1139,7 +1148,7 @@ def setup_test_homeserver( db_conn.close() hs = homeserver_to_use( - name, + server_name, config=config, version_string="Synapse/tests", reactor=reactor, @@ -1149,7 +1158,7 @@ def setup_test_homeserver( cleanup_func(hs.cleanup) # Install @cache_in_self attributes - for key, val in kwargs.items(): + for key, val in extra_homeserver_attributes.items(): setattr(hs, "_" + key, val) # Mock TLS diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 11313fc933..577229c119 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -86,7 +86,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): conn_pool.runWithConnection = runWithConnection - config = default_config(name="test", parse=True) + config = default_config(server_name="test", parse=True) hs = TestHomeServer("test", config=config) if USE_POSTGRES_FOR_TESTS: diff --git a/tests/test_server.py b/tests/test_server.py index 69efceafe8..66c5cf9e37 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -55,9 +55,9 @@ class JsonResourceTests(unittest.TestCase): reactor, clock = get_clock() self.reactor = reactor self.homeserver = setup_test_homeserver( - self.addCleanup, - clock=clock, + cleanup_func=self.addCleanup, reactor=self.reactor, + clock=clock, ) def test_handler_for_request(self) -> None: @@ -217,9 +217,9 @@ class OptionsResourceTests(unittest.TestCase): reactor, clock = get_clock() self.reactor = reactor self.homeserver = setup_test_homeserver( - self.addCleanup, - clock=clock, + cleanup_func=self.addCleanup, reactor=self.reactor, + clock=clock, ) class DummyResource(Resource): diff --git a/tests/test_state.py b/tests/test_state.py index 16446c16bc..ab7b52e90c 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -29,7 +29,6 @@ from typing import ( Optional, Set, Tuple, - cast, ) from unittest.mock import AsyncMock, Mock @@ -43,12 +42,11 @@ from synapse.events.snapshot import EventContext from synapse.state 
import StateHandler, StateResolutionHandler, _make_state_cache_entry from synapse.types import MutableStateMap, StateMap from synapse.types.state import StateFilter -from synapse.util.clock import Clock from synapse.util.macaroons import MacaroonGenerator from tests import unittest - -from .utils import MockClock, default_config +from tests.server import get_clock +from tests.utils import default_config _next_event_id = 1000 @@ -248,7 +246,7 @@ class StateTestCase(unittest.TestCase): "hostname", ] ) - clock = cast(Clock, MockClock()) + reactor, clock = get_clock() hs.config = default_config("tesths", True) hs.get_datastores.return_value = Mock( main=self.dummy_store, diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py deleted file mode 100644 index c52f963a7e..0000000000 --- a/tests/test_test_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2014-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# . -# -# Originally licensed under the Apache License, Version 2.0: -# . -# -# [This file includes modifications made by New Vector Limited] -# -# - -from tests import unittest -from tests.utils import MockClock - - -class MockClockTestCase(unittest.TestCase): - def setUp(self) -> None: - self.clock = MockClock() - - def test_advance_time(self) -> None: - start_time = self.clock.time() - - self.clock.advance_time(20) - - self.assertEqual(20, self.clock.time() - start_time) - - def test_later(self) -> None: - invoked = [0, 0] - - def _cb0() -> None: - invoked[0] = 1 - - self.clock.call_later(10, _cb0) - - def _cb1() -> None: - invoked[1] = 1 - - self.clock.call_later(20, _cb1) - - self.assertFalse(invoked[0]) - - self.clock.advance_time(15) - - self.assertTrue(invoked[0]) - self.assertFalse(invoked[1]) - - self.clock.advance_time(5) - - self.assertTrue(invoked[1]) - - def test_cancel_later(self) -> None: - invoked = [0, 0] - - def _cb0() -> None: - invoked[0] = 1 - - t0 = self.clock.call_later(10, _cb0) - - def _cb1() -> None: - invoked[1] = 1 - - self.clock.call_later(20, _cb1) - - self.clock.cancel_call_later(t0) - - self.clock.advance_time(30) - - self.assertFalse(invoked[0]) - self.assertTrue(invoked[1]) diff --git a/tests/unittest.py b/tests/unittest.py index 8be4e635a5..9ab052e7c0 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -80,7 +80,7 @@ from synapse.logging.context import ( from synapse.rest import RegisterServletsFunc from synapse.server import HomeServer from synapse.storage.keys import FetchKeyResult -from synapse.types import JsonDict, Requester, UserID, create_requester +from synapse.types import ISynapseReactor, JsonDict, Requester, UserID, create_requester from synapse.util.clock import Clock from synapse.util.httpresourcetree import create_resource_tree @@ -99,6 +99,8 @@ from tests.utils import checked_cast, default_config, setupdb setupdb() setup_logging() +logger = logging.getLogger(__name__) + TV = TypeVar("TV") _ExcType = TypeVar("_ExcType", bound=BaseException, covariant=True) @@ -135,7 +137,7 @@ def around(target: TV) -> Callable[[Callable[Concatenate[S, P], R]], None]: return _around -_TConfig = TypeVar("_TConfig", Config, 
RootConfig) +_TConfig = TypeVar("_TConfig", Config, HomeServerConfig) def deepcopy_config(config: _TConfig) -> _TConfig: @@ -161,13 +163,13 @@ def deepcopy_config(config: _TConfig) -> _TConfig: @functools.lru_cache(maxsize=8) -def _parse_config_dict(config: str) -> RootConfig: +def _parse_config_dict(config: str) -> HomeServerConfig: config_obj = HomeServerConfig() config_obj.parse_config_dict(json.loads(config), "", "") return config_obj -def make_homeserver_config_obj(config: Dict[str, Any]) -> RootConfig: +def make_homeserver_config_obj(config: Dict[str, Any]) -> HomeServerConfig: """Creates a :class:`HomeServerConfig` instance with the given configuration dict. This is equivalent to:: @@ -392,8 +394,8 @@ class HomeserverTestCase(TestCase): hijacking the authentication system to return a fixed user, and then calling the prepare function. """ + # We need to share the reactor between the homeserver and all of our test utils. self.reactor, self.clock = get_clock() - self._hs_args = {"clock": self.clock, "reactor": self.reactor} self.hs = self.make_homeserver(self.reactor, self.clock) self.hs.get_datastores().main.tests_allow_no_chain_cover_index = False @@ -511,7 +513,7 @@ class HomeserverTestCase(TestCase): Function to be overridden in subclasses. """ - hs = self.setup_test_homeserver() + hs = self.setup_test_homeserver(reactor=reactor, clock=clock) return hs def create_test_resource(self) -> Resource: @@ -634,7 +636,12 @@ class HomeserverTestCase(TestCase): ) def setup_test_homeserver( - self, server_name: Optional[str] = None, **kwargs: Any + self, + server_name: Optional[str] = None, + config: Optional[JsonDict] = None, + reactor: Optional[ISynapseReactor] = None, + clock: Optional[Clock] = None, + **extra_homeserver_attributes: Any, ) -> HomeServer: """ Set up the test homeserver, meant to be called by the overridable @@ -647,12 +654,15 @@ class HomeserverTestCase(TestCase): Returns: synapse.server.HomeServer """ - kwargs = dict(kwargs) - kwargs.update(self._hs_args) - if "config" not in kwargs: + if config is None: config = self.default_config() - else: - config = kwargs["config"] + + # The sane default is to use the same reactor and clock as our other test utils + if reactor is None: + reactor = self.reactor + + if clock is None: + clock = self.clock # The server name can be specified using either the `name` argument or a config # override. The `name` argument takes precedence over any config overrides. @@ -661,19 +671,24 @@ class HomeserverTestCase(TestCase): # Parse the config from a config dict into a HomeServerConfig config_obj = make_homeserver_config_obj(config) - kwargs["config"] = config_obj # The server name in the config is now `name`, if provided, or the `server_name` # from a config override, or the default of "test". Whichever it is, we # construct a homeserver with a matching name. server_name = config_obj.server.server_name - kwargs["name"] = server_name async def run_bg_updates() -> None: with LoggingContext(name="run_bg_updates", server_name=server_name): self.get_success(stor.db_pool.updates.run_background_updates(False)) - hs = setup_test_homeserver(self.addCleanup, **kwargs) + hs = setup_test_homeserver( + cleanup_func=self.addCleanup, + server_name=server_name, + config=config_obj, + reactor=reactor, + clock=clock, + **extra_homeserver_attributes, + ) stor = hs.get_datastores().main # Run the database background updates, when running against "master". 
diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py index bfcc6cd12f..eda2d586f6 100644 --- a/tests/util/test_expiring_cache.py +++ b/tests/util/test_expiring_cache.py @@ -19,23 +19,22 @@ # # -from typing import List, cast +from typing import List from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.clock import Clock -from tests.utils import MockClock +from tests.server import get_clock from .. import unittest class ExpiringCacheTestCase(unittest.HomeserverTestCase): def test_get_set(self) -> None: - clock = MockClock() + reactor, clock = get_clock() cache: ExpiringCache[str, str] = ExpiringCache( cache_name="test", server_name="testserver", - clock=cast(Clock, clock), + clock=clock, max_len=1, ) @@ -44,11 +43,11 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(cache["key"], "value") def test_eviction(self) -> None: - clock = MockClock() + reactor, clock = get_clock() cache: ExpiringCache[str, str] = ExpiringCache( cache_name="test", server_name="testserver", - clock=cast(Clock, clock), + clock=clock, max_len=2, ) @@ -63,11 +62,11 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(cache.get("key3"), "value3") def test_iterable_eviction(self) -> None: - clock = MockClock() + reactor, clock = get_clock() cache: ExpiringCache[str, List[int]] = ExpiringCache( cache_name="test", server_name="testserver", - clock=cast(Clock, clock), + clock=clock, max_len=5, iterable=True, ) @@ -87,25 +86,25 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(cache.get("key4"), [6, 7]) def test_time_eviction(self) -> None: - clock = MockClock() + reactor, clock = get_clock() cache: ExpiringCache[str, int] = ExpiringCache( cache_name="test", server_name="testserver", - clock=cast(Clock, clock), + clock=clock, expiry_ms=1000, ) cache["key"] = 1 - clock.advance_time(0.5) + reactor.advance(0.5) cache["key2"] = 2 self.assertEqual(cache.get("key"), 1) self.assertEqual(cache.get("key2"), 2) - clock.advance_time(0.9) + reactor.advance(0.9) self.assertEqual(cache.get("key"), None) self.assertEqual(cache.get("key2"), 2) - clock.advance_time(1) + reactor.advance(1) self.assertEqual(cache.get("key"), None) self.assertEqual(cache.get("key2"), None) diff --git a/tests/utils.py b/tests/utils.py index d1b66d4159..051388ee2e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -24,27 +24,19 @@ import os import signal from types import FrameType, TracebackType from typing import ( - Any, - Callable, Dict, - List, Literal, Optional, - Tuple, Type, TypeVar, Union, overload, ) -import attr -from typing_extensions import ParamSpec - from synapse.api.constants import EventTypes from synapse.api.room_versions import RoomVersions from synapse.config.homeserver import HomeServerConfig from synapse.config.server import DEFAULT_ROOM_VERSION -from synapse.logging.context import current_context, set_current_context from synapse.server import HomeServer from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import create_engine @@ -140,21 +132,27 @@ def setupdb() -> None: @overload -def default_config(name: str, parse: Literal[False] = ...) -> Dict[str, object]: ... +def default_config( + server_name: str, parse: Literal[False] = ... +) -> Dict[str, object]: ... @overload -def default_config(name: str, parse: Literal[True]) -> HomeServerConfig: ... +def default_config(server_name: str, parse: Literal[True]) -> HomeServerConfig: ... 
def default_config( - name: str, parse: bool = False + server_name: str, parse: bool = False ) -> Union[Dict[str, object], HomeServerConfig]: """ Create a reasonable test config. + + Args: + server_name: homeserver name + parse: TODO """ config_dict = { - "server_name": name, + "server_name": server_name, # Setting this to an empty list turns off federation sending. "federation_sender_instances": [], "media_store_path": "media", @@ -247,101 +245,6 @@ def mock_getRawHeaders(headers=None): # type: ignore[no-untyped-def] return getRawHeaders -P = ParamSpec("P") - - -@attr.s(slots=True, auto_attribs=True) -class Timer: - absolute_time: float - callback: Callable[[], None] - expired: bool - - -# TODO: Make this generic over a ParamSpec? -@attr.s(slots=True, auto_attribs=True) -class Looper: - func: Callable[..., Any] - interval: float # seconds - last: float - args: Tuple[object, ...] - kwargs: Dict[str, object] - - -class MockClock: - now = 1000.0 - - def __init__(self) -> None: - # Timers in no particular order - self.timers: List[Timer] = [] - self.loopers: List[Looper] = [] - - def time(self) -> float: - return self.now - - def time_msec(self) -> int: - return int(self.time() * 1000) - - def call_later( - self, - delay: float, - callback: Callable[P, object], - *args: P.args, - **kwargs: P.kwargs, - ) -> Timer: - ctx = current_context() - - def wrapped_callback() -> None: - set_current_context(ctx) - callback(*args, **kwargs) - - t = Timer(self.now + delay, wrapped_callback, False) - self.timers.append(t) - - return t - - def looping_call( - self, - function: Callable[P, object], - interval: float, - *args: P.args, - **kwargs: P.kwargs, - ) -> None: - self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs)) - - def cancel_call_later(self, timer: Timer, ignore_errs: bool = False) -> None: - if timer.expired: - if not ignore_errs: - raise Exception("Cannot cancel an expired timer") - - timer.expired = True - self.timers = [t for t in self.timers if t != timer] - - # For unit testing - def advance_time(self, secs: float) -> None: - self.now += secs - - timers = self.timers - self.timers = [] - - for t in timers: - if t.expired: - raise Exception("Timer already expired") - - if self.now >= t.absolute_time: - t.expired = True - t.callback() - else: - self.timers.append(t) - - for looped in self.loopers: - if looped.last + looped.interval < self.now: - looped.func(*looped.args, **looped.kwargs) - looped.last = self.now - - def advance_time_msec(self, ms: float) -> None: - self.advance_time(ms / 1000.0) - - async def create_room(hs: HomeServer, room_id: str, creator_id: str) -> None: """Creates and persist a creation event for the given room""" From d1c96ee0f22e5e199eedff96914eda04822a7e35 Mon Sep 17 00:00:00 2001 From: Sebastian Spaeth Date: Tue, 30 Sep 2025 22:17:11 +0200 Subject: [PATCH 005/149] Fix `rc_room_creation` and `rc_reports` docs - remove `per_user` typo (#18998) --- changelog.d/18998.doc | 1 + docs/usage/configuration/config_documentation.md | 10 ++++------ schema/synapse-config.schema.yaml | 10 ++++------ 3 files changed, 9 insertions(+), 12 deletions(-) create mode 100644 changelog.d/18998.doc diff --git a/changelog.d/18998.doc b/changelog.d/18998.doc new file mode 100644 index 0000000000..9ddc2d41c0 --- /dev/null +++ b/changelog.d/18998.doc @@ -0,0 +1 @@ +Fix documentation for `rc_room_creation` and `rc_reports` to clarify that a `per_user` rate limit is not supported. 
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 3c401d569b..7bcf82e7ab 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2006,9 +2006,8 @@ This setting has the following sub-options: Default configuration: ```yaml rc_reports: - per_user: - per_second: 1.0 - burst_count: 5.0 + per_second: 1.0 + burst_count: 5.0 ``` Example configuration: @@ -2031,9 +2030,8 @@ This setting has the following sub-options: Default configuration: ```yaml rc_room_creation: - per_user: - per_second: 0.016 - burst_count: 10.0 + per_second: 0.016 + burst_count: 10.0 ``` Example configuration: diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml index 2a7f94a700..b406af0409 100644 --- a/schema/synapse-config.schema.yaml +++ b/schema/synapse-config.schema.yaml @@ -2259,9 +2259,8 @@ properties: Setting this to a high value allows users to report content quickly, possibly in duplicate. This can result in higher database usage. default: - per_user: - per_second: 1.0 - burst_count: 5.0 + per_second: 1.0 + burst_count: 5.0 examples: - per_second: 2.0 burst_count: 20.0 @@ -2270,9 +2269,8 @@ properties: description: >- Sets rate limits for how often users are able to create rooms. default: - per_user: - per_second: 0.016 - burst_count: 10.0 + per_second: 0.016 + burst_count: 10.0 examples: - per_second: 1.0 burst_count: 5.0 From 396de6544ac8b4fafa5011e3308e9e16f38929de Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Wed, 1 Oct 2025 02:42:09 +0000 Subject: [PATCH 006/149] Cleanly shutdown SynapseHomeServer object (#18828) This PR aims to allow for a clean shutdown of the `SynapseHomeServer` object so that it can be fully deleted and cleaned up by garbage collection without shutting down the entire python process. Fix https://github.com/element-hq/synapse-small-hosts/issues/50 ### Pull Request Checklist * [x] Pull request is based on the develop branch * [x] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. 
* [x] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --------- Co-authored-by: Eric Eastwood --- changelog.d/18828.feature | 1 + scripts-dev/mypy_synapse_plugin.py | 140 +++++++- synapse/_scripts/generate_workers_map.py | 7 +- synapse/_scripts/update_synapse_database.py | 5 +- synapse/app/_base.py | 113 +++++-- synapse/app/generic_worker.py | 20 +- synapse/app/homeserver.py | 39 ++- synapse/app/phone_stats_home.py | 22 +- synapse/appservice/__init__.py | 31 +- synapse/appservice/scheduler.py | 16 +- synapse/config/logger.py | 4 +- synapse/crypto/keyring.py | 18 +- synapse/federation/federation_client.py | 2 + synapse/federation/send_queue.py | 3 + synapse/federation/sender/__init__.py | 32 +- .../sender/per_destination_queue.py | 39 ++- .../federation/sender/transaction_manager.py | 12 + synapse/federation/transport/client.py | 3 + synapse/handlers/account_validity.py | 6 +- synapse/handlers/appservice.py | 10 +- synapse/handlers/deactivate_account.py | 5 +- synapse/handlers/delayed_events.py | 23 +- synapse/handlers/device.py | 20 +- synapse/handlers/federation.py | 13 +- synapse/handlers/federation_event.py | 9 +- synapse/handlers/message.py | 16 +- synapse/handlers/pagination.py | 10 +- synapse/handlers/presence.py | 44 ++- synapse/handlers/profile.py | 2 +- synapse/handlers/register.py | 9 +- synapse/handlers/room_member.py | 10 +- synapse/handlers/sso.py | 2 +- synapse/handlers/stats.py | 8 +- synapse/handlers/sync.py | 2 + synapse/handlers/typing.py | 15 +- synapse/handlers/user_directory.py | 23 +- synapse/handlers/worker_lock.py | 34 +- synapse/http/client.py | 31 +- .../federation/matrix_federation_agent.py | 7 +- .../http/federation/well_known_resolver.py | 4 +- synapse/http/matrixfederationclient.py | 57 +++- synapse/http/proxy.py | 4 +- synapse/http/server.py | 18 +- synapse/http/site.py | 114 ++++++- synapse/media/_base.py | 3 +- synapse/media/media_repository.py | 7 +- synapse/media/url_previewer.py | 11 +- synapse/metrics/_gc.py | 4 +- synapse/metrics/background_process_metrics.py | 27 +- synapse/metrics/common_usage_metrics.py | 8 +- synapse/module_api/__init__.py | 18 +- synapse/notifier.py | 13 +- synapse/push/emailpusher.py | 9 +- synapse/push/httppusher.py | 12 +- synapse/push/pusherpool.py | 11 +- synapse/replication/tcp/client.py | 9 +- synapse/replication/tcp/handler.py | 5 +- synapse/replication/tcp/protocol.py | 21 +- synapse/replication/tcp/redis.py | 20 +- synapse/replication/tcp/resource.py | 9 +- synapse/replication/tcp/streams/__init__.py | 2 + synapse/rest/client/room.py | 5 +- synapse/rest/client/sync.py | 1 + synapse/rest/client/transactions.py | 2 +- synapse/server.py | 309 ++++++++++++++++-- .../server_notices/server_notices_manager.py | 1 + synapse/state/__init__.py | 1 + synapse/storage/_base.py | 2 +- synapse/storage/background_updates.py | 11 +- synapse/storage/controllers/persist_events.py | 7 +- synapse/storage/controllers/purge_events.py | 5 +- synapse/storage/controllers/state.py | 8 +- synapse/storage/database.py | 13 +- synapse/storage/databases/main/cache.py | 7 +- .../storage/databases/main/censor_events.py | 2 +- synapse/storage/databases/main/client_ips.py | 17 +- synapse/storage/databases/main/deviceinbox.py | 13 +- synapse/storage/databases/main/devices.py | 8 +- .../storage/databases/main/end_to_end_keys.py | 4 +- .../databases/main/event_federation.py | 13 +- 
.../databases/main/event_push_actions.py | 26 +- .../databases/main/events_bg_updates.py | 2 +- .../storage/databases/main/events_worker.py | 14 +- synapse/storage/databases/main/lock.py | 30 +- .../databases/main/media_repository.py | 4 +- synapse/storage/databases/main/metrics.py | 12 +- .../databases/main/monthly_active_users.py | 7 +- synapse/storage/databases/main/receipts.py | 2 +- .../storage/databases/main/registration.py | 37 +-- synapse/storage/databases/main/roommember.py | 2 +- synapse/storage/databases/main/session.py | 6 +- .../storage/databases/main/sliding_sync.py | 4 +- .../storage/databases/main/transactions.py | 6 +- synapse/storage/databases/state/store.py | 2 + synapse/util/async_helpers.py | 43 ++- synapse/util/batching_queue.py | 22 +- synapse/util/caches/__init__.py | 2 +- synapse/util/caches/deferred_cache.py | 4 + synapse/util/caches/descriptors.py | 16 +- synapse/util/caches/dictionary_cache.py | 7 +- synapse/util/caches/expiringcache.py | 21 +- synapse/util/caches/lrucache.py | 30 +- synapse/util/caches/response_cache.py | 12 +- synapse/util/clock.py | 237 +++++++++++--- synapse/util/distributor.py | 14 +- synapse/util/metrics.py | 35 +- synapse/util/ratelimitutils.py | 5 +- synapse/util/retryutils.py | 11 +- synapse/util/task_scheduler.py | 18 +- synmark/suites/logging.py | 4 +- synmark/suites/lrucache.py | 7 +- synmark/suites/lrucache_evict.py | 7 +- tests/app/test_homeserver_shutdown.py | 193 +++++++++++ tests/appservice/test_scheduler.py | 4 +- tests/config/test_cache.py | 31 +- tests/handlers/test_appservice.py | 29 +- tests/handlers/test_typing.py | 8 +- .../test_matrix_federation_agent.py | 8 +- tests/logging/test_opentracing.py | 8 +- tests/metrics/test_metrics.py | 9 +- tests/replication/_base.py | 12 +- .../test_federation_sender_shard.py | 3 +- .../test_module_cache_invalidation.py | 2 + tests/rest/client/test_transactions.py | 4 +- tests/server.py | 95 +++++- tests/test_distributor.py | 5 +- tests/util/caches/test_deferred_cache.py | 25 +- tests/util/caches/test_descriptors.py | 35 +- tests/util/test_async_helpers.py | 41 ++- tests/util/test_batching_queue.py | 25 +- tests/util/test_dict_cache.py | 4 +- tests/util/test_expiring_cache.py | 4 + tests/util/test_logcontext.py | 39 ++- tests/util/test_lrucache.py | 82 ++++- tests/util/test_retryutils.py | 10 + 135 files changed, 2190 insertions(+), 756 deletions(-) create mode 100644 changelog.d/18828.feature create mode 100644 tests/app/test_homeserver_shutdown.py diff --git a/changelog.d/18828.feature b/changelog.d/18828.feature new file mode 100644 index 0000000000..e7f3541de4 --- /dev/null +++ b/changelog.d/18828.feature @@ -0,0 +1 @@ +Cleanly shutdown `SynapseHomeServer` object. 
diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index e170aabdae..0b854cdba5 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -68,18 +68,42 @@ PROMETHEUS_METRIC_MISSING_FROM_LIST_TO_CHECK = ErrorCode( category="per-homeserver-tenant-metrics", ) +PREFER_SYNAPSE_CLOCK_CALL_LATER = ErrorCode( + "call-later-not-tracked", + "Prefer using `synapse.util.Clock.call_later` instead of `reactor.callLater`", + category="synapse-reactor-clock", +) + +PREFER_SYNAPSE_CLOCK_LOOPING_CALL = ErrorCode( + "prefer-synapse-clock-looping-call", + "Prefer using `synapse.util.Clock.looping_call` instead of `task.LoopingCall`", + category="synapse-reactor-clock", +) + PREFER_SYNAPSE_CLOCK_CALL_WHEN_RUNNING = ErrorCode( "prefer-synapse-clock-call-when-running", - "`synapse.util.Clock.call_when_running` should be used instead of `reactor.callWhenRunning`", + "Prefer using `synapse.util.Clock.call_when_running` instead of `reactor.callWhenRunning`", category="synapse-reactor-clock", ) PREFER_SYNAPSE_CLOCK_ADD_SYSTEM_EVENT_TRIGGER = ErrorCode( "prefer-synapse-clock-add-system-event-trigger", - "`synapse.util.Clock.add_system_event_trigger` should be used instead of `reactor.addSystemEventTrigger`", + "Prefer using `synapse.util.Clock.add_system_event_trigger` instead of `reactor.addSystemEventTrigger`", category="synapse-reactor-clock", ) +MULTIPLE_INTERNAL_CLOCKS_CREATED = ErrorCode( + "multiple-internal-clocks", + "Only one instance of `clock.Clock` should be created", + category="synapse-reactor-clock", +) + +UNTRACKED_BACKGROUND_PROCESS = ErrorCode( + "untracked-background-process", + "Prefer using `HomeServer.run_as_background_process` method over the bare `run_as_background_process`", + category="synapse-tracked-calls", +) + class Sentinel(enum.Enum): # defining a sentinel in this way allows mypy to correctly handle the @@ -222,6 +246,18 @@ class SynapsePlugin(Plugin): # callback, let's just pass it in while we have it. return lambda ctx: check_prometheus_metric_instantiation(ctx, fullname) + if fullname == "twisted.internet.task.LoopingCall": + return check_looping_call + + if fullname == "synapse.util.clock.Clock": + return check_clock_creation + + if ( + fullname + == "synapse.metrics.background_process_metrics.run_as_background_process" + ): + return check_background_process + return None def get_method_signature_hook( @@ -241,6 +277,13 @@ class SynapsePlugin(Plugin): ): return check_is_cacheable_wrapper + if fullname in ( + "twisted.internet.interfaces.IReactorTime.callLater", + "synapse.types.ISynapseThreadlessReactor.callLater", + "synapse.types.ISynapseReactor.callLater", + ): + return check_call_later + if fullname in ( "twisted.internet.interfaces.IReactorCore.callWhenRunning", "synapse.types.ISynapseThreadlessReactor.callWhenRunning", @@ -258,6 +301,78 @@ class SynapsePlugin(Plugin): return None +def check_clock_creation(ctx: FunctionSigContext) -> CallableType: + """ + Ensure that the only `clock.Clock` instance is the one used by the `HomeServer`. + This is so that the `HomeServer` can cancel any tracked delayed or looping calls + during server shutdown. + + Args: + ctx: The `FunctionSigContext` from mypy. + """ + signature: CallableType = ctx.default_signature + ctx.api.fail( + "Expected the only `clock.Clock` instance to be the one used by the `HomeServer`. 
" + "This is so that the `HomeServer` can cancel any tracked delayed or looping calls " + "during server shutdown", + ctx.context, + code=MULTIPLE_INTERNAL_CLOCKS_CREATED, + ) + + return signature + + +def check_call_later(ctx: MethodSigContext) -> CallableType: + """ + Ensure that the `reactor.callLater` callsites aren't used. + + `synapse.util.Clock.call_later` should always be used instead of `reactor.callLater`. + This is because the `synapse.util.Clock` tracks delayed calls in order to cancel any + outstanding calls during server shutdown. Delayed calls which are either short lived + (<~60s) or frequently called and can be tracked via other means could be candidates for + using `synapse.util.Clock.call_later` with `call_later_cancel_on_shutdown` set to + `False`. There shouldn't be a need to use `reactor.callLater` outside of tests or the + `Clock` class itself. If a need arises, you can use a type ignore comment to disable the + check, e.g. `# type: ignore[call-later-not-tracked]`. + + Args: + ctx: The `FunctionSigContext` from mypy. + """ + signature: CallableType = ctx.default_signature + ctx.api.fail( + "Expected all `reactor.callLater` calls to use `synapse.util.Clock.call_later` " + "instead. This is so that long lived calls can be tracked for cancellation during " + "server shutdown", + ctx.context, + code=PREFER_SYNAPSE_CLOCK_CALL_LATER, + ) + + return signature + + +def check_looping_call(ctx: FunctionSigContext) -> CallableType: + """ + Ensure that the `task.LoopingCall` callsites aren't used. + + `synapse.util.Clock.looping_call` should always be used instead of `task.LoopingCall`. + `synapse.util.Clock` tracks looping calls in order to cancel any outstanding calls + during server shutdown. + + Args: + ctx: The `FunctionSigContext` from mypy. + """ + signature: CallableType = ctx.default_signature + ctx.api.fail( + "Expected all `task.LoopingCall` instances to use `synapse.util.Clock.looping_call` " + "instead. This is so that long lived calls can be tracked for cancellation during " + "server shutdown", + ctx.context, + code=PREFER_SYNAPSE_CLOCK_LOOPING_CALL, + ) + + return signature + + def check_call_when_running(ctx: MethodSigContext) -> CallableType: """ Ensure that the `reactor.callWhenRunning` callsites aren't used. @@ -312,6 +427,27 @@ def check_add_system_event_trigger(ctx: MethodSigContext) -> CallableType: return signature +def check_background_process(ctx: FunctionSigContext) -> CallableType: + """ + Ensure that calls to `run_as_background_process` use the `HomeServer` method. + This is so that the `HomeServer` can cancel any running background processes during + server shutdown. + + Args: + ctx: The `FunctionSigContext` from mypy. + """ + signature: CallableType = ctx.default_signature + ctx.api.fail( + "Prefer using `HomeServer.run_as_background_process` method over the bare " + "`run_as_background_process`. 
This is so that the `HomeServer` can cancel " + "any background processes during server shutdown", + ctx.context, + code=UNTRACKED_BACKGROUND_PROCESS, + ) + + return signature + + def analyze_prometheus_metric_classes(ctx: ClassDefContext) -> None: """ Cross-check the list of Prometheus metric classes against the diff --git a/synapse/_scripts/generate_workers_map.py b/synapse/_scripts/generate_workers_map.py index 8878e364e2..f66c01040c 100755 --- a/synapse/_scripts/generate_workers_map.py +++ b/synapse/_scripts/generate_workers_map.py @@ -157,7 +157,12 @@ def get_registered_paths_for_default( # TODO We only do this to avoid an error, but don't need the database etc hs.setup() registered_paths = get_registered_paths_for_hs(hs) - hs.cleanup() + # NOTE: a more robust implementation would properly shutdown/cleanup each server + # to avoid resource buildup. + # However, the call to `shutdown` is `async` so it would require additional complexity here. + # We are intentionally skipping this cleanup because this is a short-lived, one-off + # utility script where the simpler approach is sufficient and we shouldn't run into + # any resource buildup issues. return registered_paths diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py index caaecda161..ad02f0ed88 100644 --- a/synapse/_scripts/update_synapse_database.py +++ b/synapse/_scripts/update_synapse_database.py @@ -28,7 +28,6 @@ import yaml from twisted.internet import defer, reactor as reactor_ from synapse.config.homeserver import HomeServerConfig -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.server import HomeServer from synapse.storage import DataStore from synapse.types import ISynapseReactor @@ -53,7 +52,6 @@ class MockHomeserver(HomeServer): def run_background_updates(hs: HomeServer) -> None: - server_name = hs.hostname main = hs.get_datastores().main state = hs.get_datastores().state @@ -67,9 +65,8 @@ def run_background_updates(hs: HomeServer) -> None: def run() -> None: # Apply all background updates on the database. defer.ensureDeferred( - run_as_background_process( + hs.run_as_background_process( "background_updates", - server_name, run_background_updates, ) ) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 5638724896..655f684ecf 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -28,6 +28,7 @@ import sys import traceback import warnings from textwrap import indent +from threading import Thread from typing import ( TYPE_CHECKING, Any, @@ -40,6 +41,7 @@ from typing import ( Tuple, cast, ) +from wsgiref.simple_server import WSGIServer from cryptography.utils import CryptographyDeprecationWarning from typing_extensions import ParamSpec @@ -97,22 +99,47 @@ reactor = cast(ISynapseReactor, _reactor) logger = logging.getLogger(__name__) -# list of tuples of function, args list, kwargs dict -_sighup_callbacks: List[ - Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]] -] = [] +_instance_id_to_sighup_callbacks_map: Dict[ + str, List[Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]] +] = {} +""" +Map from homeserver instance_id to a list of callbacks. + +We use `instance_id` instead of `server_name` because it's possible to have multiple +workers running in the same process with the same `server_name`. 
+""" P = ParamSpec("P") -def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None: +def register_sighup( + homeserver_instance_id: str, + func: Callable[P, None], + *args: P.args, + **kwargs: P.kwargs, +) -> None: """ Register a function to be called when a SIGHUP occurs. Args: + homeserver_instance_id: The unique ID for this Synapse process instance + (`hs.get_instance_id()`) that this hook is associated with. func: Function to be called when sent a SIGHUP signal. *args, **kwargs: args and kwargs to be passed to the target function. """ - _sighup_callbacks.append((func, args, kwargs)) + + _instance_id_to_sighup_callbacks_map.setdefault(homeserver_instance_id, []).append( + (func, args, kwargs) + ) + + +def unregister_sighups(instance_id: str) -> None: + """ + Unregister all sighup functions associated with this Synapse instance. + + Args: + instance_id: Unique ID for this Synapse process instance. + """ + _instance_id_to_sighup_callbacks_map.pop(instance_id, []) def start_worker_reactor( @@ -281,7 +308,9 @@ def register_start( clock.call_when_running(lambda: defer.ensureDeferred(wrapper())) -def listen_metrics(bind_addresses: StrCollection, port: int) -> None: +def listen_metrics( + bind_addresses: StrCollection, port: int +) -> List[Tuple[WSGIServer, Thread]]: """ Start Prometheus metrics server. @@ -294,14 +323,22 @@ def listen_metrics(bind_addresses: StrCollection, port: int) -> None: bytecode at a time), this still works because the metrics thread can preempt the Twisted reactor thread between bytecode boundaries and the metrics thread gets scheduled with roughly equal priority to the Twisted reactor thread. + + Returns: + List of WSGIServer with the thread they are running on. """ from prometheus_client import start_http_server as start_http_server_prometheus from synapse.metrics import RegistryProxy + servers: List[Tuple[WSGIServer, Thread]] = [] for host in bind_addresses: logger.info("Starting metrics listener on %s:%d", host, port) - start_http_server_prometheus(port, addr=host, registry=RegistryProxy) + server, thread = start_http_server_prometheus( + port, addr=host, registry=RegistryProxy + ) + servers.append((server, thread)) + return servers def listen_manhole( @@ -309,7 +346,7 @@ def listen_manhole( port: int, manhole_settings: ManholeConfig, manhole_globals: dict, -) -> None: +) -> List[Port]: # twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing # warning. It's fixed by https://github.com/twisted/twisted/pull/1522), so # suppress the warning for now. @@ -321,7 +358,7 @@ def listen_manhole( from synapse.util.manhole import manhole - listen_tcp( + return listen_tcp( bind_addresses, port, manhole(settings=manhole_settings, globals=manhole_globals), @@ -498,7 +535,7 @@ def refresh_certificate(hs: "HomeServer") -> None: logger.info("Context factories updated.") -async def start(hs: "HomeServer") -> None: +async def start(hs: "HomeServer", freeze: bool = True) -> None: """ Start a Synapse server or worker. @@ -509,6 +546,11 @@ async def start(hs: "HomeServer") -> None: Args: hs: homeserver instance + freeze: whether to freeze the homeserver base objects in the garbage collector. + May improve garbage collection performance by marking objects with an effectively + static lifetime as frozen so they don't need to be considered for cleanup. + If you ever want to `shutdown` the homeserver, this needs to be + False otherwise the homeserver cannot be garbage collected after `shutdown`. 
""" server_name = hs.hostname reactor = hs.get_reactor() @@ -541,12 +583,17 @@ async def start(hs: "HomeServer") -> None: # we're not using systemd. sdnotify(b"RELOADING=1") - for i, args, kwargs in _sighup_callbacks: - i(*args, **kwargs) + for sighup_callbacks in _instance_id_to_sighup_callbacks_map.values(): + for func, args, kwargs in sighup_callbacks: + func(*args, **kwargs) sdnotify(b"READY=1") - return run_as_background_process( + # It's okay to ignore the linter error here and call + # `run_as_background_process` directly because `_handle_sighup` operates + # outside of the scope of a specific `HomeServer` instance and holds no + # references to it which would prevent a clean shutdown. + return run_as_background_process( # type: ignore[untracked-background-process] "sighup", server_name, _handle_sighup, @@ -564,8 +611,8 @@ async def start(hs: "HomeServer") -> None: signal.signal(signal.SIGHUP, run_sighup) - register_sighup(refresh_certificate, hs) - register_sighup(reload_cache_config, hs.config) + register_sighup(hs.get_instance_id(), refresh_certificate, hs) + register_sighup(hs.get_instance_id(), reload_cache_config, hs.config) # Apply the cache config. hs.config.caches.resize_all_caches() @@ -603,7 +650,11 @@ async def start(hs: "HomeServer") -> None: logger.info("Shutting down...") # Log when we start the shut down process. - hs.get_clock().add_system_event_trigger("before", "shutdown", log_shutdown) + hs.register_sync_shutdown_handler( + phase="before", + eventType="shutdown", + shutdown_func=log_shutdown, + ) setup_sentry(hs) setup_sdnotify(hs) @@ -632,18 +683,24 @@ async def start(hs: "HomeServer") -> None: # `REQUIRED_ON_BACKGROUND_TASK_STARTUP` start_phone_stats_home(hs) - # We now freeze all allocated objects in the hopes that (almost) - # everything currently allocated are things that will be used for the - # rest of time. Doing so means less work each GC (hopefully). - # - # PyPy does not (yet?) implement gc.freeze() - if hasattr(gc, "freeze"): - gc.collect() - gc.freeze() + if freeze: + # We now freeze all allocated objects in the hopes that (almost) + # everything currently allocated are things that will be used for the + # rest of time. Doing so means less work each GC (hopefully). + # + # Note that freezing the homeserver object means that it won't be able to be + # garbage collected in the case of attempting an in-memory `shutdown`. This only + # needs to be considered if such a case is desirable. Exiting the entire Python + # process will function expectedly either way. + # + # PyPy does not (yet?) implement gc.freeze() + if hasattr(gc, "freeze"): + gc.collect() + gc.freeze() - # Speed up shutdowns by freezing all allocated objects. This moves everything - # into the permanent generation and excludes them from the final GC. - atexit.register(gc.freeze) + # Speed up process exit by freezing all allocated objects. This moves everything + # into the permanent generation and excludes them from the final GC. 
+ atexit.register(gc.freeze) def reload_cache_config(config: HomeServerConfig) -> None: diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 51b8adaa27..7e8b47c20a 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -278,11 +278,13 @@ class GenericWorkerServer(HomeServer): self._listen_http(listener) elif listener.type == "manhole": if isinstance(listener, TCPListenerConfig): - _base.listen_manhole( - listener.bind_addresses, - listener.port, - manhole_settings=self.config.server.manhole_settings, - manhole_globals={"hs": self}, + self._listening_services.extend( + _base.listen_manhole( + listener.bind_addresses, + listener.port, + manhole_settings=self.config.server.manhole_settings, + manhole_globals={"hs": self}, + ) ) else: raise ConfigError( @@ -296,9 +298,11 @@ class GenericWorkerServer(HomeServer): ) else: if isinstance(listener, TCPListenerConfig): - _base.listen_metrics( - listener.bind_addresses, - listener.port, + self._metrics_listeners.extend( + _base.listen_metrics( + listener.bind_addresses, + listener.port, + ) ) else: raise ConfigError( diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 35d633d527..3c691906ca 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -22,7 +22,7 @@ import logging import os import sys -from typing import Dict, Iterable, List +from typing import Dict, Iterable, List, Optional from twisted.internet.tcp import Port from twisted.web.resource import EncodingResourceWrapper, Resource @@ -70,6 +70,7 @@ from synapse.rest.synapse.client import build_synapse_client_resource_tree from synapse.rest.well_known import well_known_resource from synapse.server import HomeServer from synapse.storage import DataStore +from synapse.types import ISynapseReactor from synapse.util.check_dependencies import VERSION, check_requirements from synapse.util.httpresourcetree import create_resource_tree from synapse.util.module_loader import load_module @@ -277,11 +278,13 @@ class SynapseHomeServer(HomeServer): ) elif listener.type == "manhole": if isinstance(listener, TCPListenerConfig): - _base.listen_manhole( - listener.bind_addresses, - listener.port, - manhole_settings=self.config.server.manhole_settings, - manhole_globals={"hs": self}, + self._listening_services.extend( + _base.listen_manhole( + listener.bind_addresses, + listener.port, + manhole_settings=self.config.server.manhole_settings, + manhole_globals={"hs": self}, + ) ) else: raise ConfigError( @@ -294,9 +297,11 @@ class SynapseHomeServer(HomeServer): ) else: if isinstance(listener, TCPListenerConfig): - _base.listen_metrics( - listener.bind_addresses, - listener.port, + self._metrics_listeners.extend( + _base.listen_metrics( + listener.bind_addresses, + listener.port, + ) ) else: raise ConfigError( @@ -340,12 +345,23 @@ def load_or_generate_config(argv_options: List[str]) -> HomeServerConfig: return config -def setup(config: HomeServerConfig) -> SynapseHomeServer: +def setup( + config: HomeServerConfig, + reactor: Optional[ISynapseReactor] = None, + freeze: bool = True, +) -> SynapseHomeServer: """ Create and setup a Synapse homeserver instance given a configuration. Args: config: The configuration for the homeserver. + reactor: Optionally provide a reactor to use. Can be useful in different + scenarios that you want control over the reactor, such as tests. + freeze: whether to freeze the homeserver base objects in the garbage collector. 
+ May improve garbage collection performance by marking objects with an effectively + static lifetime as frozen so they don't need to be considered for cleanup. + If you ever want to `shutdown` the homeserver, this needs to be + False otherwise the homeserver cannot be garbage collected after `shutdown`. Returns: A homeserver instance. @@ -384,6 +400,7 @@ def setup(config: HomeServerConfig) -> SynapseHomeServer: config.server.server_name, config=config, version_string=f"Synapse/{VERSION}", + reactor=reactor, ) setup_logging(hs, config, use_worker_options=False) @@ -405,7 +422,7 @@ def setup(config: HomeServerConfig) -> SynapseHomeServer: # Loading the provider metadata also ensures the provider config is valid. await oidc.load_metadata() - await _base.start(hs) + await _base.start(hs, freeze) hs.get_datastores().main.db_pool.updates.start_doing_background_updates() diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 7b8e7fe700..4bbc33cba2 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -29,9 +29,6 @@ from prometheus_client import Gauge from twisted.internet import defer from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import ( - run_as_background_process, -) from synapse.types import JsonDict from synapse.util.constants import ( MILLISECONDS_PER_SECOND, @@ -87,8 +84,6 @@ def phone_stats_home( stats: JsonDict, stats_process: List[Tuple[int, "resource.struct_rusage"]] = _stats_process, ) -> "defer.Deferred[None]": - server_name = hs.hostname - async def _phone_stats_home( hs: "HomeServer", stats: JsonDict, @@ -202,8 +197,8 @@ def phone_stats_home( except Exception as e: logger.warning("Error reporting stats: %s", e) - return run_as_background_process( - "phone_stats_home", server_name, _phone_stats_home, hs, stats, stats_process + return hs.run_as_background_process( + "phone_stats_home", _phone_stats_home, hs, stats, stats_process ) @@ -265,9 +260,8 @@ def start_phone_stats_home(hs: "HomeServer") -> None: float(hs.config.server.max_mau_value) ) - return run_as_background_process( + return hs.run_as_background_process( "generate_monthly_active_users", - server_name, _generate_monthly_active_users, ) @@ -287,10 +281,16 @@ def start_phone_stats_home(hs: "HomeServer") -> None: # We need to defer this init for the cases that we daemonize # otherwise the process ID we get is that of the non-daemon process - clock.call_later(0, performance_stats_init) + clock.call_later( + 0, + performance_stats_init, + ) # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes clock.call_later( - INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS, phone_stats_home, hs, stats + INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS, + phone_stats_home, + hs, + stats, ) diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 2d8d382e68..1d0735ca1d 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -23,15 +23,33 @@ import logging import re from enum import Enum -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Pattern, Sequence +from typing import ( + TYPE_CHECKING, + Dict, + Iterable, + List, + Optional, + Pattern, + Sequence, + cast, +) import attr from netaddr import IPSet +from twisted.internet import reactor + from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.types import DeviceListUpdates, JsonDict, JsonMapping, UserID +from synapse.types 
import ( + DeviceListUpdates, + ISynapseThreadlessReactor, + JsonDict, + JsonMapping, + UserID, +) from synapse.util.caches.descriptors import _CacheContext, cached +from synapse.util.clock import Clock if TYPE_CHECKING: from synapse.appservice.api import ApplicationServiceApi @@ -98,6 +116,15 @@ class ApplicationService: self.sender = sender # The application service user should be part of the server's domain. self.server_name = sender.domain # nb must be called this for @cached + + # Ideally we would require passing in the `HomeServer` `Clock` instance. + # However, this is not currently possible as there are places which use + # `@cached` that aren't aware of the `HomeServer` instance. + # nb must be called this for @cached + self.clock = Clock( + cast(ISynapseThreadlessReactor, reactor), server_name=self.server_name + ) # type: ignore[multiple-internal-clocks] + self.namespaces = self._check_namespaces(namespaces) self.id = id self.ip_range_whitelist = ip_range_whitelist diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index c8678406a1..b4de759b67 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -81,7 +81,6 @@ from synapse.appservice import ( from synapse.appservice.api import ApplicationServiceApi from synapse.events import EventBase from synapse.logging.context import run_in_background -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases.main import DataStore from synapse.types import DeviceListUpdates, JsonMapping from synapse.util.clock import Clock @@ -200,6 +199,7 @@ class _ServiceQueuer: ) self.server_name = hs.hostname self.clock = hs.get_clock() + self.hs = hs self._store = hs.get_datastores().main def start_background_request(self, service: ApplicationService) -> None: @@ -207,9 +207,7 @@ class _ServiceQueuer: if service.id in self.requests_in_flight: return - run_as_background_process( - "as-sender", self.server_name, self._send_request, service - ) + self.hs.run_as_background_process("as-sender", self._send_request, service) async def _send_request(self, service: ApplicationService) -> None: # sanity-check: we shouldn't get here if this service already has a sender @@ -361,6 +359,7 @@ class _TransactionController: def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname self.clock = hs.get_clock() + self.hs = hs self.store = hs.get_datastores().main self.as_api = hs.get_application_service_api() @@ -448,6 +447,7 @@ class _TransactionController: recoverer = self.RECOVERER_CLASS( self.server_name, self.clock, + self.hs, self.store, self.as_api, service, @@ -494,6 +494,7 @@ class _Recoverer: def __init__( self, server_name: str, clock: Clock, + hs: "HomeServer", store: DataStore, as_api: ApplicationServiceApi, service: ApplicationService, @@ -501,6 +502,7 @@ class _Recoverer: ): self.server_name = server_name self.clock = clock + self.hs = hs self.store = store self.as_api = as_api self.service = service @@ -513,9 +515,8 @@ class _Recoverer: logger.info("Scheduling retries on %s in %fs", self.service.id, delay) self.scheduled_recovery = self.clock.call_later( delay, - run_as_background_process, + self.hs.run_as_background_process, "as-recoverer", - self.server_name, self.retry, ) @@ -535,9 +536,8 @@ class _Recoverer: if self.scheduled_recovery: self.clock.cancel_call_later(self.scheduled_recovery) # Run a retry, which will reschedule a recovery if it fails. 
- run_as_background_process( + self.hs.run_as_background_process( "retry", - self.server_name, self.retry, ) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 0531ae7875..9dde4c4003 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -345,7 +345,9 @@ def setup_logging( # Add a SIGHUP handler to reload the logging configuration, if one is available. from synapse.app import _base as appbase - appbase.register_sighup(_reload_logging_config, log_config_path) + appbase.register_sighup( + hs.get_instance_id(), _reload_logging_config, log_config_path + ) # Log immediately so we can grep backwards. logger.warning("***** STARTING SERVER *****") diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index eac2d776f9..258bc29357 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -172,7 +172,7 @@ class Keyring: _FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]] ] = BatchingQueue( name="keyring_server", - server_name=self.server_name, + hs=hs, clock=hs.get_clock(), # The method called to fetch each key process_batch_callback=self._inner_fetch_key_requests, @@ -194,6 +194,14 @@ class Keyring: valid_until_ts=2**63, # fake future timestamp ) + def shutdown(self) -> None: + """ + Prepares the Keyring for garbage collection by shutting down its queues. + """ + self._fetch_keys_queue.shutdown() + for key_fetcher in self._key_fetchers: + key_fetcher.shutdown() + async def verify_json_for_server( self, server_name: str, @@ -479,11 +487,17 @@ class KeyFetcher(metaclass=abc.ABCMeta): self.server_name = hs.hostname self._queue = BatchingQueue( name=self.__class__.__name__, - server_name=self.server_name, + hs=hs, clock=hs.get_clock(), process_batch_callback=self._fetch_keys, ) + def shutdown(self) -> None: + """ + Prepares the KeyFetcher for garbage collection by shutting down its queue. 
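+ Called from `Keyring.shutdown` for each of its key fetchers.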
+ """ + self._queue.shutdown() + async def get_keys( self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int ) -> Dict[str, FetchKeyResult]: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 41595043d1..8c91336dbc 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -148,6 +148,7 @@ class FederationClient(FederationBase): self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache( cache_name="get_pdu_cache", server_name=self.server_name, + hs=self.hs, clock=self._clock, max_len=1000, expiry_ms=120 * 1000, @@ -167,6 +168,7 @@ class FederationClient(FederationBase): ] = ExpiringCache( cache_name="get_room_hierarchy_cache", server_name=self.server_name, + hs=self.hs, clock=self._clock, max_len=1000, expiry_ms=5 * 60 * 1000, diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 2fdee9ac54..759df9836b 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -144,6 +144,9 @@ class FederationRemoteSendQueue(AbstractFederationSender): self.clock.looping_call(self._clear_queue, 30 * 1000) + def shutdown(self) -> None: + """Stops this federation sender instance from sending further transactions.""" + def _next_pos(self) -> int: pos = self.pos self.pos += 1 diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 8e3619d1bc..4410ffc5c5 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -168,7 +168,6 @@ from synapse.metrics import ( events_processed_counter, ) from synapse.metrics.background_process_metrics import ( - run_as_background_process, wrap_as_background_process, ) from synapse.types import ( @@ -232,6 +231,11 @@ WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC = 5 class AbstractFederationSender(metaclass=abc.ABCMeta): + @abc.abstractmethod + def shutdown(self) -> None: + """Stops this federation sender instance from sending further transactions.""" + raise NotImplementedError() + @abc.abstractmethod def notify_new_events(self, max_token: RoomStreamToken) -> None: """This gets called when we have some new events we might want to @@ -326,6 +330,7 @@ class _DestinationWakeupQueue: _MAX_TIME_IN_QUEUE = 30.0 sender: "FederationSender" = attr.ib() + hs: "HomeServer" = attr.ib() server_name: str = attr.ib() """ Our homeserver name (used to label metrics) (`hs.hostname`). @@ -453,18 +458,30 @@ class FederationSender(AbstractFederationSender): 1.0 / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second ) self._destination_wakeup_queue = _DestinationWakeupQueue( - self, self.server_name, self.clock, max_delay_s=rr_txn_interval_per_room_s + self, + hs, + self.server_name, + self.clock, + max_delay_s=rr_txn_interval_per_room_s, ) + # It is important for `_is_shutdown` to be instantiated before the looping call + # for `wake_destinations_needing_catchup`. 
+ self._is_shutdown = False + # Regularly wake up destinations that have outstanding PDUs to be caught up self.clock.looping_call_now( - run_as_background_process, + self.hs.run_as_background_process, WAKEUP_RETRY_PERIOD_SEC * 1000.0, "wake_destinations_needing_catchup", - self.server_name, self._wake_destinations_needing_catchup, ) + def shutdown(self) -> None: + self._is_shutdown = True + for queue in self._per_destination_queues.values(): + queue.shutdown() + def _get_per_destination_queue( self, destination: str ) -> Optional[PerDestinationQueue]: @@ -503,16 +520,15 @@ class FederationSender(AbstractFederationSender): return # fire off a processing loop in the background - run_as_background_process( + self.hs.run_as_background_process( "process_event_queue_for_federation", - self.server_name, self._process_event_queue_loop, ) async def _process_event_queue_loop(self) -> None: try: self._is_processing = True - while True: + while not self._is_shutdown: last_token = await self.store.get_federation_out_pos("events") ( next_token, @@ -1123,7 +1139,7 @@ class FederationSender(AbstractFederationSender): last_processed: Optional[str] = None - while True: + while not self._is_shutdown: destinations_to_wake = ( await self.store.get_catch_up_outstanding_destinations(last_processed) ) diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 4c844d403a..845af92fac 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -28,6 +28,8 @@ from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tupl import attr from prometheus_client import Counter +from twisted.internet import defer + from synapse.api.constants import EduTypes from synapse.api.errors import ( FederationDeniedError, @@ -41,7 +43,6 @@ from synapse.handlers.presence import format_user_presence_state from synapse.logging import issue9533_logger from synapse.logging.opentracing import SynapseTags, set_tag from synapse.metrics import SERVER_NAME_LABEL, sent_transactions_counter -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import JsonDict, ReadReceipt from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter from synapse.visibility import filter_events_for_server @@ -79,6 +80,7 @@ MAX_PRESENCE_STATES_PER_EDU = 50 class PerDestinationQueue: """ Manages the per-destination transmission queues. + Runs until `shutdown()` is called on the queue. Args: hs @@ -94,6 +96,7 @@ class PerDestinationQueue: destination: str, ): self.server_name = hs.hostname + self._hs = hs self._clock = hs.get_clock() self._storage_controllers = hs.get_storage_controllers() self._store = hs.get_datastores().main @@ -117,6 +120,8 @@ class PerDestinationQueue: self._destination = destination self.transmission_loop_running = False + self._transmission_loop_enabled = True + self.active_transmission_loop: Optional[defer.Deferred] = None # Flag to signal to any running transmission loop that there is new data # queued up to be sent. @@ -171,6 +176,20 @@ class PerDestinationQueue: def __str__(self) -> str: return "PerDestinationQueue[%s]" % self._destination + def shutdown(self) -> None: + """Instruct the queue to stop processing any further requests""" + self._transmission_loop_enabled = False + # The transaction manager must be shutdown before cancelling the active + # transmission loop. 
Otherwise the transmission loop can enter a new cycle of + # sleeping before retrying since the shutdown flag of the _transaction_manager + # hasn't been set yet. + self._transaction_manager.shutdown() + try: + if self.active_transmission_loop is not None: + self.active_transmission_loop.cancel() + except Exception: + pass + def pending_pdu_count(self) -> int: return len(self._pending_pdus) @@ -309,11 +328,14 @@ class PerDestinationQueue: ) return + if not self._transmission_loop_enabled: + logger.warning("Shutdown has been requested. Not sending transaction") + return + logger.debug("TX [%s] Starting transaction loop", self._destination) - run_as_background_process( + self.active_transmission_loop = self._hs.run_as_background_process( "federation_transaction_transmission_loop", - self.server_name, self._transaction_transmission_loop, ) @@ -321,13 +343,13 @@ class PerDestinationQueue: pending_pdus: List[EventBase] = [] try: self.transmission_loop_running = True - # This will throw if we wouldn't retry. We do this here so we fail # quickly, but we will later check this again in the http client, # hence why we throw the result away. await get_retry_limiter( destination=self._destination, our_server_name=self.server_name, + hs=self._hs, clock=self._clock, store=self._store, ) @@ -339,7 +361,7 @@ class PerDestinationQueue: # not caught up yet return - while True: + while self._transmission_loop_enabled: self._new_data_to_send = False async with _TransactionQueueManager(self) as ( @@ -352,8 +374,8 @@ class PerDestinationQueue: # If we've gotten told about new things to send during # checking for things to send, we try looking again. # Otherwise new PDUs or EDUs might arrive in the meantime, - # but not get sent because we hold the - # `transmission_loop_running` flag. + # but not get sent because we currently have an + # `_active_transmission_loop` running. if self._new_data_to_send: continue else: @@ -442,6 +464,7 @@ class PerDestinationQueue: ) finally: # We want to be *very* sure we clear this after we stop processing + self.active_transmission_loop = None self.transmission_loop_running = False async def _catch_up_transmission_loop(self) -> None: @@ -469,7 +492,7 @@ class PerDestinationQueue: last_successful_stream_ordering: int = _tmp_last_successful_stream_ordering # get at most 50 catchup room/PDUs - while True: + while self._transmission_loop_enabled: event_ids = await self._store.get_catch_up_room_event_ids( self._destination, last_successful_stream_ordering ) diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index b548d9ed70..f47c011487 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -72,6 +72,12 @@ class TransactionManager: # HACK to get unique tx id self._next_txn_id = int(self.clock.time_msec()) + self._is_shutdown = False + + def shutdown(self) -> None: + self._is_shutdown = True + self._transport_layer.shutdown() + @measure_func("_send_new_transaction") async def send_new_transaction( self, @@ -86,6 +92,12 @@ class TransactionManager: edus: List of EDUs to send """ + if self._is_shutdown: + logger.warning( + "TransactionManager has been shutdown, not sending transaction" + ) + return + # Make a transaction-sending opentracing span. This span follows on from # all the edus in that transaction. 
This needs to be done since there is # no active span here, so if the edus were not received by the remote the diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 5a5dc45f10..02e56e8e27 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -70,6 +70,9 @@ class TransportLayerClient: self.client = hs.get_federation_http_client() self._is_mine_server_name = hs.is_mine_server_name + def shutdown(self) -> None: + self.client.shutdown() + async def get_room_state_ids( self, destination: str, room_id: str, event_id: str ) -> JsonDict: diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 39a22b8cbb..eed50ef69a 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -37,10 +37,8 @@ logger = logging.getLogger(__name__) class AccountValidityHandler: def __init__(self, hs: "HomeServer"): - self.hs = hs - self.server_name = ( - hs.hostname - ) # nb must be called this for @wrap_as_background_process + self.hs = hs # nb must be called this for @wrap_as_background_process + self.server_name = hs.hostname self.config = hs.config self.store = hs.get_datastores().main self.send_email_handler = hs.get_send_email_handler() diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index bf36cf39a1..6536d9fe51 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -47,7 +47,6 @@ from synapse.metrics import ( event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import ( - run_as_background_process, wrap_as_background_process, ) from synapse.storage.databases.main.directory import RoomAliasMapping @@ -76,9 +75,8 @@ events_processed_counter = Counter( class ApplicationServicesHandler: def __init__(self, hs: "HomeServer"): - self.server_name = ( - hs.hostname - ) # nb must be called this for @wrap_as_background_process + self.server_name = hs.hostname + self.hs = hs # nb must be called this for @wrap_as_background_process self.store = hs.get_datastores().main self.is_mine_id = hs.is_mine_id self.appservice_api = hs.get_application_service_api() @@ -171,8 +169,8 @@ class ApplicationServicesHandler: except Exception: logger.error("Application Services Failure") - run_as_background_process( - "as_scheduler", self.server_name, start_scheduler + self.hs.run_as_background_process( + "as_scheduler", start_scheduler ) self.started_scheduler = True diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index c0684380a7..204dffd288 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -24,7 +24,6 @@ from typing import TYPE_CHECKING, Optional from synapse.api.constants import Membership from synapse.api.errors import SynapseError -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.deactivate_account import ( ReplicationNotifyAccountDeactivatedServlet, ) @@ -272,8 +271,8 @@ class DeactivateAccountHandler: pending deactivation, if it isn't already running. 
""" if not self._user_parter_running: - run_as_background_process( - "user_parter_loop", self.server_name, self._user_parter_loop + self.hs.run_as_background_process( + "user_parter_loop", self._user_parter_loop ) async def _user_parter_loop(self) -> None: diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py index d47e3fd263..79dd3e8416 100644 --- a/synapse/handlers/delayed_events.py +++ b/synapse/handlers/delayed_events.py @@ -24,9 +24,6 @@ from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME from synapse.logging.context import make_deferred_yieldable from synapse.logging.opentracing import set_tag from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions -from synapse.metrics.background_process_metrics import ( - run_as_background_process, -) from synapse.replication.http.delayed_events import ( ReplicationAddedDelayedEventRestServlet, ) @@ -58,6 +55,7 @@ logger = logging.getLogger(__name__) class DelayedEventsHandler: def __init__(self, hs: "HomeServer"): + self.hs = hs self.server_name = hs.hostname self._store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() @@ -94,7 +92,10 @@ class DelayedEventsHandler: hs.get_notifier().add_replication_callback(self.notify_new_event) # Kick off again (without blocking) to catch any missed notifications # that may have fired before the callback was added. - self._clock.call_later(0, self.notify_new_event) + self._clock.call_later( + 0, + self.notify_new_event, + ) # Delayed events that are already marked as processed on startup might not have been # sent properly on the last run of the server, so unmark them to send them again. @@ -112,15 +113,14 @@ class DelayedEventsHandler: self._schedule_next_at(next_send_ts) # Can send the events in background after having awaited on marking them as processed - run_as_background_process( + self.hs.run_as_background_process( "_send_events", - self.server_name, self._send_events, events, ) - self._initialized_from_db = run_as_background_process( - "_schedule_db_events", self.server_name, _schedule_db_events + self._initialized_from_db = self.hs.run_as_background_process( + "_schedule_db_events", _schedule_db_events ) else: self._repl_client = ReplicationAddedDelayedEventRestServlet.make_client(hs) @@ -145,9 +145,7 @@ class DelayedEventsHandler: finally: self._event_processing = False - run_as_background_process( - "delayed_events.notify_new_event", self.server_name, process - ) + self.hs.run_as_background_process("delayed_events.notify_new_event", process) async def _unsafe_process_new_event(self) -> None: # We purposefully fetch the current max room stream ordering before @@ -542,9 +540,8 @@ class DelayedEventsHandler: if self._next_delayed_event_call is None: self._next_delayed_event_call = self._clock.call_later( delay_sec, - run_as_background_process, + self.hs.run_as_background_process, "_send_on_timeout", - self.server_name, self._send_on_timeout, ) else: diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 9509ac422e..c6024597b7 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -47,7 +47,6 @@ from synapse.api.errors import ( ) from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import ( - run_as_background_process, wrap_as_background_process, ) from synapse.replication.http.devices import ( @@ -125,7 +124,7 @@ class DeviceHandler: def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname # nb must be 
called this for @measure_func self.clock = hs.get_clock() # nb must be called this for @measure_func - self.hs = hs + self.hs = hs # nb must be called this for @wrap_as_background_process self.store = cast("GenericWorkerStore", hs.get_datastores().main) self.notifier = hs.get_notifier() self.state = hs.get_state_handler() @@ -191,10 +190,9 @@ class DeviceHandler: and self._delete_stale_devices_after is not None ): self.clock.looping_call( - run_as_background_process, + self.hs.run_as_background_process, DELETE_STALE_DEVICES_INTERVAL_MS, desc="delete_stale_devices", - server_name=self.server_name, func=self._delete_stale_devices, ) @@ -963,10 +961,9 @@ class DeviceWriterHandler(DeviceHandler): def __init__(self, hs: "HomeServer"): super().__init__(hs) + self.server_name = hs.hostname # nb must be called this for @measure_func + self.hs = hs # nb must be called this for @wrap_as_background_process - self.server_name = ( - hs.hostname - ) # nb must be called this for @measure_func and @wrap_as_background_process # We only need to poke the federation sender explicitly if its on the # same instance. Other federation sender instances will get notified by # `synapse.app.generic_worker.FederationSenderHandler` when it sees it @@ -1444,7 +1441,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): def __init__(self, hs: "HomeServer", device_handler: DeviceWriterHandler): super().__init__(hs) - self.server_name = hs.hostname + self.hs = hs self.federation = hs.get_federation_client() self.server_name = hs.hostname # nb must be called this for @measure_func self.clock = hs.get_clock() # nb must be called this for @measure_func @@ -1468,6 +1465,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): self._seen_updates: ExpiringCache[str, Set[str]] = ExpiringCache( cache_name="device_update_edu", server_name=self.server_name, + hs=self.hs, clock=self.clock, max_len=10000, expiry_ms=30 * 60 * 1000, @@ -1477,9 +1475,8 @@ class DeviceListUpdater(DeviceListWorkerUpdater): # Attempt to resync out of sync device lists every 30s. self._resync_retry_lock = Lock() self.clock.looping_call( - run_as_background_process, + self.hs.run_as_background_process, 30 * 1000, - server_name=self.server_name, func=self._maybe_retry_device_resync, desc="_maybe_retry_device_resync", ) @@ -1599,9 +1596,8 @@ class DeviceListUpdater(DeviceListWorkerUpdater): if resync: # We mark as stale up front in case we get restarted. await self.store.mark_remote_users_device_caches_as_stale([user_id]) - run_as_background_process( + self.hs.run_as_background_process( "_maybe_retry_device_resync", - self.server_name, self.multi_user_device_resync, [user_id], False, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 41fb3076c3..adc20f4ad0 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -72,7 +72,6 @@ from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import nested_logging_context from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import NOT_SPAM from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.invite_rule import InviteRule @@ -188,9 +187,8 @@ class FederationHandler: # any partial-state-resync operations which were in flight when we # were shut down. 
if not hs.config.worker.worker_app: - run_as_background_process( + self.hs.run_as_background_process( "resume_sync_partial_state_room", - self.server_name, self._resume_partial_state_room_sync, ) @@ -318,9 +316,8 @@ class FederationHandler: logger.debug( "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points." ) - run_as_background_process( + self.hs.run_as_background_process( "_maybe_backfill_inner_anyway_with_max_depth", - self.server_name, self.maybe_backfill, room_id=room_id, # We use `MAX_DEPTH` so that we find all backfill points next @@ -802,9 +799,8 @@ class FederationHandler: # lots of requests for missing prev_events which we do actually # have. Hence we fire off the background task, but don't wait for it. - run_as_background_process( + self.hs.run_as_background_process( "handle_queued_pdus", - self.server_name, self._handle_queued_pdus, room_queue, ) @@ -1877,9 +1873,8 @@ class FederationHandler: room_id=room_id, ) - run_as_background_process( + self.hs.run_as_background_process( desc="sync_partial_state_room", - server_name=self.server_name, func=_sync_partial_state_room_wrapper, ) diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 59886f04c4..d6390b79c7 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -81,7 +81,6 @@ from synapse.logging.opentracing import ( trace, ) from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.federation import ( ReplicationFederationSendEventsRestServlet, ) @@ -153,6 +152,7 @@ class FederationEventHandler: def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname + self.hs = hs self._clock = hs.get_clock() self._store = hs.get_datastores().main self._state_store = hs.get_datastores().state @@ -175,6 +175,7 @@ class FederationEventHandler: ) self._notifier = hs.get_notifier() + self._server_name = hs.hostname self._is_mine_id = hs.is_mine_id self._is_mine_server_name = hs.is_mine_server_name self._instance_name = hs.get_instance_name() @@ -974,9 +975,8 @@ class FederationEventHandler: # Process previously failed backfill events in the background to not waste # time on something that is likely to fail again. 
if len(events_with_failed_pull_attempts) > 0: - run_as_background_process( + self.hs.run_as_background_process( "_process_new_pulled_events_with_failed_pull_attempts", - self.server_name, _process_new_pulled_events, events_with_failed_pull_attempts, ) @@ -1568,9 +1568,8 @@ class FederationEventHandler: resync = True if resync: - run_as_background_process( + self.hs.run_as_background_process( "resync_device_due_to_pdu", - self.server_name, self._resync_device, event.sender, ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 4ff8b3704b..e874b60000 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -67,7 +67,6 @@ from synapse.handlers.directory import DirectoryHandler from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging import opentracing from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_events import ReplicationSendEventsRestServlet from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( @@ -99,6 +98,7 @@ class MessageHandler: def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname + self.hs = hs self.auth = hs.get_auth() self.clock = hs.get_clock() self.state = hs.get_state_handler() @@ -113,8 +113,8 @@ class MessageHandler: self._scheduled_expiry: Optional[IDelayedCall] = None if not hs.config.worker.worker_app: - run_as_background_process( - "_schedule_next_expiry", self.server_name, self._schedule_next_expiry + self.hs.run_as_background_process( + "_schedule_next_expiry", self._schedule_next_expiry ) async def get_room_data( @@ -444,9 +444,8 @@ class MessageHandler: self._scheduled_expiry = self.clock.call_later( delay, - run_as_background_process, + self.hs.run_as_background_process, "_expire_event", - self.server_name, self._expire_event, event_id, ) @@ -548,9 +547,8 @@ class EventCreationHandler: and self.config.server.cleanup_extremities_with_dummy_events ): self.clock.looping_call( - lambda: run_as_background_process( + lambda: self.hs.run_as_background_process( "send_dummy_events_to_fill_extremities", - self.server_name, self._send_dummy_events_to_fill_extremities, ), 5 * 60 * 1000, @@ -570,6 +568,7 @@ class EventCreationHandler: self._external_cache_joined_hosts_updates = ExpiringCache( cache_name="_external_cache_joined_hosts_updates", server_name=self.server_name, + hs=self.hs, clock=self.clock, expiry_ms=30 * 60 * 1000, ) @@ -2113,9 +2112,8 @@ class EventCreationHandler: if event.type == EventTypes.Message: # We don't want to block sending messages on any presence code. This # matters as sometimes presence code can take a while. 
- run_as_background_process( + self.hs.run_as_background_process( "bump_presence_active_time", - self.server_name, self._bump_active_time, requester.user, requester.device_id, diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index df1a7e714c..02a67581e7 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -29,7 +29,6 @@ from synapse.api.filtering import Filter from synapse.events.utils import SerializeEventConfig from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging.opentracing import trace -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.admin._base import assert_user_is_admin from synapse.streams.config import PaginationConfig from synapse.types import ( @@ -116,10 +115,9 @@ class PaginationHandler: logger.info("Setting up purge job with config: %s", job) self.clock.looping_call( - run_as_background_process, + self.hs.run_as_background_process, job.interval, "purge_history_for_rooms_in_range", - self.server_name, self.purge_history_for_rooms_in_range, job.shortest_max_lifetime, job.longest_max_lifetime, @@ -244,9 +242,8 @@ class PaginationHandler: # We want to purge everything, including local events, and to run the purge in # the background so that it's not blocking any other operation apart from # other purges in the same room. - run_as_background_process( + self.hs.run_as_background_process( PURGE_HISTORY_ACTION_NAME, - self.server_name, self.purge_history, room_id, token, @@ -604,9 +601,8 @@ class PaginationHandler: # Otherwise, we can backfill in the background for eventual # consistency's sake but we don't need to block the client waiting # for a costly federation call and processing. - run_as_background_process( + self.hs.run_as_background_process( "maybe_backfill_in_the_background", - self.server_name, self.hs.get_federation_handler().maybe_backfill, room_id, curr_topo, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 4d246fadbd..1610683066 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -107,7 +107,6 @@ from synapse.events.presence_router import PresenceRouter from synapse.logging.context import run_in_background from synapse.metrics import SERVER_NAME_LABEL, LaterGauge from synapse.metrics.background_process_metrics import ( - run_as_background_process, wrap_as_background_process, ) from synapse.replication.http.presence import ( @@ -537,19 +536,15 @@ class WorkerPresenceHandler(BasePresenceHandler): self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs) - self._send_stop_syncing_loop = self.clock.looping_call( - self.send_stop_syncing, UPDATE_SYNCING_USERS_MS - ) - - hs.get_clock().add_system_event_trigger( - "before", - "shutdown", - run_as_background_process, - "generic_presence.on_shutdown", - self.server_name, - self._on_shutdown, + self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS_MS) + + hs.register_async_shutdown_handler( + phase="before", + eventType="shutdown", + shutdown_func=self._on_shutdown, ) + @wrap_as_background_process("WorkerPresenceHandler._on_shutdown") async def _on_shutdown(self) -> None: if self._track_presence: self.hs.get_replication_command_handler().send_command( @@ -779,9 +774,7 @@ class WorkerPresenceHandler(BasePresenceHandler): class PresenceHandler(BasePresenceHandler): def __init__(self, hs: "HomeServer"): super().__init__(hs) 
- self.server_name = ( - hs.hostname - ) # nb must be called this for @wrap_as_background_process + self.server_name = hs.hostname self.wheel_timer: WheelTimer[str] = WheelTimer() self.notifier = hs.get_notifier() @@ -842,13 +835,10 @@ class PresenceHandler(BasePresenceHandler): # have not yet been persisted self.unpersisted_users_changes: Set[str] = set() - hs.get_clock().add_system_event_trigger( - "before", - "shutdown", - run_as_background_process, - "presence.on_shutdown", - self.server_name, - self._on_shutdown, + hs.register_async_shutdown_handler( + phase="before", + eventType="shutdown", + shutdown_func=self._on_shutdown, ) # Keeps track of the number of *ongoing* syncs on this process. While @@ -881,7 +871,10 @@ class PresenceHandler(BasePresenceHandler): # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. self.clock.call_later( - 30, self.clock.looping_call, self._handle_timeouts, 5000 + 30, + self.clock.looping_call, + self._handle_timeouts, + 5000, ) # Presence information is persisted, whether or not it is being tracked @@ -908,6 +901,7 @@ class PresenceHandler(BasePresenceHandler): self._event_pos = self.store.get_room_max_stream_ordering() self._event_processing = False + @wrap_as_background_process("PresenceHandler._on_shutdown") async def _on_shutdown(self) -> None: """Gets called when shutting down. This lets us persist any updates that we haven't yet persisted, e.g. updates that only changes some internal @@ -1539,8 +1533,8 @@ class PresenceHandler(BasePresenceHandler): finally: self._event_processing = False - run_as_background_process( - "presence.notify_new_event", self.server_name, _process_presence + self.hs.run_as_background_process( + "presence.notify_new_event", _process_presence ) async def _unsafe_process(self) -> None: diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index dbff28e7fb..9dda89d85b 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -56,8 +56,8 @@ class ProfileHandler: def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname # nb must be called this for @cached + self.clock = hs.get_clock() # nb must be called this for @cached self.store = hs.get_datastores().main - self.clock = hs.get_clock() self.hs = hs self.federation = hs.get_federation_client() diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 5761a7f70b..c3ff0cfaf8 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -23,7 +23,14 @@ """Contains functions for registering clients.""" import logging -from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, TypedDict +from typing import ( + TYPE_CHECKING, + Iterable, + List, + Optional, + Tuple, + TypedDict, +) from prometheus_client import Counter diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 623823acb0..2ab9b70f8c 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -50,7 +50,6 @@ from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging import opentracing from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.push import ReplicationCopyPusherRestServlet from synapse.storage.databases.main.state_deltas import StateDelta from synapse.storage.invite_rule 
import InviteRule @@ -2190,7 +2189,10 @@ class RoomForgetterHandler(StateDeltasHandler): self._notifier.add_replication_callback(self.notify_new_event) # We kick this off to pick up outstanding work from before the last restart. - self._clock.call_later(0, self.notify_new_event) + self._clock.call_later( + 0, + self.notify_new_event, + ) def notify_new_event(self) -> None: """Called when there may be more deltas to process""" @@ -2205,9 +2207,7 @@ class RoomForgetterHandler(StateDeltasHandler): finally: self._is_processing = False - run_as_background_process( - "room_forgetter.notify_new_event", self.server_name, process - ) + self._hs.run_as_background_process("room_forgetter.notify_new_event", process) async def _unsafe_process(self) -> None: # If self.pos is None then means we haven't fetched it from DB diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index eec420cbb1..735cfa0a0f 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -224,7 +224,7 @@ class SsoHandler: ) # a lock on the mappings - self._mapping_lock = Linearizer(name="sso_user_mapping", clock=hs.get_clock()) + self._mapping_lock = Linearizer(clock=hs.get_clock(), name="sso_user_mapping") # a map from session id to session data self._username_mapping_sessions: Dict[str, UsernameMappingSession] = {} diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index a2602ea818..5b4a2cc62d 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -33,7 +33,6 @@ from typing import ( from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases.main.state_deltas import StateDelta from synapse.types import JsonDict from synapse.util.events import get_plain_text_topic_from_event_content @@ -75,7 +74,10 @@ class StatsHandler: # We kick this off so that we don't have to wait for a change before # we start populating stats - self.clock.call_later(0, self.notify_new_event) + self.clock.call_later( + 0, + self.notify_new_event, + ) def notify_new_event(self) -> None: """Called when there may be more deltas to process""" @@ -90,7 +92,7 @@ class StatsHandler: finally: self._is_processing = False - run_as_background_process("stats.notify_new_event", self.server_name, process) + self.hs.run_as_background_process("stats.notify_new_event", process) async def _unsafe_process(self) -> None: # If self.pos is None then means we haven't fetched it from DB diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c0341c5654..6f0522d5bb 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -323,6 +323,7 @@ class SyncHandler: ] = ExpiringCache( cache_name="lazy_loaded_members_cache", server_name=self.server_name, + hs=hs, clock=self.clock, max_len=0, expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE, @@ -982,6 +983,7 @@ class SyncHandler: logger.debug("creating LruCache for %r", cache_key) cache = LruCache( max_size=LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE, + clock=self.clock, server_name=self.server_name, ) self.lazy_loaded_members_cache[cache_key] = cache diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 6a7b36ea0c..77c5b747c3 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -28,7 +28,6 @@ from synapse.api.constants import EduTypes from synapse.api.errors import AuthError, ShadowBanError, SynapseError from synapse.appservice 
import ApplicationService from synapse.metrics.background_process_metrics import ( - run_as_background_process, wrap_as_background_process, ) from synapse.replication.tcp.streams import TypingStream @@ -78,11 +77,10 @@ class FollowerTypingHandler: """ def __init__(self, hs: "HomeServer"): + self.hs = hs # nb must be called this for @wrap_as_background_process self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() - self.server_name = ( - hs.hostname - ) # nb must be called this for @wrap_as_background_process + self.server_name = hs.hostname self.clock = hs.get_clock() self.is_mine_id = hs.is_mine_id self.is_mine_server_name = hs.is_mine_server_name @@ -144,9 +142,8 @@ class FollowerTypingHandler: if self.federation and self.is_mine_id(member.user_id): last_fed_poke = self._member_last_federation_poke.get(member, None) if not last_fed_poke or last_fed_poke + FEDERATION_PING_INTERVAL <= now: - run_as_background_process( + self.hs.run_as_background_process( "typing._push_remote", - self.server_name, self._push_remote, member=member, typing=True, @@ -220,9 +217,8 @@ class FollowerTypingHandler: self._rooms_updated.add(row.room_id) if self.federation: - run_as_background_process( + self.hs.run_as_background_process( "_send_changes_in_typing_to_remotes", - self.server_name, self._send_changes_in_typing_to_remotes, row.room_id, prev_typing, @@ -384,9 +380,8 @@ class TypingWriterHandler(FollowerTypingHandler): def _push_update(self, member: RoomMember, typing: bool) -> None: if self.hs.is_mine_id(member.user_id): # Only send updates for changes to our own users. - run_as_background_process( + self.hs.run_as_background_process( "typing._push_remote", - self.server_name, self._push_remote, member, typing, diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 130099a239..28961f5925 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -36,7 +36,6 @@ from synapse.api.constants import ( from synapse.api.errors import Codes, SynapseError from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases.main.state_deltas import StateDelta from synapse.storage.databases.main.user_directory import SearchResult from synapse.storage.roommember import ProfileInfo @@ -137,11 +136,15 @@ class UserDirectoryHandler(StateDeltasHandler): # We kick this off so that we don't have to wait for a change before # we start populating the user directory - self.clock.call_later(0, self.notify_new_event) + self.clock.call_later( + 0, + self.notify_new_event, + ) # Kick off the profile refresh process on startup self._refresh_remote_profiles_call_later = self.clock.call_later( - 10, self.kick_off_remote_profile_refresh_process + 10, + self.kick_off_remote_profile_refresh_process, ) async def search_users( @@ -193,9 +196,7 @@ class UserDirectoryHandler(StateDeltasHandler): self._is_processing = False self._is_processing = True - run_as_background_process( - "user_directory.notify_new_event", self.server_name, process - ) + self._hs.run_as_background_process("user_directory.notify_new_event", process) async def handle_local_profile_change( self, user_id: str, profile: ProfileInfo @@ -609,8 +610,8 @@ class UserDirectoryHandler(StateDeltasHandler): self._is_refreshing_remote_profiles = False self._is_refreshing_remote_profiles = True - 
run_as_background_process( - "user_directory.refresh_remote_profiles", self.server_name, process + self._hs.run_as_background_process( + "user_directory.refresh_remote_profiles", process ) async def _unsafe_refresh_remote_profiles(self) -> None: @@ -655,8 +656,9 @@ class UserDirectoryHandler(StateDeltasHandler): if not users: return _, _, next_try_at_ts = users[0] + delay = ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2 self._refresh_remote_profiles_call_later = self.clock.call_later( - ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2, + delay, self.kick_off_remote_profile_refresh_process, ) @@ -692,9 +694,8 @@ class UserDirectoryHandler(StateDeltasHandler): self._is_refreshing_remote_profiles_for_servers.remove(server_name) self._is_refreshing_remote_profiles_for_servers.add(server_name) - run_as_background_process( + self._hs.run_as_background_process( "user_directory.refresh_remote_profiles_for_remote_server", - self.server_name, process, ) diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py index 0b375790dd..ca1e2b166c 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py @@ -37,13 +37,13 @@ from weakref import WeakSet import attr from twisted.internet import defer -from twisted.internet.interfaces import IReactorTime from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import start_active_span from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage.databases.main.lock import Lock, LockStore from synapse.util.async_helpers import timeout_deferred +from synapse.util.clock import Clock from synapse.util.constants import ONE_MINUTE_SECONDS if TYPE_CHECKING: @@ -66,10 +66,8 @@ class WorkerLocksHandler: """ def __init__(self, hs: "HomeServer") -> None: - self.server_name = ( - hs.hostname - ) # nb must be called this for @wrap_as_background_process - self._reactor = hs.get_reactor() + self.hs = hs # nb must be called this for @wrap_as_background_process + self._clock = hs.get_clock() self._store = hs.get_datastores().main self._clock = hs.get_clock() self._notifier = hs.get_notifier() @@ -98,7 +96,7 @@ class WorkerLocksHandler: """ lock = WaitingLock( - reactor=self._reactor, + clock=self._clock, store=self._store, handler=self, lock_name=lock_name, @@ -129,7 +127,7 @@ class WorkerLocksHandler: """ lock = WaitingLock( - reactor=self._reactor, + clock=self._clock, store=self._store, handler=self, lock_name=lock_name, @@ -160,7 +158,7 @@ class WorkerLocksHandler: lock = WaitingMultiLock( lock_names=lock_names, write=write, - reactor=self._reactor, + clock=self._clock, store=self._store, handler=self, ) @@ -197,7 +195,11 @@ class WorkerLocksHandler: if not deferred.called: deferred.callback(None) - self._clock.call_later(0, _wake_all_locks, locks) + self._clock.call_later( + 0, + _wake_all_locks, + locks, + ) @wrap_as_background_process("_cleanup_locks") async def _cleanup_locks(self) -> None: @@ -207,7 +209,7 @@ class WorkerLocksHandler: @attr.s(auto_attribs=True, eq=False) class WaitingLock: - reactor: IReactorTime + clock: Clock store: LockStore handler: WorkerLocksHandler lock_name: str @@ -246,10 +248,11 @@ class WaitingLock: # periodically wake up in case the lock was released but we # weren't notified. 
with PreserveLoggingContext(): + timeout = self._get_next_retry_interval() await timeout_deferred( deferred=self.deferred, - timeout=self._get_next_retry_interval(), - reactor=self.reactor, + timeout=timeout, + clock=self.clock, ) except Exception: pass @@ -290,7 +293,7 @@ class WaitingMultiLock: write: bool - reactor: IReactorTime + clock: Clock store: LockStore handler: WorkerLocksHandler @@ -323,10 +326,11 @@ class WaitingMultiLock: # periodically wake up in case the lock was released but we # weren't notified. with PreserveLoggingContext(): + timeout = self._get_next_retry_interval() await timeout_deferred( deferred=self.deferred, - timeout=self._get_next_retry_interval(), - reactor=self.reactor, + timeout=timeout, + clock=self.clock, ) except Exception: pass diff --git a/synapse/http/client.py b/synapse/http/client.py index bbb0efe8b5..370cdc3568 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -54,7 +54,6 @@ from twisted.internet.interfaces import ( IOpenSSLContextFactory, IReactorCore, IReactorPluggableNameResolver, - IReactorTime, IResolutionReceiver, ITCPTransport, ) @@ -88,6 +87,7 @@ from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.metrics import SERVER_NAME_LABEL from synapse.types import ISynapseReactor, StrSequence from synapse.util.async_helpers import timeout_deferred +from synapse.util.clock import Clock from synapse.util.json import json_decoder if TYPE_CHECKING: @@ -165,16 +165,17 @@ def _is_ip_blocked( _EPSILON = 0.00000001 -def _make_scheduler( - reactor: IReactorTime, -) -> Callable[[Callable[[], object]], IDelayedCall]: +def _make_scheduler(clock: Clock) -> Callable[[Callable[[], object]], IDelayedCall]: """Makes a schedular suitable for a Cooperator using the given reactor. (This is effectively just a copy from `twisted.internet.task`) """ def _scheduler(x: Callable[[], object]) -> IDelayedCall: - return reactor.callLater(_EPSILON, x) + return clock.call_later( + _EPSILON, + x, + ) return _scheduler @@ -367,7 +368,7 @@ class BaseHttpClient: # We use this for our body producers to ensure that they use the correct # reactor. - self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor())) + self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_clock())) async def request( self, @@ -436,9 +437,9 @@ class BaseHttpClient: # we use our own timeout mechanism rather than treq's as a workaround # for https://twistedmatrix.com/trac/ticket/9534. request_deferred = timeout_deferred( - request_deferred, - 60, - self.hs.get_reactor(), + deferred=request_deferred, + timeout=60, + clock=self.hs.get_clock(), ) # turn timeouts into RequestTimedOutErrors @@ -763,7 +764,11 @@ class BaseHttpClient: d = read_body_with_max_size(response, output_stream, max_size) # Ensure that the body is not read forever. - d = timeout_deferred(d, 30, self.hs.get_reactor()) + d = timeout_deferred( + deferred=d, + timeout=30, + clock=self.hs.get_clock(), + ) length = await make_deferred_yieldable(d) except BodyExceededMaxSize: @@ -957,9 +962,9 @@ class ReplicationClient(BaseHttpClient): # for https://twistedmatrix.com/trac/ticket/9534. 
# (Updated url https://github.com/twisted/twisted/issues/9534) request_deferred = timeout_deferred( - request_deferred, - 60, - self.hs.get_reactor(), + deferred=request_deferred, + timeout=60, + clock=self.hs.get_clock(), ) # turn timeouts into RequestTimedOutErrors diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 98826c9171..9d87514be0 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -67,6 +67,9 @@ class MatrixFederationAgent: Args: reactor: twisted reactor to use for underlying requests + clock: Internal `HomeServer` clock used to track delayed and looping calls. + Should be obtained from `hs.get_clock()`. + tls_client_options_factory: factory to use for fetching client tls options, or none to disable TLS. @@ -97,6 +100,7 @@ class MatrixFederationAgent: *, server_name: str, reactor: ISynapseReactor, + clock: Clock, tls_client_options_factory: Optional[FederationPolicyForHTTPS], user_agent: bytes, ip_allowlist: Optional[IPSet], @@ -109,6 +113,7 @@ class MatrixFederationAgent: Args: server_name: Our homeserver name (used to label metrics) (`hs.hostname`). reactor + clock: Should be the `hs` clock from `hs.get_clock()` tls_client_options_factory user_agent ip_allowlist @@ -124,7 +129,6 @@ class MatrixFederationAgent: # addresses, to prevent DNS rebinding. reactor = BlocklistingReactorWrapper(reactor, ip_allowlist, ip_blocklist) - self._clock = Clock(reactor, server_name=server_name) self._pool = HTTPConnectionPool(reactor) self._pool.retryAutomatically = False self._pool.maxPersistentPerHost = 5 @@ -147,6 +151,7 @@ class MatrixFederationAgent: _well_known_resolver = WellKnownResolver( server_name=server_name, reactor=reactor, + clock=clock, agent=BlocklistingAgentWrapper( ProxyAgent( reactor=reactor, diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index 97bba8231a..2f52abcc03 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -90,6 +90,7 @@ class WellKnownResolver: self, server_name: str, reactor: ISynapseThreadlessReactor, + clock: Clock, agent: IAgent, user_agent: bytes, well_known_cache: Optional[TTLCache[bytes, Optional[bytes]]] = None, @@ -99,6 +100,7 @@ class WellKnownResolver: Args: server_name: Our homeserver name (used to label metrics) (`hs.hostname`). 
reactor + clock: Should be the `hs` clock from `hs.get_clock()` agent user_agent well_known_cache @@ -107,7 +109,7 @@ class WellKnownResolver: self.server_name = server_name self._reactor = reactor - self._clock = Clock(reactor, server_name=server_name) + self._clock = clock if well_known_cache is None: well_known_cache = TTLCache( diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index c264bae6e5..4d72c72d01 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -90,6 +90,7 @@ from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.metrics import SERVER_NAME_LABEL from synapse.types import JsonDict from synapse.util.async_helpers import AwakenableSleeper, Linearizer, timeout_deferred +from synapse.util.clock import Clock from synapse.util.json import json_decoder from synapse.util.metrics import Measure from synapse.util.stringutils import parse_and_validate_server_name @@ -270,6 +271,7 @@ class LegacyJsonSendParser(_BaseJsonParser[Tuple[int, JsonDict]]): async def _handle_response( + clock: Clock, reactor: IReactorTime, timeout_sec: float, request: MatrixFederationRequest, @@ -299,7 +301,11 @@ async def _handle_response( check_content_type_is(response.headers, parser.CONTENT_TYPE) d = read_body_with_max_size(response, parser, max_response_size) - d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor) + d = timeout_deferred( + deferred=d, + timeout=timeout_sec, + clock=clock, + ) length = await make_deferred_yieldable(d) @@ -411,6 +417,7 @@ class MatrixFederationHttpClient: self.server_name = hs.hostname self.reactor = hs.get_reactor() + self.clock = hs.get_clock() user_agent = hs.version_string if hs.config.server.user_agent_suffix: @@ -424,6 +431,7 @@ class MatrixFederationHttpClient: federation_agent: IAgent = MatrixFederationAgent( server_name=self.server_name, reactor=self.reactor, + clock=self.clock, tls_client_options_factory=tls_client_options_factory, user_agent=user_agent.encode("ascii"), ip_allowlist=hs.config.server.federation_ip_range_allowlist, @@ -457,7 +465,6 @@ class MatrixFederationHttpClient: ip_blocklist=hs.config.server.federation_ip_range_blocklist, ) - self.clock = hs.get_clock() self._store = hs.get_datastores().main self.version_string_bytes = hs.version_string.encode("ascii") self.default_timeout_seconds = hs.config.federation.client_timeout_ms / 1000 @@ -470,9 +477,9 @@ class MatrixFederationHttpClient: self.max_long_retries = hs.config.federation.max_long_retries self.max_short_retries = hs.config.federation.max_short_retries - self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor)) + self._cooperator = Cooperator(scheduler=_make_scheduler(self.clock)) - self._sleeper = AwakenableSleeper(self.reactor) + self._sleeper = AwakenableSleeper(self.clock) self._simple_http_client = SimpleHttpClient( hs, @@ -484,6 +491,10 @@ class MatrixFederationHttpClient: self.remote_download_linearizer = Linearizer( name="remote_download_linearizer", max_count=6, clock=self.clock ) + self._is_shutdown = False + + def shutdown(self) -> None: + self._is_shutdown = True def wake_destination(self, destination: str) -> None: """Called when the remote server may have come back online.""" @@ -629,6 +640,7 @@ class MatrixFederationHttpClient: limiter = await synapse.util.retryutils.get_retry_limiter( destination=request.destination, our_server_name=self.server_name, + hs=self.hs, clock=self.clock, store=self._store, backoff_on_404=backoff_on_404, @@ 
-675,7 +687,7 @@ class MatrixFederationHttpClient: (b"", b"", path_bytes, None, query_bytes, b"") ) - while True: + while not self._is_shutdown: try: json = request.get_json() if json: @@ -733,9 +745,9 @@ class MatrixFederationHttpClient: bodyProducer=producer, ) request_deferred = timeout_deferred( - request_deferred, + deferred=request_deferred, timeout=_sec_timeout, - reactor=self.reactor, + clock=self.clock, ) response = await make_deferred_yieldable(request_deferred) @@ -793,7 +805,9 @@ class MatrixFederationHttpClient: # Update transactions table? d = treq.content(response) d = timeout_deferred( - d, timeout=_sec_timeout, reactor=self.reactor + deferred=d, + timeout=_sec_timeout, + clock=self.clock, ) try: @@ -862,6 +876,15 @@ class MatrixFederationHttpClient: delay_seconds, ) + if self._is_shutdown: + # Immediately fail sending the request instead of starting a + # potentially long sleep after the server has requested + # shutdown. + # This is the code path followed when the + # `federation_transaction_transmission_loop` has been + # cancelled. + raise + # Sleep for the calculated delay, or wake up immediately # if we get notified that the server is back up. await self._sleeper.sleep( @@ -1074,6 +1097,7 @@ class MatrixFederationHttpClient: parser = cast(ByteParser[T], JsonParser()) body = await _handle_response( + self.clock, self.reactor, _sec_timeout, request, @@ -1152,7 +1176,13 @@ class MatrixFederationHttpClient: _sec_timeout = self.default_timeout_seconds body = await _handle_response( - self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser() + self.clock, + self.reactor, + _sec_timeout, + request, + response, + start_ms, + parser=JsonParser(), ) return body @@ -1358,6 +1388,7 @@ class MatrixFederationHttpClient: parser = cast(ByteParser[T], JsonParser()) body = await _handle_response( + self.clock, self.reactor, _sec_timeout, request, @@ -1431,7 +1462,13 @@ class MatrixFederationHttpClient: _sec_timeout = self.default_timeout_seconds body = await _handle_response( - self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser() + self.clock, + self.reactor, + _sec_timeout, + request, + response, + start_ms, + parser=JsonParser(), ) return body diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py index 9b044f3b0a..fa17432984 100644 --- a/synapse/http/proxy.py +++ b/synapse/http/proxy.py @@ -161,12 +161,12 @@ class ProxyResource(_AsyncResource): bodyProducer=QuieterFileBodyProducer(request.content), ) request_deferred = timeout_deferred( - request_deferred, + deferred=request_deferred, # This should be set longer than the timeout in `MatrixFederationHttpClient` # so that it has enough time to complete and pass us the data before we give # up. timeout=90, - reactor=self.reactor, + clock=self._clock, ) response = await make_deferred_yieldable(request_deferred) diff --git a/synapse/http/server.py b/synapse/http/server.py index ce9d5630df..d5af8758ac 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -420,7 +420,14 @@ class DirectServeJsonResource(_AsyncResource): """ if clock is None: - clock = Clock( + # Ideally we wouldn't ignore the linter error here and instead enforce a + # required `Clock` be passed into the `__init__` function. + # However, this would change the function signature which is currently being + # exported to the module api. Since we don't want to break that api, we have + # to settle with ignoring the linter error here. 
+ # As of the time of writing this, all Synapse internal usages of + # `DirectServeJsonResource` pass in the existing homeserver clock instance. + clock = Clock( # type: ignore[multiple-internal-clocks] cast(ISynapseThreadlessReactor, reactor), server_name="synapse_module_running_from_unknown_server", ) @@ -608,7 +615,14 @@ class DirectServeHtmlResource(_AsyncResource): Only optional for the Module API. """ if clock is None: - clock = Clock( + # Ideally we wouldn't ignore the linter error here and instead enforce a + # required `Clock` be passed into the `__init__` function. + # However, this would change the function signature which is currently being + # exported to the module api. Since we don't want to break that api, we have + # to settle with ignoring the linter error here. + # As of the time of writing this, all Synapse internal usages of + # `DirectServeHtmlResource` pass in the existing homeserver clock instance. + clock = Clock( # type: ignore[multiple-internal-clocks] cast(ISynapseThreadlessReactor, reactor), server_name="synapse_module_running_from_unknown_server", ) diff --git a/synapse/http/site.py b/synapse/http/site.py index 2c0c301c03..f4f326cfde 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -22,7 +22,7 @@ import contextlib import logging import time from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Generator, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Generator, List, Optional, Tuple, Union import attr from zope.interface import implementer @@ -30,6 +30,7 @@ from zope.interface import implementer from twisted.internet.address import UNIXAddress from twisted.internet.defer import Deferred from twisted.internet.interfaces import IAddress +from twisted.internet.protocol import Protocol from twisted.python.failure import Failure from twisted.web.http import HTTPChannel from twisted.web.resource import IResource, Resource @@ -660,6 +661,70 @@ class _XForwardedForAddress: host: str +class SynapseProtocol(HTTPChannel): + """ + Synapse-specific twisted http Protocol. + + This is a small wrapper around the twisted HTTPChannel so we can track active + connections in order to close any outstanding connections on shutdown. + """ + + def __init__( + self, + site: "SynapseSite", + our_server_name: str, + max_request_body_size: int, + request_id_header: Optional[str], + request_class: type, + ): + super().__init__() + self.factory: SynapseSite = site + self.site = site + self.our_server_name = our_server_name + self.max_request_body_size = max_request_body_size + self.request_id_header = request_id_header + self.request_class = request_class + + def connectionMade(self) -> None: + """ + Called when a connection is made. + + This may be considered the initializer of the protocol, because + it is called when the connection is completed. + + Add the connection to the factory's connection list when it's established. + """ + super().connectionMade() + self.factory.addConnection(self) + + def connectionLost(self, reason: Failure) -> None: # type: ignore[override] + """ + Called when the connection is shut down. + + Clear any circular references here, and any external references to this + Protocol. The connection has been closed. In our case, we need to remove the + connection from the factory's connection list, when it's lost. 
+ """ + super().connectionLost(reason) + self.factory.removeConnection(self) + + def requestFactory(self, http_channel: HTTPChannel, queued: bool) -> SynapseRequest: # type: ignore[override] + """ + A callable used to build `twisted.web.iweb.IRequest` objects. + + Use our own custom SynapseRequest type instead of the regular + twisted.web.server.Request. + """ + return self.request_class( + self, + self.factory, + our_server_name=self.our_server_name, + max_request_body_size=self.max_request_body_size, + queued=queued, + request_id_header=self.request_id_header, + ) + + class SynapseSite(ProxySite): """ Synapse-specific twisted http Site @@ -710,23 +775,44 @@ class SynapseSite(ProxySite): assert config.http_options is not None proxied = config.http_options.x_forwarded - request_class = XForwardedForRequest if proxied else SynapseRequest + self.request_class = XForwardedForRequest if proxied else SynapseRequest - request_id_header = config.http_options.request_id_header + self.request_id_header = config.http_options.request_id_header + self.max_request_body_size = max_request_body_size - def request_factory(channel: HTTPChannel, queued: bool) -> Request: - return request_class( - channel, - self, - our_server_name=self.server_name, - max_request_body_size=max_request_body_size, - queued=queued, - request_id_header=request_id_header, - ) - - self.requestFactory = request_factory # type: ignore self.access_logger = logging.getLogger(logger_name) self.server_version_string = server_version_string.encode("ascii") + self.connections: List[Protocol] = [] + + def buildProtocol(self, addr: IAddress) -> SynapseProtocol: + protocol = SynapseProtocol( + self, + self.server_name, + self.max_request_body_size, + self.request_id_header, + self.request_class, + ) + return protocol + + def addConnection(self, protocol: Protocol) -> None: + self.connections.append(protocol) + + def removeConnection(self, protocol: Protocol) -> None: + if protocol in self.connections: + self.connections.remove(protocol) + + def stopFactory(self) -> None: + super().stopFactory() + + # Shutdown any connections which are still active. + # These can be long lived HTTP connections which wouldn't normally be closed + # when calling `shutdown` on the respective `Port`. + # Closing the connections here is required for us to fully shutdown the + # `SynapseHomeServer` in order for it to be garbage collected. + for protocol in self.connections[:]: + if protocol.transport is not None: + protocol.transport.loseConnection() + self.connections.clear() def log(self, request: SynapseRequest) -> None: # type: ignore[override] pass diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 15b28074fd..d3a9a66f5a 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -704,6 +704,7 @@ class ThreadedFileSender: def __init__(self, hs: "HomeServer") -> None: self.reactor = hs.get_reactor() + self.clock = hs.get_clock() self.thread_pool = hs.get_media_sender_thread_pool() self.file: Optional[BinaryIO] = None @@ -712,7 +713,7 @@ class ThreadedFileSender: # Signals if the thread should keep reading/sending data. Set means # continue, clear means pause. - self.wakeup_event = DeferredEvent(self.reactor) + self.wakeup_event = DeferredEvent(self.clock) # Signals if the thread should terminate, e.g. because the consumer has # gone away. 
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 436d9b7e35..238dc6cb2f 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -67,7 +67,6 @@ from synapse.media.media_storage import ( from synapse.media.storage_provider import StorageProviderWrapper from synapse.media.thumbnailer import Thumbnailer, ThumbnailError from synapse.media.url_previewer import UrlPreviewer -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia from synapse.types import UserID from synapse.util.async_helpers import Linearizer @@ -187,16 +186,14 @@ class MediaRepository: self.media_repository_callbacks = hs.get_module_api_callbacks().media_repository def _start_update_recently_accessed(self) -> Deferred: - return run_as_background_process( + return self.hs.run_as_background_process( "update_recently_accessed_media", - self.server_name, self._update_recently_accessed, ) def _start_apply_media_retention_rules(self) -> Deferred: - return run_as_background_process( + return self.hs.run_as_background_process( "apply_media_retention_rules", - self.server_name, self._apply_media_retention_rules, ) diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index 81204913f7..1a82cc46e3 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -44,7 +44,6 @@ from synapse.media._base import FileInfo, get_filename_from_headers from synapse.media.media_storage import MediaStorage, SHA256TransparentIOWriter from synapse.media.oembed import OEmbedProvider from synapse.media.preview_html import decode_body, parse_html_to_open_graph -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import JsonDict, UserID from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.expiringcache import ExpiringCache @@ -167,6 +166,7 @@ class UrlPreviewer: media_storage: MediaStorage, ): self.clock = hs.get_clock() + self.hs = hs self.filepaths = media_repo.filepaths self.max_spider_size = hs.config.media.max_spider_size self.server_name = hs.hostname @@ -201,15 +201,14 @@ class UrlPreviewer: self._cache: ExpiringCache[str, ObservableDeferred] = ExpiringCache( cache_name="url_previews", server_name=self.server_name, + hs=self.hs, clock=self.clock, # don't spider URLs more often than once an hour expiry_ms=ONE_HOUR, ) if self._worker_run_media_background_jobs: - self._cleaner_loop = self.clock.looping_call( - self._start_expire_url_cache_data, 10 * 1000 - ) + self.clock.looping_call(self._start_expire_url_cache_data, 10 * 1000) async def preview(self, url: str, user: UserID, ts: int) -> bytes: # the in-memory cache: @@ -739,8 +738,8 @@ class UrlPreviewer: return open_graph_result, oembed_response.author_name, expiration_ms def _start_expire_url_cache_data(self) -> Deferred: - return run_as_background_process( - "expire_url_cache_data", self.server_name, self._expire_url_cache_data + return self.hs.run_as_background_process( + "expire_url_cache_data", self._expire_url_cache_data ) async def _expire_url_cache_data(self) -> None: diff --git a/synapse/metrics/_gc.py b/synapse/metrics/_gc.py index e7783b05e6..1da871f18f 100644 --- a/synapse/metrics/_gc.py +++ b/synapse/metrics/_gc.py @@ -138,7 +138,9 @@ def install_gc_manager() -> None: gc_time.labels(i).observe(end - start) gc_unreachable.labels(i).set(unreachable) - gc_task = task.LoopingCall(_maybe_gc) + 
# We can ignore the lint here since this looping call does not hold a `HomeServer` + # reference so can be cleaned up by other means on shutdown. + gc_task = task.LoopingCall(_maybe_gc) # type: ignore[prefer-synapse-clock-looping-call] gc_task.start(0.1) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 93345b0e9d..6dc2cbe132 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -66,6 +66,8 @@ if TYPE_CHECKING: # Old versions don't have `LiteralString` from typing_extensions import LiteralString + from synapse.server import HomeServer + logger = logging.getLogger(__name__) @@ -397,11 +399,11 @@ def run_as_background_process( P = ParamSpec("P") -class HasServerName(Protocol): - server_name: str +class HasHomeServer(Protocol): + hs: "HomeServer" """ - The homeserver name that this cache is associated with (used to label the metric) - (`hs.hostname`). + The homeserver that this cache is associated with (used to label the metric and + track backgroun processes for clean shutdown). """ @@ -431,27 +433,22 @@ def wrap_as_background_process( """ def wrapper( - func: Callable[Concatenate[HasServerName, P], Awaitable[Optional[R]]], + func: Callable[Concatenate[HasHomeServer, P], Awaitable[Optional[R]]], ) -> Callable[P, "defer.Deferred[Optional[R]]"]: @wraps(func) def wrapped_func( - self: HasServerName, *args: P.args, **kwargs: P.kwargs + self: HasHomeServer, *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[Optional[R]]": - assert self.server_name is not None, ( - "The `server_name` attribute must be set on the object where `@wrap_as_background_process` decorator is used." + assert self.hs is not None, ( + "The `hs` attribute must be set on the object where `@wrap_as_background_process` decorator is used." ) - return run_as_background_process( + return self.hs.run_as_background_process( desc, - self.server_name, func, self, *args, - # type-ignore: mypy is confusing kwargs with the bg_start_span kwarg. - # Argument 4 to "run_as_background_process" has incompatible type - # "**P.kwargs"; expected "bool" - # See https://github.com/python/mypy/issues/8862 - **kwargs, # type: ignore[arg-type] + **kwargs, ) # There are some shenanigans here, because we're decorating a method but diff --git a/synapse/metrics/common_usage_metrics.py b/synapse/metrics/common_usage_metrics.py index cd1c3c8649..43e0913d27 100644 --- a/synapse/metrics/common_usage_metrics.py +++ b/synapse/metrics/common_usage_metrics.py @@ -23,7 +23,6 @@ from typing import TYPE_CHECKING import attr from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import run_as_background_process if TYPE_CHECKING: from synapse.server import HomeServer @@ -52,6 +51,7 @@ class CommonUsageMetricsManager: self.server_name = hs.hostname self._store = hs.get_datastores().main self._clock = hs.get_clock() + self._hs = hs async def get_metrics(self) -> CommonUsageMetrics: """Get the CommonUsageMetrics object. 
If no collection has happened yet, do it @@ -64,16 +64,14 @@ class CommonUsageMetricsManager: async def setup(self) -> None: """Keep the gauges for common usage metrics up to date.""" - run_as_background_process( + self._hs.run_as_background_process( desc="common_usage_metrics_update_gauges", - server_name=self.server_name, func=self._update_gauges, ) self._clock.looping_call( - run_as_background_process, + self._hs.run_as_background_process, 5 * 60 * 1000, desc="common_usage_metrics_update_gauges", - server_name=self.server_name, func=self._update_gauges, ) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 7a419145e0..12a31dd2ab 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -275,7 +275,15 @@ def run_as_background_process( # function instead. stub_server_name = "synapse_module_running_from_unknown_server" - return _run_as_background_process( + # Ignore the linter error here. Since this is leveraging the + # `run_as_background_process` function directly and we don't want to break the + # module api, we need to keep the function signature the same. This means we don't + # have access to the running `HomeServer` and cannot track this background process + # for cleanup during shutdown. + # This is not an issue during runtime and is only potentially problematic if the + # application cares about being able to garbage collect `HomeServer` instances + # during runtime. + return _run_as_background_process( # type: ignore[untracked-background-process] desc, stub_server_name, func, @@ -1402,7 +1410,7 @@ class ModuleApi: if self._hs.config.worker.run_background_tasks or run_on_all_instances: self._clock.looping_call( - self.run_as_background_process, + self._hs.run_as_background_process, msec, desc, lambda: maybe_awaitable(f(*args, **kwargs)), @@ -1460,7 +1468,7 @@ class ModuleApi: return self._clock.call_later( # convert ms to seconds as needed by call_later. msec * 0.001, - self.run_as_background_process, + self._hs.run_as_background_process, desc, lambda: maybe_awaitable(f(*args, **kwargs)), ) @@ -1701,8 +1709,8 @@ class ModuleApi: Note that the returned Deferred does not follow the synapse logcontext rules. """ - return _run_as_background_process( - desc, self.server_name, func, *args, bg_start_span=bg_start_span, **kwargs + return self._hs.run_as_background_process( + desc, func, *args, bg_start_span=bg_start_span, **kwargs ) async def defer_to_thread( diff --git a/synapse/notifier.py b/synapse/notifier.py index e684df4866..9169f50c4d 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -676,9 +676,16 @@ class Notifier: # is a new token. listener = user_stream.new_listener(prev_token) listener = timeout_deferred( - listener, - (end_time - now) / 1000.0, - self.hs.get_reactor(), + deferred=listener, + timeout=(end_time - now) / 1000.0, + # We don't track these calls since they are constantly being + # overridden by new calls to /sync and they don't hold the + # `HomeServer` in memory on shutdown. It is safe to let them + # timeout of their own accord after shutting down since it + # won't delay shutdown and there won't be any adverse + # behaviour. 
+ cancel_on_shutdown=False, + clock=self.hs.get_clock(), ) log_kv( diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 09ca14584a..1484bc8fc0 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -25,7 +25,6 @@ from typing import TYPE_CHECKING, Dict, List, Optional from twisted.internet.error import AlreadyCalled, AlreadyCancelled from twisted.internet.interfaces import IDelayedCall -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import Pusher, PusherConfig, PusherConfigException, ThrottleParams from synapse.push.mailer import Mailer from synapse.push.push_types import EmailReason @@ -118,7 +117,7 @@ class EmailPusher(Pusher): if self._is_processing: return - run_as_background_process("emailpush.process", self.server_name, self._process) + self.hs.run_as_background_process("emailpush.process", self._process) def _pause_processing(self) -> None: """Used by tests to temporarily pause processing of events. @@ -228,8 +227,10 @@ class EmailPusher(Pusher): self.timed_call = None if soonest_due_at is not None: - self.timed_call = self.hs.get_reactor().callLater( - self.seconds_until(soonest_due_at), self.on_timer + delay = self.seconds_until(soonest_due_at) + self.timed_call = self.hs.get_clock().call_later( + delay, + self.on_timer, ) async def save_last_stream_ordering_and_success( diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 5946a6e972..5cac5de8cb 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -32,7 +32,6 @@ from synapse.api.constants import EventTypes from synapse.events import EventBase from synapse.logging import opentracing from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import Pusher, PusherConfig, PusherConfigException from synapse.storage.databases.main.event_push_actions import HttpPushAction from synapse.types import JsonDict, JsonMapping @@ -182,8 +181,8 @@ class HttpPusher(Pusher): # We could check the receipts are actually m.read receipts here, # but currently that's the only type of receipt anyway... 
- run_as_background_process( - "http_pusher.on_new_receipts", self.server_name, self._update_badge + self.hs.run_as_background_process( + "http_pusher.on_new_receipts", self._update_badge ) async def _update_badge(self) -> None: @@ -219,7 +218,7 @@ class HttpPusher(Pusher): if self.failing_since and self.timed_call and self.timed_call.active(): return - run_as_background_process("httppush.process", self.server_name, self._process) + self.hs.run_as_background_process("httppush.process", self._process) async def _process(self) -> None: # we should never get here if we are already processing @@ -336,8 +335,9 @@ class HttpPusher(Pusher): ) else: logger.info("Push failed: delaying for %ds", self.backoff_delay) - self.timed_call = self.hs.get_reactor().callLater( - self.backoff_delay, self.on_timer + self.timed_call = self.hs.get_clock().call_later( + self.backoff_delay, + self.on_timer, ) self.backoff_delay = min( self.backoff_delay * 2, self.MAX_BACKOFF_SEC diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index d1f79ec999..977c55b683 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -27,7 +27,6 @@ from prometheus_client import Gauge from synapse.api.errors import Codes, SynapseError from synapse.metrics import SERVER_NAME_LABEL from synapse.metrics.background_process_metrics import ( - run_as_background_process, wrap_as_background_process, ) from synapse.push import Pusher, PusherConfig, PusherConfigException @@ -70,10 +69,8 @@ class PusherPool: """ def __init__(self, hs: "HomeServer"): - self.hs = hs - self.server_name = ( - hs.hostname - ) # nb must be called this for @wrap_as_background_process + self.hs = hs # nb must be called this for @wrap_as_background_process + self.server_name = hs.hostname self.pusher_factory = PusherFactory(hs) self.store = self.hs.get_datastores().main self.clock = self.hs.get_clock() @@ -112,9 +109,7 @@ class PusherPool: if not self._should_start_pushers: logger.info("Not starting pushers because they are disabled in the config") return - run_as_background_process( - "start_pushers", self.server_name, self._start_pushers - ) + self.hs.run_as_background_process("start_pushers", self._start_pushers) async def add_or_update_pusher( self, diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index d96f5541f1..f2561bc0c5 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -32,7 +32,6 @@ from synapse.api.constants import EventTypes, Membership, ReceiptTypes from synapse.federation import send_queue from synapse.federation.sender import FederationSender from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.streams import ( AccountDataStream, DeviceListsStream, @@ -344,7 +343,9 @@ class ReplicationDataHandler: # to wedge here forever. deferred: "Deferred[None]" = Deferred() deferred = timeout_deferred( - deferred, _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS, self._reactor + deferred=deferred, + timeout=_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS, + clock=self._clock, ) waiting_list = self._streams_to_waiters.setdefault( @@ -513,8 +514,8 @@ class FederationSenderHandler: # no need to queue up another task. 
return - run_as_background_process( - "_save_and_send_ack", self.server_name, self._save_and_send_ack + self._hs.run_as_background_process( + "_save_and_send_ack", self._save_and_send_ack ) async def _save_and_send_ack(self) -> None: diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index dd7e38dd78..4d0d3d44ab 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -41,7 +41,6 @@ from prometheus_client import Counter from twisted.internet.protocol import ReconnectingClientFactory from synapse.metrics import SERVER_NAME_LABEL, LaterGauge -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.commands import ( ClearUserSyncsCommand, Command, @@ -132,6 +131,7 @@ class ReplicationCommandHandler: def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname + self.hs = hs self._replication_data_handler = hs.get_replication_data_handler() self._presence_handler = hs.get_presence_handler() self._store = hs.get_datastores().main @@ -361,9 +361,8 @@ class ReplicationCommandHandler: return # fire off a background process to start processing the queue. - run_as_background_process( + self.hs.run_as_background_process( "process-replication-data", - self.server_name, self._unsafe_process_queue, stream_name, ) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 25a7868cd7..bcfc65c2c0 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -42,7 +42,6 @@ from synapse.logging.context import PreserveLoggingContext from synapse.metrics import SERVER_NAME_LABEL, LaterGauge from synapse.metrics.background_process_metrics import ( BackgroundProcessLoggingContext, - run_as_background_process, ) from synapse.replication.tcp.commands import ( VALID_CLIENT_COMMANDS, @@ -140,9 +139,14 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): max_line_buffer = 10000 def __init__( - self, server_name: str, clock: Clock, handler: "ReplicationCommandHandler" + self, + hs: "HomeServer", + server_name: str, + clock: Clock, + handler: "ReplicationCommandHandler", ): self.server_name = server_name + self.hs = hs self.clock = clock self.command_handler = handler @@ -290,9 +294,8 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): # if so. 
if isawaitable(res): - run_as_background_process( + self.hs.run_as_background_process( "replication-" + cmd.get_logcontext_id(), - self.server_name, lambda: res, ) @@ -470,9 +473,13 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): VALID_OUTBOUND_COMMANDS = VALID_SERVER_COMMANDS def __init__( - self, server_name: str, clock: Clock, handler: "ReplicationCommandHandler" + self, + hs: "HomeServer", + server_name: str, + clock: Clock, + handler: "ReplicationCommandHandler", ): - super().__init__(server_name, clock, handler) + super().__init__(hs, server_name, clock, handler) self.server_name = server_name @@ -497,7 +504,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): clock: Clock, command_handler: "ReplicationCommandHandler", ): - super().__init__(server_name, clock, command_handler) + super().__init__(hs, server_name, clock, command_handler) self.client_name = client_name self.server_name = server_name diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 0b1be033b1..caffb2913e 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -40,7 +40,6 @@ from synapse.logging.context import PreserveLoggingContext, make_deferred_yielda from synapse.metrics import SERVER_NAME_LABEL from synapse.metrics.background_process_metrics import ( BackgroundProcessLoggingContext, - run_as_background_process, wrap_as_background_process, ) from synapse.replication.tcp.commands import ( @@ -109,6 +108,7 @@ class RedisSubscriber(SubscriberProtocol): """ server_name: str + hs: "HomeServer" synapse_handler: "ReplicationCommandHandler" synapse_stream_prefix: str synapse_channel_names: List[str] @@ -146,9 +146,7 @@ class RedisSubscriber(SubscriberProtocol): def connectionMade(self) -> None: logger.info("Connected to redis") super().connectionMade() - run_as_background_process( - "subscribe-replication", self.server_name, self._send_subscribe - ) + self.hs.run_as_background_process("subscribe-replication", self._send_subscribe) async def _send_subscribe(self) -> None: # it's important to make sure that we only send the REPLICATE command once we @@ -223,8 +221,8 @@ class RedisSubscriber(SubscriberProtocol): # if so. 
if isawaitable(res): - run_as_background_process( - "replication-" + cmd.get_logcontext_id(), self.server_name, lambda: res + self.hs.run_as_background_process( + "replication-" + cmd.get_logcontext_id(), lambda: res ) def connectionLost(self, reason: Failure) -> None: # type: ignore[override] @@ -245,9 +243,8 @@ class RedisSubscriber(SubscriberProtocol): Args: cmd: The command to send """ - run_as_background_process( + self.hs.run_as_background_process( "send-cmd", - self.server_name, self._async_send_command, cmd, # We originally started tracing background processes to avoid `There was no @@ -317,9 +314,8 @@ class SynapseRedisFactory(RedisFactory): convertNumbers=convertNumbers, ) - self.server_name = ( - hs.hostname - ) # nb must be called this for @wrap_as_background_process + self.hs = hs # nb must be called this for @wrap_as_background_process + self.server_name = hs.hostname hs.get_clock().looping_call(self._send_ping, 30 * 1000) @@ -397,6 +393,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory): ) self.server_name = hs.hostname + self.hs = hs self.synapse_handler = hs.get_replication_command_handler() self.synapse_stream_prefix = hs.hostname self.synapse_channel_names = channel_names @@ -412,6 +409,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory): # the base method does some other things than just instantiating the # protocol. p.server_name = self.server_name + p.hs = self.hs p.synapse_handler = self.synapse_handler p.synapse_outbound_redis_connection = self.synapse_outbound_redis_connection p.synapse_stream_prefix = self.synapse_stream_prefix diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index d800cfe6f6..ef72a0a532 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -30,7 +30,6 @@ from twisted.internet.interfaces import IAddress from twisted.internet.protocol import ServerFactory from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.commands import PositionCommand from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol from synapse.replication.tcp.streams import EventsStream @@ -55,6 +54,7 @@ class ReplicationStreamProtocolFactory(ServerFactory): def __init__(self, hs: "HomeServer"): self.command_handler = hs.get_replication_command_handler() self.clock = hs.get_clock() + self.hs = hs self.server_name = hs.config.server.server_name # If we've created a `ReplicationStreamProtocolFactory` then we're @@ -69,7 +69,7 @@ class ReplicationStreamProtocolFactory(ServerFactory): def buildProtocol(self, addr: IAddress) -> ServerReplicationStreamProtocol: return ServerReplicationStreamProtocol( - self.server_name, self.clock, self.command_handler + self.hs, self.server_name, self.clock, self.command_handler ) @@ -82,6 +82,7 @@ class ReplicationStreamer: def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname + self.hs = hs self.store = hs.get_datastores().main self.clock = hs.get_clock() self.notifier = hs.get_notifier() @@ -147,8 +148,8 @@ class ReplicationStreamer: logger.debug("Notifier poke loop already running") return - run_as_background_process( - "replication_notifier", self.server_name, self._run_notifier_loop + self.hs.run_as_background_process( + "replication_notifier", self._run_notifier_loop ) async def _run_notifier_loop(self) -> None: diff --git a/synapse/replication/tcp/streams/__init__.py 
b/synapse/replication/tcp/streams/__init__.py index 25c15e5d48..87ac0a5ae1 100644 --- a/synapse/replication/tcp/streams/__init__.py +++ b/synapse/replication/tcp/streams/__init__.py @@ -77,6 +77,7 @@ STREAMS_MAP = { __all__ = [ "STREAMS_MAP", "Stream", + "EventsStream", "BackfillStream", "PresenceStream", "PresenceFederationStream", @@ -87,6 +88,7 @@ __all__ = [ "CachesStream", "DeviceListsStream", "ToDeviceStream", + "FederationStream", "AccountDataStream", "ThreadSubscriptionsStream", "UnPartialStatedRoomStream", diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 64deae7650..1084139df0 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -66,7 +66,6 @@ from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import set_tag from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.client._base import client_patterns from synapse.rest.client.transactions import HttpTransactionCache from synapse.state import CREATE_KEY, POWER_KEY @@ -1225,6 +1224,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.server_name = hs.hostname + self.hs = hs self.event_creation_handler = hs.get_event_creation_handler() self.auth = hs.get_auth() self._store = hs.get_datastores().main @@ -1307,9 +1307,8 @@ class RoomRedactEventRestServlet(TransactionRestServlet): ) if with_relations: - run_as_background_process( + self.hs.run_as_background_process( "redact_related_events", - self.server_name, self._relation_handler.redact_events_related_to, requester=requester, event_id=event_id, diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index bb63b51599..0f3cc84dcc 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -126,6 +126,7 @@ class SyncRestServlet(RestServlet): self._json_filter_cache: LruCache[str, bool] = LruCache( max_size=1000, + clock=self.clock, cache_name="sync_valid_filter", server_name=self.server_name, ) diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 1a57996aec..571ba2fa62 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -56,7 +56,7 @@ class HttpTransactionCache: ] = {} # Try to clean entries every 30 mins. This means entries will exist # for at *LEAST* 30 mins, and at *MOST* 60 mins. 
- self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS) + self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS) def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable: """A helper function which returns a transaction key that can be used diff --git a/synapse/server.py b/synapse/server.py index edcab19d72..cc0d3a427b 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -28,10 +28,27 @@ import abc import functools import logging -from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Type, TypeVar, cast +from threading import Thread +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Dict, + List, + Optional, + Tuple, + Type, + TypeVar, + cast, +) +from wsgiref.simple_server import WSGIServer +from attr import dataclass from typing_extensions import TypeAlias +from twisted.internet import defer +from twisted.internet.base import _SystemEventID from twisted.internet.interfaces import IOpenSSLContextFactory from twisted.internet.tcp import Port from twisted.python.threadpool import ThreadPool @@ -44,6 +61,7 @@ from synapse.api.auth.mas import MasDelegatedAuth from synapse.api.auth_blocking import AuthBlocking from synapse.api.filtering import Filtering from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter +from synapse.app._base import unregister_sighups from synapse.appservice.api import ApplicationServiceApi from synapse.appservice.scheduler import ApplicationServiceScheduler from synapse.config.homeserver import HomeServerConfig @@ -133,6 +151,7 @@ from synapse.metrics import ( all_later_gauges_to_clean_up_on_shutdown, register_threadpool, ) +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager from synapse.module_api import ModuleApi from synapse.module_api.callbacks import ModuleApiCallbacks @@ -156,6 +175,7 @@ from synapse.storage.controllers import StorageControllers from synapse.streams.events import EventSources from synapse.synapse_rust.rendezvous import RendezvousHandler from synapse.types import DomainSpecificString, ISynapseReactor +from synapse.util.caches import CACHE_METRIC_REGISTRY from synapse.util.clock import Clock from synapse.util.distributor import Distributor from synapse.util.macaroons import MacaroonGenerator @@ -166,7 +186,9 @@ from synapse.util.task_scheduler import TaskScheduler logger = logging.getLogger(__name__) if TYPE_CHECKING: + # Old Python versions don't have `LiteralString` from txredisapi import ConnectionHandler + from typing_extensions import LiteralString from synapse.handlers.jwt import JwtHandler from synapse.handlers.oidc import OidcHandler @@ -196,6 +218,7 @@ if TYPE_CHECKING: T: TypeAlias = object F = TypeVar("F", bound=Callable[["HomeServer"], T]) +R = TypeVar("R") def cache_in_self(builder: F) -> F: @@ -219,7 +242,8 @@ def cache_in_self(builder: F) -> F: @functools.wraps(builder) def _get(self: "HomeServer") -> T: try: - return getattr(self, depname) + dep = getattr(self, depname) + return dep except AttributeError: pass @@ -239,6 +263,22 @@ def cache_in_self(builder: F) -> F: return cast(F, _get) +@dataclass +class ShutdownInfo: + """Information for callable functions called at time of shutdown. + + Attributes: + func: the object to call before shutdown. + trigger_id: an ID returned when registering this event trigger. + args: the arguments to call the function with. + kwargs: the keyword arguments to call the function with. 
+ """ + + func: Callable[..., Any] + trigger_id: _SystemEventID + kwargs: Dict[str, object] + + class HomeServer(metaclass=abc.ABCMeta): """A basic homeserver object without lazy component builders. @@ -289,6 +329,7 @@ class HomeServer(metaclass=abc.ABCMeta): hostname : The hostname for the server. config: The full config for the homeserver. """ + if not reactor: from twisted.internet import reactor as _reactor @@ -300,6 +341,7 @@ class HomeServer(metaclass=abc.ABCMeta): self.signing_key = config.key.signing_key[0] self.config = config self._listening_services: List[Port] = [] + self._metrics_listeners: List[Tuple[WSGIServer, Thread]] = [] self.start_time: Optional[int] = None self._instance_id = random_string(5) @@ -315,6 +357,211 @@ class HomeServer(metaclass=abc.ABCMeta): # This attribute is set by the free function `refresh_certificate`. self.tls_server_context_factory: Optional[IOpenSSLContextFactory] = None + self._is_shutdown = False + self._async_shutdown_handlers: List[ShutdownInfo] = [] + self._sync_shutdown_handlers: List[ShutdownInfo] = [] + self._background_processes: set[defer.Deferred[Optional[Any]]] = set() + + def run_as_background_process( + self, + desc: "LiteralString", + func: Callable[..., Awaitable[Optional[R]]], + *args: Any, + **kwargs: Any, + ) -> "defer.Deferred[Optional[R]]": + """Run the given function in its own logcontext, with resource metrics + + This should be used to wrap processes which are fired off to run in the + background, instead of being associated with a particular request. + + It returns a Deferred which completes when the function completes, but it doesn't + follow the synapse logcontext rules, which makes it appropriate for passing to + clock.looping_call and friends (or for firing-and-forgetting in the middle of a + normal synapse async function). + + Because the returned Deferred does not follow the synapse logcontext rules, awaiting + the result of this function will result in the log context being cleared (bad). In + order to properly await the result of this function and maintain the current log + context, use `make_deferred_yieldable`. + + Args: + desc: a description for this background process type + server_name: The homeserver name that this background process is being run for + (this should be `hs.hostname`). + func: a function, which may return a Deferred or a coroutine + bg_start_span: Whether to start an opentracing span. Defaults to True. + Should only be disabled for processes that will not log to or tag + a span. + args: positional args for func + kwargs: keyword args for func + + Returns: + Deferred which returns the result of func, or `None` if func raises. + Note that the returned Deferred does not follow the synapse logcontext + rules. + """ + if self._is_shutdown: + raise Exception( + f"Cannot start background process. HomeServer has been shutdown {len(self._background_processes)} {len(self.get_clock()._looping_calls)} {len(self.get_clock()._call_id_to_delayed_call)}" + ) + + # Ignore linter error as this is the one location this should be called. + deferred = run_as_background_process(desc, self.hostname, func, *args, **kwargs) # type: ignore[untracked-background-process] + self._background_processes.add(deferred) + + def on_done(res: R) -> R: + try: + self._background_processes.remove(deferred) + except KeyError: + # If the background process isn't being tracked anymore we can just move on. 
+ pass + return res + + deferred.addBoth(on_done) + return deferred + + async def shutdown(self) -> None: + """ + Cleanly stops all aspects of the HomeServer and removes any references that + have been handed out in order to allow the HomeServer object to be garbage + collected. + + You must ensure the HomeServer object to not be frozen in the garbage collector + in order for it to be cleaned up. By default, Synapse freezes the HomeServer + object in the garbage collector. + """ + + self._is_shutdown = True + + logger.info( + "Received shutdown request for %s (%s).", + self.hostname, + self.get_instance_id(), + ) + + # Unregister sighups first. If a shutdown was requested we shouldn't be responding + # to things like config changes. So it would be best to stop listening to these first. + unregister_sighups(self._instance_id) + + # TODO: It would be desireable to be able to report an error if the HomeServer + # object is frozen in the garbage collector as that would prevent it from being + # collected after being shutdown. + # In theory the following should work, but it doesn't seem to make a difference + # when I test it locally. + # + # if gc.is_tracked(self): + # logger.error("HomeServer object is tracked by garbage collection so cannot be fully cleaned up") + + for listener in self._listening_services: + # During unit tests, an incomplete `twisted.pair.testing._FakePort` is used + # for listeners so check listener type here to ensure shutdown procedure is + # only applied to actual `Port` instances. + if type(listener) is Port: + port_shutdown = listener.stopListening() + if port_shutdown is not None: + await port_shutdown + self._listening_services.clear() + + for server, thread in self._metrics_listeners: + server.shutdown() + thread.join() + self._metrics_listeners.clear() + + # TODO: Cleanup replication pieces + + self.get_keyring().shutdown() + + # Cleanup metrics associated with the homeserver + for later_gauge in all_later_gauges_to_clean_up_on_shutdown.values(): + later_gauge.unregister_hooks_for_homeserver_instance_id( + self.get_instance_id() + ) + + CACHE_METRIC_REGISTRY.unregister_hooks_for_homeserver( + self.config.server.server_name + ) + + for db in self.get_datastores().databases: + db.stop_background_updates() + + if self.should_send_federation(): + try: + self.get_federation_sender().shutdown() + except Exception: + pass + + for shutdown_handler in self._async_shutdown_handlers: + try: + self.get_reactor().removeSystemEventTrigger(shutdown_handler.trigger_id) + defer.ensureDeferred(shutdown_handler.func(**shutdown_handler.kwargs)) + except Exception as e: + logger.error("Error calling shutdown async handler: %s", e) + self._async_shutdown_handlers.clear() + + for shutdown_handler in self._sync_shutdown_handlers: + try: + self.get_reactor().removeSystemEventTrigger(shutdown_handler.trigger_id) + shutdown_handler.func(**shutdown_handler.kwargs) + except Exception as e: + logger.error("Error calling shutdown sync handler: %s", e) + self._sync_shutdown_handlers.clear() + + self.get_clock().shutdown() + + for background_process in list(self._background_processes): + try: + background_process.cancel() + except Exception: + pass + self._background_processes.clear() + + for db in self.get_datastores().databases: + db._db_pool.close() + + def register_async_shutdown_handler( + self, + *, + phase: str, + eventType: str, + shutdown_func: Callable[..., Any], + **kwargs: object, + ) -> None: + """ + Register a system event trigger with the HomeServer so it can be cleanly + removed 
when the HomeServer is shut down.
+        """
+        id = self.get_clock().add_system_event_trigger(
+            phase,
+            eventType,
+            shutdown_func,
+            **kwargs,
+        )
+        self._async_shutdown_handlers.append(
+            ShutdownInfo(func=shutdown_func, trigger_id=id, kwargs=kwargs)
+        )
+
+    def register_sync_shutdown_handler(
+        self,
+        *,
+        phase: str,
+        eventType: str,
+        shutdown_func: Callable[..., Any],
+        **kwargs: object,
+    ) -> None:
+        """
+        Register a system event trigger with the HomeServer so it can be cleanly
+        removed when the HomeServer is shut down.
+        """
+        id = self.get_clock().add_system_event_trigger(
+            phase,
+            eventType,
+            shutdown_func,
+            **kwargs,
+        )
+        self._sync_shutdown_handlers.append(
+            ShutdownInfo(func=shutdown_func, trigger_id=id, kwargs=kwargs)
+        )
+
     def register_module_web_resource(self, path: str, resource: Resource) -> None:
         """Allows a module to register a web resource to be served at the given
         path.
@@ -366,36 +613,25 @@ class HomeServer(metaclass=abc.ABCMeta):
         self.datastores = Databases(self.DATASTORE_CLASS, self)
         logger.info("Finished setting up.")
 
-    def __del__(self) -> None:
-        """
-        Called when an the homeserver is garbage collected.
+        # Register background tasks required by this server. This must be done
+        # somewhat manually due to the background tasks not being registered
+        # unless handlers are instantiated.
+        if self.config.worker.run_background_tasks:
+            self.start_background_tasks()
 
-        Make sure we actually do some clean-up, rather than leak data.
-        """
-        self.cleanup()
-
-    def cleanup(self) -> None:
-        """
-        WIP: Clean-up any references to the homeserver and stop any running related
-        processes, timers, loops, replication stream, etc.
-
-        This should be called wherever you care about the HomeServer being completely
-        garbage collected like in tests. It's not necessary to call if you plan to just
-        shut down the whole Python process anyway.
-
-        Can be called multiple times.
-        """
-        logger.info("Received cleanup request for %s.", self.hostname)
-
-        # TODO: Stop background processes, timers, loops, replication stream, etc.
-
-        # Cleanup metrics associated with the homeserver
-        for later_gauge in all_later_gauges_to_clean_up_on_shutdown.values():
-            later_gauge.unregister_hooks_for_homeserver_instance_id(
-                self.get_instance_id()
-            )
-
-        logger.info("Cleanup complete for %s.", self.hostname)
+    # def __del__(self) -> None:
+    #     """
+    #     Called when the homeserver is garbage collected.
+    #
+    #     Make sure we actually do some clean-up, rather than leak data.
+    #     """
+    #
+    #     # NOTE: This is a chicken and egg problem.
+    #     # __del__ will never be called since the HomeServer cannot be garbage collected
+    #     # until the shutdown function has been called. So it makes no sense to call
+    #     # shutdown inside of __del__, even though that is a logical place to assume it
+    #     # should be called.
+    #     self.shutdown()
 
     def start_listening(self) -> None:  # noqa: B027 (no-op by design)
         """Start the HTTP, manhole, metrics, etc listeners
@@ -442,7 +678,8 @@ class HomeServer(metaclass=abc.ABCMeta):
 
     @cache_in_self
     def get_clock(self) -> Clock:
-        return Clock(self._reactor, server_name=self.hostname)
+        # Ignore the linter error since this is the one place the `Clock` should be created.
+ return Clock(self._reactor, server_name=self.hostname) # type: ignore[multiple-internal-clocks] def get_datastores(self) -> Databases: if not self.datastores: @@ -452,7 +689,7 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_distributor(self) -> Distributor: - return Distributor(server_name=self.hostname) + return Distributor(hs=self) @cache_in_self def get_registration_ratelimiter(self) -> Ratelimiter: @@ -1007,8 +1244,10 @@ class HomeServer(metaclass=abc.ABCMeta): ) media_threadpool.start() - self.get_clock().add_system_event_trigger( - "during", "shutdown", media_threadpool.stop + self.register_sync_shutdown_handler( + phase="during", + eventType="shutdown", + shutdown_func=media_threadpool.stop, ) # Register the threadpool with our metrics. diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 19f86b5a56..73cf4091eb 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -36,6 +36,7 @@ SERVER_NOTICE_ROOM_TAG = "m.server_notice" class ServerNoticesManager: def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname # nb must be called this for @cached + self.clock = hs.get_clock() # nb must be called this for @cached self._store = hs.get_datastores().main self._config = hs.config self._account_data_handler = hs.get_account_data_handler() diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index dd8d7135ba..394dc72fa6 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -651,6 +651,7 @@ class StateResolutionHandler: ExpiringCache( cache_name="state_cache", server_name=self.server_name, + hs=hs, clock=self.clock, max_len=100000, expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000, diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index f214f55897..1fddcc0799 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -56,7 +56,7 @@ class SQLBaseStore(metaclass=ABCMeta): ): self.hs = hs self.server_name = hs.hostname # nb must be called this for @cached - self._clock = hs.get_clock() + self.clock = hs.get_clock() # nb must be called this for @cached self.database_engine = database.engine self.db_pool = database diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 9aa9e51aeb..e3e793d5f5 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -41,7 +41,6 @@ from typing import ( import attr from synapse._pydantic_compat import BaseModel -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.engines import PostgresEngine from synapse.storage.types import Connection, Cursor from synapse.types import JsonDict, StrCollection @@ -285,6 +284,13 @@ class BackgroundUpdater: self.sleep_duration_ms = hs.config.background_updates.sleep_duration_ms self.sleep_enabled = hs.config.background_updates.sleep_enabled + def shutdown(self) -> None: + """ + Stop any further background updates from happening. + """ + self.enabled = False + self._background_update_handlers.clear() + def get_status(self) -> UpdaterStatus: """An integer summarising the updater status. Used as a metric.""" if self._aborted: @@ -396,9 +402,8 @@ class BackgroundUpdater: # if we start a new background update, not all updates are done. 
self._all_done = False sleep = self.sleep_enabled - run_as_background_process( + self.hs.run_as_background_process( "background_updates", - self.server_name, self.run_background_updates, sleep, ) diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 120934af57..646e2cf115 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -62,7 +62,6 @@ from synapse.logging.opentracing import ( trace, ) from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.controllers.state import StateStorageController from synapse.storage.databases import Databases from synapse.storage.databases.main.events import DeltaState @@ -195,6 +194,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]): def __init__( self, + hs: "HomeServer", server_name: str, per_item_callback: Callable[ [str, _EventPersistQueueTask], @@ -207,6 +207,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]): and its result will be returned via the Deferreds returned from add_to_queue. """ self.server_name = server_name + self.hs = hs self._event_persist_queues: Dict[str, Deque[_EventPersistQueueItem]] = {} self._currently_persisting_rooms: Set[str] = set() self._per_item_callback = per_item_callback @@ -311,7 +312,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]): self._currently_persisting_rooms.discard(room_id) # set handle_queue_loop off in the background - run_as_background_process("persist_events", self.server_name, handle_queue_loop) + self.hs.run_as_background_process("persist_events", handle_queue_loop) def _get_drainining_queue( self, room_id: str @@ -354,7 +355,7 @@ class EventsPersistenceStorageController: self._instance_name = hs.get_instance_name() self.is_mine_id = hs.is_mine_id self._event_persist_queue = _EventPeristenceQueue( - self.server_name, self._process_event_persist_queue_task + hs, self.server_name, self._process_event_persist_queue_task ) self._state_resolution_handler = hs.get_state_resolution_handler() self._state_controller = state_controller diff --git a/synapse/storage/controllers/purge_events.py b/synapse/storage/controllers/purge_events.py index 14b37ac543..ded9cb0567 100644 --- a/synapse/storage/controllers/purge_events.py +++ b/synapse/storage/controllers/purge_events.py @@ -46,9 +46,8 @@ class PurgeEventsStorageController: """High level interface for purging rooms and event history.""" def __init__(self, hs: "HomeServer", stores: Databases): - self.server_name = ( - hs.hostname - ) # nb must be called this for @wrap_as_background_process + self.hs = hs # nb must be called this for @wrap_as_background_process + self.server_name = hs.hostname self.stores = stores if hs.config.worker.run_background_tasks: diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 66f3289d86..76978402b9 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -69,8 +69,8 @@ class StateStorageController: def __init__(self, hs: "HomeServer", stores: "Databases"): self.server_name = hs.hostname # nb must be called this for @cached + self.clock = hs.get_clock() self._is_mine_id = hs.is_mine_id - self._clock = hs.get_clock() self.stores = stores self._partial_state_events_tracker = PartialStateEventsTracker(stores.main) self._partial_state_room_tracker = PartialCurrentStateTracker(stores.main) @@ -78,7 +78,7 @@ class 
StateStorageController: # Used by `_get_joined_hosts` to ensure only one thing mutates the cache # at a time. Keyed by room_id. self._joined_host_linearizer = Linearizer( - name="_JoinedHostsCache", clock=self._clock + name="_JoinedHostsCache", clock=self.clock ) def notify_event_un_partial_stated(self, event_id: str) -> None: @@ -817,9 +817,7 @@ class StateStorageController: state_group = object() assert state_group is not None - with Measure( - self._clock, name="get_joined_hosts", server_name=self.server_name - ): + with Measure(self.clock, name="get_joined_hosts", server_name=self.server_name): return await self._get_joined_hosts( room_id, state_group, state_entry=state_entry ) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 249a0a933c..a4b2b26795 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -62,7 +62,6 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.metrics import SERVER_NAME_LABEL, register_threadpool -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor, SQLQueryParameters @@ -638,12 +637,17 @@ class DatabasePool: # background updates of tables that aren't safe to update. self._clock.call_later( 0.0, - run_as_background_process, + self.hs.run_as_background_process, "upsert_safety_check", - self.server_name, self._check_safe_to_upsert, ) + def stop_background_updates(self) -> None: + """ + Stops the database from running any further background updates. + """ + self.updates.shutdown() + def name(self) -> str: "Return the name of this database" return self._database_config.name @@ -681,9 +685,8 @@ class DatabasePool: if background_update_names: self._clock.call_later( 15.0, - run_as_background_process, + self.hs.run_as_background_process, "upsert_safety_check", - self.server_name, self._check_safe_to_upsert, ) diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index cad26fefa4..674c6b921e 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -751,7 +751,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "instance_name": self._instance_name, "cache_func": cache_name, "keys": keys, - "invalidation_ts": self._clock.time_msec(), + "invalidation_ts": self.clock.time_msec(), }, ) @@ -778,7 +778,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): assert self._cache_id_gen is not None stream_ids = self._cache_id_gen.get_next_mult_txn(txn, len(key_tuples)) - ts = self._clock.time_msec() + ts = self.clock.time_msec() txn.call_after(self.hs.get_notifier().on_new_replication_data) self.db_pool.simple_insert_many_txn( txn, @@ -830,7 +830,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): next_interval = REGULAR_CLEANUP_INTERVAL_MS self.hs.get_clock().call_later( - next_interval / 1000, self._clean_up_cache_invalidation_wrapper + next_interval / 1000, + self._clean_up_cache_invalidation_wrapper, ) async def _clean_up_batch_of_old_cache_invalidations( diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 3f9f482add..45cfe97dba 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -77,7 +77,7 @@ class CensorEventsStore(EventsWorkerStore, 
CacheInvalidationWorkerStore, SQLBase return before_ts = ( - self._clock.time_msec() - self.hs.config.server.redaction_retention_period + self.clock.time_msec() - self.hs.config.server.redaction_retention_period ) # We fetch all redactions that: diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index c7a330cc83..dc6ab99a6c 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -438,10 +438,11 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke cache_name="client_ip_last_seen", server_name=self.server_name, max_size=50000, + clock=hs.get_clock(), ) if hs.config.worker.run_background_tasks and self.user_ips_max_age: - self._clock.looping_call(self._prune_old_user_ips, 5 * 1000) + self.clock.looping_call(self._prune_old_user_ips, 5 * 1000) if self._update_on_this_worker: # This is the designated worker that can write to the client IP @@ -452,11 +453,11 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke Tuple[str, str, str], Tuple[str, Optional[str], int] ] = {} - self._client_ip_looper = self._clock.looping_call( - self._update_client_ips_batch, 5 * 1000 - ) - self.hs.get_clock().add_system_event_trigger( - "before", "shutdown", self._update_client_ips_batch + self.clock.looping_call(self._update_client_ips_batch, 5 * 1000) + hs.register_async_shutdown_handler( + phase="before", + eventType="shutdown", + shutdown_func=self._update_client_ips_batch, ) @wrap_as_background_process("prune_old_user_ips") @@ -492,7 +493,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke ) """ - timestamp = self._clock.time_msec() - self.user_ips_max_age + timestamp = self.clock.time_msec() - self.user_ips_max_age def _prune_old_user_ips_txn(txn: LoggingTransaction) -> None: txn.execute(sql, (timestamp,)) @@ -628,7 +629,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke return if not now: - now = int(self._clock.time_msec()) + now = int(self.clock.time_msec()) key = (user_id, access_token, ip) try: diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index f6f3c94a0d..a66e11f738 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -96,7 +96,8 @@ class DeviceInboxWorkerStore(SQLBaseStore): ] = ExpiringCache( cache_name="last_device_delete_cache", server_name=self.server_name, - clock=self._clock, + hs=hs, + clock=self.clock, max_len=10000, expiry_ms=30 * 60 * 1000, ) @@ -154,7 +155,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): ) if hs.config.worker.run_background_tasks: - self._clock.looping_call( + self.clock.looping_call( run_as_background_process, DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS, "_delete_old_federation_inbox_rows", @@ -826,7 +827,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): ) async with self._to_device_msg_id_gen.get_next() as stream_id: - now_ms = self._clock.time_msec() + now_ms = self.clock.time_msec() await self.db_pool.runInteraction( "add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id ) @@ -881,7 +882,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): ) async with self._to_device_msg_id_gen.get_next() as stream_id: - now_ms = self._clock.time_msec() + now_ms = self.clock.time_msec() await self.db_pool.runInteraction( "add_messages_from_remote_to_device_inbox", add_messages_txn, @@ -1002,7 +1003,7 @@ class 
DeviceInboxWorkerStore(SQLBaseStore): # We delete at most 100 rows that are older than # DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS delete_before_ts = ( - self._clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS + self.clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS ) sql = """ WITH to_delete AS ( @@ -1032,7 +1033,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): # We sleep a bit so that we don't hammer the database in a tight # loop first time we run this. - await self._clock.sleep(1) + await self.clock.sleep(1) async def get_devices_with_messages( self, user_id: str, device_ids: StrCollection diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fc1e1c73f1..d4b9ce0ea0 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -195,7 +195,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) if hs.config.worker.run_background_tasks: - self._clock.looping_call( + self.clock.looping_call( self._prune_old_outbound_device_pokes, 60 * 60 * 1000 ) @@ -1390,7 +1390,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): table="device_lists_remote_resync", keyvalues={"user_id": user_id}, values={}, - insertion_values={"added_ts": self._clock.time_msec()}, + insertion_values={"added_ts": self.clock.time_msec()}, ) await self.db_pool.runInteraction( @@ -1601,7 +1601,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): that user when the destination comes back. It doesn't matter which device we keep. """ - yesterday = self._clock.time_msec() - prune_age + yesterday = self.clock.time_msec() - prune_age def _prune_txn(txn: LoggingTransaction) -> None: # look for (user, destination) pairs which have an update older than @@ -2086,7 +2086,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): stream_id, ) - now = self._clock.time_msec() + now = self.clock.time_msec() encoded_context = json_encoder.encode(context) mark_sent = not self.hs.is_mine_id(user_id) diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 2e9f62075a..2d3d0c0036 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1564,7 +1564,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker DELETE FROM e2e_one_time_keys_json WHERE {clause} AND ts_added_ms < ? AND length(key_id) = 6 """ - args.append(self._clock.time_msec() - (7 * 24 * 3600 * 1000)) + args.append(self.clock.time_msec() - (7 * 24 * 3600 * 1000)) txn.execute(sql, args) return users, txn.rowcount @@ -1585,7 +1585,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker None, if there is no such key. Otherwise, the timestamp before which replacement is allowed without UIA. """ - timestamp = self._clock.time_msec() + duration_ms + timestamp = self.clock.time_msec() + duration_ms def impl(txn: LoggingTransaction) -> Optional[int]: txn.execute( diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 5c9bd2e848..d77420ff47 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -167,6 +167,7 @@ class EventFederationWorkerStore( # Cache of event ID to list of auth event IDs and their depths. 
self._event_auth_cache: LruCache[str, List[Tuple[str, int]]] = LruCache( max_size=500000, + clock=self.hs.get_clock(), server_name=self.server_name, cache_name="_event_auth_cache", size_callback=len, @@ -176,7 +177,7 @@ class EventFederationWorkerStore( # index. self.tests_allow_no_chain_cover_index = True - self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000) + self.clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000) if isinstance(self.database_engine, PostgresEngine): self.db_pool.updates.register_background_validate_constraint_and_delete_rows( @@ -1328,7 +1329,7 @@ class EventFederationWorkerStore( ( room_id, current_depth, - self._clock.time_msec(), + self.clock.time_msec(), BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS, BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS, limit, @@ -1841,7 +1842,7 @@ class EventFederationWorkerStore( last_cause=EXCLUDED.last_cause; """ - txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause)) + txn.execute(sql, (room_id, event_id, 1, self.clock.time_msec(), cause)) @trace async def get_event_ids_with_failed_pull_attempts( @@ -1905,7 +1906,7 @@ class EventFederationWorkerStore( ), ) - current_time = self._clock.time_msec() + current_time = self.clock.time_msec() event_ids_with_backoff = {} for event_id, last_attempt_ts, num_attempts in event_failed_pull_attempts: @@ -2025,7 +2026,7 @@ class EventFederationWorkerStore( values={}, insertion_values={ "room_id": event.room_id, - "received_ts": self._clock.time_msec(), + "received_ts": self.clock.time_msec(), "event_json": json_encoder.encode(event.get_dict()), "internal_metadata": json_encoder.encode( event.internal_metadata.get_dict() @@ -2299,7 +2300,7 @@ class EventFederationWorkerStore( # If there is nothing in the staging area default it to 0. age = 0 if received_ts is not None: - age = self._clock.time_msec() - received_ts + age = self.clock.time_msec() - received_ts return count, age diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 4db0230421..ec26aedc6b 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -95,6 +95,8 @@ from typing import ( import attr +from twisted.internet.task import LoopingCall + from synapse.api.constants import MAIN_TIMELINE, ReceiptTypes from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -254,6 +256,8 @@ def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, st class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore): + _background_tasks: List[LoopingCall] = [] + def __init__( self, database: DatabasePool, @@ -263,7 +267,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas super().__init__(database, db_conn, hs) # Track when the process started. 
- self._started_ts = self._clock.time_msec() + self._started_ts = self.clock.time_msec() # These get correctly set by _find_stream_orderings_for_times_txn self.stream_ordering_month_ago: Optional[int] = None @@ -273,18 +277,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas self._find_stream_orderings_for_times_txn(cur) cur.close() - self.find_stream_orderings_looping_call = self._clock.looping_call( - self._find_stream_orderings_for_times, 10 * 60 * 1000 - ) + self.clock.looping_call(self._find_stream_orderings_for_times, 10 * 60 * 1000) self._rotate_count = 10000 self._doing_notif_rotation = False if hs.config.worker.run_background_tasks: - self._rotate_notif_loop = self._clock.looping_call( - self._rotate_notifs, 30 * 1000 - ) + self.clock.looping_call(self._rotate_notifs, 30 * 1000) - self._clear_old_staging_loop = self._clock.looping_call( + self.clock.looping_call( self._clear_old_push_actions_staging, 30 * 60 * 1000 ) @@ -1190,7 +1190,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas is_highlight, # highlight column int(count_as_unread), # unread column thread_id, # thread_id column - self._clock.time_msec(), # inserted_ts column + self.clock.time_msec(), # inserted_ts column ) await self.db_pool.simple_insert_many( @@ -1241,14 +1241,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas def _find_stream_orderings_for_times_txn(self, txn: LoggingTransaction) -> None: logger.info("Searching for stream ordering 1 month ago") self.stream_ordering_month_ago = self._find_first_stream_ordering_after_ts_txn( - txn, self._clock.time_msec() - 30 * 24 * 60 * 60 * 1000 + txn, self.clock.time_msec() - 30 * 24 * 60 * 60 * 1000 ) logger.info( "Found stream ordering 1 month ago: it's %d", self.stream_ordering_month_ago ) logger.info("Searching for stream ordering 1 day ago") self.stream_ordering_day_ago = self._find_first_stream_ordering_after_ts_txn( - txn, self._clock.time_msec() - 24 * 60 * 60 * 1000 + txn, self.clock.time_msec() - 24 * 60 * 60 * 1000 ) logger.info( "Found stream ordering 1 day ago: it's %d", self.stream_ordering_day_ago @@ -1787,7 +1787,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # We delete anything more than an hour old, on the assumption that we'll # never take more than an hour to persist an event. - delete_before_ts = self._clock.time_msec() - 60 * 60 * 1000 + delete_before_ts = self.clock.time_msec() - 60 * 60 * 1000 if self._started_ts > delete_before_ts: # We need to wait for at least an hour before we started deleting, @@ -1824,7 +1824,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas return # We sleep to ensure that we don't overwhelm the DB. - await self._clock.sleep(1.0) + await self.clock.sleep(1.0) async def get_push_actions_for_user( self, diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 0a0102ee64..37dd8e48d5 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -730,7 +730,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS WHERE ? <= event_id AND event_id <= ? 
""" - txn.execute(sql, (self._clock.time_msec(), last_event_id, upper_event_id)) + txn.execute(sql, (self.clock.time_msec(), last_event_id, upper_event_id)) self.db_pool.updates._background_update_progress_txn( txn, "redactions_received_ts", {"last_event_id": upper_event_id} diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 31e2312211..4f9a1a4f78 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -70,7 +70,6 @@ from synapse.logging.opentracing import ( ) from synapse.metrics import SERVER_NAME_LABEL from synapse.metrics.background_process_metrics import ( - run_as_background_process, wrap_as_background_process, ) from synapse.replication.tcp.streams import BackfillStream, UnPartialStatedEventStream @@ -282,13 +281,14 @@ class EventsWorkerStore(SQLBaseStore): if hs.config.worker.run_background_tasks: # We periodically clean out old transaction ID mappings - self._clock.looping_call( + self.clock.looping_call( self._cleanup_old_transaction_ids, 5 * 60 * 1000, ) self._get_event_cache: AsyncLruCache[Tuple[str], EventCacheEntry] = ( AsyncLruCache( + clock=hs.get_clock(), server_name=self.server_name, cache_name="*getEvent*", max_size=hs.config.caches.event_cache_size, @@ -1154,9 +1154,7 @@ class EventsWorkerStore(SQLBaseStore): should_start = False if should_start: - run_as_background_process( - "fetch_events", self.server_name, self._fetch_thread - ) + self.hs.run_as_background_process("fetch_events", self._fetch_thread) async def _fetch_thread(self) -> None: """Services requests for events from `_event_fetch_list`.""" @@ -1276,7 +1274,7 @@ class EventsWorkerStore(SQLBaseStore): were not part of this request. """ with Measure( - self._clock, name="_fetch_event_list", server_name=self.server_name + self.clock, name="_fetch_event_list", server_name=self.server_name ): try: events_to_fetch = { @@ -2278,7 +2276,7 @@ class EventsWorkerStore(SQLBaseStore): """Cleans out transaction id mappings older than 24hrs.""" def _cleanup_old_transaction_ids_txn(txn: LoggingTransaction) -> None: - one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000 + one_day_ago = self.clock.time_msec() - 24 * 60 * 60 * 1000 sql = """ DELETE FROM event_txn_id_device_id WHERE inserted_ts < ? @@ -2633,7 +2631,7 @@ class EventsWorkerStore(SQLBaseStore): keyvalues={"event_id": event_id}, values={ "reason": rejection_reason, - "last_check": self._clock.time_msec(), + "last_check": self.clock.time_msec(), }, ) self.db_pool.simple_update_txn( diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index d0e4a91b59..e2b15eaf6a 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -28,7 +28,6 @@ from twisted.internet import defer from twisted.internet.task import LoopingCall from synapse.metrics.background_process_metrics import ( - run_as_background_process, wrap_as_background_process, ) from synapse.storage._base import SQLBaseStore @@ -99,15 +98,15 @@ class LockStore(SQLBaseStore): # lead to a race, as we may drop the lock while we are still processing. # However, a) it should be a small window, b) the lock is best effort # anyway and c) we want to really avoid leaking locks when we restart. 
- hs.get_clock().add_system_event_trigger( - "before", - "shutdown", - self._on_shutdown, + hs.register_async_shutdown_handler( + phase="before", + eventType="shutdown", + shutdown_func=self._on_shutdown, ) self._acquiring_locks: Set[Tuple[str, str]] = set() - self._clock.looping_call( + self.clock.looping_call( self._reap_stale_read_write_locks, _LOCK_TIMEOUT_MS / 10.0 ) @@ -153,7 +152,7 @@ class LockStore(SQLBaseStore): if lock and await lock.is_still_valid(): return None - now = self._clock.time_msec() + now = self.clock.time_msec() token = random_string(6) def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool: @@ -202,7 +201,8 @@ class LockStore(SQLBaseStore): lock = Lock( self.server_name, self._reactor, - self._clock, + self.hs, + self.clock, self, read_write=False, lock_name=lock_name, @@ -251,7 +251,7 @@ class LockStore(SQLBaseStore): # constraints. If it doesn't then we have acquired the lock, # otherwise we haven't. - now = self._clock.time_msec() + now = self.clock.time_msec() token = random_string(6) self.db_pool.simple_insert_txn( @@ -270,7 +270,8 @@ class LockStore(SQLBaseStore): lock = Lock( self.server_name, self._reactor, - self._clock, + self.hs, + self.clock, self, read_write=True, lock_name=lock_name, @@ -338,7 +339,7 @@ class LockStore(SQLBaseStore): """ def reap_stale_read_write_locks_txn(txn: LoggingTransaction) -> None: - txn.execute(delete_sql, (self._clock.time_msec() - _LOCK_TIMEOUT_MS,)) + txn.execute(delete_sql, (self.clock.time_msec() - _LOCK_TIMEOUT_MS,)) if txn.rowcount: logger.info("Reaped %d stale locks", txn.rowcount) @@ -374,6 +375,7 @@ class Lock: self, server_name: str, reactor: ISynapseReactor, + hs: "HomeServer", clock: Clock, store: LockStore, read_write: bool, @@ -387,6 +389,7 @@ class Lock: """ self._server_name = server_name self._reactor = reactor + self._hs = hs self._clock = clock self._store = store self._read_write = read_write @@ -410,6 +413,7 @@ class Lock: _RENEWAL_INTERVAL_MS, self._server_name, self._store, + self._hs, self._clock, self._read_write, self._lock_name, @@ -421,6 +425,7 @@ class Lock: def _renew( server_name: str, store: LockStore, + hs: "HomeServer", clock: Clock, read_write: bool, lock_name: str, @@ -457,9 +462,8 @@ class Lock: desc="renew_lock", ) - return run_as_background_process( + return hs.run_as_background_process( "Lock._renew", - server_name, _internal_renew, store, clock, diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index f726846e57..b8bd0042d7 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -565,7 +565,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): sql, ( user_id.to_string(), - self._clock.time_msec() - self.unused_expiration_time, + self.clock.time_msec() - self.unused_expiration_time, ), ) row = txn.fetchone() @@ -1059,7 +1059,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): txn: LoggingTransaction, ) -> int: # Calculate the timestamp for the start of the time period - start_ts = self._clock.time_msec() - time_period_ms + start_ts = self.clock.time_msec() - time_period_ms txn.execute(sql, (user_id, start_ts)) row = txn.fetchone() if row is None: diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index a3467bff3d..49411ed034 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -78,7 +78,7 @@ class 
ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): # Read the extrems every 60 minutes if hs.config.worker.run_background_tasks: - self._clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000) + self.clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000) # Used in _generate_user_daily_visits to keep track of progress self._last_user_visit_update = self._get_start_of_day() @@ -224,7 +224,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): """ Counts the number of users who used this homeserver in the last 24 hours. """ - yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) + yesterday = int(self.clock.time_msec()) - (1000 * 60 * 60 * 24) return await self.db_pool.runInteraction( "count_daily_users", self._count_users, yesterday ) @@ -236,7 +236,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): from the mau figure in synapse.storage.monthly_active_users which, amongst other things, includes a 3 day grace period before a user counts. """ - thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) + thirty_days_ago = int(self.clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) return await self.db_pool.runInteraction( "count_monthly_users", self._count_users, thirty_days_ago ) @@ -281,7 +281,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): def _count_r30v2_users(txn: LoggingTransaction) -> Dict[str, int]: thirty_days_in_secs = 86400 * 30 - now = int(self._clock.time()) + now = int(self.clock.time()) sixty_days_ago_in_secs = now - 2 * thirty_days_in_secs one_day_from_now_in_secs = now + 86400 @@ -389,7 +389,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): """ Returns millisecond unixtime for start of UTC day. """ - now = time.gmtime(self._clock.time()) + now = time.gmtime(self.clock.time()) today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0)) return today_start * 1000 @@ -403,7 +403,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): logger.info("Calling _generate_user_daily_visits") today_start = self._get_start_of_day() a_day_in_milliseconds = 24 * 60 * 60 * 1000 - now = self._clock.time_msec() + now = self.clock.time_msec() # A note on user_agent. Technically a given device can have multiple # user agents, so we need to decide which one to pick. 
We could have diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index f5a6b98be7..86744f616c 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -49,7 +49,6 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): hs: "HomeServer", ): super().__init__(database, db_conn, hs) - self._clock = hs.get_clock() self.hs = hs if hs.config.redis.redis_enabled: @@ -226,7 +225,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): reserved_users: reserved users to preserve """ - thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) + thirty_days_ago = int(self.clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) in_clause, in_clause_args = make_in_list_sql_clause( self.database_engine, "user_id", reserved_users @@ -328,7 +327,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): txn, table="monthly_active_users", keyvalues={"user_id": user_id}, - values={"timestamp": int(self._clock.time_msec())}, + values={"timestamp": int(self.clock.time_msec())}, ) else: logger.warning("mau limit reserved threepid %s not found in db", tp) @@ -391,7 +390,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): txn, table="monthly_active_users", keyvalues={"user_id": user_id}, - values={"timestamp": int(self._clock.time_msec())}, + values={"timestamp": int(self.clock.time_msec())}, ) self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index ff4eb9acb2..f1dbf68971 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -1073,7 +1073,7 @@ class ReceiptsWorkerStore(SQLBaseStore): if event_ts is None: return None - now = self._clock.time_msec() + now = self.clock.time_msec() logger.debug( "Receipt %s for event %s in %s (%i ms old)", receipt_type, diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 117444e7b7..906d1a91f6 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -212,7 +212,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): ) if hs.config.worker.run_background_tasks: - self._clock.call_later( + self.clock.call_later( 0.0, self._set_expiration_date_when_missing, ) @@ -226,7 +226,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): # Create a background job for culling expired 3PID validity tokens if hs.config.worker.run_background_tasks: - self._clock.looping_call( + self.clock.looping_call( self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS ) @@ -298,7 +298,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): ) -> None: user_id_obj = UserID.from_string(user_id) - now = int(self._clock.time()) + now = int(self.clock.time()) user_approved = approved or not self._require_approval @@ -457,7 +457,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): if not info: return False - now = self._clock.time_msec() + now = self.clock.time_msec() days = self.config.server.mau_appservice_trial_days.get( info.appservice_id, self.config.server.mau_trial_days ) @@ -640,7 +640,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): return await self.db_pool.runInteraction( 
"get_users_expiring_soon", select_users_txn, - self._clock.time_msec(), + self.clock.time_msec(), self.config.account_validity.account_validity_renew_at, ) @@ -1084,7 +1084,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): """ def _count_daily_user_type(txn: LoggingTransaction) -> Dict[str, int]: - yesterday = int(self._clock.time()) - (60 * 60 * 24) + yesterday = int(self.clock.time()) - (60 * 60 * 24) sql = """ SELECT user_type, COUNT(*) AS count FROM ( @@ -1496,7 +1496,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): await self.db_pool.runInteraction( "cull_expired_threepid_validation_tokens", cull_expired_threepid_validation_tokens_txn, - self._clock.time_msec(), + self.clock.time_msec(), ) @wrap_as_background_process("account_validity_set_expiration_dates") @@ -1537,7 +1537,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): random value in the [now + period - d ; now + period] range, d being a delta equal to 10% of the validity period. """ - now_ms = self._clock.time_msec() + now_ms = self.clock.time_msec() assert self._account_validity_period is not None expiration_ts = now_ms + self._account_validity_period @@ -1608,7 +1608,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): Raises: StoreError if there was a problem updating this. """ - now = self._clock.time_msec() + now = self.clock.time_msec() await self.db_pool.simple_update_one( "access_tokens", @@ -1639,7 +1639,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): uses_allowed, pending, completed, expiry_time = res # Check if the token has expired - now = self._clock.time_msec() + now = self.clock.time_msec() if expiry_time and expiry_time < now: return False @@ -1771,7 +1771,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): return await self.db_pool.runInteraction( "select_registration_tokens", select_registration_tokens_txn, - self._clock.time_msec(), + self.clock.time_msec(), valid, ) @@ -2251,7 +2251,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): "consume_login_token", self._consume_login_token, token, - self._clock.time_msec(), + self.clock.time_msec(), ) async def invalidate_login_tokens_by_session_id( @@ -2271,7 +2271,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): "auth_provider_id": auth_provider_id, "auth_provider_session_id": auth_provider_session_id, }, - updatevalues={"used_ts": self._clock.time_msec()}, + updatevalues={"used_ts": self.clock.time_msec()}, desc="invalidate_login_tokens_by_session_id", ) @@ -2640,7 +2640,6 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): ): super().__init__(database, db_conn, hs) - self._clock = hs.get_clock() self.config = hs.config self.db_pool.updates.register_background_index_update( @@ -2761,7 +2760,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): # Create a background job for removing expired login tokens if hs.config.worker.run_background_tasks: - self._clock.looping_call( + self.clock.looping_call( self._delete_expired_login_tokens, THIRTY_MINUTES_IN_MS ) @@ -2790,7 +2789,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): The token ID """ next_id = self._access_tokens_id_gen.get_next() - now = self._clock.time_msec() + now = self.clock.time_msec() await self.db_pool.simple_insert( "access_tokens", @@ -2874,7 +2873,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): keyvalues={"name": user_id}, 
updatevalues={ "consent_version": consent_version, - "consent_ts": self._clock.time_msec(), + "consent_ts": self.clock.time_msec(), }, ) self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) @@ -2986,7 +2985,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): txn, table="threepid_validation_session", keyvalues={"session_id": session_id}, - updatevalues={"validated_at": self._clock.time_msec()}, + updatevalues={"validated_at": self.clock.time_msec()}, ) return next_link @@ -3064,7 +3063,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): # We keep the expired tokens for an extra 5 minutes so we can measure how many # times a token is being used after its expiry - now = self._clock.time_msec() + now = self.clock.time_msec() await self.db_pool.runInteraction( "delete_expired_login_tokens", _delete_expired_login_tokens_txn, diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 9db2e14a06..65caf4b1ea 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1002,7 +1002,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ with Measure( - self._clock, + self.clock, name="get_joined_user_ids_from_state", server_name=self.server_name, ): diff --git a/synapse/storage/databases/main/session.py b/synapse/storage/databases/main/session.py index 8a5fa8386c..1154bb2d59 100644 --- a/synapse/storage/databases/main/session.py +++ b/synapse/storage/databases/main/session.py @@ -55,7 +55,7 @@ class SessionStore(SQLBaseStore): # Create a background job for culling expired sessions. if hs.config.worker.run_background_tasks: - self._clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000) + self.clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000) async def create_session( self, session_type: str, value: JsonDict, expiry_ms: int @@ -133,7 +133,7 @@ class SessionStore(SQLBaseStore): _get_session, session_type, session_id, - self._clock.time_msec(), + self.clock.time_msec(), ) @wrap_as_background_process("delete_expired_sessions") @@ -147,5 +147,5 @@ class SessionStore(SQLBaseStore): await self.db_pool.runInteraction( "delete_expired_sessions", _delete_expired_sessions_txn, - self._clock.time_msec(), + self.clock.time_msec(), ) diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index f7af3e88d3..c0c5087b13 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -201,7 +201,7 @@ class SlidingSyncStore(SQLBaseStore): "user_id": user_id, "effective_device_id": device_id, "conn_id": conn_id, - "created_ts": self._clock.time_msec(), + "created_ts": self.clock.time_msec(), }, returning=("connection_key",), ) @@ -212,7 +212,7 @@ class SlidingSyncStore(SQLBaseStore): table="sliding_sync_connection_positions", values={ "connection_key": connection_key, - "created_ts": self._clock.time_msec(), + "created_ts": self.clock.time_msec(), }, returning=("connection_position",), ) diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index bfc324b80d..41c9483927 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -81,11 +81,11 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): super().__init__(database, db_conn, hs) if hs.config.worker.run_background_tasks: - 
self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000) + self.clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000) @wrap_as_background_process("cleanup_transactions") async def _cleanup_transactions(self) -> None: - now = self._clock.time_msec() + now = self.clock.time_msec() day_ago = now - 24 * 60 * 60 * 1000 def _cleanup_transactions_txn(txn: LoggingTransaction) -> None: @@ -160,7 +160,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): insertion_values={ "response_code": code, "response_json": db_binary_type(encode_canonical_json(response_dict)), - "ts": self._clock.time_msec(), + "ts": self.clock.time_msec(), }, desc="set_received_txn_response", ) diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 9b3b7e086f..b62f3e6f5b 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -125,6 +125,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): self._state_group_cache: DictionaryCache[int, StateKey, str] = DictionaryCache( name="*stateGroupCache*", + clock=hs.get_clock(), server_name=self.server_name, # TODO: this hasn't been tuned yet max_entries=50000, @@ -132,6 +133,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): self._state_group_members_cache: DictionaryCache[int, StateKey, str] = ( DictionaryCache( name="*stateGroupMembersCache*", + clock=hs.get_clock(), server_name=self.server_name, max_entries=500000, ) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 1f90988525..2a167f209c 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -55,7 +55,6 @@ from typing_extensions import Concatenate, ParamSpec, Unpack from twisted.internet import defer from twisted.internet.defer import CancelledError -from twisted.internet.interfaces import IReactorTime from twisted.python.failure import Failure from synapse.logging.context import ( @@ -549,10 +548,9 @@ class Linearizer: def __init__( self, - *, name: str, - max_count: int = 1, clock: Clock, + max_count: int = 1, ): """ Args: @@ -772,7 +770,11 @@ class ReadWriteLock: def timeout_deferred( - deferred: "defer.Deferred[_T]", timeout: float, reactor: IReactorTime + *, + deferred: "defer.Deferred[_T]", + timeout: float, + cancel_on_shutdown: bool = True, + clock: Clock, ) -> "defer.Deferred[_T]": """The in built twisted `Deferred.addTimeout` fails to time out deferreds that have a canceller that throws exceptions. This method creates a new @@ -790,7 +792,13 @@ def timeout_deferred( Args: deferred: The Deferred to potentially timeout. timeout: Timeout in seconds - reactor: The twisted reactor to use + cancel_on_shutdown: Whether this call should be tracked for cleanup during + shutdown. In general, all calls should be tracked. There may be a use case + not to track calls with a `timeout` of 0 (or similarly short) since tracking + them may result in rapid insertions and removals of tracked calls + unnecessarily. But unless a specific instance of tracking proves to be an + issue, we can just track all delayed calls. + clock: The `Clock` instance used to track delayed calls. Returns: @@ -814,7 +822,10 @@ def timeout_deferred( if not new_d.called: new_d.errback(defer.TimeoutError("Timed out after %gs" % (timeout,))) - delayed_call = reactor.callLater(timeout, time_it_out) + # We don't track these calls since they are short. 
+ delayed_call = clock.call_later( + timeout, time_it_out, call_later_cancel_on_shutdown=cancel_on_shutdown + ) def convert_cancelled(value: Failure) -> Failure: # if the original deferred was cancelled, and our timeout has fired, then @@ -956,9 +967,9 @@ class AwakenableSleeper: currently sleeping. """ - def __init__(self, reactor: IReactorTime) -> None: + def __init__(self, clock: Clock) -> None: self._streams: Dict[str, Set[defer.Deferred[None]]] = {} - self._reactor = reactor + self._clock = clock def wake(self, name: str) -> None: """Wake everything related to `name` that is currently sleeping.""" @@ -977,7 +988,11 @@ class AwakenableSleeper: # Create a deferred that gets called in N seconds sleep_deferred: "defer.Deferred[None]" = defer.Deferred() - call = self._reactor.callLater(delay_ms / 1000, sleep_deferred.callback, None) + call = self._clock.call_later( + delay_ms / 1000, + sleep_deferred.callback, + None, + ) # Create a deferred that will get called if `wake` is called with # the same `name`. @@ -1011,8 +1026,8 @@ class AwakenableSleeper: class DeferredEvent: """Like threading.Event but for async code""" - def __init__(self, reactor: IReactorTime) -> None: - self._reactor = reactor + def __init__(self, clock: Clock) -> None: + self._clock = clock self._deferred: "defer.Deferred[None]" = defer.Deferred() def set(self) -> None: @@ -1032,7 +1047,11 @@ class DeferredEvent: # Create a deferred that gets called in N seconds sleep_deferred: "defer.Deferred[None]" = defer.Deferred() - call = self._reactor.callLater(timeout_seconds, sleep_deferred.callback, None) + call = self._clock.call_later( + timeout_seconds, + sleep_deferred.callback, + None, + ) try: await make_deferred_yieldable( diff --git a/synapse/util/batching_queue.py b/synapse/util/batching_queue.py index 4c4037412a..f77301afd8 100644 --- a/synapse/util/batching_queue.py +++ b/synapse/util/batching_queue.py @@ -21,6 +21,7 @@ import logging from typing import ( + TYPE_CHECKING, Awaitable, Callable, Dict, @@ -38,9 +39,11 @@ from twisted.internet import defer from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.metrics import SERVER_NAME_LABEL -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.clock import Clock +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) @@ -97,12 +100,13 @@ class BatchingQueue(Generic[V, R]): self, *, name: str, - server_name: str, + hs: "HomeServer", clock: Clock, process_batch_callback: Callable[[List[V]], Awaitable[R]], ): self._name = name - self.server_name = server_name + self.hs = hs + self.server_name = hs.hostname self._clock = clock # The set of keys currently being processed. @@ -127,6 +131,14 @@ class BatchingQueue(Generic[V, R]): name=self._name, **{SERVER_NAME_LABEL: self.server_name} ) + def shutdown(self) -> None: + """ + Prepares the object for garbage collection by removing any handed out + references. + """ + number_queued.remove(self._name, self.server_name) + number_of_keys.remove(self._name, self.server_name) + async def add_to_queue(self, value: V, key: Hashable = ()) -> R: """Adds the value to the queue with the given key, returning the result of the processing function for the batch that included the given value. @@ -145,9 +157,7 @@ class BatchingQueue(Generic[V, R]): # If we're not currently processing the key fire off a background # process to start processing. 
if key not in self._processing_keys: - run_as_background_process( - self._name, self.server_name, self._process_queue, key - ) + self.hs.run_as_background_process(self._name, self._process_queue, key) with self._number_in_flight_metric.track_inprogress(): return await make_deferred_yieldable(d) diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 710a29e3f0..08ff842af0 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -244,7 +244,7 @@ def register_cache( collect_callback=collect_callback, ) metric_name = "cache_%s_%s_%s" % (cache_type, cache_name, server_name) - CACHE_METRIC_REGISTRY.register_hook(metric_name, metric.collect) + CACHE_METRIC_REGISTRY.register_hook(server_name, metric_name, metric.collect) return metric diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 92d446ce2a..016acbac71 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -47,6 +47,7 @@ from synapse.metrics import SERVER_NAME_LABEL from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry +from synapse.util.clock import Clock cache_pending_metric = Gauge( "synapse_util_caches_cache_pending", @@ -82,6 +83,7 @@ class DeferredCache(Generic[KT, VT]): self, *, name: str, + clock: Clock, server_name: str, max_entries: int = 1000, tree: bool = False, @@ -103,6 +105,7 @@ class DeferredCache(Generic[KT, VT]): prune_unread_entries: If True, cache entries that haven't been read recently will be evicted from the cache in the background. Set to False to opt-out of this behaviour. + clock: The homeserver `Clock` instance """ cache_type = TreeCache if tree else dict @@ -120,6 +123,7 @@ class DeferredCache(Generic[KT, VT]): # a Deferred. self.cache: LruCache[KT, VT] = LruCache( max_size=max_entries, + clock=clock, server_name=server_name, cache_name=name, cache_type=cache_type, diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 47b8f4ddc8..6e3c8eada9 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -53,6 +53,7 @@ from synapse.util import unwrapFirstError from synapse.util.async_helpers import delay_cancellation from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.lrucache import LruCache +from synapse.util.clock import Clock logger = logging.getLogger(__name__) @@ -154,13 +155,20 @@ class _CacheDescriptorBase: ) -class HasServerName(Protocol): +class HasServerNameAndClock(Protocol): server_name: str """ The homeserver name that this cache is associated with (used to label the metric) (`hs.hostname`). """ + clock: Clock + """ + The homeserver clock instance used to track delayed and looping calls. Important to + be able to fully cleanup the homeserver instance on server shutdown. + (`hs.get_clock()`). + """ + class DeferredCacheDescriptor(_CacheDescriptorBase): """A method decorator that applies a memoizing cache around the function. 
@@ -239,7 +247,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): self.prune_unread_entries = prune_unread_entries def __get__( - self, obj: Optional[HasServerName], owner: Optional[Type] + self, obj: Optional[HasServerNameAndClock], owner: Optional[Type] ) -> Callable[..., "defer.Deferred[Any]"]: # We need access to instance-level `obj.server_name` attribute assert obj is not None, ( @@ -249,9 +257,13 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): assert obj.server_name is not None, ( "The `server_name` attribute must be set on the object where `@cached` decorator is used." ) + assert obj.clock is not None, ( + "The `clock` attribute must be set on the object where `@cached` decorator is used." + ) cache: DeferredCache[CacheKey, Any] = DeferredCache( name=self.name, + clock=obj.clock, server_name=obj.server_name, max_entries=self.max_entries, tree=self.tree, diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 168ddc51cd..eb5493d322 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -37,6 +37,7 @@ import attr from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache +from synapse.util.clock import Clock logger = logging.getLogger(__name__) @@ -127,10 +128,13 @@ class DictionaryCache(Generic[KT, DKT, DV]): for the '2' dict key. """ - def __init__(self, *, name: str, server_name: str, max_entries: int = 1000): + def __init__( + self, *, name: str, clock: Clock, server_name: str, max_entries: int = 1000 + ): """ Args: name + clock: The homeserver `Clock` instance server_name: The homeserver name that this cache is associated with (used to label the metric) (`hs.hostname`). max_entries @@ -160,6 +164,7 @@ class DictionaryCache(Generic[KT, DKT, DV]): Union[_PerKeyValue, Dict[DKT, DV]], ] = LruCache( max_size=max_entries, + clock=clock, server_name=server_name, cache_name=name, cache_type=TreeCache, diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 305af5051c..29ce6c0a77 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -21,17 +21,29 @@ import logging from collections import OrderedDict -from typing import Any, Generic, Iterable, Literal, Optional, TypeVar, Union, overload +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterable, + Literal, + Optional, + TypeVar, + Union, + overload, +) import attr from twisted.internet import defer from synapse.config import cache as cache_config -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import EvictionReason, register_cache from synapse.util.clock import Clock +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) @@ -49,6 +61,7 @@ class ExpiringCache(Generic[KT, VT]): *, cache_name: str, server_name: str, + hs: "HomeServer", clock: Clock, max_len: int = 0, expiry_ms: int = 0, @@ -99,9 +112,7 @@ class ExpiringCache(Generic[KT, VT]): return def f() -> "defer.Deferred[None]": - return run_as_background_process( - "prune_cache", server_name, self._prune_cache - ) + return hs.run_as_background_process("prune_cache", self._prune_cache) self._clock.looping_call(f, self._expiry_ms / 2) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 2d4cde19a5..324acb728a 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -45,14 +45,10 @@ from typing 
import ( overload, ) -from twisted.internet import defer, reactor +from twisted.internet import defer from synapse.config import cache as cache_config -from synapse.metrics.background_process_metrics import ( - run_as_background_process, -) from synapse.metrics.jemalloc import get_jemalloc_stats -from synapse.types import ISynapseThreadlessReactor from synapse.util import caches from synapse.util.caches import CacheMetric, EvictionReason, register_cache from synapse.util.caches.treecache import ( @@ -123,6 +119,7 @@ GLOBAL_ROOT = ListNode["_Node"].create_root_node() def _expire_old_entries( server_name: str, + hs: "HomeServer", clock: Clock, expiry_seconds: float, autotune_config: Optional[dict], @@ -228,9 +225,8 @@ def _expire_old_entries( logger.info("Dropped %d items from caches", i) - return run_as_background_process( + return hs.run_as_background_process( "LruCache._expire_old_entries", - server_name, _internal_expire_old_entries, clock, expiry_seconds, @@ -261,6 +257,7 @@ def setup_expire_lru_cache_entries(hs: "HomeServer") -> None: _expire_old_entries, 30 * 1000, server_name, + hs, clock, expiry_time, hs.config.caches.cache_autotuning, @@ -404,13 +401,13 @@ class LruCache(Generic[KT, VT]): self, *, max_size: int, + clock: Clock, server_name: str, cache_name: str, cache_type: Type[Union[dict, TreeCache]] = dict, size_callback: Optional[Callable[[VT], int]] = None, metrics_collection_callback: Optional[Callable[[], None]] = None, apply_cache_factor_from_config: bool = True, - clock: Optional[Clock] = None, prune_unread_entries: bool = True, extra_index_cb: Optional[Callable[[KT, VT], KT]] = None, ): ... @@ -420,13 +417,13 @@ class LruCache(Generic[KT, VT]): self, *, max_size: int, + clock: Clock, server_name: str, cache_name: Literal[None] = None, cache_type: Type[Union[dict, TreeCache]] = dict, size_callback: Optional[Callable[[VT], int]] = None, metrics_collection_callback: Optional[Callable[[], None]] = None, apply_cache_factor_from_config: bool = True, - clock: Optional[Clock] = None, prune_unread_entries: bool = True, extra_index_cb: Optional[Callable[[KT, VT], KT]] = None, ): ... @@ -435,13 +432,13 @@ class LruCache(Generic[KT, VT]): self, *, max_size: int, + clock: Clock, server_name: str, cache_name: Optional[str] = None, cache_type: Type[Union[dict, TreeCache]] = dict, size_callback: Optional[Callable[[VT], int]] = None, metrics_collection_callback: Optional[Callable[[], None]] = None, apply_cache_factor_from_config: bool = True, - clock: Optional[Clock] = None, prune_unread_entries: bool = True, extra_index_cb: Optional[Callable[[KT, VT], KT]] = None, ): @@ -492,15 +489,6 @@ class LruCache(Generic[KT, VT]): Note: The new key does not have to be unique. """ - # Default `clock` to something sensible. Note that we rename it to - # `real_clock` so that mypy doesn't think its still `Optional`. - if clock is None: - real_clock = Clock( - cast(ISynapseThreadlessReactor, reactor), server_name=server_name - ) - else: - real_clock = clock - cache: Union[Dict[KT, _Node[KT, VT]], TreeCache] = cache_type() self.cache = cache # Used for introspection. 
self.apply_cache_factor_from_config = apply_cache_factor_from_config @@ -592,7 +580,7 @@ class LruCache(Generic[KT, VT]): key, value, weak_ref_to_self, - real_clock, + clock, callbacks, prune_unread_entries, ) @@ -610,7 +598,7 @@ class LruCache(Generic[KT, VT]): metrics.inc_memory_usage(node.memory) def move_node_to_front(node: _Node[KT, VT]) -> None: - node.move_to_front(real_clock, list_root) + node.move_to_front(clock, list_root) def delete_node(node: _Node[KT, VT]) -> int: node.drop_from_lists() diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 79e34262df..3d39357236 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -198,7 +198,17 @@ class ResponseCache(Generic[KV]): # the should_cache bit, we leave it in the cache for now and schedule # its removal later. if self.timeout_sec and context.should_cache: - self.clock.call_later(self.timeout_sec, self._entry_timeout, key) + self.clock.call_later( + self.timeout_sec, + self._entry_timeout, + key, + # We don't need to track these calls since they don't hold any strong + # references which would keep the `HomeServer` in memory after shutdown. + # We don't want to track these because they can get cancelled really + # quickly and thrash the tracking mechanism, ie. during repeated calls + # to /sync. + call_later_cancel_on_shutdown=False, + ) else: # otherwise, remove the result immediately. self.unset(key) diff --git a/synapse/util/clock.py b/synapse/util/clock.py index e85af17005..5e65cf32a4 100644 --- a/synapse/util/clock.py +++ b/synapse/util/clock.py @@ -17,10 +17,12 @@ from typing import ( Any, Callable, + Dict, + List, ) -import attr from typing_extensions import ParamSpec +from zope.interface import implementer from twisted.internet import defer, task from twisted.internet.defer import Deferred @@ -34,24 +36,54 @@ from synapse.util import log_failure P = ParamSpec("P") -@attr.s(slots=True) class Clock: """ A Clock wraps a Twisted reactor and provides utilities on top of it. + This clock should be used in place of calls to the base reactor wherever `LoopingCall` + or `DelayedCall` are made (such as when calling `reactor.callLater`. This is to + ensure the calls made by this `HomeServer` instance are tracked and can be cleaned + up during `HomeServer.shutdown()`. + + We enforce usage of this clock instead of using the reactor directly via lints in + `scripts-dev/mypy_synapse_plugin.py`. + + Args: reactor: The Twisted reactor to use. """ - _reactor: ISynapseThreadlessReactor = attr.ib() - _server_name: str = attr.ib() + _reactor: ISynapseThreadlessReactor + + def __init__(self, reactor: ISynapseThreadlessReactor, server_name: str) -> None: + self._reactor = reactor + self._server_name = server_name + + self._delayed_call_id: int = 0 + """Unique ID used to track delayed calls""" + + self._looping_calls: List[LoopingCall] = [] + """List of active looping calls""" + + self._call_id_to_delayed_call: Dict[int, IDelayedCall] = {} + """Mapping from unique call ID to delayed call""" + + self._is_shutdown = False + """Whether shutdown has been requested by the HomeServer""" + + def shutdown(self) -> None: + self._is_shutdown = True + self.cancel_all_looping_calls() + self.cancel_all_delayed_calls() async def sleep(self, seconds: float) -> None: d: defer.Deferred[float] = defer.Deferred() # Start task in the `sentinel` logcontext, to avoid leaking the current context # into the reactor once it finishes. 
with context.PreserveLoggingContext(): - self._reactor.callLater(seconds, d.callback, seconds) + # We can ignore the lint here since this class is the one location callLater should + # be called. + self._reactor.callLater(seconds, d.callback, seconds) # type: ignore[call-later-not-tracked] await d def time(self) -> float: @@ -124,6 +156,9 @@ class Clock: ) -> LoopingCall: """Common functionality for `looping_call` and `looping_call_now`""" + if self._is_shutdown: + raise Exception("Cannot start looping call. Clock has been shutdown") + def wrapped_f(*args: P.args, **kwargs: P.kwargs) -> Deferred: assert context.current_context() is context.SENTINEL_CONTEXT, ( "Expected `looping_call` callback from the reactor to start with the sentinel logcontext " @@ -155,7 +190,9 @@ class Clock: # logcontext to the reactor return context.run_in_background(f, *args, **kwargs) - call = task.LoopingCall(wrapped_f, *args, **kwargs) + # We can ignore the lint here since this is the one location LoopingCall's + # should be created. + call = task.LoopingCall(wrapped_f, *args, **kwargs) # type: ignore[prefer-synapse-clock-looping-call] call.clock = self._reactor # If `now=true`, the function will be called here immediately so we need to be # in the sentinel context now. @@ -165,10 +202,32 @@ class Clock: with context.PreserveLoggingContext(): d = call.start(msec / 1000.0, now=now) d.addErrback(log_failure, "Looping call died", consumeErrors=False) + self._looping_calls.append(call) return call + def cancel_all_looping_calls(self, consumeErrors: bool = True) -> None: + """ + Stop all running looping calls. + + Args: + consumeErrors: Whether to re-raise errors encountered when cancelling the + scheduled call. + """ + for call in self._looping_calls: + try: + call.stop() + except Exception: + if not consumeErrors: + raise + self._looping_calls.clear() + def call_later( - self, delay: float, callback: Callable, *args: Any, **kwargs: Any + self, + delay: float, + callback: Callable, + *args: Any, + call_later_cancel_on_shutdown: bool = True, + **kwargs: Any, ) -> IDelayedCall: """Call something later @@ -180,39 +239,78 @@ class Clock: delay: How long to wait in seconds. callback: Function to call *args: Postional arguments to pass to function. + call_later_cancel_on_shutdown: Whether this call should be tracked for cleanup during + shutdown. In general, all calls should be tracked. There may be a use case + not to track calls with a `timeout` of 0 (or similarly short) since tracking + them may result in rapid insertions and removals of tracked calls + unnecessarily. But unless a specific instance of tracking proves to be an + issue, we can just track all delayed calls. **kwargs: Key arguments to pass to function. """ - def wrapped_callback(*args: Any, **kwargs: Any) -> None: - assert context.current_context() is context.SENTINEL_CONTEXT, ( - "Expected `call_later` callback from the reactor to start with the sentinel logcontext " - f"but saw {context.current_context()}. In other words, another task shouldn't have " - "leaked their logcontext to us." - ) + if self._is_shutdown: + raise Exception("Cannot start delayed call. Clock has been shutdown") - # Because this is a callback from the reactor, we will be using the - # `sentinel` log context at this point. We want the function to log with - # some logcontext as we want to know which server the logs came from. 
- # - # We use `PreserveLoggingContext` to prevent our new `call_later` - # logcontext from finishing as soon as we exit this function, in case `f` - # returns an awaitable/deferred which would continue running and may try to - # restore the `loop_call` context when it's done (because it's trying to - # adhere to the Synapse logcontext rules.) - # - # This also ensures that we return to the `sentinel` context when we exit - # this function and yield control back to the reactor to avoid leaking the - # current logcontext to the reactor (which would then get picked up and - # associated with the next thing the reactor does) - with context.PreserveLoggingContext( - context.LoggingContext(name="call_later", server_name=self._server_name) - ): - # We use `run_in_background` to reset the logcontext after `f` (or the - # awaitable returned by `f`) completes to avoid leaking the current - # logcontext to the reactor - context.run_in_background(callback, *args, **kwargs) + def create_wrapped_callback( + track_for_shutdown_cancellation: bool, + ) -> Callable[P, None]: + def wrapped_callback(*args: Any, **kwargs: Any) -> None: + assert context.current_context() is context.SENTINEL_CONTEXT, ( + "Expected `call_later` callback from the reactor to start with the sentinel logcontext " + f"but saw {context.current_context()}. In other words, another task shouldn't have " + "leaked their logcontext to us." + ) - return self._reactor.callLater(delay, wrapped_callback, *args, **kwargs) + # Because this is a callback from the reactor, we will be using the + # `sentinel` log context at this point. We want the function to log with + # some logcontext as we want to know which server the logs came from. + # + # We use `PreserveLoggingContext` to prevent our new `call_later` + # logcontext from finishing as soon as we exit this function, in case `f` + # returns an awaitable/deferred which would continue running and may try to + # restore the `loop_call` context when it's done (because it's trying to + # adhere to the Synapse logcontext rules.) + # + # This also ensures that we return to the `sentinel` context when we exit + # this function and yield control back to the reactor to avoid leaking the + # current logcontext to the reactor (which would then get picked up and + # associated with the next thing the reactor does) + try: + with context.PreserveLoggingContext( + context.LoggingContext( + name="call_later", server_name=self._server_name + ) + ): + # We use `run_in_background` to reset the logcontext after `f` (or the + # awaitable returned by `f`) completes to avoid leaking the current + # logcontext to the reactor + context.run_in_background(callback, *args, **kwargs) + finally: + if track_for_shutdown_cancellation: + # We still want to remove the call from the tracking map. Even if + # the callback raises an exception. + self._call_id_to_delayed_call.pop(call_id) + + return wrapped_callback + + if call_later_cancel_on_shutdown: + call_id = self._delayed_call_id + self._delayed_call_id = self._delayed_call_id + 1 + + # We can ignore the lint here since this class is the one location callLater + # should be called. + call = self._reactor.callLater( + delay, create_wrapped_callback(True), *args, **kwargs + ) # type: ignore[call-later-not-tracked] + call = DelayedCallWrapper(call, call_id, self) + self._call_id_to_delayed_call[call_id] = call + return call + else: + # We can ignore the lint here since this class is the one location callLater should + # be called. 
+ return self._reactor.callLater( + delay, create_wrapped_callback(False), *args, **kwargs + ) # type: ignore[call-later-not-tracked] def cancel_call_later(self, timer: IDelayedCall, ignore_errs: bool = False) -> None: try: @@ -221,6 +319,24 @@ class Clock: if not ignore_errs: raise + def cancel_all_delayed_calls(self, ignore_errs: bool = True) -> None: + """ + Stop all scheduled calls that were marked with `cancel_on_shutdown` when they were created. + + Args: + ignore_errs: Whether to re-raise errors encountered when cancelling the + scheduled call. + """ + # We make a copy here since calling `cancel()` on a delayed_call + # will result in the call removing itself from the map mid-iteration. + for call in list(self._call_id_to_delayed_call.values()): + try: + call.cancel() + except Exception: + if not ignore_errs: + raise + self._call_id_to_delayed_call.clear() + def call_when_running( self, callback: Callable[P, object], @@ -285,7 +401,7 @@ class Clock: callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs, - ) -> None: + ) -> Any: """ Add a function to be called when a system event occurs. @@ -299,6 +415,9 @@ class Clock: callback: Function to call *args: Postional arguments to pass to function. **kwargs: Key arguments to pass to function. + + Returns: + an ID that can be used to remove this call with `reactor.removeSystemEventTrigger`. """ def wrapped_callback(*args: Any, **kwargs: Any) -> None: @@ -334,6 +453,50 @@ class Clock: # We can ignore the lint here since this class is the one location # `addSystemEventTrigger` should be called. - self._reactor.addSystemEventTrigger( + return self._reactor.addSystemEventTrigger( phase, event_type, wrapped_callback, *args, **kwargs ) # type: ignore[prefer-synapse-clock-add-system-event-trigger] + + +@implementer(IDelayedCall) +class DelayedCallWrapper: + """Wraps an `IDelayedCall` so that we can intercept the call to `cancel()` and + properly cleanup the delayed call from the tracking map of the `Clock`. + + args: + delayed_call: The actual `IDelayedCall` + call_id: Unique identifier for this delayed call + clock: The clock instance tracking this call + """ + + def __init__(self, delayed_call: IDelayedCall, call_id: int, clock: Clock): + self.delayed_call = delayed_call + self.call_id = call_id + self.clock = clock + + def cancel(self) -> None: + """Remove the call from the tracking map and propagate the call to the + underlying delayed_call. + """ + self.delayed_call.cancel() + try: + self.clock._call_id_to_delayed_call.pop(self.call_id) + except KeyError: + # If the delayed call isn't being tracked anymore we can just move on. 
+ pass + + def getTime(self) -> float: + """Propagate the call to the underlying delayed_call.""" + return self.delayed_call.getTime() + + def delay(self, secondsLater: float) -> None: + """Propagate the call to the underlying delayed_call.""" + self.delayed_call.delay(secondsLater) + + def reset(self, secondsFromNow: float) -> None: + """Propagate the call to the underlying delayed_call.""" + self.delayed_call.reset(secondsFromNow) + + def active(self) -> bool: + """Propagate the call to the underlying delayed_call.""" + return self.delayed_call.active() diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index f48ae3373c..dec6536e4e 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -20,6 +20,7 @@ # import logging from typing import ( + TYPE_CHECKING, Any, Awaitable, Callable, @@ -36,10 +37,13 @@ from typing_extensions import ParamSpec from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import UserID from synapse.util.async_helpers import maybe_awaitable +if TYPE_CHECKING: + from synapse.server import HomeServer + + logger = logging.getLogger(__name__) @@ -58,13 +62,13 @@ class Distributor: model will do for today. """ - def __init__(self, server_name: str) -> None: + def __init__(self, hs: "HomeServer") -> None: """ Args: server_name: The homeserver name of the server (used to label metrics) (this should be `hs.hostname`). """ - self.server_name = server_name + self.hs = hs self.signals: Dict[str, Signal] = {} self.pre_registration: Dict[str, List[Callable]] = {} @@ -97,8 +101,8 @@ class Distributor: if name not in self.signals: raise KeyError("%r does not have a signal named %s" % (self, name)) - run_as_background_process( - name, self.server_name, self.signals[name].fire, *args, **kwargs + self.hs.run_as_background_process( + name, self.signals[name].fire, *args, **kwargs ) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index c4f3c8b965..7b6ad0e459 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -293,21 +293,46 @@ class DynamicCollectorRegistry(CollectorRegistry): def __init__(self) -> None: super().__init__() - self._pre_update_hooks: Dict[str, Callable[[], None]] = {} + self._server_name_to_pre_update_hooks: Dict[ + str, Dict[str, Callable[[], None]] + ] = {} + """ + Mapping of server name to a mapping of metric name to metric pre-update + hook + """ def collect(self) -> Generator[Metric, None, None]: """ Collects metrics, calling pre-update hooks first. """ - for pre_update_hook in self._pre_update_hooks.values(): - pre_update_hook() + for pre_update_hooks in self._server_name_to_pre_update_hooks.values(): + for pre_update_hook in pre_update_hooks.values(): + pre_update_hook() yield from super().collect() - def register_hook(self, metric_name: str, hook: Callable[[], None]) -> None: + def register_hook( + self, server_name: str, metric_name: str, hook: Callable[[], None] + ) -> None: """ Registers a hook that is called before metric collection. """ - self._pre_update_hooks[metric_name] = hook + server_hooks = self._server_name_to_pre_update_hooks.setdefault(server_name, {}) + if server_hooks.get(metric_name) is not None: + # TODO: This should be an `assert` since registering the same metric name + # multiple times will clobber the old metric. 
+ # We currently rely on this behaviour as we instantiate multiple + # `SyncRestServlet`, one per listener, and in the `__init__` we setup a new + # LruCache. + # Once the above behaviour is changed, this should be changed to an `assert`. + logger.error( + "Metric named %s already registered for server %s", + metric_name, + server_name, + ) + server_hooks[metric_name] = hook + + def unregister_hooks_for_homeserver(self, server_name: str) -> None: + self._server_name_to_pre_update_hooks.pop(server_name, None) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 695eb462bf..756677fe6c 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -419,4 +419,7 @@ class _PerHostRatelimiter: except KeyError: pass - self.clock.call_later(0.0, start_next_request) + self.clock.call_later( + 0.0, + start_next_request, + ) diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 42a0cc7aa8..96fe2bd566 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -24,7 +24,6 @@ from types import TracebackType from typing import TYPE_CHECKING, Any, Optional, Type from synapse.api.errors import CodeMessageException -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage import DataStore from synapse.types import StrCollection from synapse.util.clock import Clock @@ -32,6 +31,7 @@ from synapse.util.clock import Clock if TYPE_CHECKING: from synapse.notifier import Notifier from synapse.replication.tcp.handler import ReplicationCommandHandler + from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -62,6 +62,7 @@ async def get_retry_limiter( *, destination: str, our_server_name: str, + hs: "HomeServer", clock: Clock, store: DataStore, ignore_backoff: bool = False, @@ -124,6 +125,7 @@ async def get_retry_limiter( return RetryDestinationLimiter( destination=destination, our_server_name=our_server_name, + hs=hs, clock=clock, store=store, failure_ts=failure_ts, @@ -163,6 +165,7 @@ class RetryDestinationLimiter: *, destination: str, our_server_name: str, + hs: "HomeServer", clock: Clock, store: DataStore, failure_ts: Optional[int], @@ -181,6 +184,7 @@ class RetryDestinationLimiter: Args: destination our_server_name: Our homeserver name (used to label metrics) (`hs.hostname`) + hs: The homeserver instance clock store failure_ts: when this destination started failing (in ms since @@ -197,6 +201,7 @@ class RetryDestinationLimiter: error code. """ self.our_server_name = our_server_name + self.hs = hs self.clock = clock self.store = store self.destination = destination @@ -331,6 +336,4 @@ class RetryDestinationLimiter: logger.exception("Failed to store destination_retry_timings") # we deliberately do this in the background. 
- run_as_background_process( - "store_retry_timings", self.our_server_name, store_retry_timings - ) + self.hs.run_as_background_process("store_retry_timings", store_retry_timings) diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 0539989320..7443d4e097 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -32,7 +32,6 @@ from synapse.logging.context import ( ) from synapse.metrics import SERVER_NAME_LABEL, LaterGauge from synapse.metrics.background_process_metrics import ( - run_as_background_process, wrap_as_background_process, ) from synapse.types import JsonMapping, ScheduledTask, TaskStatus @@ -107,10 +106,8 @@ class TaskScheduler: OCCASIONAL_REPORT_INTERVAL_MS = 5 * 60 * 1000 # 5 minutes def __init__(self, hs: "HomeServer"): - self._hs = hs - self.server_name = ( - hs.hostname - ) # nb must be called this for @wrap_as_background_process + self.hs = hs # nb must be called this for @wrap_as_background_process + self.server_name = hs.hostname self._store = hs.get_datastores().main self._clock = hs.get_clock() self._running_tasks: Set[str] = set() @@ -215,7 +212,7 @@ class TaskScheduler: if self._run_background_tasks: self._launch_scheduled_tasks() else: - self._hs.get_replication_command_handler().send_new_active_task(task.id) + self.hs.get_replication_command_handler().send_new_active_task(task.id) return task.id @@ -362,7 +359,7 @@ class TaskScheduler: finally: self._launching_new_tasks = False - run_as_background_process("launch_scheduled_tasks", self.server_name, inner) + self.hs.run_as_background_process("launch_scheduled_tasks", inner) @wrap_as_background_process("clean_scheduled_tasks") async def _clean_scheduled_tasks(self) -> None: @@ -473,7 +470,10 @@ class TaskScheduler: occasional_status_call.stop() # Try launch a new task since we've finished with this one. - self._clock.call_later(0.1, self._launch_scheduled_tasks) + self._clock.call_later( + 0.1, + self._launch_scheduled_tasks, + ) if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: return @@ -493,4 +493,4 @@ class TaskScheduler: self._running_tasks.add(task.id) await self.update_task(task.id, status=TaskStatus.ACTIVE) - run_as_background_process(f"task-{task.action}", self.server_name, wrapper) + self.hs.run_as_background_process(f"task-{task.action}", wrapper) diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index c3f3cceaa6..cf9c836e06 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -86,7 +86,9 @@ async def main(reactor: ISynapseReactor, loops: int) -> float: hs_config = Config() # To be able to sleep. - clock = Clock(reactor, server_name=hs_config.server.server_name) + # Ignore linter error here since we are running outside of the context of a + # Synapse `HomeServer`. + clock = Clock(reactor, server_name=hs_config.server.server_name) # type: ignore[multiple-internal-clocks] errors = StringIO() publisher = LogPublisher() diff --git a/synmark/suites/lrucache.py b/synmark/suites/lrucache.py index 6314035bd7..830a3daa8f 100644 --- a/synmark/suites/lrucache.py +++ b/synmark/suites/lrucache.py @@ -23,14 +23,19 @@ from pyperf import perf_counter from synapse.types import ISynapseReactor from synapse.util.caches.lrucache import LruCache +from synapse.util.clock import Clock async def main(reactor: ISynapseReactor, loops: int) -> float: """ Benchmark `loops` number of insertions into LruCache without eviction. 
""" + # Ignore linter error here since we are running outside of the context of a + # Synapse `HomeServer`. cache: LruCache[int, bool] = LruCache( - max_size=loops, server_name="synmark_benchmark" + max_size=loops, + clock=Clock(reactor, server_name="synmark_benchmark"), # type: ignore[multiple-internal-clocks] + server_name="synmark_benchmark", ) start = perf_counter() diff --git a/synmark/suites/lrucache_evict.py b/synmark/suites/lrucache_evict.py index b8cd589697..c67e0c9001 100644 --- a/synmark/suites/lrucache_evict.py +++ b/synmark/suites/lrucache_evict.py @@ -23,6 +23,7 @@ from pyperf import perf_counter from synapse.types import ISynapseReactor from synapse.util.caches.lrucache import LruCache +from synapse.util.clock import Clock async def main(reactor: ISynapseReactor, loops: int) -> float: @@ -30,8 +31,12 @@ async def main(reactor: ISynapseReactor, loops: int) -> float: Benchmark `loops` number of insertions into LruCache where half of them are evicted. """ + # Ignore linter error here since we are running outside of the context of a + # Synapse `HomeServer`. cache: LruCache[int, bool] = LruCache( - max_size=loops // 2, server_name="synmark_benchmark" + max_size=loops // 2, + clock=Clock(reactor, server_name="synmark_benchmark"), # type: ignore[multiple-internal-clocks] + server_name="synmark_benchmark", ) start = perf_counter() diff --git a/tests/app/test_homeserver_shutdown.py b/tests/app/test_homeserver_shutdown.py new file mode 100644 index 0000000000..d8119ba310 --- /dev/null +++ b/tests/app/test_homeserver_shutdown.py @@ -0,0 +1,193 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +# Originally licensed under the Apache License, Version 2.0: +# . +# +# [This file includes modifications made by New Vector Limited] +# +# + +import gc +import weakref + +from synapse.app.homeserver import SynapseHomeServer +from synapse.storage.background_updates import UpdaterStatus + +from tests.server import ( + cleanup_test_reactor_system_event_triggers, + get_clock, + setup_test_homeserver, +) +from tests.unittest import HomeserverTestCase + + +class HomeserverCleanShutdownTestCase(HomeserverTestCase): + def setUp(self) -> None: + pass + + # NOTE: ideally we'd have another test to ensure we properly shutdown with + # real in-flight HTTP requests since those result in additional resources being + # setup that hold strong references to the homeserver. + # Mainly, the HTTP channel created by a real TCP connection from client to server + # is held open between requests and care needs to be taken in Twisted to ensure it is properly + # closed in a timely manner during shutdown. Simulating this behaviour in a unit test + # won't be as good as a proper integration test in complement. 
+ + def test_clean_homeserver_shutdown(self) -> None: + """Ensure the `SynapseHomeServer` can be fully shutdown and garbage collected""" + self.reactor, self.clock = get_clock() + self.hs = setup_test_homeserver( + cleanup_func=self.addCleanup, + reactor=self.reactor, + homeserver_to_use=SynapseHomeServer, + clock=self.clock, + ) + self.wait_for_background_updates() + + hs_ref = weakref.ref(self.hs) + + # Run the reactor so any `callWhenRunning` functions can be cleared out. + self.reactor.run() + # This would normally happen as part of `HomeServer.shutdown` but the `MemoryReactor` + # we use in tests doesn't handle this properly (see doc comment) + cleanup_test_reactor_system_event_triggers(self.reactor) + + # Cleanup the homeserver. + self.get_success(self.hs.shutdown()) + + # Cleanup the internal reference in our test case + del self.hs + + # Force garbage collection. + gc.collect() + + # Ensure the `HomeServer` hs been garbage collected by attempting to use the + # weakref to it. + if hs_ref() is not None: + self.fail("HomeServer reference should not be valid at this point") + + # To help debug this test when it fails, it is useful to leverage the + # `objgraph` module. + # The following code serves as an example of what I have found to be useful + # when tracking down references holding the `SynapseHomeServer` in memory: + # + # all_objects = gc.get_objects() + # for obj in all_objects: + # try: + # # These are a subset of types that are typically involved with + # # holding the `HomeServer` in memory. You may want to inspect + # # other types as well. + # if isinstance(obj, DataStore): + # print(sys.getrefcount(obj), "refs to", obj) + # if not isinstance(obj, weakref.ProxyType): + # db_obj = obj + # if isinstance(obj, SynapseHomeServer): + # print(sys.getrefcount(obj), "refs to", obj) + # if not isinstance(obj, weakref.ProxyType): + # synapse_hs = obj + # if isinstance(obj, SynapseSite): + # print(sys.getrefcount(obj), "refs to", obj) + # if not isinstance(obj, weakref.ProxyType): + # sysite = obj + # if isinstance(obj, DatabasePool): + # print(sys.getrefcount(obj), "refs to", obj) + # if not isinstance(obj, weakref.ProxyType): + # dbpool = obj + # except Exception: + # pass + # + # print(sys.getrefcount(hs_ref()), "refs to", hs_ref()) + # + # # The following values for `max_depth` and `too_many` have been found to + # # render a useful amount of information without taking an overly long time + # # to generate the result. + # objgraph.show_backrefs(synapse_hs, max_depth=10, too_many=10) + + def test_clean_homeserver_shutdown_mid_background_updates(self) -> None: + """Ensure the `SynapseHomeServer` can be fully shutdown and garbage collected + before background updates have completed""" + self.reactor, self.clock = get_clock() + self.hs = setup_test_homeserver( + cleanup_func=self.addCleanup, + reactor=self.reactor, + homeserver_to_use=SynapseHomeServer, + clock=self.clock, + ) + + # Pump the background updates by a single iteration, just to ensure any extra + # resources it uses have been started. + store = weakref.proxy(self.hs.get_datastores().main) + self.get_success(store.db_pool.updates.do_next_background_update(False), by=0.1) + + hs_ref = weakref.ref(self.hs) + + # Run the reactor so any `callWhenRunning` functions can be cleared out. 
+ self.reactor.run() + # This would normally happen as part of `HomeServer.shutdown` but the `MemoryReactor` + # we use in tests doesn't handle this properly (see doc comment) + cleanup_test_reactor_system_event_triggers(self.reactor) + + # Ensure the background updates are not complete. + self.assertNotEqual(store.db_pool.updates.get_status(), UpdaterStatus.COMPLETE) + + # Cleanup the homeserver. + self.get_success(self.hs.shutdown()) + + # Cleanup the internal reference in our test case + del self.hs + + # Force garbage collection. + gc.collect() + + # Ensure the `HomeServer` hs been garbage collected by attempting to use the + # weakref to it. + if hs_ref() is not None: + self.fail("HomeServer reference should not be valid at this point") + + # To help debug this test when it fails, it is useful to leverage the + # `objgraph` module. + # The following code serves as an example of what I have found to be useful + # when tracking down references holding the `SynapseHomeServer` in memory: + # + # all_objects = gc.get_objects() + # for obj in all_objects: + # try: + # # These are a subset of types that are typically involved with + # # holding the `HomeServer` in memory. You may want to inspect + # # other types as well. + # if isinstance(obj, DataStore): + # print(sys.getrefcount(obj), "refs to", obj) + # if not isinstance(obj, weakref.ProxyType): + # db_obj = obj + # if isinstance(obj, SynapseHomeServer): + # print(sys.getrefcount(obj), "refs to", obj) + # if not isinstance(obj, weakref.ProxyType): + # synapse_hs = obj + # if isinstance(obj, SynapseSite): + # print(sys.getrefcount(obj), "refs to", obj) + # if not isinstance(obj, weakref.ProxyType): + # sysite = obj + # if isinstance(obj, DatabasePool): + # print(sys.getrefcount(obj), "refs to", obj) + # if not isinstance(obj, weakref.ProxyType): + # dbpool = obj + # except Exception: + # pass + # + # print(sys.getrefcount(hs_ref()), "refs to", hs_ref()) + # + # # The following values for `max_depth` and `too_many` have been found to + # # render a useful amount of information without taking an overly long time + # # to generate the result. + # objgraph.show_backrefs(synapse_hs, max_depth=10, too_many=10) diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 0385190f34..f4490a1a79 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -167,8 +167,9 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): ) -class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): +class ApplicationServiceSchedulerRecovererTestCase(unittest.HomeserverTestCase): def setUp(self) -> None: + super().setUp() self.reactor, self.clock = get_clock() self.as_api = Mock() self.store = Mock() @@ -176,6 +177,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): self.callback = AsyncMock() self.recoverer = _Recoverer( server_name="test_server", + hs=self.hs, clock=self.clock, as_api=self.as_api, store=self.store, diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py index f56d6044a9..74db2dab08 100644 --- a/tests/config/test_cache.py +++ b/tests/config/test_cache.py @@ -24,6 +24,7 @@ from synapse.config.cache import CacheConfig, add_resizable_cache from synapse.types import JsonDict from synapse.util.caches.lrucache import LruCache +from tests.server import get_clock from tests.unittest import TestCase @@ -32,6 +33,7 @@ class CacheConfigTests(TestCase): # Reset caches before each test since there's global state involved. 
self.config = CacheConfig(RootConfig()) self.config.reset() + _, self.clock = get_clock() def tearDown(self) -> None: # Also reset the caches after each test to leave state pristine. @@ -75,7 +77,9 @@ class CacheConfigTests(TestCase): the default cache size in the interim, and then resized once the config is loaded. """ - cache: LruCache = LruCache(max_size=100, server_name="test_server") + cache: LruCache = LruCache( + max_size=100, clock=self.clock, server_name="test_server" + ) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 50) @@ -96,7 +100,9 @@ class CacheConfigTests(TestCase): self.config.read_config(config, config_dir_path="", data_dir_path="") self.config.resize_all_caches() - cache: LruCache = LruCache(max_size=100, server_name="test_server") + cache: LruCache = LruCache( + max_size=100, clock=self.clock, server_name="test_server" + ) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 200) @@ -106,7 +112,9 @@ class CacheConfigTests(TestCase): the default cache size in the interim, and then resized to the new default cache size once the config is loaded. """ - cache: LruCache = LruCache(max_size=100, server_name="test_server") + cache: LruCache = LruCache( + max_size=100, clock=self.clock, server_name="test_server" + ) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 50) @@ -126,7 +134,9 @@ class CacheConfigTests(TestCase): self.config.read_config(config, config_dir_path="", data_dir_path="") self.config.resize_all_caches() - cache: LruCache = LruCache(max_size=100, server_name="test_server") + cache: LruCache = LruCache( + max_size=100, clock=self.clock, server_name="test_server" + ) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 150) @@ -145,15 +155,21 @@ class CacheConfigTests(TestCase): self.config.read_config(config, config_dir_path="", data_dir_path="") self.config.resize_all_caches() - cache_a: LruCache = LruCache(max_size=100, server_name="test_server") + cache_a: LruCache = LruCache( + max_size=100, clock=self.clock, server_name="test_server" + ) add_resizable_cache("*cache_a*", cache_resize_callback=cache_a.set_cache_factor) self.assertEqual(cache_a.max_size, 200) - cache_b: LruCache = LruCache(max_size=100, server_name="test_server") + cache_b: LruCache = LruCache( + max_size=100, clock=self.clock, server_name="test_server" + ) add_resizable_cache("*Cache_b*", cache_resize_callback=cache_b.set_cache_factor) self.assertEqual(cache_b.max_size, 300) - cache_c: LruCache = LruCache(max_size=100, server_name="test_server") + cache_c: LruCache = LruCache( + max_size=100, clock=self.clock, server_name="test_server" + ) add_resizable_cache("*cache_c*", cache_resize_callback=cache_c.set_cache_factor) self.assertEqual(cache_c.max_size, 200) @@ -168,6 +184,7 @@ class CacheConfigTests(TestCase): cache: LruCache = LruCache( max_size=self.config.event_cache_size, + clock=self.clock, apply_cache_factor_from_config=False, server_name="test_server", ) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 6516b7db17..df36185b99 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -19,7 +19,17 @@ # # -from typing import Dict, Iterable, List, Optional +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Dict, + Iterable, + List, + Optional, + TypeVar, +) from unittest.mock 
import AsyncMock, Mock from parameterized import parameterized @@ -36,6 +46,7 @@ from synapse.appservice import ( TransactionUnusedFallbackKeys, ) from synapse.handlers.appservice import ApplicationServicesHandler +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.client import login, receipts, register, room, sendtodevice from synapse.server import HomeServer from synapse.types import ( @@ -53,6 +64,11 @@ from tests.server import get_clock from tests.test_utils import event_injection from tests.unittest import override_config +if TYPE_CHECKING: + from typing_extensions import LiteralString + +R = TypeVar("R") + class AppServiceHandlerTestCase(unittest.TestCase): """Tests the ApplicationServicesHandler.""" @@ -64,6 +80,17 @@ class AppServiceHandlerTestCase(unittest.TestCase): self.reactor, self.clock = get_clock() hs = Mock() + + def test_run_as_background_process( + desc: "LiteralString", + func: Callable[..., Awaitable[Optional[R]]], + *args: Any, + **kwargs: Any, + ) -> "defer.Deferred[Optional[R]]": + # Ignore linter error as this is used only for testing purposes (i.e. outside of Synapse). + return run_as_background_process(desc, "test_server", func, *args, **kwargs) # type: ignore[untracked-background-process] + + hs.run_as_background_process = test_run_as_background_process hs.get_datastores.return_value = Mock(main=self.mock_store) self.mock_store.get_appservice_last_pos = AsyncMock(return_value=None) self.mock_store.set_appservice_last_pos = AsyncMock(return_value=None) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 4d2807151e..90c185bc3d 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -79,15 +79,17 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) -> HomeServer: # we mock out the keyring so as to skip the authentication check on the # federation API call. 
- mock_keyring = Mock(spec=["verify_json_for_server"]) + mock_keyring = Mock(spec=["verify_json_for_server", "shutdown"]) mock_keyring.verify_json_for_server = AsyncMock(return_value=True) + mock_keyring.shutdown = Mock() # we mock out the federation client too self.mock_federation_client = AsyncMock(spec=["put_json"]) self.mock_federation_client.put_json.return_value = (200, "OK") self.mock_federation_client.agent = MatrixFederationAgent( server_name="OUR_STUB_HOMESERVER_NAME", - reactor=reactor, + reactor=self.reactor, + clock=self.clock, tls_client_options_factory=None, user_agent=b"SynapseInTrialTest/0.0.0", ip_allowlist=None, @@ -96,7 +98,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) # the tests assume that we are starting at unix time 1000 - reactor.pump((1000,)) + self.reactor.pump((1000,)) self.mock_hs_notifier = Mock() hs = self.setup_test_homeserver( diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index a5e1b7c284..c66ca489a4 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -65,7 +65,7 @@ from synapse.util.caches.ttlcache import TTLCache from tests import unittest from tests.http import dummy_address, get_test_ca_cert_file, wrap_server_factory_for_tls -from tests.server import FakeTransport, ThreadedMemoryReactorClock +from tests.server import FakeTransport, get_clock from tests.utils import checked_cast, default_config logger = logging.getLogger(__name__) @@ -73,7 +73,7 @@ logger = logging.getLogger(__name__) class MatrixFederationAgentTests(unittest.TestCase): def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() + self.reactor, self.clock = get_clock() self.mock_resolver = AsyncMock(spec=SrvResolver) @@ -98,6 +98,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.well_known_resolver = WellKnownResolver( server_name="OUR_STUB_HOMESERVER_NAME", reactor=self.reactor, + clock=self.clock, agent=Agent(self.reactor, contextFactory=self.tls_factory), user_agent=b"test-agent", well_known_cache=self.well_known_cache, @@ -280,6 +281,7 @@ class MatrixFederationAgentTests(unittest.TestCase): return MatrixFederationAgent( server_name="OUR_STUB_HOMESERVER_NAME", reactor=cast(ISynapseReactor, self.reactor), + clock=self.clock, tls_client_options_factory=self.tls_factory, user_agent=b"test-agent", # Note that this is unused since _well_known_resolver is provided. ip_allowlist=IPSet(), @@ -1024,6 +1026,7 @@ class MatrixFederationAgentTests(unittest.TestCase): agent = MatrixFederationAgent( server_name="OUR_STUB_HOMESERVER_NAME", reactor=self.reactor, + clock=self.clock, tls_client_options_factory=tls_factory, user_agent=b"test-agent", # This is unused since _well_known_resolver is passed below. 
ip_allowlist=IPSet(), @@ -1033,6 +1036,7 @@ class MatrixFederationAgentTests(unittest.TestCase): _well_known_resolver=WellKnownResolver( server_name="OUR_STUB_HOMESERVER_NAME", reactor=cast(ISynapseReactor, self.reactor), + clock=self.clock, agent=Agent(self.reactor, contextFactory=tls_factory), user_agent=b"test-agent", well_known_cache=self.well_known_cache, diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index 057ca0db45..31cdfacd2c 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -163,7 +163,9 @@ class TracingScopeTestCase(TestCase): # implements `ISynapseThreadlessReactor` (combination of the normal Twisted # Reactor/Clock interfaces), via inheritance from # `twisted.internet.testing.MemoryReactor` and `twisted.internet.testing.Clock` - clock = Clock( + # Ignore `multiple-internal-clocks` linter error here since we are creating a `Clock` + # for testing purposes. + clock = Clock( # type: ignore[multiple-internal-clocks] reactor, # type: ignore[arg-type] server_name="test_server", ) @@ -234,7 +236,9 @@ class TracingScopeTestCase(TestCase): # implements `ISynapseThreadlessReactor` (combination of the normal Twisted # Reactor/Clock interfaces), via inheritance from # `twisted.internet.testing.MemoryReactor` and `twisted.internet.testing.Clock` - clock = Clock( + # Ignore `multiple-internal-clocks` linter error here since we are creating a `Clock` + # for testing purposes. + clock = Clock( # type: ignore[multiple-internal-clocks] reactor, # type: ignore[arg-type] server_name="test_server", ) diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py index 832e991730..b3f42c76f1 100644 --- a/tests/metrics/test_metrics.py +++ b/tests/metrics/test_metrics.py @@ -164,7 +164,10 @@ class CacheMetricsTests(unittest.HomeserverTestCase): """ CACHE_NAME = "cache_metrics_test_fgjkbdfg" cache: DeferredCache[str, str] = DeferredCache( - name=CACHE_NAME, server_name=self.hs.hostname, max_entries=777 + name=CACHE_NAME, + clock=self.hs.get_clock(), + server_name=self.hs.hostname, + max_entries=777, ) metrics_map = get_latest_metrics() @@ -212,10 +215,10 @@ class CacheMetricsTests(unittest.HomeserverTestCase): """ CACHE_NAME = "cache_metric_multiple_servers_test" cache1: DeferredCache[str, str] = DeferredCache( - name=CACHE_NAME, server_name="hs1", max_entries=777 + name=CACHE_NAME, clock=self.clock, server_name="hs1", max_entries=777 ) cache2: DeferredCache[str, str] = DeferredCache( - name=CACHE_NAME, server_name="hs2", max_entries=777 + name=CACHE_NAME, clock=self.clock, server_name="hs2", max_entries=777 ) metrics_map = get_latest_metrics() diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 36d3213908..1a2dab4c7d 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -173,7 +173,13 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): # Set up the server side protocol server_address = IPv4Address("TCP", host, port) - channel = self.site.buildProtocol((host, port)) + # The type ignore is here because mypy doesn't think the host/port tuple is of + # the correct type, even though it is the exact example given for + # `twisted.internet.interfaces.IAddress`. + # Mypy was happy with the type before we overrode `buildProtocol` in + # `SynapseSite`, probably because there was enough inheritance indirection before + # withe the argument not having a type associated with it. 
+ channel = self.site.buildProtocol((host, port)) # type: ignore[arg-type] # hook into the channel's request factory so that we can keep a record # of the requests @@ -185,7 +191,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): requests.append(request) return request - channel.requestFactory = request_factory + channel.requestFactory = request_factory # type: ignore[method-assign] # Connect client to server and vice versa. client_to_server_transport = FakeTransport( @@ -427,7 +433,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): # Set up the server side protocol server_address = IPv4Address("TCP", host, port) - channel = self._hs_to_site[hs].buildProtocol((host, port)) + channel = self._hs_to_site[hs].buildProtocol((host, port)) # type: ignore[arg-type] # Connect client to server and vice versa. client_to_server_transport = FakeTransport( diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 92259f2542..3896e0ce8a 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -66,10 +66,11 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): def setUp(self) -> None: super().setUp() - reactor, _ = get_clock() + reactor, clock = get_clock() self.matrix_federation_agent = MatrixFederationAgent( server_name="OUR_STUB_HOMESERVER_NAME", reactor=reactor, + clock=clock, tls_client_options_factory=None, user_agent=b"SynapseInTrialTest/0.0.0", ip_allowlist=None, diff --git a/tests/replication/test_module_cache_invalidation.py b/tests/replication/test_module_cache_invalidation.py index 8d5d0cce9a..1cb898673b 100644 --- a/tests/replication/test_module_cache_invalidation.py +++ b/tests/replication/test_module_cache_invalidation.py @@ -24,6 +24,7 @@ import synapse from synapse.module_api import cached from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.server import get_clock logger = logging.getLogger(__name__) @@ -36,6 +37,7 @@ KEY = "mykey" class TestCache: current_value = FIRST_VALUE server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() async def cached_function(self, user_id: str) -> str: diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index c22c1a6612..bb83988d76 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -93,8 +93,10 @@ class HttpTransactionCacheTestCase(unittest.TestCase): ) -> Generator["defer.Deferred[Any]", object, None]: @defer.inlineCallbacks def cb() -> Generator["defer.Deferred[object]", object, Tuple[int, JsonDict]]: + # Ignore `multiple-internal-clocks` linter error here since we are creating a `Clock` + # for testing purposes. 
yield defer.ensureDeferred( - Clock(reactor, server_name="test_server").sleep(0) + Clock(reactor, server_name="test_server").sleep(0) # type: ignore[multiple-internal-clocks] ) return 1, {} diff --git a/tests/server.py b/tests/server.py index 226bdf4bbe..a9a53eb8a4 100644 --- a/tests/server.py +++ b/tests/server.py @@ -28,6 +28,7 @@ import sqlite3 import time import uuid import warnings +import weakref from collections import deque from io import SEEK_END, BytesIO from typing import ( @@ -56,7 +57,7 @@ from zope.interface import implementer import twisted from twisted.enterprise import adbapi -from twisted.internet import address, tcp, threads, udp +from twisted.internet import address, defer, tcp, threads, udp from twisted.internet._resolver import SimpleResolverComplexifier from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed @@ -524,6 +525,19 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): # overwrite it again. self.nameResolver = SimpleResolverComplexifier(FakeResolver()) + def run(self) -> None: + """ + Override the call from `MemoryReactorClock` to add an additional step that + cleans up any `whenRunningHooks` that have been called. + This is necessary for a clean shutdown to occur as these hooks can hold + references to the `SynapseHomeServer`. + """ + super().run() + + # `MemoryReactorClock` never clears the hooks that have already been called. + # So manually clear the hooks here after they have been run. + self.whenRunningHooks.clear() + def installNameResolver(self, resolver: IHostnameResolver) -> IHostnameResolver: raise NotImplementedError() @@ -649,6 +663,19 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): super().advance(0) +def cleanup_test_reactor_system_event_triggers( + reactor: ThreadedMemoryReactorClock, +) -> None: + """Cleanup any registered system event triggers. + The `twisted.internet.test.ThreadedMemoryReactor` does not implement + `removeSystemEventTrigger` so won't clean these triggers up on it's own properly. + When trying to override `removeSystemEventTrigger` in `ThreadedMemoryReactorClock` + in order to implement this functionality, twisted complains about the reactor being + unclean and fails some tests. + """ + reactor.triggers.clear() + + def validate_connector(connector: tcp.Connector, expected_ip: str) -> None: """Try to validate the obtained connector as it would happen when synapse is running and the conection will be established. @@ -780,13 +807,18 @@ class ThreadPool: d: "Deferred[None]" = Deferred() d.addCallback(lambda x: function(*args, **kwargs)) d.addBoth(_) - self._reactor.callLater(0, d.callback, True) + # mypy ignored here because: + # - this is part of the test infrastructure (outside of Synapse) so tracking + # these calls for for homeserver shutdown doesn't make sense. + self._reactor.callLater(0, d.callback, True) # type: ignore[call-later-not-tracked] return d def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]: + # Ignore the linter error since this is an expected usage of creating a `Clock` for + # testing purposes. reactor = ThreadedMemoryReactorClock() - hs_clock = Clock(reactor, server_name="test_server") + hs_clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] return reactor, hs_clock @@ -898,10 +930,16 @@ class FakeTransport: # some implementations of IProducer (for example, FileSender) # don't return a deferred. 
d = maybeDeferred(self.producer.resumeProducing) - d.addCallback(lambda x: self._reactor.callLater(0.1, _produce)) + # mypy ignored here because: + # - this is part of the test infrastructure (outside of Synapse) so tracking + # these calls for for homeserver shutdown doesn't make sense. + d.addCallback(lambda x: self._reactor.callLater(0.1, _produce)) # type: ignore[call-later-not-tracked,call-overload] if not streaming: - self._reactor.callLater(0.0, _produce) + # mypy ignored here because: + # - this is part of the test infrastructure (outside of Synapse) so tracking + # these calls for for homeserver shutdown doesn't make sense. + self._reactor.callLater(0.0, _produce) # type: ignore[call-later-not-tracked] def write(self, byt: bytes) -> None: if self.disconnecting: @@ -913,7 +951,10 @@ class FakeTransport: # TLSMemoryBIOProtocol) get very confused if a read comes back while they are # still doing a write. Doing a callLater here breaks the cycle. if self.autoflush: - self._reactor.callLater(0.0, self.flush) + # mypy ignored here because: + # - this is part of the test infrastructure (outside of Synapse) so tracking + # these calls for for homeserver shutdown doesn't make sense. + self._reactor.callLater(0.0, self.flush) # type: ignore[call-later-not-tracked] def writeSequence(self, seq: Iterable[bytes]) -> None: for x in seq: @@ -943,7 +984,10 @@ class FakeTransport: self.buffer = self.buffer[len(to_write) :] if self.buffer and self.autoflush: - self._reactor.callLater(0.0, self.flush) + # mypy ignored here because: + # - this is part of the test infrastructure (outside of Synapse) so tracking + # these calls for for homeserver shutdown doesn't make sense. + self._reactor.callLater(0.0, self.flush) # type: ignore[call-later-not-tracked] if not self.buffer and self.disconnecting: logger.info("FakeTransport: Buffer now empty, completing disconnect") @@ -1020,7 +1064,7 @@ class TestHomeServer(HomeServer): def setup_test_homeserver( *, - cleanup_func: Callable[[Callable[[], None]], None], + cleanup_func: Callable[[Callable[[], Optional["Deferred[None]"]]], None], server_name: str = "test", config: Optional[HomeServerConfig] = None, reactor: Optional[ISynapseReactor] = None, @@ -1035,8 +1079,10 @@ def setup_test_homeserver( If no datastore is supplied, one is created and given to the homeserver. Args: - cleanup_func: The function used to register a cleanup routine for after the - test. + cleanup_func : The function used to register a cleanup routine for + after the test. If the function returns a Deferred, the + test case will wait until the Deferred has fired before + proceeding to the next cleanup function. server_name: Homeserver name config: Homeserver config reactor: Twisted reactor @@ -1062,7 +1108,9 @@ def setup_test_homeserver( raise ConfigError("Must be a string", ("server_name",)) if "clock" not in extra_homeserver_attributes: - extra_homeserver_attributes["clock"] = Clock(reactor, server_name=server_name) + # Ignore `multiple-internal-clocks` linter error here since we are creating a `Clock` + # for testing purposes (i.e. outside of Synapse). + extra_homeserver_attributes["clock"] = Clock(reactor, server_name=server_name) # type: ignore[multiple-internal-clocks] config.caches.resize_all_caches() @@ -1154,8 +1202,21 @@ def setup_test_homeserver( reactor=reactor, ) - # Register the cleanup hook - cleanup_func(hs.cleanup) + # Capture the `hs` as a `weakref` here to ensure there is no scenario where uncalled + # cleanup functions result in holding the `hs` in memory. 
+ cleanup_hs_ref = weakref.ref(hs) + + def shutdown_hs_on_cleanup() -> "Deferred[None]": + cleanup_hs = cleanup_hs_ref() + deferred: "Deferred[None]" = defer.succeed(None) + if cleanup_hs is not None: + deferred = defer.ensureDeferred(cleanup_hs.shutdown()) + return deferred + + # Register the cleanup hook for the homeserver. + # A full `hs.shutdown()` is necessary otherwise CI tests will fail while exhibiting + # strange behaviours. + cleanup_func(shutdown_hs_on_cleanup) # Install @cache_in_self attributes for key, val in extra_homeserver_attributes.items(): @@ -1184,14 +1245,18 @@ def setup_test_homeserver( hs.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False if USE_POSTGRES_FOR_TESTS: - database_pool = hs.get_datastores().databases[0] + # Capture the `database_pool` as a `weakref` here to ensure there is no scenario where uncalled + # cleanup functions result in holding the `hs` in memory. + database_pool = weakref.ref(hs.get_datastores().databases[0]) # We need to do cleanup on PostgreSQL def cleanup() -> None: import psycopg2 # Close all the db pools - database_pool._db_pool.close() + db_pool = database_pool() + if db_pool is not None: + db_pool._db_pool.close() dropped = False diff --git a/tests/test_distributor.py b/tests/test_distributor.py index 19dafe64ed..2dd26833c8 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -26,9 +26,10 @@ from synapse.util.distributor import Distributor from . import unittest -class DistributorTestCase(unittest.TestCase): +class DistributorTestCase(unittest.HomeserverTestCase): def setUp(self) -> None: - self.dist = Distributor(server_name="test_server") + super().setUp() + self.dist = Distributor(hs=self.hs) def test_signal_dispatch(self) -> None: self.dist.declare("alert") diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 7017d6d70a..f0deb1554e 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -26,20 +26,26 @@ from twisted.internet import defer from synapse.util.caches.deferred_cache import DeferredCache +from tests.server import get_clock from tests.unittest import TestCase class DeferredCacheTestCase(TestCase): + def setUp(self) -> None: + super().setUp() + + _, self.clock = get_clock() + def test_empty(self) -> None: cache: DeferredCache[str, int] = DeferredCache( - name="test", server_name="test_server" + name="test", clock=self.clock, server_name="test_server" ) with self.assertRaises(KeyError): cache.get("foo") def test_hit(self) -> None: cache: DeferredCache[str, int] = DeferredCache( - name="test", server_name="test_server" + name="test", clock=self.clock, server_name="test_server" ) cache.prefill("foo", 123) @@ -47,7 +53,7 @@ class DeferredCacheTestCase(TestCase): def test_hit_deferred(self) -> None: cache: DeferredCache[str, int] = DeferredCache( - name="test", server_name="test_server" + name="test", clock=self.clock, server_name="test_server" ) origin_d: "defer.Deferred[int]" = defer.Deferred() set_d = cache.set("k1", origin_d) @@ -72,7 +78,7 @@ class DeferredCacheTestCase(TestCase): def test_callbacks(self) -> None: """Invalidation callbacks are called at the right time""" cache: DeferredCache[str, int] = DeferredCache( - name="test", server_name="test_server" + name="test", clock=self.clock, server_name="test_server" ) callbacks = set() @@ -107,7 +113,7 @@ class DeferredCacheTestCase(TestCase): def test_set_fail(self) -> None: cache: DeferredCache[str, int] = DeferredCache( - 
name="test", server_name="test_server" + name="test", clock=self.clock, server_name="test_server" ) callbacks = set() @@ -146,7 +152,7 @@ class DeferredCacheTestCase(TestCase): def test_get_immediate(self) -> None: cache: DeferredCache[str, int] = DeferredCache( - name="test", server_name="test_server" + name="test", clock=self.clock, server_name="test_server" ) d1: "defer.Deferred[int]" = defer.Deferred() cache.set("key1", d1) @@ -164,7 +170,7 @@ class DeferredCacheTestCase(TestCase): def test_invalidate(self) -> None: cache: DeferredCache[Tuple[str], int] = DeferredCache( - name="test", server_name="test_server" + name="test", clock=self.clock, server_name="test_server" ) cache.prefill(("foo",), 123) cache.invalidate(("foo",)) @@ -174,7 +180,7 @@ class DeferredCacheTestCase(TestCase): def test_invalidate_all(self) -> None: cache: DeferredCache[str, str] = DeferredCache( - name="testcache", server_name="test_server" + name="testcache", clock=self.clock, server_name="test_server" ) callback_record = [False, False] @@ -220,6 +226,7 @@ class DeferredCacheTestCase(TestCase): def test_eviction(self) -> None: cache: DeferredCache[int, str] = DeferredCache( name="test", + clock=self.clock, server_name="test_server", max_entries=2, apply_cache_factor_from_config=False, @@ -238,6 +245,7 @@ class DeferredCacheTestCase(TestCase): def test_eviction_lru(self) -> None: cache: DeferredCache[int, str] = DeferredCache( name="test", + clock=self.clock, server_name="test_server", max_entries=2, apply_cache_factor_from_config=False, @@ -260,6 +268,7 @@ class DeferredCacheTestCase(TestCase): def test_eviction_iterable(self) -> None: cache: DeferredCache[int, List[str]] = DeferredCache( name="test", + clock=self.clock, server_name="test_server", max_entries=3, apply_cache_factor_from_config=False, diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 3eb502f902..0e3b6ae36b 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -49,6 +49,7 @@ from synapse.util.caches import descriptors from synapse.util.caches.descriptors import _CacheContext, cached, cachedList from tests import unittest +from tests.server import get_clock from tests.test_utils import get_awaitable_result logger = logging.getLogger(__name__) @@ -56,7 +57,10 @@ logger = logging.getLogger(__name__) def run_on_reactor() -> "Deferred[int]": d: "Deferred[int]" = Deferred() - cast(IReactorTime, reactor).callLater(0, d.callback, 0) + # mypy ignored here because: + # - this is part of the test infrastructure (outside of Synapse) so tracking + # these calls for for homeserver shutdown doesn't make sense. 
+ cast(IReactorTime, reactor).callLater(0, d.callback, 0) # type: ignore[call-later-not-tracked] return make_deferred_yieldable(d) @@ -67,6 +71,7 @@ class DescriptorTestCase(unittest.TestCase): def __init__(self) -> None: self.mock = mock.Mock() self.server_name = "test_server" + _, self.clock = get_clock() # nb must be called this for @cached @descriptors.cached() def fn(self, arg1: int, arg2: int) -> str: @@ -102,6 +107,7 @@ class DescriptorTestCase(unittest.TestCase): def __init__(self) -> None: self.mock = mock.Mock() self.server_name = "test_server" + _, self.clock = get_clock() # nb must be called this for @cached @descriptors.cached(num_args=1) def fn(self, arg1: int, arg2: int) -> str: @@ -148,6 +154,7 @@ class DescriptorTestCase(unittest.TestCase): def __init__(self) -> None: self.mock = mock.Mock() self.server_name = "test_server" + _, self.clock = get_clock() # nb must be called this for @cached obj = Cls() obj.mock.return_value = "fish" @@ -179,6 +186,7 @@ class DescriptorTestCase(unittest.TestCase): def __init__(self) -> None: self.mock = mock.Mock() self.server_name = "test_server" + _, self.clock = get_clock() # nb must be called this for @cached @descriptors.cached() def fn(self, arg1: int, kwarg1: int = 2) -> str: @@ -214,6 +222,7 @@ class DescriptorTestCase(unittest.TestCase): class Cls: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def fn(self, arg1: int) -> NoReturn: @@ -239,6 +248,7 @@ class DescriptorTestCase(unittest.TestCase): result: Optional[Deferred] = None call_count = 0 server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def fn(self, arg1: int) -> Deferred: @@ -293,6 +303,7 @@ class DescriptorTestCase(unittest.TestCase): class Cls: server_name = "test_server" + _, clock = get_clock() # nb must be called this for @cached @descriptors.cached() def fn(self, arg1: int) -> "Deferred[int]": @@ -337,6 +348,7 @@ class DescriptorTestCase(unittest.TestCase): class Cls: server_name = "test_server" + _, clock = get_clock() # nb must be called this for @cached @descriptors.cached() def fn(self, arg1: int) -> Deferred: @@ -381,6 +393,7 @@ class DescriptorTestCase(unittest.TestCase): def __init__(self) -> None: self.mock = mock.Mock() self.server_name = "test_server" + _, self.clock = get_clock() # nb must be called this for @cached @descriptors.cached() def fn(self, arg1: int, arg2: int = 2, arg3: int = 3) -> str: @@ -419,6 +432,7 @@ class DescriptorTestCase(unittest.TestCase): def __init__(self) -> None: self.mock = mock.Mock() self.server_name = "test_server" + _, self.clock = get_clock() # nb must be called this for @cached @descriptors.cached(iterable=True) def fn(self, arg1: int, arg2: int) -> Tuple[str, ...]: @@ -453,6 +467,7 @@ class DescriptorTestCase(unittest.TestCase): class Cls: server_name = "test_server" + _, clock = get_clock() # nb must be called this for @cached @descriptors.cached(iterable=True) def fn(self, arg1: int) -> NoReturn: @@ -476,6 +491,7 @@ class DescriptorTestCase(unittest.TestCase): class Cls: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached(cache_context=True) async def func1(self, key: str, cache_context: _CacheContext) -> int: @@ -504,6 +520,7 @@ class DescriptorTestCase(unittest.TestCase): class Cls: server_name = "test_server" + _, clock = get_clock() # nb must be called this for 
@cached @cached() async def fn(self, arg1: int) -> str: @@ -537,6 +554,7 @@ class DescriptorTestCase(unittest.TestCase): class Cls: inner_context_was_finished = False server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() async def fn(self, arg1: int) -> str: @@ -583,6 +601,7 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): def test_passthrough(self) -> Generator["Deferred[Any]", object, None]: class A: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def func(self, key: str) -> str: @@ -599,6 +618,7 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): class A: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def func(self, key: str) -> str: @@ -619,6 +639,7 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): class A: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def func(self, key: str) -> str: @@ -639,6 +660,7 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): def test_invalidate_missing(self) -> None: class A: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def func(self, key: str) -> str: @@ -652,6 +674,7 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): class A: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached(max_entries=10) def func(self, key: int) -> int: @@ -681,6 +704,7 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): class A: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def func(self, key: str) -> "Deferred[int]": @@ -701,6 +725,7 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): class A: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def func(self, key: str) -> str: @@ -736,6 +761,7 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): class A: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached(max_entries=2) def func(self, key: str) -> str: @@ -775,6 +801,7 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): class A: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def func(self, key: str) -> str: @@ -824,6 +851,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): def __init__(self) -> None: self.mock = mock.Mock() self.server_name = "test_server" + _, self.clock = get_clock() # nb must be called this for @cached @descriptors.cached() def fn(self, arg1: int, arg2: int) -> None: @@ -890,6 +918,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): def __init__(self) -> None: self.mock = mock.Mock() self.server_name = "test_server" + _, self.clock = get_clock() # nb must be called this for @cached @descriptors.cached() def fn(self, arg1: int) -> None: @@ -934,6 +963,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): def __init__(self) -> None: self.mock = mock.Mock() self.server_name = "test_server" + 
_, self.clock = get_clock() # nb must be called this for @cached @descriptors.cached() def fn(self, arg1: int, arg2: int) -> None: @@ -975,6 +1005,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): class Cls: server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def fn(self, arg1: int) -> None: @@ -1011,6 +1042,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): class Cls: inner_context_was_finished = False server_name = "test_server" # nb must be called this for @cached + _, clock = get_clock() # nb must be called this for @cached @cached() def fn(self, arg1: int) -> None: @@ -1055,6 +1087,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): class Cls: server_name = "test_server" + _, clock = get_clock() # nb must be called this for @cached @descriptors.cached(tree=True) def fn(self, room_id: str, event_id: str) -> None: diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index 54f7b55511..fd8d576aea 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -25,7 +25,6 @@ from parameterized import parameterized_class from twisted.internet import defer from twisted.internet.defer import CancelledError, Deferred, ensureDeferred -from twisted.internet.task import Clock from twisted.python.failure import Failure from synapse.logging.context import ( @@ -152,7 +151,7 @@ class ObservableDeferredTest(TestCase): class TimeoutDeferredTest(TestCase): def setUp(self) -> None: - self.clock = Clock() + self.reactor, self.clock = get_clock() def test_times_out(self) -> None: """Basic test case that checks that the original deferred is cancelled and that @@ -165,12 +164,16 @@ class TimeoutDeferredTest(TestCase): cancelled = True non_completing_d: Deferred = Deferred(canceller) - timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock) + timing_out_d = timeout_deferred( + deferred=non_completing_d, + timeout=1.0, + clock=self.clock, + ) self.assertNoResult(timing_out_d) self.assertFalse(cancelled, "deferred was cancelled prematurely") - self.clock.pump((1.0,)) + self.reactor.pump((1.0,)) self.assertTrue(cancelled, "deferred was not cancelled by timeout") self.failureResultOf(timing_out_d, defer.TimeoutError) @@ -183,11 +186,15 @@ class TimeoutDeferredTest(TestCase): raise Exception("can't cancel this deferred") non_completing_d: Deferred = Deferred(canceller) - timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock) + timing_out_d = timeout_deferred( + deferred=non_completing_d, + timeout=1.0, + clock=self.clock, + ) self.assertNoResult(timing_out_d) - self.clock.pump((1.0,)) + self.reactor.pump((1.0,)) self.failureResultOf(timing_out_d, defer.TimeoutError) @@ -227,7 +234,7 @@ class TimeoutDeferredTest(TestCase): timing_out_d = timeout_deferred( deferred=incomplete_d, timeout=1.0, - reactor=self.clock, + clock=self.clock, ) self.assertNoResult(timing_out_d) # We should still be in the logcontext we started in @@ -243,7 +250,7 @@ class TimeoutDeferredTest(TestCase): # we're pumping the reactor in the block and return us back to our current # logcontext after the block. 
with PreserveLoggingContext(): - self.clock.pump( + self.reactor.pump( # We only need to pump `1.0` (seconds) as we set # `timeout_deferred(timeout=1.0)` above (1.0,) @@ -264,7 +271,7 @@ class TimeoutDeferredTest(TestCase): self.assertEqual(current_context(), SENTINEL_CONTEXT) -class _TestException(Exception): +class _TestException(Exception): # pass @@ -560,8 +567,8 @@ class AwakenableSleeperTests(TestCase): "Tests AwakenableSleeper" def test_sleep(self) -> None: - reactor, _ = get_clock() - sleeper = AwakenableSleeper(reactor) + reactor, clock = get_clock() + sleeper = AwakenableSleeper(clock) d = defer.ensureDeferred(sleeper.sleep("name", 1000)) @@ -575,8 +582,8 @@ class AwakenableSleeperTests(TestCase): self.assertTrue(d.called) def test_explicit_wake(self) -> None: - reactor, _ = get_clock() - sleeper = AwakenableSleeper(reactor) + reactor, clock = get_clock() + sleeper = AwakenableSleeper(clock) d = defer.ensureDeferred(sleeper.sleep("name", 1000)) @@ -592,8 +599,8 @@ class AwakenableSleeperTests(TestCase): reactor.advance(0.6) def test_multiple_sleepers_timeout(self) -> None: - reactor, _ = get_clock() - sleeper = AwakenableSleeper(reactor) + reactor, clock = get_clock() + sleeper = AwakenableSleeper(clock) d1 = defer.ensureDeferred(sleeper.sleep("name", 1000)) @@ -612,8 +619,8 @@ class AwakenableSleeperTests(TestCase): self.assertTrue(d2.called) def test_multiple_sleepers_wake(self) -> None: - reactor, _ = get_clock() - sleeper = AwakenableSleeper(reactor) + reactor, clock = get_clock() + sleeper = AwakenableSleeper(clock) d1 = defer.ensureDeferred(sleeper.sleep("name", 1000)) diff --git a/tests/util/test_batching_queue.py b/tests/util/test_batching_queue.py index 532582cf87..60bfdf38aa 100644 --- a/tests/util/test_batching_queue.py +++ b/tests/util/test_batching_queue.py @@ -32,13 +32,12 @@ from synapse.util.batching_queue import ( number_queued, ) -from tests.server import get_clock -from tests.unittest import TestCase +from tests.unittest import HomeserverTestCase -class BatchingQueueTestCase(TestCase): +class BatchingQueueTestCase(HomeserverTestCase): def setUp(self) -> None: - self.clock, hs_clock = get_clock() + super().setUp() # We ensure that we remove any existing metrics for "test_queue". try: @@ -51,8 +50,8 @@ class BatchingQueueTestCase(TestCase): self._pending_calls: List[Tuple[List[str], defer.Deferred]] = [] self.queue: BatchingQueue[str, str] = BatchingQueue( name="test_queue", - server_name="test_server", - clock=hs_clock, + hs=self.hs, + clock=self.clock, process_batch_callback=self._process_queue, ) @@ -108,7 +107,7 @@ class BatchingQueueTestCase(TestCase): self.assertFalse(queue_d.called) # We should see a call to `_process_queue` after a reactor tick. 
- self.clock.pump([0]) + self.reactor.pump([0]) self.assertEqual(len(self._pending_calls), 1) self.assertEqual(self._pending_calls[0][0], ["foo"]) @@ -134,7 +133,7 @@ class BatchingQueueTestCase(TestCase): self._assert_metrics(queued=2, keys=1, in_flight=2) - self.clock.pump([0]) + self.reactor.pump([0]) # We should see only *one* call to `_process_queue` self.assertEqual(len(self._pending_calls), 1) @@ -158,7 +157,7 @@ class BatchingQueueTestCase(TestCase): self.assertFalse(self._pending_calls) queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1")) - self.clock.pump([0]) + self.reactor.pump([0]) self.assertEqual(len(self._pending_calls), 1) @@ -185,7 +184,7 @@ class BatchingQueueTestCase(TestCase): self._assert_metrics(queued=2, keys=1, in_flight=2) # We should now see a second call to `_process_queue` - self.clock.pump([0]) + self.reactor.pump([0]) self.assertEqual(len(self._pending_calls), 1) self.assertEqual(self._pending_calls[0][0], ["foo2", "foo3"]) self.assertFalse(queue_d2.called) @@ -206,9 +205,9 @@ class BatchingQueueTestCase(TestCase): self.assertFalse(self._pending_calls) queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1", key=1)) - self.clock.pump([0]) + self.reactor.pump([0]) queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2", key=2)) - self.clock.pump([0]) + self.reactor.pump([0]) # We queue up another item with key=2 to check that we will keep taking # things off the queue. @@ -240,7 +239,7 @@ class BatchingQueueTestCase(TestCase): self.assertFalse(queue_d3.called) # We should now see a call `_pending_calls` for `foo3` - self.clock.pump([0]) + self.reactor.pump([0]) self.assertEqual(len(self._pending_calls), 1) self.assertEqual(self._pending_calls[0][0], ["foo3"]) self.assertFalse(queue_d3.called) diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py index 246e18fd15..16e096a4b2 100644 --- a/tests/util/test_dict_cache.py +++ b/tests/util/test_dict_cache.py @@ -23,12 +23,14 @@ from synapse.util.caches.dictionary_cache import DictionaryCache from tests import unittest +from tests.server import get_clock class DictCacheTestCase(unittest.TestCase): def setUp(self) -> None: + _, clock = get_clock() self.cache: DictionaryCache[str, str, str] = DictionaryCache( - name="foobar", server_name="test_server", max_entries=10 + name="foobar", clock=clock, server_name="test_server", max_entries=10 ) def test_simple_cache_hit_full(self) -> None: diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py index eda2d586f6..35c0f02e3f 100644 --- a/tests/util/test_expiring_cache.py +++ b/tests/util/test_expiring_cache.py @@ -34,6 +34,7 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase): cache: ExpiringCache[str, str] = ExpiringCache( cache_name="test", server_name="testserver", + hs=self.hs, clock=clock, max_len=1, ) @@ -47,6 +48,7 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase): cache: ExpiringCache[str, str] = ExpiringCache( cache_name="test", server_name="testserver", + hs=self.hs, clock=clock, max_len=2, ) @@ -66,6 +68,7 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase): cache: ExpiringCache[str, List[int]] = ExpiringCache( cache_name="test", server_name="testserver", + hs=self.hs, clock=clock, max_len=5, iterable=True, @@ -90,6 +93,7 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase): cache: ExpiringCache[str, int] = ExpiringCache( cache_name="test", server_name="testserver", + hs=self.hs, clock=clock, expiry_ms=1000, ) diff --git a/tests/util/test_logcontext.py 
b/tests/util/test_logcontext.py index 966ea31f1a..ca805bb20a 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -66,7 +66,8 @@ class LoggingContextTestCase(unittest.TestCase): """ Test `Clock.sleep` """ - clock = Clock(reactor, server_name="test_server") + # Ignore linter error since we are creating a `Clock` for testing purposes. + clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] # Sanity check that we start in the sentinel context self._check_test_key("sentinel") @@ -90,7 +91,7 @@ class LoggingContextTestCase(unittest.TestCase): # so that the test can complete and we see the underlying error. callback_finished = True - reactor.callLater(0, lambda: defer.ensureDeferred(competing_callback())) + reactor.callLater(0, lambda: defer.ensureDeferred(competing_callback())) # type: ignore[call-later-not-tracked] with LoggingContext(name="foo", server_name="test_server"): await clock.sleep(0) @@ -111,7 +112,8 @@ class LoggingContextTestCase(unittest.TestCase): """ Test `Clock.looping_call` """ - clock = Clock(reactor, server_name="test_server") + # Ignore linter error since we are creating a `Clock` for testing purposes. + clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] # Sanity check that we start in the sentinel context self._check_test_key("sentinel") @@ -161,7 +163,8 @@ class LoggingContextTestCase(unittest.TestCase): """ Test `Clock.looping_call_now` """ - clock = Clock(reactor, server_name="test_server") + # Ignore linter error since we are creating a `Clock` for testing purposes. + clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] # Sanity check that we start in the sentinel context self._check_test_key("sentinel") @@ -209,7 +212,8 @@ class LoggingContextTestCase(unittest.TestCase): """ Test `Clock.call_later` """ - clock = Clock(reactor, server_name="test_server") + # Ignore linter error since we are creating a `Clock` for testing purposes. + clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] # Sanity check that we start in the sentinel context self._check_test_key("sentinel") @@ -261,7 +265,8 @@ class LoggingContextTestCase(unittest.TestCase): `d.callback(None)` without anything else. See the *Deferred callbacks* section of docs/log_contexts.md for more details. """ - clock = Clock(reactor, server_name="test_server") + # Ignore linter error since we are creating a `Clock` for testing purposes. + clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] # Sanity check that we start in the sentinel context self._check_test_key("sentinel") @@ -318,7 +323,8 @@ class LoggingContextTestCase(unittest.TestCase): `d.callback(None)` without anything else. See the *Deferred callbacks* section of docs/log_contexts.md for more details. """ - clock = Clock(reactor, server_name="test_server") + # Ignore linter error since we are creating a `Clock` for testing purposes. + clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] # Sanity check that we start in the sentinel context self._check_test_key("sentinel") @@ -379,7 +385,8 @@ class LoggingContextTestCase(unittest.TestCase): `d.callback(None)` without anything else. See the *Deferred callbacks* section of docs/log_contexts.md for more details. """ - clock = Clock(reactor, server_name="test_server") + # Ignore linter error since we are creating a `Clock` for testing purposes. 
+ clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] # Sanity check that we start in the sentinel context self._check_test_key("sentinel") @@ -450,7 +457,8 @@ class LoggingContextTestCase(unittest.TestCase): self._check_test_key("sentinel") async def _test_run_in_background(self, function: Callable[[], object]) -> None: - clock = Clock(reactor, server_name="test_server") + # Ignore linter error since we are creating a `Clock` for testing purposes. + clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] # Sanity check that we start in the sentinel context self._check_test_key("sentinel") @@ -492,7 +500,8 @@ class LoggingContextTestCase(unittest.TestCase): @logcontext_clean async def test_run_in_background_with_blocking_fn(self) -> None: async def blocking_function() -> None: - await Clock(reactor, server_name="test_server").sleep(0) + # Ignore linter error since we are creating a `Clock` for testing purposes. + await Clock(reactor, server_name="test_server").sleep(0) # type: ignore[multiple-internal-clocks] await self._test_run_in_background(blocking_function) @@ -525,7 +534,8 @@ class LoggingContextTestCase(unittest.TestCase): async def testfunc() -> None: self._check_test_key("foo") - d = defer.ensureDeferred(Clock(reactor, server_name="test_server").sleep(0)) + # Ignore linter error since we are creating a `Clock` for testing purposes. + d = defer.ensureDeferred(Clock(reactor, server_name="test_server").sleep(0)) # type: ignore[multiple-internal-clocks] self.assertIs(current_context(), SENTINEL_CONTEXT) await d self._check_test_key("foo") @@ -554,7 +564,8 @@ class LoggingContextTestCase(unittest.TestCase): This will stress the logic around incomplete deferreds in `run_coroutine_in_background`. """ - clock = Clock(reactor, server_name="test_server") + # Ignore linter error since we are creating a `Clock` for testing purposes. + clock = Clock(reactor, server_name="test_server") # type: ignore[multiple-internal-clocks] # Sanity check that we start in the sentinel context self._check_test_key("sentinel") @@ -645,7 +656,7 @@ class LoggingContextTestCase(unittest.TestCase): # the synapse rules. 
def blocking_function() -> defer.Deferred: d: defer.Deferred = defer.Deferred() - reactor.callLater(0, d.callback, None) + reactor.callLater(0, d.callback, None) # type: ignore[call-later-not-tracked] return d sentinel_context = current_context() @@ -692,7 +703,7 @@ def _chained_deferred_function() -> defer.Deferred: def cb(res: object) -> defer.Deferred: d2: defer.Deferred = defer.Deferred() - reactor.callLater(0, d2.callback, res) + reactor.callLater(0, d2.callback, res) # type: ignore[call-later-not-tracked] return d2 d.addCallback(cb) diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index 4d37ad0975..56e9996b00 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -29,18 +29,28 @@ from synapse.util.caches.lrucache import LruCache, setup_expire_lru_cache_entrie from synapse.util.caches.treecache import TreeCache from tests import unittest +from tests.server import get_clock from tests.unittest import override_config class LruCacheTestCase(unittest.HomeserverTestCase): + def setUp(self) -> None: + super().setUp() + + _, self.clock = get_clock() + def test_get_set(self) -> None: - cache: LruCache[str, str] = LruCache(max_size=1, server_name="test_server") + cache: LruCache[str, str] = LruCache( + max_size=1, clock=self.clock, server_name="test_server" + ) cache["key"] = "value" self.assertEqual(cache.get("key"), "value") self.assertEqual(cache["key"], "value") def test_eviction(self) -> None: - cache: LruCache[int, int] = LruCache(max_size=2, server_name="test_server") + cache: LruCache[int, int] = LruCache( + max_size=2, clock=self.clock, server_name="test_server" + ) cache[1] = 1 cache[2] = 2 @@ -54,7 +64,9 @@ class LruCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(cache.get(3), 3) def test_setdefault(self) -> None: - cache: LruCache[str, int] = LruCache(max_size=1, server_name="test_server") + cache: LruCache[str, int] = LruCache( + max_size=1, clock=self.clock, server_name="test_server" + ) self.assertEqual(cache.setdefault("key", 1), 1) self.assertEqual(cache.get("key"), 1) self.assertEqual(cache.setdefault("key", 2), 1) @@ -63,7 +75,9 @@ class LruCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(cache.get("key"), 2) def test_pop(self) -> None: - cache: LruCache[str, int] = LruCache(max_size=1, server_name="test_server") + cache: LruCache[str, int] = LruCache( + max_size=1, clock=self.clock, server_name="test_server" + ) cache["key"] = 1 self.assertEqual(cache.pop("key"), 1) self.assertEqual(cache.pop("key"), None) @@ -71,7 +85,10 @@ class LruCacheTestCase(unittest.HomeserverTestCase): def test_del_multi(self) -> None: # The type here isn't quite correct as they don't handle TreeCache well. cache: LruCache[Tuple[str, str], str] = LruCache( - max_size=4, cache_type=TreeCache, server_name="test_server" + max_size=4, + clock=self.clock, + cache_type=TreeCache, + server_name="test_server", ) cache[("animal", "cat")] = "mew" cache[("animal", "dog")] = "woof" @@ -91,7 +108,9 @@ class LruCacheTestCase(unittest.HomeserverTestCase): # Man from del_multi say "Yes". 
def test_clear(self) -> None: - cache: LruCache[str, int] = LruCache(max_size=1, server_name="test_server") + cache: LruCache[str, int] = LruCache( + max_size=1, clock=self.clock, server_name="test_server" + ) cache["key"] = 1 cache.clear() self.assertEqual(len(cache), 0) @@ -99,7 +118,10 @@ class LruCacheTestCase(unittest.HomeserverTestCase): @override_config({"caches": {"per_cache_factors": {"mycache": 10}}}) def test_special_size(self) -> None: cache: LruCache = LruCache( - max_size=10, server_name="test_server", cache_name="mycache" + max_size=10, + clock=self.clock, + server_name="test_server", + cache_name="mycache", ) self.assertEqual(cache.max_size, 100) @@ -107,7 +129,9 @@ class LruCacheTestCase(unittest.HomeserverTestCase): class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): def test_get(self) -> None: m = Mock() - cache: LruCache[str, str] = LruCache(max_size=1, server_name="test_server") + cache: LruCache[str, str] = LruCache( + max_size=1, clock=self.clock, server_name="test_server" + ) cache.set("key", "value") self.assertFalse(m.called) @@ -126,7 +150,9 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): def test_multi_get(self) -> None: m = Mock() - cache: LruCache[str, str] = LruCache(max_size=1, server_name="test_server") + cache: LruCache[str, str] = LruCache( + max_size=1, clock=self.clock, server_name="test_server" + ) cache.set("key", "value") self.assertFalse(m.called) @@ -145,7 +171,9 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): def test_set(self) -> None: m = Mock() - cache: LruCache[str, str] = LruCache(max_size=1, server_name="test_server") + cache: LruCache[str, str] = LruCache( + max_size=1, clock=self.clock, server_name="test_server" + ) cache.set("key", "value", callbacks=[m]) self.assertFalse(m.called) @@ -161,7 +189,9 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): def test_pop(self) -> None: m = Mock() - cache: LruCache[str, str] = LruCache(max_size=1, server_name="test_server") + cache: LruCache[str, str] = LruCache( + max_size=1, clock=self.clock, server_name="test_server" + ) cache.set("key", "value", callbacks=[m]) self.assertFalse(m.called) @@ -182,7 +212,10 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): m4 = Mock() # The type here isn't quite correct as they don't handle TreeCache well. 
cache: LruCache[Tuple[str, str], str] = LruCache( - max_size=4, cache_type=TreeCache, server_name="test_server" + max_size=4, + clock=self.clock, + cache_type=TreeCache, + server_name="test_server", ) cache.set(("a", "1"), "value", callbacks=[m1]) @@ -205,7 +238,9 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): def test_clear(self) -> None: m1 = Mock() m2 = Mock() - cache: LruCache[str, str] = LruCache(max_size=5, server_name="test_server") + cache: LruCache[str, str] = LruCache( + max_size=5, clock=self.clock, server_name="test_server" + ) cache.set("key1", "value", callbacks=[m1]) cache.set("key2", "value", callbacks=[m2]) @@ -222,7 +257,9 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): m1 = Mock(name="m1") m2 = Mock(name="m2") m3 = Mock(name="m3") - cache: LruCache[str, str] = LruCache(max_size=2, server_name="test_server") + cache: LruCache[str, str] = LruCache( + max_size=2, clock=self.clock, server_name="test_server" + ) cache.set("key1", "value", callbacks=[m1]) cache.set("key2", "value", callbacks=[m2]) @@ -259,7 +296,7 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): class LruCacheSizedTestCase(unittest.HomeserverTestCase): def test_evict(self) -> None: cache: LruCache[str, List[int]] = LruCache( - max_size=5, size_callback=len, server_name="test_server" + max_size=5, clock=self.clock, size_callback=len, server_name="test_server" ) cache["key1"] = [0] cache["key2"] = [1, 2] @@ -284,7 +321,10 @@ class LruCacheSizedTestCase(unittest.HomeserverTestCase): def test_zero_size_drop_from_cache(self) -> None: """Test that `drop_from_cache` works correctly with 0-sized entries.""" cache: LruCache[str, List[int]] = LruCache( - max_size=5, size_callback=lambda x: 0, server_name="test_server" + max_size=5, + clock=self.clock, + size_callback=lambda x: 0, + server_name="test_server", ) cache["key1"] = [] @@ -402,7 +442,10 @@ class MemoryEvictionTestCase(unittest.HomeserverTestCase): class ExtraIndexLruCacheTestCase(unittest.HomeserverTestCase): def test_invalidate_simple(self) -> None: cache: LruCache[str, int] = LruCache( - max_size=10, server_name="test_server", extra_index_cb=lambda k, v: str(v) + max_size=10, + clock=self.hs.get_clock(), + server_name="test_server", + extra_index_cb=lambda k, v: str(v), ) cache["key1"] = 1 cache["key2"] = 2 @@ -417,7 +460,10 @@ class ExtraIndexLruCacheTestCase(unittest.HomeserverTestCase): def test_invalidate_multi(self) -> None: cache: LruCache[str, int] = LruCache( - max_size=10, server_name="test_server", extra_index_cb=lambda k, v: str(v) + max_size=10, + clock=self.hs.get_clock(), + server_name="test_server", + extra_index_cb=lambda k, v: str(v), ) cache["key1"] = 1 cache["key2"] = 1 diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 82baff5883..593be93ea3 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -35,6 +35,7 @@ class RetryLimiterTestCase(HomeserverTestCase): get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, ) @@ -57,6 +58,7 @@ class RetryLimiterTestCase(HomeserverTestCase): get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, ) @@ -89,6 +91,7 @@ class RetryLimiterTestCase(HomeserverTestCase): get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, ), @@ -104,6 +107,7 @@ class RetryLimiterTestCase(HomeserverTestCase): 
get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, ) @@ -139,6 +143,7 @@ class RetryLimiterTestCase(HomeserverTestCase): get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, ) @@ -165,6 +170,7 @@ class RetryLimiterTestCase(HomeserverTestCase): get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, notifier=notifier, @@ -238,6 +244,7 @@ class RetryLimiterTestCase(HomeserverTestCase): get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, ) @@ -261,6 +268,7 @@ class RetryLimiterTestCase(HomeserverTestCase): get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, ), @@ -273,6 +281,7 @@ class RetryLimiterTestCase(HomeserverTestCase): get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, ) @@ -297,6 +306,7 @@ class RetryLimiterTestCase(HomeserverTestCase): get_retry_limiter( destination="test_dest", our_server_name=self.hs.hostname, + hs=self.hs, clock=self.clock, store=store, ), From 0615b64bb49684b846110465052642a46fd27028 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Oct 2025 13:50:12 +0100 Subject: [PATCH 007/149] Bump phonenumbers from 9.0.14 to 9.0.15 (#18991) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 55de265559..6ff90ed361 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1589,14 +1589,14 @@ files = [ [[package]] name = "phonenumbers" -version = "9.0.14" +version = "9.0.15" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." optional = false python-versions = "*" groups = ["main"] files = [ - {file = "phonenumbers-9.0.14-py2.py3-none-any.whl", hash = "sha256:6bdf5c46dbfefa1d941d122432d1958418d1dfe3f8c8c81d4c8e80f5442ea41f"}, - {file = "phonenumbers-9.0.14.tar.gz", hash = "sha256:98afb3e86bf9ae02cc7c98ca44fa8827babb72842f90da9884c5d998937572ae"}, + {file = "phonenumbers-9.0.15-py2.py3-none-any.whl", hash = "sha256:269b73bc05258e8fd57582770b9559307099ea677c8f1dc5272476f661344776"}, + {file = "phonenumbers-9.0.15.tar.gz", hash = "sha256:345ff7f23768332d866f37732f815cdf1d33c7f0961246562a5c5b78c12c3ff3"}, ] [[package]] From 1c093509ceb04ee8ce0eb6a408b76b0fda3ac87c Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 2 Oct 2025 10:22:25 -0500 Subject: [PATCH 008/149] Switch task scheduler from raw logcontext manipulation (`set_current_context`) to utils (`PreserveLoggingContext`) (#18990) Prefer the utils over raw logcontext manipulation. Spawning from adding some logcontext debug logs in https://github.com/element-hq/synapse/pull/18966 and since we're not logging at the `set_current_context(...)` level (see reasoning there), this removes some usage of `set_current_context(...)`. 
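As a rough sketch of the pattern this patch moves away from and towards (illustrative only, not part of the patch itself; the real change to `synapse/util/task_scheduler.py` is in the diff below), the raw approach saves and restores the caller's logcontext by hand, while `PreserveLoggingContext` performs the same save/restore as a context manager:

    from synapse.logging.context import (
        LoggingContext,
        PreserveLoggingContext,
        set_current_context,
    )

    # Assumed stand-in for the task's logcontext held by the real scheduler code.
    task_log_context = LoggingContext(name="example-task", server_name="example.test")

    # Before: raw logcontext manipulation, restoring the calling context manually.
    calling_context = set_current_context(task_log_context)
    try:
        usage = task_log_context.get_resource_usage()
    finally:
        set_current_context(calling_context)

    # After: the dedicated util restores the previous context automatically.
    with PreserveLoggingContext(task_log_context):
        usage = task_log_context.get_resource_usage()

Both variants leave the caller's logcontext in place afterwards; the context-manager form just makes it harder to forget the restore on an error path.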
--- changelog.d/18990.misc | 1 + synapse/util/task_scheduler.py | 7 ++----- 2 files changed, 3 insertions(+), 5 deletions(-) create mode 100644 changelog.d/18990.misc diff --git a/changelog.d/18990.misc b/changelog.d/18990.misc new file mode 100644 index 0000000000..f7f8ac5ffd --- /dev/null +++ b/changelog.d/18990.misc @@ -0,0 +1 @@ +Switch task scheduler from raw logcontext manipulation to using the dedicated logcontext utils. diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 7443d4e097..8dd6f12feb 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -27,8 +27,8 @@ from twisted.python.failure import Failure from synapse.logging.context import ( ContextResourceUsage, LoggingContext, + PreserveLoggingContext, nested_logging_context, - set_current_context, ) from synapse.metrics import SERVER_NAME_LABEL, LaterGauge from synapse.metrics.background_process_metrics import ( @@ -422,14 +422,11 @@ class TaskScheduler: """ current_time = self._clock.time() - calling_context = set_current_context(task_log_context) - try: + with PreserveLoggingContext(task_log_context): usage = task_log_context.get_resource_usage() TaskScheduler._log_task_usage( "continuing", task, usage, current_time - start_time ) - finally: - set_current_context(calling_context) async def wrapper() -> None: with nested_logging_context(task.id) as log_context: From 06a84f4fe0c6b88ba3c0c05869e43522238bc0a2 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 2 Oct 2025 11:27:26 -0500 Subject: [PATCH 009/149] Revert "Switch to OpenTracing's `ContextVarsScopeManager` (#18849)" (#19007) Revert https://github.com/element-hq/synapse/pull/18849 Go back to our custom `LogContextScopeManager` after trying OpenTracing's `ContextVarsScopeManager`. Fix https://github.com/element-hq/synapse/issues/19004 ### Why revert? For reference, with the normal reactor, `ContextVarsScopeManager` worked just as good as our custom `LogContextScopeManager` as far as I can tell (and even better in some cases). But since Twisted appears to not fully support `ContextVar`'s, it doesn't work as expected in all cases. Compounding things, `ContextVarsScopeManager` was causing errors with the experimental `SYNAPSE_ASYNC_IO_REACTOR` option. Since we're not getting the full benefit that we originally desired, we might as well revert and figure out alternatives for extending the logcontext lifetimes to support the use case we were trying to unlock (c.f. https://github.com/element-hq/synapse/pull/18804). See https://github.com/element-hq/synapse/issues/19004#issuecomment-3358052171 for more info. ### Does this require backporting and patch releases? No. Since `ContextVarsScopeManager` operates just as good with the normal reactor and was only causing actual errors with the experimental `SYNAPSE_ASYNC_IO_REACTOR` option, I don't think this requires us to backport and make patch releases at all. ### Maintain cross-links between main trace and background process work In order to maintain the functionality introduced in https://github.com/element-hq/synapse/pull/18932 (cross-links between the background process trace and currently active trace), we also needed a small change. Previously, when we were using `ContextVarsScopeManager`, it tracked the tracing scope across the logcontext changes without issue. 
Now that we're using our own custom `LogContextScopeManager` again, we need to capture the active span from the logcontext before we reset to the sentinel context because of the `PreserveLoggingContext()` below. Added some tests to ensure we maintain the `run_as_background` tracing behavior regardless of the tracing scope manager we use. --- changelog.d/19007.misc | 1 + synapse/logging/context.py | 16 +- synapse/logging/opentracing.py | 25 +- synapse/logging/scopecontextmanager.py | 161 ++++++++ synapse/metrics/background_process_metrics.py | 24 +- tests/logging/test_opentracing.py | 364 +++++++++++------- 6 files changed, 432 insertions(+), 159 deletions(-) create mode 100644 changelog.d/19007.misc create mode 100644 synapse/logging/scopecontextmanager.py diff --git a/changelog.d/19007.misc b/changelog.d/19007.misc new file mode 100644 index 0000000000..720623e98e --- /dev/null +++ b/changelog.d/19007.misc @@ -0,0 +1 @@ +Switch back to our own custom `LogContextScopeManager` instead of OpenTracing's `ContextVarsScopeManager` which was causing problems when using the experimental `SYNAPSE_ASYNC_IO_REACTOR` option with tracing enabled. diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 119d3be7bf..5cfd861685 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -56,6 +56,7 @@ from twisted.internet import defer, threads from twisted.python.threadpool import ThreadPool if TYPE_CHECKING: + from synapse.logging.scopecontextmanager import _LogContextScope from synapse.types import ISynapseReactor logger = logging.getLogger(__name__) @@ -238,7 +239,14 @@ class _Sentinel: we should always know which server the logs are coming from. """ - __slots__ = ["previous_context", "finished", "server_name", "request", "tag"] + __slots__ = [ + "previous_context", + "finished", + "scope", + "server_name", + "request", + "tag", + ] def __init__(self) -> None: # Minimal set for compatibility with LoggingContext @@ -246,6 +254,7 @@ class _Sentinel: self.finished = False self.server_name = "unknown_server_from_sentinel_context" self.request = None + self.scope = None self.tag = None def __str__(self) -> str: @@ -303,6 +312,7 @@ class LoggingContext: "finished", "request", "tag", + "scope", ] def __init__( @@ -327,6 +337,7 @@ class LoggingContext: self.main_thread = get_thread_id() self.request = None self.tag = "" + self.scope: Optional["_LogContextScope"] = None # keep track of whether we have hit the __exit__ block for this context # (suggesting that the the thing that created the context thinks it should @@ -340,6 +351,9 @@ class LoggingContext: # which request this corresponds to self.request = self.parent_context.request + # we also track the current scope: + self.scope = self.parent_context.scope + if request is not None: # the request param overrides the request from the parent context self.request = request diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 8d350016ce..1c89a358df 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -251,17 +251,18 @@ class _DummyTagNames: try: import opentracing import opentracing.tags - from opentracing.scope_managers.contextvars import ContextVarsScopeManager tags = opentracing.tags except ImportError: opentracing = None # type: ignore[assignment] tags = _DummyTagNames # type: ignore[assignment] - ContextVarsScopeManager = None # type: ignore try: from jaeger_client import Config as JaegerConfig + + from synapse.logging.scopecontextmanager import 
LogContextScopeManager except ImportError: JaegerConfig = None # type: ignore + LogContextScopeManager = None # type: ignore try: @@ -483,7 +484,7 @@ def init_tracer(hs: "HomeServer") -> None: config = JaegerConfig( config=jaeger_config, service_name=f"{hs.config.server.server_name} {instance_name_by_type}", - scope_manager=ContextVarsScopeManager(), + scope_manager=LogContextScopeManager(), metrics_factory=PrometheusMetricsFactory(), ) @@ -683,9 +684,21 @@ def start_active_span_from_edu( # Opentracing setters for tags, logs, etc @only_if_tracing -def active_span() -> Optional["opentracing.Span"]: - """Get the currently active span, if any""" - return opentracing.tracer.active_span +def active_span( + *, + tracer: Optional["opentracing.Tracer"] = None, +) -> Optional["opentracing.Span"]: + """ + Get the currently active span, if any + + Args: + tracer: override the opentracing tracer. By default the global tracer is used. + """ + if tracer is None: + # use the global tracer by default + tracer = opentracing.tracer + + return tracer.active_span @ensure_active_span("set a tag") diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py new file mode 100644 index 0000000000..feaadc4d87 --- /dev/null +++ b/synapse/logging/scopecontextmanager.py @@ -0,0 +1,161 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright (C) 2023 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +# Originally licensed under the Apache License, Version 2.0: +# . +# +# [This file includes modifications made by New Vector Limited] +# +# + +import logging +from typing import Optional + +from opentracing import Scope, ScopeManager, Span + +from synapse.logging.context import ( + LoggingContext, + current_context, + nested_logging_context, +) + +logger = logging.getLogger(__name__) + + +class LogContextScopeManager(ScopeManager): + """ + The LogContextScopeManager tracks the active scope in opentracing + by using the log contexts which are native to synapse. This is so + that the basic opentracing api can be used across twisted defereds. + + It would be nice just to use opentracing's ContextVarsScopeManager, + but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301. + """ + + def __init__(self) -> None: + pass + + @property + def active(self) -> Optional[Scope]: + """ + Returns the currently active Scope which can be used to access the + currently active Scope.span. + If there is a non-null Scope, its wrapped Span + becomes an implicit parent of any newly-created Span at + Tracer.start_active_span() time. + + Return: + The Scope that is active, or None if not available. + """ + ctx = current_context() + return ctx.scope + + def activate(self, span: Span, finish_on_close: bool) -> Scope: + """ + Makes a Span active. + Args + span: the span that should become active. + finish_on_close: whether Span should be automatically finished when + Scope.close() is called. + + Returns: + Scope to control the end of the active period for + *span*. It is a programming error to neglect to call + Scope.close() on the returned instance. 
+ """ + + ctx = current_context() + + if not ctx: + logger.error("Tried to activate scope outside of loggingcontext") + return Scope(None, span) # type: ignore[arg-type] + + if ctx.scope is not None: + # start a new logging context as a child of the existing one. + # Doing so -- rather than updating the existing logcontext -- means that + # creating several concurrent spans under the same logcontext works + # correctly. + ctx = nested_logging_context("") + enter_logcontext = True + else: + # if there is no span currently associated with the current logcontext, we + # just store the scope in it. + # + # This feels a bit dubious, but it does hack around a problem where a + # span outlasts its parent logcontext (which would otherwise lead to + # "Re-starting finished log context" errors). + enter_logcontext = False + + scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close) + ctx.scope = scope + if enter_logcontext: + ctx.__enter__() + + return scope + + +class _LogContextScope(Scope): + """ + A custom opentracing scope, associated with a LogContext + + * When the scope is closed, the logcontext's active scope is reset to None. + and - if enter_logcontext was set - the logcontext is finished too. + """ + + def __init__( + self, + manager: LogContextScopeManager, + span: Span, + logcontext: LoggingContext, + enter_logcontext: bool, + finish_on_close: bool, + ): + """ + Args: + manager: + the manager that is responsible for this scope. + span: + the opentracing span which this scope represents the local + lifetime for. + logcontext: + the log context to which this scope is attached. + enter_logcontext: + if True the log context will be exited when the scope is finished + finish_on_close: + if True finish the span when the scope is closed + """ + super().__init__(manager, span) + self.logcontext = logcontext + self._finish_on_close = finish_on_close + self._enter_logcontext = enter_logcontext + + def __str__(self) -> str: + return f"Scope<{self.span}>" + + def close(self) -> None: + active_scope = self.manager.active + if active_scope is not self: + logger.error( + "Closing scope %s which is not the currently-active one %s", + self, + active_scope, + ) + + if self._finish_on_close: + self.span.finish() + + self.logcontext.scope = None + + if self._enter_logcontext: + self.logcontext.__exit__(None, None, None) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 6dc2cbe132..05e84038ac 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -68,6 +68,11 @@ if TYPE_CHECKING: from synapse.server import HomeServer + try: + import opentracing + except ImportError: + opentracing = None # type: ignore[assignment] + logger = logging.getLogger(__name__) @@ -225,6 +230,7 @@ def run_as_background_process( func: Callable[..., Awaitable[Optional[R]]], *args: Any, bg_start_span: bool = True, + test_only_tracer: Optional["opentracing.Tracer"] = None, **kwargs: Any, ) -> "defer.Deferred[Optional[R]]": """Run the given function in its own logcontext, with resource metrics @@ -250,6 +256,8 @@ def run_as_background_process( bg_start_span: Whether to start an opentracing span. Defaults to True. Should only be disabled for processes that will not log to or tag a span. + test_only_tracer: Set the OpenTracing tracer to use. This is only useful for + tests. args: positional args for func kwargs: keyword args for func @@ -259,6 +267,12 @@ def run_as_background_process( rules. 
""" + # Since we track the tracing scope in the `LoggingContext`, before we move to the + # sentinel logcontext (or a new `LoggingContext`), grab the currently active + # tracing span (if any) so that we can create a cross-link to the background process + # trace. + original_active_tracing_span = active_span(tracer=test_only_tracer) + async def run() -> Optional[R]: with _bg_metrics_lock: count = _background_process_counts.get(desc, 0) @@ -276,8 +290,6 @@ def run_as_background_process( ) as logging_context: try: if bg_start_span: - original_active_tracing_span = active_span() - # If there is already an active span (e.g. because this background # process was started as part of handling a request for example), # because this is a long-running background task that may serve a @@ -308,6 +320,7 @@ def run_as_background_process( # Create a root span for the background process (disconnected # from other spans) ignore_active_span=True, + tracer=test_only_tracer, ) # Also add a span in the original request trace that cross-links @@ -324,8 +337,11 @@ def run_as_background_process( f"start_bgproc.{desc}", child_of=original_active_tracing_span, ignore_active_span=True, - # Points to the background process span. + # Create the `FOLLOWS_FROM` reference to the background + # process span so there is a loose coupling between the two + # traces and it's easy to jump between. contexts=[root_tracing_scope.span.context], + tracer=test_only_tracer, ): pass @@ -341,6 +357,7 @@ def run_as_background_process( # span so there is a loose coupling between the two # traces and it's easy to jump between. contexts=[original_active_tracing_span.context], + tracer=test_only_tracer, ) # For easy usage down below, we create a context manager that @@ -359,6 +376,7 @@ def run_as_background_process( tracing_scope = start_active_span( f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(logging_context)}, + tracer=test_only_tracer, ) else: tracing_scope = nullcontext() diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index 31cdfacd2c..2f389f7f44 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -19,7 +19,7 @@ # # -from typing import Awaitable, Dict, cast +from typing import Awaitable, Optional, cast from twisted.internet import defer from twisted.internet.testing import MemoryReactorClock @@ -35,20 +35,25 @@ from synapse.logging.opentracing import ( tag_args, trace_with_opname, ) +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.clock import Clock -try: - import opentracing - from opentracing.scope_managers.contextvars import ContextVarsScopeManager -except ImportError: - opentracing = None # type: ignore - ContextVarsScopeManager = None # type: ignore +from tests.server import get_clock try: import jaeger_client except ImportError: jaeger_client = None # type: ignore + +try: + import opentracing + + from synapse.logging.scopecontextmanager import LogContextScopeManager +except ImportError: + opentracing = None # type: ignore + LogContextScopeManager = None # type: ignore + import logging from tests.unittest import TestCase @@ -56,7 +61,7 @@ from tests.unittest import TestCase logger = logging.getLogger(__name__) -class TracingScopeTestCase(TestCase): +class LogContextScopeManagerTestCase(TestCase): """ Test that our tracing machinery works well in a variety of situations (especially with Twisted's runtime and deferreds). 
@@ -67,7 +72,7 @@ class TracingScopeTestCase(TestCase): opentracing backend is Jaeger. """ - if opentracing is None: + if opentracing is None or LogContextScopeManager is None: skip = "Requires opentracing" # type: ignore[unreachable] if jaeger_client is None: skip = "Requires jaeger_client" # type: ignore[unreachable] @@ -77,9 +82,8 @@ class TracingScopeTestCase(TestCase): # global variables that power opentracing. We create our own tracer instance # and test with it. - scope_manager = ContextVarsScopeManager() config = jaeger_client.config.Config( - config={}, service_name="test", scope_manager=scope_manager + config={}, service_name="test", scope_manager=LogContextScopeManager() ) self._reporter = jaeger_client.reporter.InMemoryReporter() @@ -220,144 +224,6 @@ class TracingScopeTestCase(TestCase): [scopes[1].span, scopes[2].span, scopes[0].span], ) - def test_run_in_background_active_scope_still_available(self) -> None: - """ - Test that tasks running via `run_in_background` still have access to the - active tracing scope. - - This is a regression test for a previous Synapse issue where the tracing scope - would `__exit__` and close before the `run_in_background` task completed and our - own previous custom `_LogContextScope.close(...)` would clear - `LoggingContext.scope` preventing further tracing spans from having the correct - parent. - """ - reactor = MemoryReactorClock() - # type-ignore: mypy-zope doesn't seem to recognise that `MemoryReactorClock` - # implements `ISynapseThreadlessReactor` (combination of the normal Twisted - # Reactor/Clock interfaces), via inheritance from - # `twisted.internet.testing.MemoryReactor` and `twisted.internet.testing.Clock` - # Ignore `multiple-internal-clocks` linter error here since we are creating a `Clock` - # for testing purposes. - clock = Clock( # type: ignore[multiple-internal-clocks] - reactor, # type: ignore[arg-type] - server_name="test_server", - ) - - scope_map: Dict[str, opentracing.Scope] = {} - - async def async_task() -> None: - root_scope = scope_map["root"] - root_context = cast(jaeger_client.SpanContext, root_scope.span.context) - - self.assertEqual( - self._tracer.active_span, - root_scope.span, - "expected to inherit the root tracing scope from where this was run", - ) - - # Return control back to the reactor thread and wait an arbitrary amount - await clock.sleep(4) - - # This is a key part of what we're testing! In a previous version of - # Synapse, we would lose the active span at this point. - self.assertEqual( - self._tracer.active_span, - root_scope.span, - "expected to still have a root tracing scope/span active", - ) - - # For complete-ness sake, let's also trace more sub-tasks here and assert - # they have the correct span parents as well (root) - - # Start tracing some other sub-task. - # - # This is a key part of what we're testing! In a previous version of - # Synapse, it would have the incorrect span parents. 
- scope = start_active_span( - "task1", - tracer=self._tracer, - ) - scope_map["task1"] = scope - - # Ensure the span parent is pointing to the root scope - context = cast(jaeger_client.SpanContext, scope.span.context) - self.assertEqual( - context.parent_id, - root_context.span_id, - "expected task1 parent to be the root span", - ) - - # Ensure that the active span is our new sub-task now - self.assertEqual(self._tracer.active_span, scope.span) - # Return control back to the reactor thread and wait an arbitrary amount - await clock.sleep(4) - # We should still see the active span as the scope wasn't closed yet - self.assertEqual(self._tracer.active_span, scope.span) - scope.close() - - async def root() -> None: - with start_active_span( - "root span", - tracer=self._tracer, - # We will close this off later. We're basically just mimicking the same - # pattern for how we handle requests. We pass the span off to the - # request for it to finish. - finish_on_close=False, - ) as root_scope: - scope_map["root"] = root_scope - self.assertEqual(self._tracer.active_span, root_scope.span) - - # Fire-and-forget a task - # - # XXX: The root scope context manager will `__exit__` before this task - # completes. - run_in_background(async_task) - - # Because we used `run_in_background`, the active span should still be - # the root. - self.assertEqual(self._tracer.active_span, root_scope.span) - - # We shouldn't see any active spans outside of the scope - self.assertIsNone(self._tracer.active_span) - - with LoggingContext(name="root context", server_name="test_server"): - # Start the test off - d_root = defer.ensureDeferred(root()) - - # Let the tasks complete - reactor.pump((2,) * 8) - self.successResultOf(d_root) - - # After we see all of the tasks are done (like a request when it - # `_finished_processing`), let's finish our root span - scope_map["root"].span.finish() - - # Sanity check again: We shouldn't see any active spans leftover in this - # this context. - self.assertIsNone(self._tracer.active_span) - - # The spans should be reported in order of their finishing: task 1, task 2, - # root. - # - # We use `assertIncludes` just as an easier way to see if items are missing or - # added. We assert the order just below - self.assertIncludes( - set(self._reporter.get_spans()), - { - scope_map["task1"].span, - scope_map["root"].span, - }, - exact=True, - ) - # This is where we actually assert the correct order - self.assertEqual( - self._reporter.get_spans(), - [ - scope_map["task1"].span, - scope_map["root"].span, - ], - ) - def test_trace_decorator_sync(self) -> None: """ Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` @@ -455,3 +321,203 @@ class TracingScopeTestCase(TestCase): [span.operation_name for span in self._reporter.get_spans()], ["fixture_awaitable_return_func"], ) + + async def test_run_as_background_process_standalone(self) -> None: + """ + Test to make sure that the background process work starts its own trace. + """ + reactor, clock = get_clock() + + callback_finished = False + active_span_in_callback: Optional[jaeger_client.Span] = None + + async def bg_task() -> None: + nonlocal callback_finished, active_span_in_callback + try: + assert isinstance(self._tracer.active_span, jaeger_client.Span) + active_span_in_callback = self._tracer.active_span + finally: + # When exceptions happen, we still want to mark the callback as finished + # so that the test can complete and we see the underlying error. 
+ callback_finished = True + + # type-ignore: We ignore because the point is to test the bare function + run_as_background_process( # type: ignore[untracked-background-process] + desc="some-bg-task", + server_name="test_server", + func=bg_task, + test_only_tracer=self._tracer, + ) + + # Now wait for the background process to finish + while not callback_finished: + await clock.sleep(0) + + self.assertTrue( + callback_finished, + "Callback never finished which means the test probably didn't wait long enough", + ) + + self.assertEqual( + active_span_in_callback.operation_name if active_span_in_callback else None, + "bgproc.some-bg-task", + "expected a new span to be started for the background task", + ) + + # The spans should be reported in order of their finishing. + # + # We use `assertIncludes` just as an easier way to see if items are missing or + # added. We assert the order just below + actual_spans = [span.operation_name for span in self._reporter.get_spans()] + expected_spans = ["bgproc.some-bg-task"] + self.assertIncludes( + set(actual_spans), + set(expected_spans), + exact=True, + ) + # This is where we actually assert the correct order + self.assertEqual( + actual_spans, + expected_spans, + ) + + async def test_run_as_background_process_cross_link(self) -> None: + """ + Test to make sure that the background process work has its own trace and is + disconnected from any currently active trace (like a request). But we still have + cross-links between the two traces if there was already an active trace/span when + we kicked off the background process. + """ + reactor, clock = get_clock() + + callback_finished = False + active_span_in_callback: Optional[jaeger_client.Span] = None + + async def bg_task() -> None: + nonlocal callback_finished, active_span_in_callback + try: + assert isinstance(self._tracer.active_span, jaeger_client.Span) + active_span_in_callback = self._tracer.active_span + finally: + # When exceptions happen, we still want to mark the callback as finished + # so that the test can complete and we see the underlying error. + callback_finished = True + + with LoggingContext(name="some-request", server_name="test_server"): + with start_active_span( + "some-request", + tracer=self._tracer, + ): + # type-ignore: We ignore because the point is to test the bare function + run_as_background_process( # type: ignore[untracked-background-process] + desc="some-bg-task", + server_name="test_server", + func=bg_task, + test_only_tracer=self._tracer, + ) + + # Now wait for the background process to finish + while not callback_finished: + await clock.sleep(0) + + self.assertTrue( + callback_finished, + "Callback never finished which means the test probably didn't wait long enough", + ) + + # We start `bgproc.some-bg-task` and `bgproc_child.some-bg-task` (see + # `run_as_background_process` implementation for why). Either is fine but for + # now we expect the child as its the innermost one that was started. + self.assertEqual( + active_span_in_callback.operation_name if active_span_in_callback else None, + "bgproc_child.some-bg-task", + "expected a new span to be started for the background task", + ) + + # The spans should be reported in order of their finishing. + # + # We use `assertIncludes` just as an easier way to see if items are missing or + # added. 
We assert the order just below + actual_spans = [span.operation_name for span in self._reporter.get_spans()] + expected_spans = [ + "start_bgproc.some-bg-task", + "bgproc_child.some-bg-task", + "bgproc.some-bg-task", + "some-request", + ] + self.assertIncludes( + set(actual_spans), + set(expected_spans), + exact=True, + ) + # This is where we actually assert the correct order + self.assertEqual( + actual_spans, + expected_spans, + ) + + span_map = {span.operation_name: span for span in self._reporter.get_spans()} + span_id_to_friendly_name = { + span.span_id: span.operation_name for span in self._reporter.get_spans() + } + + def get_span_friendly_name(span_id: Optional[int]) -> str: + if span_id is None: + return "None" + + return span_id_to_friendly_name.get(span_id, f"unknown span {span_id}") + + # Ensure the background process trace/span is disconnected from the request + # trace/span. + self.assertNotEqual( + get_span_friendly_name(span_map["bgproc.some-bg-task"].parent_id), + get_span_friendly_name(span_map["some-request"].span_id), + ) + + # We should see a cross-link in the request trace pointing to the background + # process trace. + # + # Make sure `start_bgproc.some-bg-task` is part of the request trace + self.assertEqual( + get_span_friendly_name(span_map["start_bgproc.some-bg-task"].parent_id), + get_span_friendly_name(span_map["some-request"].span_id), + ) + # And has some references to the background process trace + self.assertIncludes( + { + f"{reference.type}:{get_span_friendly_name(reference.referenced_context.span_id)}" + if isinstance(reference.referenced_context, jaeger_client.SpanContext) + else f"{reference.type}:None" + for reference in ( + span_map["start_bgproc.some-bg-task"].references or [] + ) + }, + { + f"follows_from:{get_span_friendly_name(span_map['bgproc.some-bg-task'].span_id)}" + }, + exact=True, + ) + + # We should see a cross-link in the background process trace pointing to the + # request trace that kicked off the work. + # + # Make sure `start_bgproc.some-bg-task` is part of the request trace + self.assertEqual( + get_span_friendly_name(span_map["bgproc_child.some-bg-task"].parent_id), + get_span_friendly_name(span_map["bgproc.some-bg-task"].span_id), + ) + # And has some references to the background process trace + self.assertIncludes( + { + f"{reference.type}:{get_span_friendly_name(reference.referenced_context.span_id)}" + if isinstance(reference.referenced_context, jaeger_client.SpanContext) + else f"{reference.type}:None" + for reference in ( + span_map["bgproc_child.some-bg-task"].references or [] + ) + }, + { + f"follows_from:{get_span_friendly_name(span_map['some-request'].span_id)}" + }, + exact=True, + ) From d27ff161f5392a8319d435b4e5b8e289d8c96375 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 2 Oct 2025 11:51:17 -0500 Subject: [PATCH 010/149] Add debug logs wherever we change current logcontext (#18966) Add debug logs wherever we change current logcontext (`LoggingContext`). I've had to make this same set of changes over and over as I've been debugging things so it seems useful enough to include by default. Instead of tracing things at the `set_current_context(...)` level, I've added the debug logging on all of the utilities that utilize `set_current_context(...)`. It's much easier to reason about the log context changing because of `PreserveLoggingContext` changing things than an opaque `set_current_context(...)` call. 
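
As a rough sketch of the transitions these logs describe (illustrative only: the
context names, server name, and the exact log wording below are assumptions, and the
`synapse.logging.context.debug` logger has to be explicitly enabled as described in the
docs change below):

```python
from synapse.logging.context import (
    LoggingContext,
    PreserveLoggingContext,
    current_context,
)

# Hypothetical request-handling code, purely to illustrate what the new debug
# logging narrates as the current logcontext changes.
with LoggingContext(name="request-1", server_name="example.com"):
    # debug: LoggingContext(request-1).__enter__
    assert str(current_context()) == "request-1"

    with PreserveLoggingContext():
        # debug: PreserveLoggingContext(...).__enter__ request-1 --> sentinel
        # We are now in the sentinel context, e.g. while yielding to the reactor.
        assert str(current_context()) == "sentinel"
    # debug: PreserveLoggingContext(...).__exit__ sentinel --> request-1

# debug: LoggingContext(request-1).__exit__ --> sentinel
```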
---
 changelog.d/18966.misc | 1 +
 docs/log_contexts.md | 16 ++++++
 synapse/logging/context.py | 106 +++++++++++++++++++++++++++++++++++--
 3 files changed, 119 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/18966.misc

diff --git a/changelog.d/18966.misc b/changelog.d/18966.misc
new file mode 100644
index 0000000000..42c8782a42
--- /dev/null
+++ b/changelog.d/18966.misc
@@ -0,0 +1 @@
+Add debug logs wherever we change current logcontext.
diff --git a/docs/log_contexts.md b/docs/log_contexts.md
index bbe9e86827..76710e10e0 100644
--- a/docs/log_contexts.md
+++ b/docs/log_contexts.md
@@ -548,3 +548,19 @@ chain are dropped.
 Dropping the the reference to an awaitable you're supposed to be awaiting is bad practice,
 so this doesn't actually happen too much. Unfortunately, when it does happen, it will lead
 to leaked logcontexts which are incredibly hard to track down.
+
+
+## Debugging logcontext issues
+
+Debugging logcontext issues can be tricky as leaking or losing a logcontext will surface
+downstream and can point to an unrelated part of the codebase. It's best to enable debug
+logging for `synapse.logging.context.debug` (needs to be explicitly configured) and go
+backwards in the logs from the point where the issue is observed to find the root cause.
+
+`log.config.yaml`
+```yaml
+loggers:
+  # Unlike other loggers, this one needs to be explicitly configured to see debug logs.
+  synapse.logging.context.debug:
+    level: DEBUG
+```
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 5cfd861685..42fc7148c1 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -55,12 +55,29 @@ from typing_extensions import ParamSpec
 from twisted.internet import defer, threads
 from twisted.python.threadpool import ThreadPool
 
+from synapse.logging.loggers import ExplicitlyConfiguredLogger
+from synapse.util.stringutils import random_string
+
 if TYPE_CHECKING:
     from synapse.logging.scopecontextmanager import _LogContextScope
     from synapse.types import ISynapseReactor
 
 logger = logging.getLogger(__name__)
 
+original_logger_class = logging.getLoggerClass()
+logging.setLoggerClass(ExplicitlyConfiguredLogger)
+logcontext_debug_logger = logging.getLogger("synapse.logging.context.debug")
+"""
+A logger for debugging when the logcontext switches.
+
+Because this is very noisy and probably something only developers want to see when
+debugging logcontext problems, we want people to explicitly opt-in before seeing anything
+in the logs. Requires explicitly setting `synapse.logging.context.debug` in the logging
+configuration and does not inherit the log level from the parent logger.
+"""
+# Restore the original logger class
+logging.setLoggerClass(original_logger_class)
+
 try:
     import resource
 
@@ -404,6 +421,7 @@ class LoggingContext:
 
     def __enter__(self) -> "LoggingContext":
        """Enters this logging context into thread local storage"""
+        logcontext_debug_logger.debug("LoggingContext(%s).__enter__", self.name)
         old_context = set_current_context(self)
         if self.previous_context != old_context:
             logcontext_error(
@@ -426,6 +444,9 @@ class LoggingContext:
         Returns:
             None to avoid suppressing any exceptions that were thrown.
         """
+        logcontext_debug_logger.debug(
+            "LoggingContext(%s).__exit__ --> %s", self.name, self.previous_context
+        )
         current = set_current_context(self.previous_context)
         if current is not self:
             if current is SENTINEL_CONTEXT:
@@ -674,14 +695,21 @@ class PreserveLoggingContext:
     reactor back to the code).
""" - __slots__ = ["_old_context", "_new_context"] + __slots__ = ["_old_context", "_new_context", "_instance_id"] def __init__( self, new_context: LoggingContextOrSentinel = SENTINEL_CONTEXT ) -> None: self._new_context = new_context + self._instance_id = random_string(5) def __enter__(self) -> None: + logcontext_debug_logger.debug( + "PreserveLoggingContext(%s).__enter__ %s --> %s", + self._instance_id, + current_context(), + self._new_context, + ) self._old_context = set_current_context(self._new_context) def __exit__( @@ -690,6 +718,12 @@ class PreserveLoggingContext: value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: + logcontext_debug_logger.debug( + "PreserveLoggingContext(%s).__exit %s --> %s", + self._instance_id, + current_context(), + self._old_context, + ) context = set_current_context(self._old_context) if context != self._new_context: @@ -869,7 +903,11 @@ def run_in_background( Note that the returned Deferred does not follow the synapse logcontext rules. """ + instance_id = random_string(5) calling_context = current_context() + logcontext_debug_logger.debug( + "run_in_background(%s): called with logcontext=%s", instance_id, calling_context + ) try: # (kick off the task in the current context) res = f(*args, **kwargs) @@ -911,6 +949,11 @@ def run_in_background( # to reset the logcontext to the sentinel logcontext as that would run # immediately (remember our goal is to maintain the calling logcontext when we # return). + logcontext_debug_logger.debug( + "run_in_background(%s): deferred already completed and the function should have maintained the logcontext %s", + instance_id, + calling_context, + ) return d # Since the function we called may follow the Synapse logcontext rules (Rules for @@ -921,6 +964,11 @@ def run_in_background( # # Our goal is to have the caller logcontext unchanged after firing off the # background task and returning. + logcontext_debug_logger.debug( + "run_in_background(%s): restoring calling logcontext %s", + instance_id, + calling_context, + ) set_current_context(calling_context) # If the function we called is playing nice and following the Synapse logcontext @@ -936,7 +984,23 @@ def run_in_background( # which is supposed to have a single entry and exit point. But # by spawning off another deferred, we are effectively # adding a new exit point.) - d.addBoth(_set_context_cb, SENTINEL_CONTEXT) + if logcontext_debug_logger.isEnabledFor(logging.DEBUG): + + def _log_set_context_cb( + result: ResultT, context: LoggingContextOrSentinel + ) -> ResultT: + logcontext_debug_logger.debug( + "run_in_background(%s): resetting logcontext to %s", + instance_id, + context, + ) + set_current_context(context) + return result + + d.addBoth(_log_set_context_cb, SENTINEL_CONTEXT) + else: + d.addBoth(_set_context_cb, SENTINEL_CONTEXT) + return d @@ -992,10 +1056,21 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T] restores the old context once the awaitable completes (execution passes from the reactor back to the code). """ + instance_id = random_string(5) + logcontext_debug_logger.debug( + "make_deferred_yieldable(%s): called with logcontext=%s", + instance_id, + current_context(), + ) + # The deferred has already completed if deferred.called and not deferred.paused: # it looks like this deferred is ready to run any callbacks we give it # immediately. We may as well optimise out the logcontext faffery. 
+ logcontext_debug_logger.debug( + "make_deferred_yieldable(%s): deferred already completed and the function should have maintained the logcontext", + instance_id, + ) return deferred # Our goal is to have the caller logcontext unchanged after they yield/await the @@ -1007,8 +1082,31 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T] # does) while the deferred runs in the reactor event loop, we reset the logcontext # and add a callback to the deferred to restore it so the caller's logcontext is # active when the deferred completes. - prev_context = set_current_context(SENTINEL_CONTEXT) - deferred.addBoth(_set_context_cb, prev_context) + + logcontext_debug_logger.debug( + "make_deferred_yieldable(%s): resetting logcontext to %s", + instance_id, + SENTINEL_CONTEXT, + ) + calling_context = set_current_context(SENTINEL_CONTEXT) + + if logcontext_debug_logger.isEnabledFor(logging.DEBUG): + + def _log_set_context_cb( + result: ResultT, context: LoggingContextOrSentinel + ) -> ResultT: + logcontext_debug_logger.debug( + "make_deferred_yieldable(%s): restoring calling logcontext to %s", + instance_id, + context, + ) + set_current_context(context) + return result + + deferred.addBoth(_log_set_context_cb, calling_context) + else: + deferred.addBoth(_set_context_cb, calling_context) + return deferred From 6835e7be0de632f14ac0c62b27f442db433421e0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 2 Oct 2025 13:00:50 -0500 Subject: [PATCH 011/149] Wrap the Rust HTTP client with `make_deferred_yieldable` (#18903) Wrap the Rust HTTP client with `make_deferred_yieldable` so downstream usage doesn't need to use `PreserveLoggingContext()` or `make_deferred_yieldable`. > it seems like we should have some wrapper around it that uses [`make_deferred_yieldable(...)`](https://github.com/element-hq/synapse/blob/40edb10a98ae24c637b7a9cf6a3003bf6fa48b5f/docs/log_contexts.md#where-you-create-a-new-awaitable-make-it-follow-the-rules) to make things right so we don't have to do this in the downstream code. > > *-- @MadLittleMods, https://github.com/element-hq/synapse/pull/18357#discussion_r2294941827* Spawning from wanting to [remove `PreserveLoggingContext()` from the codebase](https://github.com/element-hq/synapse/pull/18870) and thinking that we [shouldn't have to pollute all downstream usage with `PreserveLoggingContext()` or `make_deferred_yieldable`](https://github.com/element-hq/synapse/pull/18357#discussion_r2294941827) Part of https://github.com/element-hq/synapse/issues/18905 (Remove `sentinel` logcontext where we log in Synapse) --- changelog.d/18903.misc | 1 + rust/src/http_client.rs | 21 ++- synapse/api/auth/mas.py | 14 +- synapse/api/auth/msc3861_delegated.py | 14 +- synapse/synapse_rust/http_client.pyi | 4 + tests/synapse_rust/__init__.py | 11 ++ tests/synapse_rust/test_http_client.py | 225 +++++++++++++++++++++++++ 7 files changed, 272 insertions(+), 18 deletions(-) create mode 100644 changelog.d/18903.misc create mode 100644 tests/synapse_rust/__init__.py create mode 100644 tests/synapse_rust/test_http_client.py diff --git a/changelog.d/18903.misc b/changelog.d/18903.misc new file mode 100644 index 0000000000..bafa7dad5c --- /dev/null +++ b/changelog.d/18903.misc @@ -0,0 +1 @@ +Wrap the Rust HTTP client with `make_deferred_yieldable` so it follows Synapse logcontext rules. diff --git a/rust/src/http_client.rs b/rust/src/http_client.rs index b6cdf98f55..e67dae169f 100644 --- a/rust/src/http_client.rs +++ b/rust/src/http_client.rs @@ -12,7 +12,7 @@ * . 
*/ -use std::{collections::HashMap, future::Future}; +use std::{collections::HashMap, future::Future, sync::OnceLock}; use anyhow::Context; use futures::TryStreamExt; @@ -299,5 +299,22 @@ where }); }); - Ok(deferred) + // Make the deferred follow the Synapse logcontext rules + make_deferred_yieldable(py, &deferred) +} + +static MAKE_DEFERRED_YIELDABLE: OnceLock> = OnceLock::new(); + +/// Given a deferred, make it follow the Synapse logcontext rules +fn make_deferred_yieldable<'py>( + py: Python<'py>, + deferred: &Bound<'py, PyAny>, +) -> PyResult> { + let make_deferred_yieldable = MAKE_DEFERRED_YIELDABLE.get_or_init(|| { + let sys = PyModule::import(py, "synapse.logging.context").unwrap(); + let func = sys.getattr("make_deferred_yieldable").unwrap().unbind(); + func + }); + + make_deferred_yieldable.call1(py, (deferred,))?.extract(py) } diff --git a/synapse/api/auth/mas.py b/synapse/api/auth/mas.py index ef82ea9cc7..baa6b27336 100644 --- a/synapse/api/auth/mas.py +++ b/synapse/api/auth/mas.py @@ -33,7 +33,6 @@ from synapse.api.errors import ( UnrecognizedRequestError, ) from synapse.http.site import SynapseRequest -from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import ( active_span, force_tracing, @@ -229,13 +228,12 @@ class MasDelegatedAuth(BaseAuth): try: with start_active_span("mas-introspect-token"): inject_request_headers(raw_headers) - with PreserveLoggingContext(): - resp_body = await self._rust_http_client.post( - url=self._introspection_endpoint, - response_limit=1 * 1024 * 1024, - headers=raw_headers, - request_body=body, - ) + resp_body = await self._rust_http_client.post( + url=self._introspection_endpoint, + response_limit=1 * 1024 * 1024, + headers=raw_headers, + request_body=body, + ) except HttpResponseException as e: end_time = self._clock.time() introspection_response_timer.labels( diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 11a89dd3ed..b6adcc83dc 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -38,7 +38,6 @@ from synapse.api.errors import ( UnrecognizedRequestError, ) from synapse.http.site import SynapseRequest -from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import ( active_span, force_tracing, @@ -327,13 +326,12 @@ class MSC3861DelegatedAuth(BaseAuth): try: with start_active_span("mas-introspect-token"): inject_request_headers(raw_headers) - with PreserveLoggingContext(): - resp_body = await self._rust_http_client.post( - url=uri, - response_limit=1 * 1024 * 1024, - headers=raw_headers, - request_body=body, - ) + resp_body = await self._rust_http_client.post( + url=uri, + response_limit=1 * 1024 * 1024, + headers=raw_headers, + request_body=body, + ) except HttpResponseException as e: end_time = self._clock.time() introspection_response_timer.labels( diff --git a/synapse/synapse_rust/http_client.pyi b/synapse/synapse_rust/http_client.pyi index 9fb7831e6b..530d2be8e3 100644 --- a/synapse/synapse_rust/http_client.pyi +++ b/synapse/synapse_rust/http_client.pyi @@ -17,6 +17,10 @@ from twisted.internet.defer import Deferred from synapse.types import ISynapseReactor class HttpClient: + """ + The returned deferreds follow Synapse logcontext rules. + """ + def __init__(self, reactor: ISynapseReactor, user_agent: str) -> None: ... def get(self, url: str, response_limit: int) -> Deferred[bytes]: ... 
def post( diff --git a/tests/synapse_rust/__init__.py b/tests/synapse_rust/__init__.py new file mode 100644 index 0000000000..e056679fd5 --- /dev/null +++ b/tests/synapse_rust/__init__.py @@ -0,0 +1,11 @@ +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . diff --git a/tests/synapse_rust/test_http_client.py b/tests/synapse_rust/test_http_client.py new file mode 100644 index 0000000000..032eab77e8 --- /dev/null +++ b/tests/synapse_rust/test_http_client.py @@ -0,0 +1,225 @@ +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . + +import json +import logging +import threading +import time +from http.server import BaseHTTPRequestHandler, HTTPServer +from typing import Any, Coroutine, Generator, TypeVar, Union + +from twisted.internet.defer import Deferred, ensureDeferred +from twisted.internet.testing import MemoryReactor + +from synapse.logging.context import ( + LoggingContext, + PreserveLoggingContext, + _Sentinel, + current_context, + run_in_background, +) +from synapse.server import HomeServer +from synapse.synapse_rust.http_client import HttpClient +from synapse.util.clock import Clock +from synapse.util.json import json_decoder + +from tests.unittest import HomeserverTestCase + +logger = logging.getLogger(__name__) + +T = TypeVar("T") + + +class StubRequestHandler(BaseHTTPRequestHandler): + server: "StubServer" + + def do_GET(self) -> None: + self.server.calls += 1 + + self.send_response(200) + self.send_header("Content-Type", "application/json") + self.end_headers() + self.wfile.write(json.dumps({"ok": True}).encode("utf-8")) + + def log_message(self, format: str, *args: Any) -> None: + # Don't log anything; by default, the server logs to stderr + pass + + +class StubServer(HTTPServer): + """A stub HTTP server that we can send requests to for testing. + + This opens a real HTTP server on a random port, on a separate thread. + """ + + calls: int = 0 + """How many times has the endpoint been requested.""" + + _thread: threading.Thread + + def __init__(self) -> None: + super().__init__(("127.0.0.1", 0), StubRequestHandler) + + self._thread = threading.Thread( + target=self.serve_forever, + name="StubServer", + kwargs={"poll_interval": 0.01}, + daemon=True, + ) + self._thread.start() + + def shutdown(self) -> None: + super().shutdown() + self._thread.join() + + @property + def endpoint(self) -> str: + return f"http://127.0.0.1:{self.server_port}/" + + +class HttpClientTestCase(HomeserverTestCase): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + hs = self.setup_test_homeserver() + + # XXX: We must create the Rust HTTP client before we call `reactor.run()` below. 
+ # Twisted's `MemoryReactor` doesn't invoke `callWhenRunning` callbacks if it's + # already running and we rely on that to start the Tokio thread pool in Rust. In + # the future, this may not matter, see https://github.com/twisted/twisted/pull/12514 + self._http_client = hs.get_proxied_http_client() + self._rust_http_client = HttpClient( + reactor=hs.get_reactor(), + user_agent=self._http_client.user_agent.decode("utf8"), + ) + + # This triggers the server startup hooks, which starts the Tokio thread pool + reactor.run() + + return hs + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.server = StubServer() + + def tearDown(self) -> None: + # MemoryReactor doesn't trigger the shutdown phases, and we want the + # Tokio thread pool to be stopped + # XXX: This logic should probably get moved somewhere else + shutdown_triggers = self.reactor.triggers.get("shutdown", {}) + for phase in ["before", "during", "after"]: + triggers = shutdown_triggers.get(phase, []) + for callbable, args, kwargs in triggers: + callbable(*args, **kwargs) + + def till_deferred_has_result( + self, + awaitable: Union[ + "Coroutine[Deferred[Any], Any, T]", + "Generator[Deferred[Any], Any, T]", + "Deferred[T]", + ], + ) -> "Deferred[T]": + """Wait until a deferred has a result. + + This is useful because the Rust HTTP client will resolve the deferred + using reactor.callFromThread, which are only run when we call + reactor.advance. + """ + deferred = ensureDeferred(awaitable) + tries = 0 + while not deferred.called: + time.sleep(0.1) + self.reactor.advance(0) + tries += 1 + if tries > 100: + raise Exception("Timed out waiting for deferred to resolve") + + return deferred + + def _check_current_logcontext(self, expected_logcontext_string: str) -> None: + context = current_context() + assert isinstance(context, LoggingContext) or isinstance(context, _Sentinel), ( + f"Expected LoggingContext({expected_logcontext_string}) but saw {context}" + ) + self.assertEqual( + str(context), + expected_logcontext_string, + f"Expected LoggingContext({expected_logcontext_string}) but saw {context}", + ) + + def test_request_response(self) -> None: + """ + Test to make sure we can make a basic request and get the expected + response. + """ + + async def do_request() -> None: + resp_body = await self._rust_http_client.get( + url=self.server.endpoint, + response_limit=1 * 1024 * 1024, + ) + raw_response = json_decoder.decode(resp_body.decode("utf-8")) + self.assertEqual(raw_response, {"ok": True}) + + self.get_success(self.till_deferred_has_result(do_request())) + self.assertEqual(self.server.calls, 1) + + async def test_logging_context(self) -> None: + """ + Test to make sure the `LoggingContext` (logcontext) is handled correctly + when making requests. 
+ """ + # Sanity check that we start in the sentinel context + self._check_current_logcontext("sentinel") + + callback_finished = False + + async def do_request() -> None: + nonlocal callback_finished + try: + # Should have the same logcontext as the caller + self._check_current_logcontext("foo") + + with LoggingContext(name="competing", server_name="test_server"): + # Make the actual request + await self._rust_http_client.get( + url=self.server.endpoint, + response_limit=1 * 1024 * 1024, + ) + self._check_current_logcontext("competing") + + # Back to the caller's context outside of the `LoggingContext` block + self._check_current_logcontext("foo") + finally: + # When exceptions happen, we still want to mark the callback as finished + # so that the test can complete and we see the underlying error. + callback_finished = True + + with LoggingContext(name="foo", server_name="test_server"): + # Fire off the function, but don't wait on it. + run_in_background(do_request) + + # Now wait for the function under test to have run + with PreserveLoggingContext(): + while not callback_finished: + # await self.hs.get_clock().sleep(0) + time.sleep(0.1) + self.reactor.advance(0) + + # check that the logcontext is left in a sane state. + self._check_current_logcontext("foo") + + self.assertTrue( + callback_finished, + "Callback never finished which means the test probably didn't wait long enough", + ) + + # Back to the sentinel context + self._check_current_logcontext("sentinel") From 70c044db8efabacf3deaf8635d98c593b722541a Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 2 Oct 2025 13:21:37 -0500 Subject: [PATCH 012/149] Remove deprecated `LoggingContext.set_current_context`/`LoggingContext.current_context` methods (#18989) These were added for backwards compatibility (and essentially deprecated) in https://github.com/matrix-org/synapse/pull/7408 (2020-05-04) because [`synapse-s3-storage-provider`](https://github.com/matrix-org/synapse-s3-storage-provider) previously relied on them -- but `synapse-s3-storage-provider` since been [updated](https://github.com/matrix-org/synapse-s3-storage-provider/pull/36) to no longer use them. --- changelog.d/18989.removal | 1 + synapse/logging/context.py | 42 -------------------------------------- 2 files changed, 1 insertion(+), 42 deletions(-) create mode 100644 changelog.d/18989.removal diff --git a/changelog.d/18989.removal b/changelog.d/18989.removal new file mode 100644 index 0000000000..356b9ffe3a --- /dev/null +++ b/changelog.d/18989.removal @@ -0,0 +1 @@ +Remove deprecated `LoggingContext.set_current_context`/`LoggingContext.current_context` methods which already have equivalent bare methods in `synapse.logging.context`. diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 42fc7148c1..1b9c770311 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -33,7 +33,6 @@ See doc/log_contexts.rst for details on how this works. import logging import threading import typing -import warnings from types import TracebackType from typing import ( TYPE_CHECKING, @@ -378,47 +377,6 @@ class LoggingContext: def __str__(self) -> str: return self.name - @classmethod - def current_context(cls) -> LoggingContextOrSentinel: - """Get the current logging context from thread local storage - - This exists for backwards compatibility. ``current_context()`` should be - called directly. 
- - Returns: - The current logging context - """ - warnings.warn( - "synapse.logging.context.LoggingContext.current_context() is deprecated " - "in favor of synapse.logging.context.current_context().", - DeprecationWarning, - stacklevel=2, - ) - return current_context() - - @classmethod - def set_current_context( - cls, context: LoggingContextOrSentinel - ) -> LoggingContextOrSentinel: - """Set the current logging context in thread local storage - - This exists for backwards compatibility. ``set_current_context()`` should be - called directly. - - Args: - context: The context to activate. - - Returns: - The context that was previously active - """ - warnings.warn( - "synapse.logging.context.LoggingContext.set_current_context() is deprecated " - "in favor of synapse.logging.context.set_current_context().", - DeprecationWarning, - stacklevel=2, - ) - return set_current_context(context) - def __enter__(self) -> "LoggingContext": """Enters this logging context into thread local storage""" logcontext_debug_logger.debug("LoggingContext(%s).__enter__", self.name) From 1d2ddbc76e8817ea946f611bac9fe960f9a8ff28 Mon Sep 17 00:00:00 2001 From: Francesco Stefanini <110470709+frastefanini@users.noreply.github.com> Date: Fri, 3 Oct 2025 14:19:57 +0200 Subject: [PATCH 013/149] Fix bug where ephemeral events were not filtered by room ID (#19002) Co-authored-by: Andrew Morgan --- changelog.d/19002.bugfix | 1 + synapse/handlers/sync.py | 26 ++++++++++++++------------ 2 files changed, 15 insertions(+), 12 deletions(-) create mode 100644 changelog.d/19002.bugfix diff --git a/changelog.d/19002.bugfix b/changelog.d/19002.bugfix new file mode 100644 index 0000000000..d54c7f0e87 --- /dev/null +++ b/changelog.d/19002.bugfix @@ -0,0 +1 @@ +Fix bug where ephemeral events were not filtered by room ID. Contributed by @frastefanini. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 6f0522d5bb..2a6652b585 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -553,7 +553,7 @@ class SyncHandler: Returns: A tuple of the now StreamToken, updated to reflect the which typing events are included, and a dict mapping from room_id to a list of - typing events for that room. + ephemeral events for that room. 
""" sync_config = sync_result_builder.sync_config @@ -578,12 +578,8 @@ class SyncHandler: ephemeral_by_room: JsonDict = {} for event in typing: - # we want to exclude the room_id from the event, but modifying the - # result returned by the event source is poor form (it might cache - # the object) room_id = event["room_id"] - event_copy = {k: v for (k, v) in event.items() if k != "room_id"} - ephemeral_by_room.setdefault(room_id, []).append(event_copy) + ephemeral_by_room.setdefault(room_id, []).append(event) receipt_key = ( since_token.receipt_key @@ -603,9 +599,7 @@ class SyncHandler: for event in receipts: room_id = event["room_id"] - # exclude room id, as above - event_copy = {k: v for (k, v) in event.items() if k != "room_id"} - ephemeral_by_room.setdefault(room_id, []).append(event_copy) + ephemeral_by_room.setdefault(room_id, []).append(event) return now_token, ephemeral_by_room @@ -2734,9 +2728,17 @@ class SyncHandler: ) ) - ephemeral = await sync_config.filter_collection.filter_room_ephemeral( - ephemeral - ) + ephemeral = [ + # per spec, ephemeral events (typing notifications and read receipts) + # should not have a `room_id` field when sent to clients + # refs: + # - https://spec.matrix.org/v1.16/client-server-api/#mtyping + # - https://spec.matrix.org/v1.16/client-server-api/#mreceipt + {k: v for (k, v) in event.items() if k != "room_id"} + for event in await sync_config.filter_collection.filter_room_ephemeral( + ephemeral + ) + ] if not ( always_include From 5465c685538b0feab6531f0a7fbaef18049b4cc1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 7 Oct 2025 11:15:35 +0100 Subject: [PATCH 014/149] Remove unstable prefixes for MSC2732: Olm fallback keys (#18996) Co-authored-by: Eric Eastwood --- changelog.d/18996.removal | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/18996.removal diff --git a/changelog.d/18996.removal b/changelog.d/18996.removal new file mode 100644 index 0000000000..fa06fcc929 --- /dev/null +++ b/changelog.d/18996.removal @@ -0,0 +1 @@ +Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. \ No newline at end of file From 42bbff8294ddd72fc45891f1fdc41a0b6fd26031 Mon Sep 17 00:00:00 2001 From: Till <2353100+S7evinK@users.noreply.github.com> Date: Tue, 7 Oct 2025 12:27:53 +0200 Subject: [PATCH 015/149] Validate the body of requests to `/keys/upload` (#17097) Co-authored-by: Andrew Morgan Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Co-authored-by: Eric Eastwood --- changelog.d/17097.misc | 1 + synapse/handlers/e2e_keys.py | 16 ++-- synapse/rest/client/keys.py | 150 ++++++++++++++++++++++++++++++++- tests/rest/client/test_keys.py | 121 ++++++++++++++++++++++++++ 4 files changed, 280 insertions(+), 8 deletions(-) create mode 100644 changelog.d/17097.misc diff --git a/changelog.d/17097.misc b/changelog.d/17097.misc new file mode 100644 index 0000000000..42792e5f38 --- /dev/null +++ b/changelog.d/17097.misc @@ -0,0 +1 @@ +Extend validation of uploaded device keys. 
\ No newline at end of file diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 791a0fa684..b10472f1d2 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -57,7 +57,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) - ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock" @@ -847,14 +846,22 @@ class E2eKeysHandler: """ time_now = self.clock.time_msec() - # TODO: Validate the JSON to make sure it has the right keys. device_keys = keys.get("device_keys", None) if device_keys: + log_kv( + { + "message": "Updating device_keys for user.", + "user_id": user_id, + "device_id": device_id, + } + ) await self.upload_device_keys_for_user( user_id=user_id, device_id=device_id, keys={"device_keys": device_keys}, ) + else: + log_kv({"message": "Did not update device_keys", "reason": "not a dict"}) one_time_keys = keys.get("one_time_keys", None) if one_time_keys: @@ -872,8 +879,9 @@ class E2eKeysHandler: log_kv( {"message": "Did not update one_time_keys", "reason": "no keys given"} ) + fallback_keys = keys.get("fallback_keys") - if fallback_keys and isinstance(fallback_keys, dict): + if fallback_keys: log_kv( { "message": "Updating fallback_keys for device.", @@ -882,8 +890,6 @@ class E2eKeysHandler: } ) await self.store.set_e2e_fallback_keys(user_id, device_id, fallback_keys) - elif fallback_keys: - log_kv({"message": "Did not update fallback_keys", "reason": "not a dict"}) else: log_kv( {"message": "Did not update fallback_keys", "reason": "no keys given"} diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 6cf480952e..9d22e22b72 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -23,10 +23,19 @@ import logging import re from collections import Counter -from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple +from http import HTTPStatus +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union +from typing_extensions import Self + +from synapse._pydantic_compat import ( + StrictBool, + StrictStr, + validator, +) from synapse.api.auth.mas import MasDelegatedAuth from synapse.api.errors import ( + Codes, InteractiveAuthIncompleteError, InvalidAPICallError, SynapseError, @@ -37,11 +46,13 @@ from synapse.http.servlet import ( parse_integer, parse_json_object_from_request, parse_string, + validate_json_object, ) from synapse.http.site import SynapseRequest from synapse.logging.opentracing import log_kv, set_tag from synapse.rest.client._base import client_patterns, interactive_auth_handler from synapse.types import JsonDict, StreamToken +from synapse.types.rest import RequestBodyModel from synapse.util.cancellation import cancellable if TYPE_CHECKING: @@ -59,7 +70,6 @@ class KeyUploadServlet(RestServlet): "device_keys": { "user_id": "", "device_id": "", - "valid_until_ts": , "algorithms": [ "m.olm.curve25519-aes-sha2", ] @@ -111,12 +121,123 @@ class KeyUploadServlet(RestServlet): self._clock = hs.get_clock() self._store = hs.get_datastores().main + class KeyUploadRequestBody(RequestBodyModel): + """ + The body of a `POST /_matrix/client/v3/keys/upload` request. + + Based on https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload. + """ + + class DeviceKeys(RequestBodyModel): + algorithms: List[StrictStr] + """The encryption algorithms supported by this device.""" + + device_id: StrictStr + """The ID of the device these keys belong to. 
Must match the device ID used when logging in.""" + + keys: Mapping[StrictStr, StrictStr] + """ + Public identity keys. The names of the properties should be in the + format `:`. The keys themselves should be encoded as + specified by the key algorithm. + """ + + signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]] + """Signatures for the device key object. A map from user ID, to a map from ":" to the signature.""" + + user_id: StrictStr + """The ID of the user the device belongs to. Must match the user ID used when logging in.""" + + class KeyObject(RequestBodyModel): + key: StrictStr + """The key, encoded using unpadded base64.""" + + fallback: Optional[StrictBool] = False + """Whether this is a fallback key. Only used when handling fallback keys.""" + + signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]] + """Signature for the device. Mapped from user ID to another map of key signing identifier to the signature itself. + + See the following for more detail: https://spec.matrix.org/v1.16/appendices/#signing-details + """ + + device_keys: Optional[DeviceKeys] = None + """Identity keys for the device. May be absent if no new identity keys are required.""" + + fallback_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] + """ + The public key which should be used if the device's one-time keys are + exhausted. The fallback key is not deleted once used, but should be + replaced when additional one-time keys are being uploaded. The server + will notify the client of the fallback key being used through `/sync`. + + There can only be at most one key per algorithm uploaded, and the server + will only persist one key per algorithm. + + When uploading a signed key, an additional fallback: true key should be + included to denote that the key is a fallback key. + + May be absent if a new fallback key is not required. + """ + + @validator("fallback_keys", pre=True) + def validate_fallback_keys(cls: Self, v: Any) -> Any: + if v is None: + return v + if not isinstance(v, dict): + raise TypeError("fallback_keys must be a mapping") + + for k in v.keys(): + if not len(k.split(":")) == 2: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg=f"Invalid fallback_keys key {k!r}. " + 'Expected ":".', + ) + return v + + one_time_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] = None + """ + One-time public keys for "pre-key" messages. The names of the properties + should be in the format `:`. + + The format of the key is determined by the key algorithm, see: + https://spec.matrix.org/v1.16/client-server-api/#key-algorithms. + """ + + @validator("one_time_keys", pre=True) + def validate_one_time_keys(cls: Self, v: Any) -> Any: + if v is None: + return v + if not isinstance(v, dict): + raise TypeError("one_time_keys must be a mapping") + + for k, _ in v.items(): + if not len(k.split(":")) == 2: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg=f"Invalid one_time_keys key {k!r}. " + 'Expected ":".', + ) + return v + async def on_POST( self, request: SynapseRequest, device_id: Optional[str] ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() + + # Parse the request body. Validate separately, as the handler expects a + # plain dict, rather than any parsed object. 
+ # + # Note: It would be nice to work with a parsed object, but the handler + # needs to encode portions of the request body as canonical JSON before + # storing the result in the DB. There's little point in converted to a + # parsed object and then back to a dict. body = parse_json_object_from_request(request) + validate_json_object(body, self.KeyUploadRequestBody) if device_id is not None: # Providing the device_id should only be done for setting keys @@ -149,8 +270,31 @@ class KeyUploadServlet(RestServlet): 400, "To upload keys, you must pass device_id when authenticating" ) + if "device_keys" in body: + # Validate the provided `user_id` and `device_id` fields in + # `device_keys` match that of the requesting user. We can't do + # this directly in the pydantic model as we don't have access + # to the requester yet. + # + # TODO: We could use ValidationInfo when we switch to Pydantic v2. + # https://docs.pydantic.dev/latest/concepts/validators/#validation-info + if body["device_keys"]["user_id"] != user_id: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg="Provided `user_id` in `device_keys` does not match that of the authenticated user", + ) + if body["device_keys"]["device_id"] != device_id: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg="Provided `device_id` in `device_keys` does not match that of the authenticated user device", + ) + result = await self.e2e_keys_handler.upload_keys_for_user( - user_id=user_id, device_id=device_id, keys=body + user_id=user_id, + device_id=device_id, + keys=body, ) return 200, result diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index d9a210b616..ef3aef5dc8 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -40,6 +40,127 @@ from tests.unittest import override_config from tests.utils import HAS_AUTHLIB +class KeyUploadTestCase(unittest.HomeserverTestCase): + servlets = [ + keys.register_servlets, + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + ] + + def test_upload_keys_fails_on_invalid_structure(self) -> None: + """Check that we validate the structure of keys upon upload. 
+ + Regression test for https://github.com/element-hq/synapse/pull/17097 + """ + self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland") + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + # Error: device_keys must be a dict + "device_keys": ["some", "stuff", "weewoo"] + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + # Error: properties of fallback_keys must be in the form `:` + "fallback_keys": {"invalid_key": "signature_base64"} + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + # Same as above, but for one_time_keys + "one_time_keys": {"invalid_key": "signature_base64"} + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + def test_upload_keys_fails_on_invalid_user_id_or_device_id(self) -> None: + """ + Validate that the requesting user is uploading their own keys and nobody + else's. + """ + device_id = "DEVICE_ID" + alice_user_id = self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland", device_id=device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + "device_keys": { + # Included `user_id` does not match requesting user. + "user_id": "@unknown_user:test", + "device_id": device_id, + "algorithms": ["m.olm.curve25519-aes-sha2"], + "keys": { + f"ed25519:{device_id}": "publickey", + }, + "signatures": {}, + } + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + "device_keys": { + "user_id": alice_user_id, + # Included `device_id` does not match requesting user's. + "device_id": "UNKNOWN_DEVICE_ID", + "algorithms": ["m.olm.curve25519-aes-sha2"], + "keys": { + f"ed25519:{device_id}": "publickey", + }, + "signatures": {}, + } + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + class KeyQueryTestCase(unittest.HomeserverTestCase): servlets = [ keys.register_servlets, From d67280f5d8e865875a45d5d27790b604dcbcff70 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 30 Sep 2025 17:10:32 +0100 Subject: [PATCH 016/149] Remove unstable prefixes for MSC2732 This MSC was accepted in 2022. We shouldn't need to continue supporting the unstable field names. 
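
For illustration, a minimal sketch of the stable-only field names (mirroring the
remaining tests in this series; the key values are placeholders):

```python
# Upload a fallback key using the stable field name; the unstable
# `org.matrix.msc2732.fallback_keys` spelling is no longer read.
keys_upload_body = {
    "fallback_keys": {"alg1:k1": "fallback_key1"},
}

# Likewise, `/sync` responses now only carry the stable
# `device_unused_fallback_key_types` field; the
# `org.matrix.msc2732.device_unused_fallback_key_types` copy is gone.
```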
--- synapse/handlers/e2e_keys.py | 4 +--- synapse/rest/client/sync.py | 3 --- tests/handlers/test_e2e_keys.py | 23 ----------------------- 3 files changed, 1 insertion(+), 29 deletions(-) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index fa3d207a90..d87e1bdad4 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -873,9 +873,7 @@ class E2eKeysHandler: log_kv( {"message": "Did not update one_time_keys", "reason": "no keys given"} ) - fallback_keys = keys.get("fallback_keys") or keys.get( - "org.matrix.msc2732.fallback_keys" - ) + fallback_keys = keys.get("fallback_keys") if fallback_keys and isinstance(fallback_keys, dict): log_kv( { diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index bb63b51599..2ddb319809 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -363,9 +363,6 @@ class SyncRestServlet(RestServlet): # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md # states that this field should always be included, as long as the server supports the feature. - response["org.matrix.msc2732.device_unused_fallback_key_types"] = ( - sync_result.device_unused_fallback_key_types - ) response["device_unused_fallback_key_types"] = ( sync_result.device_unused_fallback_key_types ) diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 4f0b1574b3..fca1f2cc44 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -410,7 +410,6 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): device_id = "xyz" fallback_key = {"alg1:k1": "fallback_key1"} fallback_key2 = {"alg1:k2": "fallback_key2"} - fallback_key3 = {"alg1:k2": "fallback_key3"} otk = {"alg1:k2": "key2"} # we shouldn't have any unused fallback keys yet @@ -531,28 +530,6 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key2}}}, ) - # using the unstable prefix should also set the fallback key - self.get_success( - self.handler.upload_keys_for_user( - local_user, - device_id, - {"org.matrix.msc2732.fallback_keys": fallback_key3}, - ) - ) - - claim_res = self.get_success( - self.handler.claim_one_time_keys( - {local_user: {device_id: {"alg1": 1}}}, - self.requester, - timeout=None, - always_include_fallback_keys=False, - ) - ) - self.assertEqual( - claim_res, - {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, - ) - def test_fallback_key_bulk(self) -> None: """Like test_fallback_key, but claims multiple keys in one handler call.""" alice = f"@alice:{self.hs.hostname}" From 4a37c4d87aaa92ad62853ca8b79b84e8c934b4a2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 7 Oct 2025 11:15:35 +0100 Subject: [PATCH 017/149] Remove unstable prefixes for MSC2732: Olm fallback keys (#18996) Co-authored-by: Eric Eastwood --- changelog.d/18996.removal | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/18996.removal diff --git a/changelog.d/18996.removal b/changelog.d/18996.removal new file mode 100644 index 0000000000..fa06fcc929 --- /dev/null +++ b/changelog.d/18996.removal @@ -0,0 +1 @@ +Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. 
\ No newline at end of file From 26aaaf9e48fff80cf67a20c691c75d670034b3c1 Mon Sep 17 00:00:00 2001 From: Till <2353100+S7evinK@users.noreply.github.com> Date: Tue, 7 Oct 2025 12:27:53 +0200 Subject: [PATCH 018/149] Validate the body of requests to `/keys/upload` (#17097) Co-authored-by: Andrew Morgan Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Co-authored-by: Eric Eastwood --- changelog.d/17097.misc | 1 + synapse/handlers/e2e_keys.py | 16 ++-- synapse/rest/client/keys.py | 150 ++++++++++++++++++++++++++++++++- tests/rest/client/test_keys.py | 121 ++++++++++++++++++++++++++ 4 files changed, 280 insertions(+), 8 deletions(-) create mode 100644 changelog.d/17097.misc diff --git a/changelog.d/17097.misc b/changelog.d/17097.misc new file mode 100644 index 0000000000..42792e5f38 --- /dev/null +++ b/changelog.d/17097.misc @@ -0,0 +1 @@ +Extend validation of uploaded device keys. \ No newline at end of file diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index d87e1bdad4..051828158a 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -57,7 +57,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) - ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock" @@ -848,14 +847,22 @@ class E2eKeysHandler: """ time_now = self.clock.time_msec() - # TODO: Validate the JSON to make sure it has the right keys. device_keys = keys.get("device_keys", None) if device_keys: + log_kv( + { + "message": "Updating device_keys for user.", + "user_id": user_id, + "device_id": device_id, + } + ) await self.upload_device_keys_for_user( user_id=user_id, device_id=device_id, keys={"device_keys": device_keys}, ) + else: + log_kv({"message": "Did not update device_keys", "reason": "not a dict"}) one_time_keys = keys.get("one_time_keys", None) if one_time_keys: @@ -873,8 +880,9 @@ class E2eKeysHandler: log_kv( {"message": "Did not update one_time_keys", "reason": "no keys given"} ) + fallback_keys = keys.get("fallback_keys") - if fallback_keys and isinstance(fallback_keys, dict): + if fallback_keys: log_kv( { "message": "Updating fallback_keys for device.", @@ -883,8 +891,6 @@ class E2eKeysHandler: } ) await self.store.set_e2e_fallback_keys(user_id, device_id, fallback_keys) - elif fallback_keys: - log_kv({"message": "Did not update fallback_keys", "reason": "not a dict"}) else: log_kv( {"message": "Did not update fallback_keys", "reason": "no keys given"} diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 6cf480952e..9d22e22b72 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -23,10 +23,19 @@ import logging import re from collections import Counter -from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple +from http import HTTPStatus +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union +from typing_extensions import Self + +from synapse._pydantic_compat import ( + StrictBool, + StrictStr, + validator, +) from synapse.api.auth.mas import MasDelegatedAuth from synapse.api.errors import ( + Codes, InteractiveAuthIncompleteError, InvalidAPICallError, SynapseError, @@ -37,11 +46,13 @@ from synapse.http.servlet import ( parse_integer, parse_json_object_from_request, parse_string, + validate_json_object, ) from synapse.http.site import SynapseRequest from synapse.logging.opentracing import log_kv, set_tag from synapse.rest.client._base import client_patterns, interactive_auth_handler from synapse.types import JsonDict, StreamToken +from synapse.types.rest 
import RequestBodyModel from synapse.util.cancellation import cancellable if TYPE_CHECKING: @@ -59,7 +70,6 @@ class KeyUploadServlet(RestServlet): "device_keys": { "user_id": "", "device_id": "", - "valid_until_ts": , "algorithms": [ "m.olm.curve25519-aes-sha2", ] @@ -111,12 +121,123 @@ class KeyUploadServlet(RestServlet): self._clock = hs.get_clock() self._store = hs.get_datastores().main + class KeyUploadRequestBody(RequestBodyModel): + """ + The body of a `POST /_matrix/client/v3/keys/upload` request. + + Based on https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload. + """ + + class DeviceKeys(RequestBodyModel): + algorithms: List[StrictStr] + """The encryption algorithms supported by this device.""" + + device_id: StrictStr + """The ID of the device these keys belong to. Must match the device ID used when logging in.""" + + keys: Mapping[StrictStr, StrictStr] + """ + Public identity keys. The names of the properties should be in the + format `:`. The keys themselves should be encoded as + specified by the key algorithm. + """ + + signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]] + """Signatures for the device key object. A map from user ID, to a map from ":" to the signature.""" + + user_id: StrictStr + """The ID of the user the device belongs to. Must match the user ID used when logging in.""" + + class KeyObject(RequestBodyModel): + key: StrictStr + """The key, encoded using unpadded base64.""" + + fallback: Optional[StrictBool] = False + """Whether this is a fallback key. Only used when handling fallback keys.""" + + signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]] + """Signature for the device. Mapped from user ID to another map of key signing identifier to the signature itself. + + See the following for more detail: https://spec.matrix.org/v1.16/appendices/#signing-details + """ + + device_keys: Optional[DeviceKeys] = None + """Identity keys for the device. May be absent if no new identity keys are required.""" + + fallback_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] + """ + The public key which should be used if the device's one-time keys are + exhausted. The fallback key is not deleted once used, but should be + replaced when additional one-time keys are being uploaded. The server + will notify the client of the fallback key being used through `/sync`. + + There can only be at most one key per algorithm uploaded, and the server + will only persist one key per algorithm. + + When uploading a signed key, an additional fallback: true key should be + included to denote that the key is a fallback key. + + May be absent if a new fallback key is not required. + """ + + @validator("fallback_keys", pre=True) + def validate_fallback_keys(cls: Self, v: Any) -> Any: + if v is None: + return v + if not isinstance(v, dict): + raise TypeError("fallback_keys must be a mapping") + + for k in v.keys(): + if not len(k.split(":")) == 2: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg=f"Invalid fallback_keys key {k!r}. " + 'Expected ":".', + ) + return v + + one_time_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] = None + """ + One-time public keys for "pre-key" messages. The names of the properties + should be in the format `:`. + + The format of the key is determined by the key algorithm, see: + https://spec.matrix.org/v1.16/client-server-api/#key-algorithms. 
+ """ + + @validator("one_time_keys", pre=True) + def validate_one_time_keys(cls: Self, v: Any) -> Any: + if v is None: + return v + if not isinstance(v, dict): + raise TypeError("one_time_keys must be a mapping") + + for k, _ in v.items(): + if not len(k.split(":")) == 2: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg=f"Invalid one_time_keys key {k!r}. " + 'Expected ":".', + ) + return v + async def on_POST( self, request: SynapseRequest, device_id: Optional[str] ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() + + # Parse the request body. Validate separately, as the handler expects a + # plain dict, rather than any parsed object. + # + # Note: It would be nice to work with a parsed object, but the handler + # needs to encode portions of the request body as canonical JSON before + # storing the result in the DB. There's little point in converted to a + # parsed object and then back to a dict. body = parse_json_object_from_request(request) + validate_json_object(body, self.KeyUploadRequestBody) if device_id is not None: # Providing the device_id should only be done for setting keys @@ -149,8 +270,31 @@ class KeyUploadServlet(RestServlet): 400, "To upload keys, you must pass device_id when authenticating" ) + if "device_keys" in body: + # Validate the provided `user_id` and `device_id` fields in + # `device_keys` match that of the requesting user. We can't do + # this directly in the pydantic model as we don't have access + # to the requester yet. + # + # TODO: We could use ValidationInfo when we switch to Pydantic v2. + # https://docs.pydantic.dev/latest/concepts/validators/#validation-info + if body["device_keys"]["user_id"] != user_id: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg="Provided `user_id` in `device_keys` does not match that of the authenticated user", + ) + if body["device_keys"]["device_id"] != device_id: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg="Provided `device_id` in `device_keys` does not match that of the authenticated user device", + ) + result = await self.e2e_keys_handler.upload_keys_for_user( - user_id=user_id, device_id=device_id, keys=body + user_id=user_id, + device_id=device_id, + keys=body, ) return 200, result diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index d9a210b616..ef3aef5dc8 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -40,6 +40,127 @@ from tests.unittest import override_config from tests.utils import HAS_AUTHLIB +class KeyUploadTestCase(unittest.HomeserverTestCase): + servlets = [ + keys.register_servlets, + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + ] + + def test_upload_keys_fails_on_invalid_structure(self) -> None: + """Check that we validate the structure of keys upon upload. 
+ + Regression test for https://github.com/element-hq/synapse/pull/17097 + """ + self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland") + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + # Error: device_keys must be a dict + "device_keys": ["some", "stuff", "weewoo"] + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + # Error: properties of fallback_keys must be in the form `:` + "fallback_keys": {"invalid_key": "signature_base64"} + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + # Same as above, but for one_time_keys + "one_time_keys": {"invalid_key": "signature_base64"} + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + def test_upload_keys_fails_on_invalid_user_id_or_device_id(self) -> None: + """ + Validate that the requesting user is uploading their own keys and nobody + else's. + """ + device_id = "DEVICE_ID" + alice_user_id = self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland", device_id=device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + "device_keys": { + # Included `user_id` does not match requesting user. + "user_id": "@unknown_user:test", + "device_id": device_id, + "algorithms": ["m.olm.curve25519-aes-sha2"], + "keys": { + f"ed25519:{device_id}": "publickey", + }, + "signatures": {}, + } + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + "device_keys": { + "user_id": alice_user_id, + # Included `device_id` does not match requesting user's. + "device_id": "UNKNOWN_DEVICE_ID", + "algorithms": ["m.olm.curve25519-aes-sha2"], + "keys": { + f"ed25519:{device_id}": "publickey", + }, + "signatures": {}, + } + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + class KeyQueryTestCase(unittest.HomeserverTestCase): servlets = [ keys.register_servlets, From 533d5e0a7abe3d13ed2490194b0b0d52ec8aedaf Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 30 Sep 2025 17:10:32 +0100 Subject: [PATCH 019/149] Remove unstable prefixes for MSC2732 This MSC was accepted in 2022. We shouldn't need to continue supporting the unstable field names. 
--- synapse/handlers/e2e_keys.py | 4 +--- synapse/rest/client/sync.py | 3 --- tests/handlers/test_e2e_keys.py | 23 ----------------------- 3 files changed, 1 insertion(+), 29 deletions(-) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index b9abad2188..a257f72a54 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -873,9 +873,7 @@ class E2eKeysHandler: log_kv( {"message": "Did not update one_time_keys", "reason": "no keys given"} ) - fallback_keys = keys.get("fallback_keys") or keys.get( - "org.matrix.msc2732.fallback_keys" - ) + fallback_keys = keys.get("fallback_keys") if fallback_keys and isinstance(fallback_keys, dict): log_kv( { diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 5e0596d1bc..c7117e6608 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -363,9 +363,6 @@ class SyncRestServlet(RestServlet): # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md # states that this field should always be included, as long as the server supports the feature. - response["org.matrix.msc2732.device_unused_fallback_key_types"] = ( - sync_result.device_unused_fallback_key_types - ) response["device_unused_fallback_key_types"] = ( sync_result.device_unused_fallback_key_types ) diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index fda485d413..5fcbcc41e5 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -410,7 +410,6 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): device_id = "xyz" fallback_key = {"alg1:k1": "fallback_key1"} fallback_key2 = {"alg1:k2": "fallback_key2"} - fallback_key3 = {"alg1:k2": "fallback_key3"} otk = {"alg1:k2": "key2"} # we shouldn't have any unused fallback keys yet @@ -531,28 +530,6 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key2}}}, ) - # using the unstable prefix should also set the fallback key - self.get_success( - self.handler.upload_keys_for_user( - local_user, - device_id, - {"org.matrix.msc2732.fallback_keys": fallback_key3}, - ) - ) - - claim_res = self.get_success( - self.handler.claim_one_time_keys( - {local_user: {device_id: {"alg1": 1}}}, - self.requester, - timeout=None, - always_include_fallback_keys=False, - ) - ) - self.assertEqual( - claim_res, - {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, - ) - def test_fallback_key_bulk(self) -> None: """Like test_fallback_key, but claims multiple keys in one handler call.""" alice = f"@alice:{self.hs.hostname}" From dde1e012a43d83ff9806ae517d89f3dac2815cbb Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 7 Oct 2025 11:15:35 +0100 Subject: [PATCH 020/149] Remove unstable prefixes for MSC2732: Olm fallback keys (#18996) Co-authored-by: Eric Eastwood --- changelog.d/18996.removal | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/18996.removal diff --git a/changelog.d/18996.removal b/changelog.d/18996.removal new file mode 100644 index 0000000000..fa06fcc929 --- /dev/null +++ b/changelog.d/18996.removal @@ -0,0 +1 @@ +Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. 
\ No newline at end of file From 7069636c2d6d1ef2022287addf3ed8b919ef2740 Mon Sep 17 00:00:00 2001 From: Till <2353100+S7evinK@users.noreply.github.com> Date: Tue, 7 Oct 2025 12:27:53 +0200 Subject: [PATCH 021/149] Validate the body of requests to `/keys/upload` (#17097) Co-authored-by: Andrew Morgan Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Co-authored-by: Eric Eastwood --- changelog.d/17097.misc | 1 + synapse/handlers/e2e_keys.py | 16 ++-- synapse/rest/client/keys.py | 150 ++++++++++++++++++++++++++++++++- tests/rest/client/test_keys.py | 121 ++++++++++++++++++++++++++ 4 files changed, 280 insertions(+), 8 deletions(-) create mode 100644 changelog.d/17097.misc diff --git a/changelog.d/17097.misc b/changelog.d/17097.misc new file mode 100644 index 0000000000..42792e5f38 --- /dev/null +++ b/changelog.d/17097.misc @@ -0,0 +1 @@ +Extend validation of uploaded device keys. \ No newline at end of file diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index a257f72a54..48a60dbd7b 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -57,7 +57,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) - ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock" @@ -848,14 +847,22 @@ class E2eKeysHandler: """ time_now = self.clock.time_msec() - # TODO: Validate the JSON to make sure it has the right keys. device_keys = keys.get("device_keys", None) if device_keys: + log_kv( + { + "message": "Updating device_keys for user.", + "user_id": user_id, + "device_id": device_id, + } + ) await self.upload_device_keys_for_user( user_id=user_id, device_id=device_id, keys={"device_keys": device_keys}, ) + else: + log_kv({"message": "Did not update device_keys", "reason": "not a dict"}) one_time_keys = keys.get("one_time_keys", None) if one_time_keys: @@ -873,8 +880,9 @@ class E2eKeysHandler: log_kv( {"message": "Did not update one_time_keys", "reason": "no keys given"} ) + fallback_keys = keys.get("fallback_keys") - if fallback_keys and isinstance(fallback_keys, dict): + if fallback_keys: log_kv( { "message": "Updating fallback_keys for device.", @@ -883,8 +891,6 @@ class E2eKeysHandler: } ) await self.store.set_e2e_fallback_keys(user_id, device_id, fallback_keys) - elif fallback_keys: - log_kv({"message": "Did not update fallback_keys", "reason": "not a dict"}) else: log_kv( {"message": "Did not update fallback_keys", "reason": "no keys given"} diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 9f39889c75..017941bfc4 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -23,10 +23,19 @@ import logging import re from collections import Counter -from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple +from http import HTTPStatus +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union +from typing_extensions import Self + +from synapse._pydantic_compat import ( + StrictBool, + StrictStr, + validator, +) from synapse.api.auth.mas import MasDelegatedAuth from synapse.api.errors import ( + Codes, InteractiveAuthIncompleteError, InvalidAPICallError, SynapseError, @@ -37,11 +46,13 @@ from synapse.http.servlet import ( parse_integer, parse_json_object_from_request, parse_string, + validate_json_object, ) from synapse.http.site import SynapseRequest from synapse.logging.opentracing import log_kv, set_tag from synapse.rest.client._base import client_patterns, interactive_auth_handler from synapse.types import JsonDict, StreamToken +from synapse.types.rest 
import RequestBodyModel from synapse.util.cancellation import cancellable if TYPE_CHECKING: @@ -59,7 +70,6 @@ class KeyUploadServlet(RestServlet): "device_keys": { "user_id": "", "device_id": "", - "valid_until_ts": , "algorithms": [ "m.olm.curve25519-aes-sha2", ] @@ -111,12 +121,123 @@ class KeyUploadServlet(RestServlet): self._clock = hs.get_clock() self._store = hs.get_datastores().main + class KeyUploadRequestBody(RequestBodyModel): + """ + The body of a `POST /_matrix/client/v3/keys/upload` request. + + Based on https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload. + """ + + class DeviceKeys(RequestBodyModel): + algorithms: List[StrictStr] + """The encryption algorithms supported by this device.""" + + device_id: StrictStr + """The ID of the device these keys belong to. Must match the device ID used when logging in.""" + + keys: Mapping[StrictStr, StrictStr] + """ + Public identity keys. The names of the properties should be in the + format `:`. The keys themselves should be encoded as + specified by the key algorithm. + """ + + signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]] + """Signatures for the device key object. A map from user ID, to a map from ":" to the signature.""" + + user_id: StrictStr + """The ID of the user the device belongs to. Must match the user ID used when logging in.""" + + class KeyObject(RequestBodyModel): + key: StrictStr + """The key, encoded using unpadded base64.""" + + fallback: Optional[StrictBool] = False + """Whether this is a fallback key. Only used when handling fallback keys.""" + + signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]] + """Signature for the device. Mapped from user ID to another map of key signing identifier to the signature itself. + + See the following for more detail: https://spec.matrix.org/v1.16/appendices/#signing-details + """ + + device_keys: Optional[DeviceKeys] = None + """Identity keys for the device. May be absent if no new identity keys are required.""" + + fallback_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] + """ + The public key which should be used if the device's one-time keys are + exhausted. The fallback key is not deleted once used, but should be + replaced when additional one-time keys are being uploaded. The server + will notify the client of the fallback key being used through `/sync`. + + There can only be at most one key per algorithm uploaded, and the server + will only persist one key per algorithm. + + When uploading a signed key, an additional fallback: true key should be + included to denote that the key is a fallback key. + + May be absent if a new fallback key is not required. + """ + + @validator("fallback_keys", pre=True) + def validate_fallback_keys(cls: Self, v: Any) -> Any: + if v is None: + return v + if not isinstance(v, dict): + raise TypeError("fallback_keys must be a mapping") + + for k in v.keys(): + if not len(k.split(":")) == 2: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg=f"Invalid fallback_keys key {k!r}. " + 'Expected ":".', + ) + return v + + one_time_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] = None + """ + One-time public keys for "pre-key" messages. The names of the properties + should be in the format `:`. + + The format of the key is determined by the key algorithm, see: + https://spec.matrix.org/v1.16/client-server-api/#key-algorithms. 
+ """ + + @validator("one_time_keys", pre=True) + def validate_one_time_keys(cls: Self, v: Any) -> Any: + if v is None: + return v + if not isinstance(v, dict): + raise TypeError("one_time_keys must be a mapping") + + for k, _ in v.items(): + if not len(k.split(":")) == 2: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg=f"Invalid one_time_keys key {k!r}. " + 'Expected ":".', + ) + return v + async def on_POST( self, request: SynapseRequest, device_id: Optional[str] ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() + + # Parse the request body. Validate separately, as the handler expects a + # plain dict, rather than any parsed object. + # + # Note: It would be nice to work with a parsed object, but the handler + # needs to encode portions of the request body as canonical JSON before + # storing the result in the DB. There's little point in converted to a + # parsed object and then back to a dict. body = parse_json_object_from_request(request) + validate_json_object(body, self.KeyUploadRequestBody) if device_id is not None: # Providing the device_id should only be done for setting keys @@ -149,8 +270,31 @@ class KeyUploadServlet(RestServlet): 400, "To upload keys, you must pass device_id when authenticating" ) + if "device_keys" in body: + # Validate the provided `user_id` and `device_id` fields in + # `device_keys` match that of the requesting user. We can't do + # this directly in the pydantic model as we don't have access + # to the requester yet. + # + # TODO: We could use ValidationInfo when we switch to Pydantic v2. + # https://docs.pydantic.dev/latest/concepts/validators/#validation-info + if body["device_keys"]["user_id"] != user_id: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg="Provided `user_id` in `device_keys` does not match that of the authenticated user", + ) + if body["device_keys"]["device_id"] != device_id: + raise SynapseError( + code=HTTPStatus.BAD_REQUEST, + errcode=Codes.BAD_JSON, + msg="Provided `device_id` in `device_keys` does not match that of the authenticated user device", + ) + result = await self.e2e_keys_handler.upload_keys_for_user( - user_id=user_id, device_id=device_id, keys=body + user_id=user_id, + device_id=device_id, + keys=body, ) return 200, result diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index d9a210b616..ef3aef5dc8 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -40,6 +40,127 @@ from tests.unittest import override_config from tests.utils import HAS_AUTHLIB +class KeyUploadTestCase(unittest.HomeserverTestCase): + servlets = [ + keys.register_servlets, + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + ] + + def test_upload_keys_fails_on_invalid_structure(self) -> None: + """Check that we validate the structure of keys upon upload. 
+ + Regression test for https://github.com/element-hq/synapse/pull/17097 + """ + self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland") + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + # Error: device_keys must be a dict + "device_keys": ["some", "stuff", "weewoo"] + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + # Error: properties of fallback_keys must be in the form `:` + "fallback_keys": {"invalid_key": "signature_base64"} + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + # Same as above, but for one_time_keys + "one_time_keys": {"invalid_key": "signature_base64"} + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + def test_upload_keys_fails_on_invalid_user_id_or_device_id(self) -> None: + """ + Validate that the requesting user is uploading their own keys and nobody + else's. + """ + device_id = "DEVICE_ID" + alice_user_id = self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland", device_id=device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + "device_keys": { + # Included `user_id` does not match requesting user. + "user_id": "@unknown_user:test", + "device_id": device_id, + "algorithms": ["m.olm.curve25519-aes-sha2"], + "keys": { + f"ed25519:{device_id}": "publickey", + }, + "signatures": {}, + } + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + "device_keys": { + "user_id": alice_user_id, + # Included `device_id` does not match requesting user's. 
+ "device_id": "UNKNOWN_DEVICE_ID", + "algorithms": ["m.olm.curve25519-aes-sha2"], + "keys": { + f"ed25519:{device_id}": "publickey", + }, + "signatures": {}, + } + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.BAD_JSON, + channel.result, + ) + + class KeyQueryTestCase(unittest.HomeserverTestCase): servlets = [ keys.register_servlets, From 76b012c3f5a1d51294dabcabde31e5dce94dddf8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 7 Oct 2025 11:58:08 +0100 Subject: [PATCH 022/149] 1.139.1 --- CHANGES.md | 12 ++++++++++++ changelog.d/17097.misc | 1 - changelog.d/18996.removal | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 19 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/17097.misc delete mode 100644 changelog.d/18996.removal diff --git a/CHANGES.md b/CHANGES.md index e8b04c419c..b9c5eb01b0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,15 @@ +# Synapse 1.139.1 (2025-10-07) + +## Security Fixes + +- Fix [CVE-2025-61672](https://www.cve.org/CVERecord?id=CVE-2025-61672) / [GHSA-fh66-fcv5-jjfr](https://github.com/element-hq/synapse/security/advisories/GHSA-fh66-fcv5-jjfr). Lack of validation for device keys in Synapse before 1.139.1 allows an attacker registered on the victim homeserver to degrade federation functionality, unpredictably breaking outbound federation to other homeservers. ([\#17097](https://github.com/element-hq/synapse/issues/17097)) + +## Deprecations and Removals + +- Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. This change allows unit tests to pass following the security patch above. ([\#18996](https://github.com/element-hq/synapse/issues/18996)) + + + # Synapse 1.139.0 (2025-09-30) ### `/register` requests from old application service implementations may break when using MAS diff --git a/changelog.d/17097.misc b/changelog.d/17097.misc deleted file mode 100644 index 42792e5f38..0000000000 --- a/changelog.d/17097.misc +++ /dev/null @@ -1 +0,0 @@ -Extend validation of uploaded device keys. \ No newline at end of file diff --git a/changelog.d/18996.removal b/changelog.d/18996.removal deleted file mode 100644 index fa06fcc929..0000000000 --- a/changelog.d/18996.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index f3a2314dca..5d7ed231c0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.139.1) stable; urgency=medium + + * New Synapse release 1.139.1. + + -- Synapse Packaging team Tue, 07 Oct 2025 11:46:51 +0100 + matrix-synapse-py3 (1.139.0) stable; urgency=medium * New Synapse release 1.139.0. 
diff --git a/pyproject.toml b/pyproject.toml index 0f886a6b6a..93a8abc5f5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.139.0" +version = "1.139.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 527e831b61bcf04106b6338b53048ec3986be7c0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 7 Oct 2025 12:54:43 +0100 Subject: [PATCH 023/149] 1.138.3 --- CHANGES.md | 13 +++++++++++++ changelog.d/17097.misc | 1 - changelog.d/18996.removal | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 20 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/17097.misc delete mode 100644 changelog.d/18996.removal diff --git a/CHANGES.md b/CHANGES.md index e49091205e..cf2c9b634f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,16 @@ +# Synapse 1.138.3 (2025-10-07) + +## Security Fixes + +- Fix [CVE-2025-61672](https://www.cve.org/CVERecord?id=CVE-2025-61672) / [GHSA-fh66-fcv5-jjfr](https://github.com/element-hq/synapse/security/advisories/GHSA-fh66-fcv5-jjfr). Lack of validation for device keys in Synapse before 1.139.1 allows an attacker registered on the victim homeserver to degrade federation functionality, unpredictably breaking outbound federation to other homeservers. ([\#17097](https://github.com/element-hq/synapse/issues/17097)) + +## Deprecations and Removals + +- Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. This change allows unit tests to pass following the security patch above. ([\#18996](https://github.com/element-hq/synapse/issues/18996)) + + + + # Synapse 1.138.2 (2025-09-24) ## Internal Changes diff --git a/changelog.d/17097.misc b/changelog.d/17097.misc deleted file mode 100644 index 42792e5f38..0000000000 --- a/changelog.d/17097.misc +++ /dev/null @@ -1 +0,0 @@ -Extend validation of uploaded device keys. \ No newline at end of file diff --git a/changelog.d/18996.removal b/changelog.d/18996.removal deleted file mode 100644 index fa06fcc929..0000000000 --- a/changelog.d/18996.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index ea15974d63..f7ebd148a7 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.138.3) stable; urgency=medium + + * New Synapse release 1.138.3. + + -- Synapse Packaging team Tue, 07 Oct 2025 12:54:18 +0100 + matrix-synapse-py3 (1.138.2) stable; urgency=medium * New Synapse release 1.138.2. 
diff --git a/pyproject.toml b/pyproject.toml index c548a652e9..50e8a7ed84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.138.2" +version = "1.138.3" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 2443760d0dfeedddfb6a7ab087c0ad54285e5916 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 7 Oct 2025 16:23:55 +0100 Subject: [PATCH 024/149] Update `KeyUploadServlet` to handle case where client sends `device_keys: null` (#19023) --- changelog.d/19023.bugfix | 1 + synapse/rest/client/keys.py | 6 +++--- tests/rest/client/test_keys.py | 20 ++++++++++++++++++++ 3 files changed, 24 insertions(+), 3 deletions(-) create mode 100644 changelog.d/19023.bugfix diff --git a/changelog.d/19023.bugfix b/changelog.d/19023.bugfix new file mode 100644 index 0000000000..816336080e --- /dev/null +++ b/changelog.d/19023.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.139.1 where a client could receive an Internal Server Error if they set `device_keys: null` in the request to [`POST /_matrix/client/v3/keys/upload`](https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload). \ No newline at end of file diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 9d22e22b72..55922b97d4 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -270,7 +270,7 @@ class KeyUploadServlet(RestServlet): 400, "To upload keys, you must pass device_id when authenticating" ) - if "device_keys" in body: + if "device_keys" in body and isinstance(body["device_keys"], dict): # Validate the provided `user_id` and `device_id` fields in # `device_keys` match that of the requesting user. We can't do # this directly in the pydantic model as we don't have access @@ -278,13 +278,13 @@ class KeyUploadServlet(RestServlet): # # TODO: We could use ValidationInfo when we switch to Pydantic v2. # https://docs.pydantic.dev/latest/concepts/validators/#validation-info - if body["device_keys"]["user_id"] != user_id: + if body["device_keys"].get("user_id") != user_id: raise SynapseError( code=HTTPStatus.BAD_REQUEST, errcode=Codes.BAD_JSON, msg="Provided `user_id` in `device_keys` does not match that of the authenticated user", ) - if body["device_keys"]["device_id"] != device_id: + if body["device_keys"].get("device_id") != device_id: raise SynapseError( code=HTTPStatus.BAD_REQUEST, errcode=Codes.BAD_JSON, diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index ef3aef5dc8..817edfb8d3 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -160,6 +160,26 @@ class KeyUploadTestCase(unittest.HomeserverTestCase): channel.result, ) + def test_upload_keys_succeeds_when_fields_are_explicitly_set_to_null(self) -> None: + """ + This is a regression test for https://github.com/element-hq/synapse/pull/19023. 
+ """ + device_id = "DEVICE_ID" + self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland", device_id=device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + "device_keys": None, + "one_time_keys": None, + "fallback_keys": None, + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + class KeyQueryTestCase(unittest.HomeserverTestCase): servlets = [ From 0ae1f105b22e51802b01f60ef5a386d8b27d7079 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 7 Oct 2025 16:23:55 +0100 Subject: [PATCH 025/149] Update `KeyUploadServlet` to handle case where client sends `device_keys: null` (#19023) --- changelog.d/19023.bugfix | 1 + synapse/rest/client/keys.py | 6 +++--- tests/rest/client/test_keys.py | 20 ++++++++++++++++++++ 3 files changed, 24 insertions(+), 3 deletions(-) create mode 100644 changelog.d/19023.bugfix diff --git a/changelog.d/19023.bugfix b/changelog.d/19023.bugfix new file mode 100644 index 0000000000..816336080e --- /dev/null +++ b/changelog.d/19023.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.139.1 where a client could receive an Internal Server Error if they set `device_keys: null` in the request to [`POST /_matrix/client/v3/keys/upload`](https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload). \ No newline at end of file diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 017941bfc4..68be734ccd 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -270,7 +270,7 @@ class KeyUploadServlet(RestServlet): 400, "To upload keys, you must pass device_id when authenticating" ) - if "device_keys" in body: + if "device_keys" in body and isinstance(body["device_keys"], dict): # Validate the provided `user_id` and `device_id` fields in # `device_keys` match that of the requesting user. We can't do # this directly in the pydantic model as we don't have access @@ -278,13 +278,13 @@ class KeyUploadServlet(RestServlet): # # TODO: We could use ValidationInfo when we switch to Pydantic v2. # https://docs.pydantic.dev/latest/concepts/validators/#validation-info - if body["device_keys"]["user_id"] != user_id: + if body["device_keys"].get("user_id") != user_id: raise SynapseError( code=HTTPStatus.BAD_REQUEST, errcode=Codes.BAD_JSON, msg="Provided `user_id` in `device_keys` does not match that of the authenticated user", ) - if body["device_keys"]["device_id"] != device_id: + if body["device_keys"].get("device_id") != device_id: raise SynapseError( code=HTTPStatus.BAD_REQUEST, errcode=Codes.BAD_JSON, diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index ef3aef5dc8..817edfb8d3 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -160,6 +160,26 @@ class KeyUploadTestCase(unittest.HomeserverTestCase): channel.result, ) + def test_upload_keys_succeeds_when_fields_are_explicitly_set_to_null(self) -> None: + """ + This is a regression test for https://github.com/element-hq/synapse/pull/19023. 
+ """ + device_id = "DEVICE_ID" + self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland", device_id=device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + "device_keys": None, + "one_time_keys": None, + "fallback_keys": None, + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + class KeyQueryTestCase(unittest.HomeserverTestCase): servlets = [ From 5e3839e2afc56b27d4abb281146d30432b7efac6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 7 Oct 2025 16:23:55 +0100 Subject: [PATCH 026/149] Update `KeyUploadServlet` to handle case where client sends `device_keys: null` (#19023) --- changelog.d/19023.bugfix | 1 + synapse/rest/client/keys.py | 6 +++--- tests/rest/client/test_keys.py | 20 ++++++++++++++++++++ 3 files changed, 24 insertions(+), 3 deletions(-) create mode 100644 changelog.d/19023.bugfix diff --git a/changelog.d/19023.bugfix b/changelog.d/19023.bugfix new file mode 100644 index 0000000000..816336080e --- /dev/null +++ b/changelog.d/19023.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.139.1 where a client could receive an Internal Server Error if they set `device_keys: null` in the request to [`POST /_matrix/client/v3/keys/upload`](https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload). \ No newline at end of file diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 9d22e22b72..55922b97d4 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -270,7 +270,7 @@ class KeyUploadServlet(RestServlet): 400, "To upload keys, you must pass device_id when authenticating" ) - if "device_keys" in body: + if "device_keys" in body and isinstance(body["device_keys"], dict): # Validate the provided `user_id` and `device_id` fields in # `device_keys` match that of the requesting user. We can't do # this directly in the pydantic model as we don't have access @@ -278,13 +278,13 @@ class KeyUploadServlet(RestServlet): # # TODO: We could use ValidationInfo when we switch to Pydantic v2. # https://docs.pydantic.dev/latest/concepts/validators/#validation-info - if body["device_keys"]["user_id"] != user_id: + if body["device_keys"].get("user_id") != user_id: raise SynapseError( code=HTTPStatus.BAD_REQUEST, errcode=Codes.BAD_JSON, msg="Provided `user_id` in `device_keys` does not match that of the authenticated user", ) - if body["device_keys"]["device_id"] != device_id: + if body["device_keys"].get("device_id") != device_id: raise SynapseError( code=HTTPStatus.BAD_REQUEST, errcode=Codes.BAD_JSON, diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index ef3aef5dc8..817edfb8d3 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -160,6 +160,26 @@ class KeyUploadTestCase(unittest.HomeserverTestCase): channel.result, ) + def test_upload_keys_succeeds_when_fields_are_explicitly_set_to_null(self) -> None: + """ + This is a regression test for https://github.com/element-hq/synapse/pull/19023. 
+ """ + device_id = "DEVICE_ID" + self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland", device_id=device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/upload", + { + "device_keys": None, + "one_time_keys": None, + "fallback_keys": None, + }, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + class KeyQueryTestCase(unittest.HomeserverTestCase): servlets = [ From abe974cd2b17467c10700cb168748fdeee7b4f9b Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 7 Oct 2025 16:28:59 +0100 Subject: [PATCH 027/149] 1.138.4 --- CHANGES.md | 9 +++++++++ changelog.d/19023.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/19023.bugfix diff --git a/CHANGES.md b/CHANGES.md index cf2c9b634f..9fd0631bd6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +# Synapse 1.138.4 (2025-10-07) + +## Bugfixes + +- Fix a bug introduced in 1.138.3 where a client could receive an Internal Server Error if they set `device_keys: null` in the request to [`POST /_matrix/client/v3/keys/upload`](https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload). ([\#19023](https://github.com/element-hq/synapse/issues/19023)) + + + + # Synapse 1.138.3 (2025-10-07) ## Security Fixes diff --git a/changelog.d/19023.bugfix b/changelog.d/19023.bugfix deleted file mode 100644 index 816336080e..0000000000 --- a/changelog.d/19023.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.139.1 where a client could receive an Internal Server Error if they set `device_keys: null` in the request to [`POST /_matrix/client/v3/keys/upload`](https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload). \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index f7ebd148a7..d054a94d00 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.138.4) stable; urgency=medium + + * New Synapse release 1.138.4. + + -- Synapse Packaging team Tue, 07 Oct 2025 16:28:38 +0100 + matrix-synapse-py3 (1.138.3) stable; urgency=medium * New Synapse release 1.138.3. diff --git a/pyproject.toml b/pyproject.toml index 50e8a7ed84..f51d21e66b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.138.3" +version = "1.138.4" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 036fb875842fcc4105cdba2d23fd425df1eec3b6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 7 Oct 2025 16:30:03 +0100 Subject: [PATCH 028/149] 1.139.2 --- CHANGES.md | 9 +++++++++ changelog.d/19023.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/19023.bugfix diff --git a/CHANGES.md b/CHANGES.md index b9c5eb01b0..7a507aa276 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +# Synapse 1.139.2 (2025-10-07) + +## Bugfixes + +- Fix a bug introduced in 1.139.1 where a client could receive an Internal Server Error if they set `device_keys: null` in the request to [`POST /_matrix/client/v3/keys/upload`](https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload). 
([\#19023](https://github.com/element-hq/synapse/issues/19023)) + + + + # Synapse 1.139.1 (2025-10-07) ## Security Fixes diff --git a/changelog.d/19023.bugfix b/changelog.d/19023.bugfix deleted file mode 100644 index 816336080e..0000000000 --- a/changelog.d/19023.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.139.1 where a client could receive an Internal Server Error if they set `device_keys: null` in the request to [`POST /_matrix/client/v3/keys/upload`](https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload). \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 5d7ed231c0..0318d273c3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.139.2) stable; urgency=medium + + * New Synapse release 1.139.2. + + -- Synapse Packaging team Tue, 07 Oct 2025 16:29:47 +0100 + matrix-synapse-py3 (1.139.1) stable; urgency=medium * New Synapse release 1.139.1. diff --git a/pyproject.toml b/pyproject.toml index 93a8abc5f5..af67ec08a7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.139.1" +version = "1.139.2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From ca27938257bd9915de15b88d1748cf4075c95b9f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 7 Oct 2025 10:44:56 -0500 Subject: [PATCH 029/149] Align Synapse version string to use `SYNAPSE_VERSION` (#19011) See https://github.com/matrix-org/synapse/pull/12973 where we previously used `version_string="Synapse/" + get_distribution_version_string("matrix-synapse")` everywhere; and then updated to use `version_string=f"Synapse/{SYNAPSE_VERSION}"` for every other place except `synapse/app/homeserver.py` (why?!?!?!). This seems more like a typo than something on purpose especially without any context in the comments or PR. The whole point of that PR was trying to solve the missing git info in version strings. For reference, here is what both variables look like for me locally on the latest `develop`: - `SYNAPSE_VERSION`: `1.139.0 (b=develop,1d2ddbc76e,dirty)` - `VERSION`: `1.139.0` Only reason we may want to do this is to hide the branch name (some sensitive name that exposes a security fix, etc). But we don't hide anything: `https://matrix.org/_matrix/federation/v1/version` ```json { "server": { "name": "Synapse", "version": "1.139.0rc3 (b=matrix-org-hotfixes-priv,f538ed5ac3)" } } ``` On `matrix.org`, the `Server` response header is masked as `cloudflare` which would otherwise show `1.139.0rc3` for everything from the main process. --- This is spawning from looking at the way we setup and start Synapse for homeserver tenant provisioning in the Synapse Pro for Small Hosts project (https://github.com/element-hq/synapse-small-hosts/issues/221) --- changelog.d/19011.bugfix | 1 + synapse/app/homeserver.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/19011.bugfix diff --git a/changelog.d/19011.bugfix b/changelog.d/19011.bugfix new file mode 100644 index 0000000000..460c71856e --- /dev/null +++ b/changelog.d/19011.bugfix @@ -0,0 +1 @@ +Update Synapse main process version string to include git info. 
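For context on the `homeserver.py` hunk that follows (illustrative, not part of the patch): the two constants differ only in the appended git metadata, as quoted in the commit message above.

```python
# Sketch of the two version constants involved in this change. Example values
# are the ones quoted in the commit message, not output from a real checkout.
from synapse.util import SYNAPSE_VERSION              # "1.139.0 (b=develop,1d2ddbc76e,dirty)"
from synapse.util.check_dependencies import VERSION   # "1.139.0"

# After this patch the main process advertises the richer string, matching the
# other entry points:
version_string = f"Synapse/{SYNAPSE_VERSION}"
```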
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3c691906ca..c45251d581 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -71,7 +71,8 @@ from synapse.rest.well_known import well_known_resource from synapse.server import HomeServer from synapse.storage import DataStore from synapse.types import ISynapseReactor -from synapse.util.check_dependencies import VERSION, check_requirements +from synapse.util import SYNAPSE_VERSION +from synapse.util.check_dependencies import check_requirements from synapse.util.httpresourcetree import create_resource_tree from synapse.util.module_loader import load_module @@ -399,7 +400,7 @@ def setup( hs = SynapseHomeServer( config.server.server_name, config=config, - version_string=f"Synapse/{VERSION}", + version_string=f"Synapse/{SYNAPSE_VERSION}", reactor=reactor, ) From 28bc486bff48617181bf8c17a76637e0befb9756 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 17:59:39 +0100 Subject: [PATCH 030/149] Bump prometheus-client from 0.22.1 to 0.23.1 (#19016) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6ff90ed361..23c8c32099 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1726,14 +1726,14 @@ xmp = ["defusedxml"] [[package]] name = "prometheus-client" -version = "0.22.1" +version = "0.23.1" description = "Python client for the Prometheus monitoring system." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "prometheus_client-0.22.1-py3-none-any.whl", hash = "sha256:cca895342e308174341b2cbf99a56bef291fbc0ef7b9e5412a0f26d653ba7094"}, - {file = "prometheus_client-0.22.1.tar.gz", hash = "sha256:190f1331e783cf21eb60bca559354e0a4d4378facecf78f5428c39b675d20d28"}, + {file = "prometheus_client-0.23.1-py3-none-any.whl", hash = "sha256:dd1913e6e76b59cfe44e7a4b83e01afc9873c1bdfd2ed8739f1e76aeca115f99"}, + {file = "prometheus_client-0.23.1.tar.gz", hash = "sha256:6ae8f9081eaaaf153a2e959d2e6c4f4fb57b12ef76c8c7980202f1e57b48b2ce"}, ] [package.extras] From 8696551e7f767cee3b680abe7be576a20216fe09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 18:00:02 +0100 Subject: [PATCH 031/149] Bump pydantic from 2.11.9 to 2.11.10 (#19017) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 23c8c32099..3a0ed56706 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1832,14 +1832,14 @@ files = [ [[package]] name = "pydantic" -version = "2.11.9" +version = "2.11.10" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" groups = ["main", "dev"] files = [ - {file = "pydantic-2.11.9-py3-none-any.whl", hash = "sha256:c42dd626f5cfc1c6950ce6205ea58c93efa406da65f479dcb4029d5934857da2"}, - {file = "pydantic-2.11.9.tar.gz", hash = "sha256:6b8ffda597a14812a7975c90b82a8a2e777d9257aba3453f973acd3c032a18e2"}, + {file = "pydantic-2.11.10-py3-none-any.whl", hash = "sha256:802a655709d49bd004c31e865ef37da30b540786a46bfce02333e0e24b5fe29a"}, + {file = "pydantic-2.11.10.tar.gz", hash = "sha256:dc280f0982fbda6c38fada4e476dc0a4f3aeaf9c6ad4c28df68a666ec3c61423"}, ] [package.dependencies] From dde4e0e83dc8970cf0d170607ca9345dc511154d Mon Sep 17 00:00:00 
2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 18:00:28 +0100 Subject: [PATCH 032/149] Bump types-pyyaml from 6.0.12.20250809 to 6.0.12.20250915 (#19018) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3a0ed56706..b8de9fbd77 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3057,14 +3057,14 @@ types-cffi = "*" [[package]] name = "types-pyyaml" -version = "6.0.12.20250809" +version = "6.0.12.20250915" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "types_pyyaml-6.0.12.20250809-py3-none-any.whl", hash = "sha256:032b6003b798e7de1a1ddfeefee32fac6486bdfe4845e0ae0e7fb3ee4512b52f"}, - {file = "types_pyyaml-6.0.12.20250809.tar.gz", hash = "sha256:af4a1aca028f18e75297da2ee0da465f799627370d74073e96fee876524f61b5"}, + {file = "types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = "sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6"}, + {file = "types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3"}, ] [[package]] From fb12d516cdcc40cc39dd4c4cd00f0da7b5ea8650 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 18:00:46 +0100 Subject: [PATCH 033/149] Bump authlib from 1.6.4 to 1.6.5 (#19019) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index b8de9fbd77..4445bbe256 100644 --- a/poetry.lock +++ b/poetry.lock @@ -34,15 +34,15 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a [[package]] name = "authlib" -version = "1.6.4" +version = "1.6.5" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." optional = true python-versions = ">=3.9" groups = ["main"] markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\"" files = [ - {file = "authlib-1.6.4-py2.py3-none-any.whl", hash = "sha256:39313d2a2caac3ecf6d8f95fbebdfd30ae6ea6ae6a6db794d976405fdd9aa796"}, - {file = "authlib-1.6.4.tar.gz", hash = "sha256:104b0442a43061dc8bc23b133d1d06a2b0a9c2e3e33f34c4338929e816287649"}, + {file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"}, + {file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"}, ] [package.dependencies] From 7b8831310f692cd0d28276dd6d0ff521baa8e8fe Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 7 Oct 2025 13:27:24 -0500 Subject: [PATCH 034/149] No need to have `version_string` as an argument since it's always the same (#19012) Assuming, we're happy with https://github.com/element-hq/synapse/pull/19011, this PR makes sense. 
--- changelog.d/19012.misc | 1 + synapse/_scripts/synapse_port_db.py | 2 -- synapse/_scripts/update_synapse_database.py | 2 -- synapse/app/_base.py | 19 ++++++++++++++----- synapse/app/admin_cmd.py | 2 -- synapse/app/generic_worker.py | 2 -- synapse/app/homeserver.py | 2 -- synapse/http/site.py | 1 + synapse/server.py | 4 ++-- tests/server.py | 1 - tests/test_server.py | 10 +++++----- 11 files changed, 23 insertions(+), 23 deletions(-) create mode 100644 changelog.d/19012.misc diff --git a/changelog.d/19012.misc b/changelog.d/19012.misc new file mode 100644 index 0000000000..2677ca1432 --- /dev/null +++ b/changelog.d/19012.misc @@ -0,0 +1 @@ +Remove `version_string` argument from `HomeServer` since it's always the same. diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index b0a067edcb..3c79919fea 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -98,7 +98,6 @@ from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStor from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database from synapse.types import ISynapseReactor -from synapse.util import SYNAPSE_VERSION # Cast safety: Twisted does some naughty magic which replaces the # twisted.internet.reactor module with a Reactor instance at runtime. @@ -325,7 +324,6 @@ class MockHomeserver(HomeServer): hostname=config.server.server_name, config=config, reactor=reactor, - version_string=f"Synapse/{SYNAPSE_VERSION}", ) diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py index ad02f0ed88..ce32f47d63 100644 --- a/synapse/_scripts/update_synapse_database.py +++ b/synapse/_scripts/update_synapse_database.py @@ -31,7 +31,6 @@ from synapse.config.homeserver import HomeServerConfig from synapse.server import HomeServer from synapse.storage import DataStore from synapse.types import ISynapseReactor -from synapse.util import SYNAPSE_VERSION # Cast safety: Twisted does some naughty magic which replaces the # twisted.internet.reactor module with a Reactor instance at runtime. 
@@ -47,7 +46,6 @@ class MockHomeserver(HomeServer): hostname=config.server.server_name, config=config, reactor=reactor, - version_string=f"Synapse/{SYNAPSE_VERSION}", ) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 655f684ecf..a3e4b4ea4b 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -421,17 +421,26 @@ def listen_http( context_factory: Optional[IOpenSSLContextFactory], reactor: ISynapseReactor = reactor, ) -> List[Port]: + """ + Args: + listener_config: TODO + root_resource: TODO + version_string: A string to present for the Server header + max_request_body_size: TODO + context_factory: TODO + reactor: TODO + """ assert listener_config.http_options is not None site_tag = listener_config.get_site_tag() site = SynapseSite( - "synapse.access.%s.%s" + logger_name="synapse.access.%s.%s" % ("https" if listener_config.is_tls() else "http", site_tag), - site_tag, - listener_config, - root_resource, - version_string, + site_tag=site_tag, + config=listener_config, + resource=root_resource, + server_version_string=version_string, max_request_body_size=max_request_body_size, reactor=reactor, hs=hs, diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index bc84dbdf49..bafeb46971 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -65,7 +65,6 @@ from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore from synapse.types import JsonMapping, StateMap -from synapse.util import SYNAPSE_VERSION from synapse.util.logcontext import LoggingContext logger = logging.getLogger("synapse.app.admin_cmd") @@ -316,7 +315,6 @@ def start(config: HomeServerConfig, args: argparse.Namespace) -> None: ss = AdminCmdServer( config.server.server_name, config=config, - version_string=f"Synapse/{SYNAPSE_VERSION}", ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 7e8b47c20a..7518661265 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -112,7 +112,6 @@ from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore -from synapse.util import SYNAPSE_VERSION from synapse.util.httpresourcetree import create_resource_tree logger = logging.getLogger("synapse.app.generic_worker") @@ -359,7 +358,6 @@ def start(config: HomeServerConfig) -> None: hs = GenericWorkerServer( config.server.server_name, config=config, - version_string=f"Synapse/{SYNAPSE_VERSION}", ) setup_logging(hs, config, use_worker_options=True) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index c45251d581..3424cdbdb8 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -71,7 +71,6 @@ from synapse.rest.well_known import well_known_resource from synapse.server import HomeServer from synapse.storage import DataStore from synapse.types import ISynapseReactor -from synapse.util import SYNAPSE_VERSION from synapse.util.check_dependencies import check_requirements from synapse.util.httpresourcetree import create_resource_tree from synapse.util.module_loader import load_module @@ -400,7 +399,6 @@ def setup( hs = SynapseHomeServer( config.server.server_name, 
config=config, - version_string=f"Synapse/{SYNAPSE_VERSION}", reactor=reactor, ) diff --git a/synapse/http/site.py b/synapse/http/site.py index f4f326cfde..cf31b64d80 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -741,6 +741,7 @@ class SynapseSite(ProxySite): def __init__( self, + *, logger_name: str, site_tag: str, config: ListenerConfig, diff --git a/synapse/server.py b/synapse/server.py index cc0d3a427b..1c2132b8cc 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -175,6 +175,7 @@ from synapse.storage.controllers import StorageControllers from synapse.streams.events import EventSources from synapse.synapse_rust.rendezvous import RendezvousHandler from synapse.types import DomainSpecificString, ISynapseReactor +from synapse.util import SYNAPSE_VERSION from synapse.util.caches import CACHE_METRIC_REGISTRY from synapse.util.clock import Clock from synapse.util.distributor import Distributor @@ -322,7 +323,6 @@ class HomeServer(metaclass=abc.ABCMeta): hostname: str, config: HomeServerConfig, reactor: Optional[ISynapseReactor] = None, - version_string: str = "Synapse", ): """ Args: @@ -347,7 +347,7 @@ class HomeServer(metaclass=abc.ABCMeta): self._instance_id = random_string(5) self._instance_name = config.worker.instance_name - self.version_string = version_string + self.version_string = f"Synapse/{SYNAPSE_VERSION}" self.datastores: Optional[Databases] = None diff --git a/tests/server.py b/tests/server.py index a9a53eb8a4..208556abaf 100644 --- a/tests/server.py +++ b/tests/server.py @@ -1198,7 +1198,6 @@ def setup_test_homeserver( hs = homeserver_to_use( server_name, config=config, - version_string="Synapse/tests", reactor=reactor, ) diff --git a/tests/test_server.py b/tests/test_server.py index 66c5cf9e37..1854a3c4d4 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -236,17 +236,17 @@ class OptionsResourceTests(unittest.TestCase): """Create a request from the method/path and return a channel with the response.""" # Create a site and query for the resource. site = SynapseSite( - "test", - "site_tag", - parse_listener_def( + logger_name="test", + site_tag="site_tag", + config=parse_listener_def( 0, { "type": "http", "port": 0, }, ), - self.resource, - "1.0", + resource=self.resource, + server_version_string="1", max_request_body_size=4096, reactor=self.reactor, hs=self.homeserver, From 631eed91f1df4d8f15fffc0e8521f7532186abb0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 7 Oct 2025 13:29:22 -0500 Subject: [PATCH 035/149] Fix bad merge with `start_background_tasks` (#19013) This was originally removed in https://github.com/element-hq/synapse/pull/18886 but it looks like it snuck back in https://github.com/element-hq/synapse/pull/18828 during a [bad merge](https://github.com/element-hq/synapse/commit/4cd3d9172ed7b87e509746851a376c861a27820e). Noticed while looking at Synapse setup and startup (just by happenstance). I don't think this has adverse effects on Synapse actually working, and `start_background_tasks()` can be called multiple times. ### Is there a good way to audit all of these merges? I would like to see the conflicts for each merge. This works, but it's still hard to notice when anything is wrong: ``` git log --remerge-diff ``` > shows the difference from mechanical merge result and the result that is actually recorded in a merge commit via https://stackoverflow.com/questions/15277708/how-do-you-see-show-a-git-merge-conflict-resolution-that-was-done-given-a-mer/71181334#71181334 The following works better.
Specify the version range to the commit right before the merge to the merge. And can even specify which file to look at to make it more obvious with the hindsight we have now. ``` git log --remerge-diff ~1.. -- synapse/server.py ``` Example: ``` git log --remerge-diff 4cd3d9172ed7b87e509746851a376c861a27820e~1..4cd3d9172ed7b87e509746851a376c861a27820e -- synapse/server.py ``` --- changelog.d/19013.misc | 1 + synapse/server.py | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) create mode 100644 changelog.d/19013.misc diff --git a/changelog.d/19013.misc b/changelog.d/19013.misc new file mode 100644 index 0000000000..626a6e3db4 --- /dev/null +++ b/changelog.d/19013.misc @@ -0,0 +1 @@ +Remove duplicate call to `hs.start_background_tasks()` introduced from a bad merge. diff --git a/synapse/server.py b/synapse/server.py index 1c2132b8cc..1316249dda 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -613,12 +613,6 @@ class HomeServer(metaclass=abc.ABCMeta): self.datastores = Databases(self.DATASTORE_CLASS, self) logger.info("Finished setting up.") - # Register background tasks required by this server. This must be done - # somewhat manually due to the background tasks not being registered - # unless handlers are instantiated. - if self.config.worker.run_background_tasks: - self.start_background_tasks() - # def __del__(self) -> None: # """ # Called when an the homeserver is garbage collected. From 8f01eb8ee01bb2dc130748535088079339a958ee Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 8 Oct 2025 03:38:15 -0700 Subject: [PATCH 036/149] Add an Admin API to fetch an event by ID (#18963) Adds an endpoint to allow server admins to fetch an event regardless of their membership in the originating room. --- changelog.d/18963.feature | 1 + docs/admin_api/fetch_event.md | 53 ++++++++++++++++++++++++ synapse/rest/admin/__init__.py | 4 ++ synapse/rest/admin/events.py | 69 +++++++++++++++++++++++++++++++ tests/rest/admin/test_event.py | 74 ++++++++++++++++++++++++++++++++++ 5 files changed, 201 insertions(+) create mode 100644 changelog.d/18963.feature create mode 100644 docs/admin_api/fetch_event.md create mode 100644 synapse/rest/admin/events.py create mode 100644 tests/rest/admin/test_event.py diff --git a/changelog.d/18963.feature b/changelog.d/18963.feature new file mode 100644 index 0000000000..2cb0d57995 --- /dev/null +++ b/changelog.d/18963.feature @@ -0,0 +1 @@ +Add an Admin API to fetch an event by ID. diff --git a/docs/admin_api/fetch_event.md b/docs/admin_api/fetch_event.md new file mode 100644 index 0000000000..baf45b8aa7 --- /dev/null +++ b/docs/admin_api/fetch_event.md @@ -0,0 +1,53 @@ +# Fetch Event API + +The fetch event API allows admins to fetch an event regardless of their membership in the room it +originated in. + +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api/). 
+ +Request: +```http +GET /_synapse/admin/v1/fetch_event/ +``` + +The API returns a JSON body like the following: + +Response: +```json +{ + "event": { + "auth_events": [ + "$WhLChbYg6atHuFRP7cUd95naUtc8L0f7fqeizlsUVvc", + "$9Wj8dt02lrNEWweeq-KjRABUYKba0K9DL2liRvsAdtQ", + "$qJxBFxBt8_ODd9b3pgOL_jXP98S_igc1_kizuPSZFi4" + ], + "content": { + "body": "Hey now", + "msgtype": "m.text" + }, + "depth": 6, + "event_id": "$hJ_kcXbVMcI82JDrbqfUJIHu61tJD86uIFJ_8hNHi7s", + "hashes": { + "sha256": "LiNw8DtrRVf55EgAH8R42Wz7WCJUqGsPt2We6qZO5Rg" + }, + "origin_server_ts": 799, + "prev_events": [ + "$cnSUrNMnC3Ywh9_W7EquFxYQjC_sT3BAAVzcUVxZq1g" + ], + "room_id": "!aIhKToCqgPTBloWMpf:test", + "sender": "@user:test", + "signatures": { + "test": { + "ed25519:a_lPym": "7mqSDwK1k7rnw34Dd8Fahu0rhPW7jPmcWPRtRDoEN9Yuv+BCM2+Rfdpv2MjxNKy3AYDEBwUwYEuaKMBaEMiKAQ" + } + }, + "type": "m.room.message", + "unsigned": { + "age_ts": 799 + } + } +} +``` + + diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index d9a6e99c5d..0386f8a34b 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -57,6 +57,9 @@ from synapse.rest.admin.event_reports import ( EventReportDetailRestServlet, EventReportsRestServlet, ) +from synapse.rest.admin.events import ( + EventRestServlet, +) from synapse.rest.admin.experimental_features import ExperimentalFeaturesRestServlet from synapse.rest.admin.federation import ( DestinationMembershipRestServlet, @@ -339,6 +342,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ExperimentalFeaturesRestServlet(hs).register(http_server) SuspendAccountRestServlet(hs).register(http_server) ScheduledTasksRestServlet(hs).register(http_server) + EventRestServlet(hs).register(http_server) def register_servlets_for_client_rest_resource( diff --git a/synapse/rest/admin/events.py b/synapse/rest/admin/events.py new file mode 100644 index 0000000000..61b347f8f4 --- /dev/null +++ b/synapse/rest/admin/events.py @@ -0,0 +1,69 @@ +from http import HTTPStatus +from typing import TYPE_CHECKING, Tuple + +from synapse.api.errors import NotFoundError +from synapse.events.utils import ( + SerializeEventConfig, + format_event_raw, + serialize_event, +) +from synapse.http.servlet import RestServlet +from synapse.http.site import SynapseRequest +from synapse.rest.admin import admin_patterns +from synapse.rest.admin._base import assert_user_is_admin +from synapse.storage.databases.main.events_worker import EventRedactBehaviour +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class EventRestServlet(RestServlet): + """ + Get an event that is known to the homeserver. + The requester must have administrator access in Synapse. + + GET /_synapse/admin/v1/fetch_event/ + returns: + 200 OK with event json if the event is known to the homeserver. Otherwise raises + a NotFound error. + + Args: + event_id: the id of the requested event. 
+ Returns: + JSON blob of the event + """ + + PATTERNS = admin_patterns("/fetch_event/(?P<event_id>[^/]*)$") + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self._store = hs.get_datastores().main + self._clock = hs.get_clock() + + async def on_GET( + self, request: SynapseRequest, event_id: str + ) -> Tuple[int, JsonDict]: + requester = await self._auth.get_user_by_req(request) + await assert_user_is_admin(self._auth, requester) + + event = await self._store.get_event( + event_id, + EventRedactBehaviour.as_is, + allow_none=True, + ) + + if event is None: + raise NotFoundError("Event not found") + + config = SerializeEventConfig( + as_client_event=False, + event_format=format_event_raw, + requester=requester, + only_event_fields=None, + include_stripped_room_state=True, + include_admin_metadata=True, + ) + res = {"event": serialize_event(event, self._clock.time_msec(), config=config)} + + return HTTPStatus.OK, res diff --git a/tests/rest/admin/test_event.py b/tests/rest/admin/test_event.py new file mode 100644 index 0000000000..4494804210 --- /dev/null +++ b/tests/rest/admin/test_event.py @@ -0,0 +1,74 @@ +from twisted.internet.testing import MemoryReactor + +import synapse.rest.admin +from synapse.api.errors import Codes +from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.util.clock import Clock + +from tests import unittest + + +class FetchEventTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + self.room_id1 = self.helper.create_room_as( + self.other_user, tok=self.other_user_tok, is_public=True + ) + resp = self.helper.send(self.room_id1, body="Hey now", tok=self.other_user_tok) + self.event_id = resp["event_id"] + + def test_no_auth(self) -> None: + """ + Try to get an event without authentication. + """ + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/fetch_event/{self.event_id}", + ) + + self.assertEqual(401, channel.code, msg=channel.json_body) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_not_admin(self) -> None: + """ + If the user is not a server admin, an error 403 is returned.
+ """ + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/fetch_event/{self.event_id}", + access_token=self.other_user_tok, + ) + + self.assertEqual(403, channel.code, msg=channel.json_body) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_fetch_event(self) -> None: + """ + Test that we can successfully fetch an event + """ + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/fetch_event/{self.event_id}", + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual( + channel.json_body["event"]["content"], + {"body": "Hey now", "msgtype": "m.text"}, + ) + self.assertEqual(channel.json_body["event"]["event_id"], self.event_id) + self.assertEqual(channel.json_body["event"]["type"], "m.room.message") + self.assertEqual(channel.json_body["event"]["sender"], self.other_user) From bcbbccca23f13cbef90bbd3dc7ff8d0a0166b900 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 8 Oct 2025 12:58:42 +0100 Subject: [PATCH 037/149] Swap `macos-13` with `macos-15-intel` GHA runner in CI (#19025) --- .github/workflows/release-artifacts.yml | 4 ++-- changelog.d/19025.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/19025.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 4969ca6723..da6996742b 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -114,8 +114,8 @@ jobs: os: - ubuntu-24.04 - ubuntu-24.04-arm - - macos-13 # This uses x86-64 - macos-14 # This uses arm64 + - macos-15-intel # This uses x86-64 # is_pr is a flag used to exclude certain jobs from the matrix on PRs. # It is not read by the rest of the workflow. is_pr: @@ -124,7 +124,7 @@ jobs: exclude: # Don't build macos wheels on PR CI. - is_pr: true - os: "macos-13" + os: "macos-15-intel" - is_pr: true os: "macos-14" # Don't build aarch64 wheels on PR CI. diff --git a/changelog.d/19025.misc b/changelog.d/19025.misc new file mode 100644 index 0000000000..4c0c5d4bce --- /dev/null +++ b/changelog.d/19025.misc @@ -0,0 +1 @@ +Swap near-end-of-life `macos-13` GitHub Actions runner for the `macos-15-intel` variant. \ No newline at end of file From e3344dc0c3a11b0bb661657da51655a0b02a4bfc Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 9 Oct 2025 15:15:13 +0100 Subject: [PATCH 038/149] Expose `defer_to_threadpool` in the module API (#19032) --- changelog.d/19032.feature | 1 + synapse/module_api/__init__.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 changelog.d/19032.feature diff --git a/changelog.d/19032.feature b/changelog.d/19032.feature new file mode 100644 index 0000000000..2e3bdbe391 --- /dev/null +++ b/changelog.d/19032.feature @@ -0,0 +1 @@ +Expose a `defer_to_threadpool` function in the Synapse Module API that allows modules to run a function on a separate thread in a custom threadpool. 
\ No newline at end of file diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 12a31dd2ab..ea0887966a 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -43,6 +43,7 @@ from typing_extensions import Concatenate, ParamSpec from twisted.internet import defer from twisted.internet.interfaces import IDelayedCall +from twisted.python.threadpool import ThreadPool from twisted.web.resource import Resource from synapse.api import errors @@ -79,6 +80,7 @@ from synapse.http.servlet import parse_json_object_from_request from synapse.http.site import SynapseRequest from synapse.logging.context import ( defer_to_thread, + defer_to_threadpool, make_deferred_yieldable, run_in_background, ) @@ -1733,6 +1735,33 @@ class ModuleApi: """ return await defer_to_thread(self._hs.get_reactor(), f, *args, **kwargs) + async def defer_to_threadpool( + self, + threadpool: ThreadPool, + f: Callable[P, T], + *args: P.args, + **kwargs: P.kwargs, + ) -> T: + """Runs the given function in a separate thread from the given thread pool. + + Allows specifying a custom thread pool instead of using the default Synapse + one. To use the default Synapse threadpool, use `defer_to_thread` instead. + + Added in Synapse v1.140.0. + + Args: + threadpool: The thread pool to use. + f: The function to run. + args: The function's arguments. + kwargs: The function's keyword arguments. + + Returns: + The return value of the function once ran in a thread. + """ + return await defer_to_threadpool( + self._hs.get_reactor(), threadpool, f, *args, **kwargs + ) + async def check_username(self, username: str) -> None: """Checks if the provided username uses the grammar defined in the Matrix specification, and is already being used by an existing user. From 18f07fdc4c63d119d2b02842400b9f6b04153501 Mon Sep 17 00:00:00 2001 From: fkwp Date: Thu, 9 Oct 2025 18:15:47 +0200 Subject: [PATCH 039/149] Add MatrixRTC backend/services discovery endpoint (#18967) Co-authored-by: Andrew Morgan --- changelog.d/18967.feature | 1 + .../configuration/config_documentation.md | 22 ++++ schema/synapse-config.schema.yaml | 29 +++++ synapse/config/_base.pyi | 2 + synapse/config/experimental.py | 3 + synapse/config/homeserver.py | 2 + synapse/config/matrixrtc.py | 67 +++++++++++ synapse/rest/__init__.py | 2 + synapse/rest/client/matrixrtc.py | 52 +++++++++ tests/rest/client/test_matrixrtc.py | 105 ++++++++++++++++++ 10 files changed, 285 insertions(+) create mode 100644 changelog.d/18967.feature create mode 100644 synapse/config/matrixrtc.py create mode 100644 synapse/rest/client/matrixrtc.py create mode 100644 tests/rest/client/test_matrixrtc.py diff --git a/changelog.d/18967.feature b/changelog.d/18967.feature new file mode 100644 index 0000000000..58337d9e5d --- /dev/null +++ b/changelog.d/18967.feature @@ -0,0 +1 @@ +Add experimental implementation for the latest draft of [MSC4143](https://github.com/matrix-org/matrix-spec-proposals/pull/4143). \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 7bcf82e7ab..fec8d468a8 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2573,6 +2573,28 @@ Example configuration: turn_allow_guests: false ``` --- +### `matrix_rtc` + +*(object)* Options related to MatrixRTC. Defaults to `{}`. 
+ +This setting has the following sub-options: + +* `transports` (array): A list of transport types and arguments to use for MatrixRTC connections. Defaults to `[]`. + + Options for each entry include: + + * `type` (string): The type of transport to use to connect to the selective forwarding unit (SFU). + + * `livekit_service_url` (string): The base URL of the LiveKit service. Should only be used with LiveKit-based transports. + +Example configuration: +```yaml +matrix_rtc: + transports: + - type: livekit + livekit_service_url: https://matrix-rtc.example.com/livekit/jwt +``` +--- ## Registration Registration can be rate-limited using the parameters in the [Ratelimiting](#ratelimiting) section of this manual. diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml index b406af0409..285df53afe 100644 --- a/schema/synapse-config.schema.yaml +++ b/schema/synapse-config.schema.yaml @@ -2884,6 +2884,35 @@ properties: default: true examples: - false + matrix_rtc: + type: object + description: >- + Options related to MatrixRTC. + properties: + transports: + type: array + items: + type: object + required: + - type + properties: + type: + type: string + description: The type of transport to use to connect to the selective forwarding unit (SFU). + example: livekit + livekit_service_url: + type: string + description: >- + The base URL of the LiveKit service. Should only be used with LiveKit-based transports. + example: https://matrix-rtc.example.com/livekit/jwt + description: + A list of transport types and arguments to use for MatrixRTC connections. + default: [] + default: {} + examples: + - transports: + - type: livekit + livekit_service_url: https://matrix-rtc.example.com/livekit/jwt enable_registration: type: boolean description: >- diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index 5e03635206..ed16d5b313 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -37,6 +37,7 @@ from synapse.config import ( # noqa: F401 key, logger, mas, + matrixrtc, metrics, modules, oembed, @@ -126,6 +127,7 @@ class RootConfig: auto_accept_invites: auto_accept_invites.AutoAcceptInvitesConfig user_types: user_types.UserTypesConfig mas: mas.MasConfig + matrix_rtc: matrixrtc.MatrixRtcConfig config_classes: List[Type["Config"]] = ... 
config_files: List[str] diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index d7a3d67558..04ca6e3c51 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -556,6 +556,9 @@ class ExperimentalConfig(Config): # MSC4133: Custom profile fields self.msc4133_enabled: bool = experimental.get("msc4133_enabled", False) + # MSC4143: Matrix RTC Transport using Livekit Backend + self.msc4143_enabled: bool = experimental.get("msc4143_enabled", False) + # MSC4169: Backwards-compatible redaction sending using `/send` self.msc4169_enabled: bool = experimental.get("msc4169_enabled", False) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 5d7089c2e6..f46f41da31 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -37,6 +37,7 @@ from .jwt import JWTConfig from .key import KeyConfig from .logger import LoggingConfig from .mas import MasConfig +from .matrixrtc import MatrixRtcConfig from .metrics import MetricsConfig from .modules import ModulesConfig from .oembed import OembedConfig @@ -80,6 +81,7 @@ class HomeServerConfig(RootConfig): OembedConfig, CaptchaConfig, VoipConfig, + MatrixRtcConfig, RegistrationConfig, AccountValidityConfig, MetricsConfig, diff --git a/synapse/config/matrixrtc.py b/synapse/config/matrixrtc.py new file mode 100644 index 0000000000..7844d8f398 --- /dev/null +++ b/synapse/config/matrixrtc.py @@ -0,0 +1,67 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +# [This file includes modifications made by New Vector Limited] +# +# + +from typing import Any, Optional + +from pydantic import ValidationError + +from synapse._pydantic_compat import Field, StrictStr, validator +from synapse.types import JsonDict +from synapse.util.pydantic_models import ParseModel + +from ._base import Config, ConfigError + + +class TransportConfigModel(ParseModel): + type: StrictStr + + livekit_service_url: Optional[StrictStr] = Field(default=None) + """An optional livekit service URL. Only required if type is "livekit".""" + + @validator("livekit_service_url", always=True) + def validate_livekit_service_url(cls, v: Any, values: dict) -> Any: + if values.get("type") == "livekit" and not v: + raise ValueError( + "You must set a `livekit_service_url` when using the 'livekit' transport." 
+ ) + + return v + + +class MatrixRtcConfigModel(ParseModel): + transports: list = [] + + +class MatrixRtcConfig(Config): + section = "matrix_rtc" + + def read_config( + self, config: JsonDict, allow_secrets_in_config: bool, **kwargs: Any + ) -> None: + matrix_rtc = config.get("matrix_rtc", {}) + if matrix_rtc is None: + matrix_rtc = {} + + try: + parsed = MatrixRtcConfigModel(**matrix_rtc) + except ValidationError as e: + raise ConfigError( + "Could not validate matrix_rtc config", + ("matrix_rtc",), + ) from e + + self.transports = parsed.transports diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index a24ca09846..db3bd46542 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -42,6 +42,7 @@ from synapse.rest.client import ( login, login_token_request, logout, + matrixrtc, mutual_rooms, notifications, openid, @@ -89,6 +90,7 @@ CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] = ( presence.register_servlets, directory.register_servlets, voip.register_servlets, + matrixrtc.register_servlets, pusher.register_servlets, push_rule.register_servlets, logout.register_servlets, diff --git a/synapse/rest/client/matrixrtc.py b/synapse/rest/client/matrixrtc.py new file mode 100644 index 0000000000..afe4d4fa83 --- /dev/null +++ b/synapse/rest/client/matrixrtc.py @@ -0,0 +1,52 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +# [This file includes modifications made by New Vector Limited] +# +# + +from typing import TYPE_CHECKING, Tuple + +from synapse.http.server import HttpServer +from synapse.http.servlet import RestServlet +from synapse.http.site import SynapseRequest +from synapse.rest.client._base import client_patterns +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class MatrixRTCRestServlet(RestServlet): + PATTERNS = client_patterns(r"/org\.matrix\.msc4143/rtc/transports$", releases=()) + CATEGORY = "Client API requests" + + def __init__(self, hs: "HomeServer"): + super().__init__() + self._hs = hs + self._auth = hs.get_auth() + self._transports = hs.config.matrix_rtc.transports + + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + # Require authentication for this endpoint. + await self._auth.get_user_by_req(request) + + if self._transports: + return 200, {"rtc_transports": self._transports} + + return 200, {} + + +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.experimental.msc4143_enabled: + MatrixRTCRestServlet(hs).register(http_server) diff --git a/tests/rest/client/test_matrixrtc.py b/tests/rest/client/test_matrixrtc.py new file mode 100644 index 0000000000..b5216c7adc --- /dev/null +++ b/tests/rest/client/test_matrixrtc.py @@ -0,0 +1,105 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. 
+# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +# [This file includes modifications made by New Vector Limited] +# +# + +"""Tests REST events for /rtc/endpoints path.""" + +from twisted.internet.testing import MemoryReactor + +from synapse.rest import admin +from synapse.rest.client import login, matrixrtc, register, room +from synapse.server import HomeServer +from synapse.util.clock import Clock + +from tests.unittest import HomeserverTestCase, override_config + +PATH_PREFIX = "/_matrix/client/unstable/org.matrix.msc4143" +RTC_ENDPOINT = {"type": "focusA", "required_field": "theField"} +LIVEKIT_ENDPOINT = { + "type": "livekit", + "livekit_service_url": "https://livekit.example.com", +} + + +class MatrixRtcTestCase(HomeserverTestCase): + """Tests /rtc/transports Client-Server REST API.""" + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + register.register_servlets, + matrixrtc.register_servlets, + ] + + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + self.register_user("alice", "password") + self._alice_tok = self.login("alice", "password") + + def test_matrixrtc_endpoint_not_enabled(self) -> None: + channel = self.make_request( + "GET", f"{PATH_PREFIX}/rtc/transports", access_token=self._alice_tok + ) + self.assertEqual(404, channel.code, channel.json_body) + self.assertEqual( + "M_UNRECOGNIZED", channel.json_body["errcode"], channel.json_body + ) + + @override_config({"experimental_features": {"msc4143_enabled": True}}) + def test_matrixrtc_endpoint_requires_authentication(self) -> None: + channel = self.make_request("GET", f"{PATH_PREFIX}/rtc/transports") + self.assertEqual(401, channel.code, channel.json_body) + + @override_config( + { + "experimental_features": {"msc4143_enabled": True}, + "matrix_rtc": {"transports": [RTC_ENDPOINT]}, + } + ) + def test_matrixrtc_endpoint_contains_expected_transport(self) -> None: + channel = self.make_request( + "GET", f"{PATH_PREFIX}/rtc/transports", access_token=self._alice_tok + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assert_dict({"rtc_transports": [RTC_ENDPOINT]}, channel.json_body) + + @override_config( + { + "experimental_features": {"msc4143_enabled": True}, + "matrix_rtc": {"transports": []}, + } + ) + def test_matrixrtc_endpoint_no_transports_configured(self) -> None: + channel = self.make_request( + "GET", f"{PATH_PREFIX}/rtc/transports", access_token=self._alice_tok + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assert_dict({}, channel.json_body) + + @override_config( + { + "experimental_features": {"msc4143_enabled": True}, + "matrix_rtc": {"transports": [LIVEKIT_ENDPOINT]}, + } + ) + def test_matrixrtc_endpoint_livekit_transport(self) -> None: + channel = self.make_request( + "GET", f"{PATH_PREFIX}/rtc/transports", access_token=self._alice_tok + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assert_dict({"rtc_transports": [LIVEKIT_ENDPOINT]}, channel.json_body) From d440cfc9e2178ae0ff60060ef2e4cf1f95bb1df0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 9 Oct 2025 17:15:54 +0100 Subject: [PATCH 
040/149] Allow any release script command to accept `--gh-token` (#19035) --- changelog.d/19035.misc | 1 + scripts-dev/release.py | 22 ++++++++++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 changelog.d/19035.misc diff --git a/changelog.d/19035.misc b/changelog.d/19035.misc new file mode 100644 index 0000000000..a667919045 --- /dev/null +++ b/changelog.d/19035.misc @@ -0,0 +1 @@ +Allow any command of the `release.py` to accept a `--gh-token` argument. \ No newline at end of file diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 73a4e7b7a9..a7e967116e 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -639,7 +639,16 @@ def _notify(message: str) -> None: @cli.command() -def merge_back() -> None: +# Although this option is not used, allow it anyways. Otherwise the user will +# receive an error when providing it, which is annoying as other commands accept +# it. +@click.option( + "--gh-token", + "_gh_token", + envvar=["GH_TOKEN", "GITHUB_TOKEN"], + required=False, +) +def merge_back(_gh_token: Optional[str]) -> None: _merge_back() @@ -687,7 +696,16 @@ def _merge_back() -> None: @cli.command() -def announce() -> None: +# Although this option is not used, allow it anyways. Otherwise the user will +# receive an error when providing it, which is annoying as other commands accept +# it. +@click.option( + "--gh-token", + "_gh_token", + envvar=["GH_TOKEN", "GITHUB_TOKEN"], + required=False, +) +def announce(_gh_token: Optional[str]) -> None: _announce() From 715cc5ee37d031cdd512a9d6b5691c8d48a25d03 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 9 Oct 2025 13:12:10 -0500 Subject: [PATCH 041/149] Split homeserver creation and setup (#19015) ### Background As part of Element's plan to support a light form of vhosting (virtual host) (multiple instances of Synapse in the same Python process), we're currently diving into the details and implications of running multiple instances of Synapse in the same Python process. "Clean tenant provisioning" tracked internally by https://github.com/element-hq/synapse-small-hosts/issues/221 ### Partial startup problem In the context of Synapse Pro for Small Hosts, since the Twisted reactor is already running (from the `multi_synapse` shard process itself), when provisioning a homeserver tenant, the `reactor.callWhenRunning(...)` callbacks will be invoked immediately. This includes the Synapse's [`start`](https://github.com/element-hq/synapse/blob/0615b64bb49684b846110465052642a46fd27028/synapse/app/homeserver.py#L418-L429) callback which sets up everything (including listeners, background tasks, etc). If we encounter an error at this point, we are partially setup but the exception will [bubble back to us](https://github.com/element-hq/synapse-small-hosts/blob/8be122186bf1acb8c0426d84eb3abded25d682b7/multi_synapse/app/shard.py#L114-L121) without us having a handle to the homeserver yet so we can't call `hs.shutdown()` and clean everything up. ### What does this PR do? Structures Synapse so we split creating the homeserver instance from setting everything up. This way we have access to `hs` if anything goes wrong during setup and can subsequently `hs.shutdown()` to clean everything up. 
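For illustration, a minimal sketch of the new call pattern (the function names are the ones added in this diff; the config path and the `try`/`except` clean-up are made up for the example and are not part of this change):

```python
from synapse.app.homeserver import create_homeserver, load_or_generate_config, setup

# Illustrative sketch only: the caller now obtains a handle to `hs` before any
# setup work happens, so a failure during setup no longer leaves it without a
# reference to clean up.
homeserver_config = load_or_generate_config(["-c", "homeserver.yaml"])
hs = create_homeserver(homeserver_config)
try:
    # freeze=False keeps the homeserver garbage-collectable after `shutdown()`.
    setup(hs, freeze=False)
except Exception:
    # Because we already hold `hs`, a partially set-up instance can be torn
    # down by the caller (e.g. via `hs.shutdown()`) instead of leaking
    # listeners and background tasks.
    raise
```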
--- changelog.d/19015.misc | 1 + synapse/app/homeserver.py | 84 +++++++++++++++++------- tests/app/test_homeserver_start.py | 8 ++- tests/config/test_registration_config.py | 8 ++- 4 files changed, 74 insertions(+), 27 deletions(-) create mode 100644 changelog.d/19015.misc diff --git a/changelog.d/19015.misc b/changelog.d/19015.misc new file mode 100644 index 0000000000..cabc453469 --- /dev/null +++ b/changelog.d/19015.misc @@ -0,0 +1 @@ +Split homeserver creation (`create_homeserver`) and setup (`setup`). diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3424cdbdb8..6ea412b9f4 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -83,6 +83,10 @@ def gz_wrap(r: Resource) -> Resource: class SynapseHomeServer(HomeServer): + """ + Homeserver class for the main Synapse process. + """ + DATASTORE_CLASS = DataStore def _listener_http( @@ -345,23 +349,17 @@ def load_or_generate_config(argv_options: List[str]) -> HomeServerConfig: return config -def setup( +def create_homeserver( config: HomeServerConfig, reactor: Optional[ISynapseReactor] = None, - freeze: bool = True, ) -> SynapseHomeServer: """ - Create and setup a Synapse homeserver instance given a configuration. + Create a homeserver instance for the Synapse main process. Args: config: The configuration for the homeserver. reactor: Optionally provide a reactor to use. Can be useful in different scenarios that you want control over the reactor, such as tests. - freeze: whether to freeze the homeserver base objects in the garbage collector. - May improve garbage collection performance by marking objects with an effectively - static lifetime as frozen so they don't need to be considered for cleanup. - If you ever want to `shutdown` the homeserver, this needs to be - False otherwise the homeserver cannot be garbage collected after `shutdown`. Returns: A homeserver instance. @@ -372,7 +370,6 @@ def setup( "You have specified `worker_app` in the config but are attempting to start a non-worker " "instance. Please use `python -m synapse.app.generic_worker` instead (or remove the option if this is the main process)." ) - sys.exit(1) events.USE_FROZEN_DICTS = config.server.use_frozen_dicts synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage @@ -397,24 +394,50 @@ def setup( ) hs = SynapseHomeServer( - config.server.server_name, + hostname=config.server.server_name, config=config, reactor=reactor, ) - setup_logging(hs, config, use_worker_options=False) + return hs + + +def setup( + hs: SynapseHomeServer, + *, + freeze: bool = True, +) -> None: + """ + Setup a Synapse homeserver instance given a configuration. + + Args: + hs: The homeserver to setup. + freeze: whether to freeze the homeserver base objects in the garbage collector. + May improve garbage collection performance by marking objects with an effectively + static lifetime as frozen so they don't need to be considered for cleanup. + If you ever want to `shutdown` the homeserver, this needs to be + False otherwise the homeserver cannot be garbage collected after `shutdown`. + + Returns: + A homeserver instance. + """ + + setup_logging(hs, hs.config, use_worker_options=False) + + # Log after we've configured logging. + logger.info("Setting up server") # Start the tracer init_tracer(hs) # noqa - logger.info("Setting up server") - try: hs.setup() except Exception as e: handle_startup_exception(e) - async def start() -> None: + async def _start_when_reactor_running() -> None: + # TODO: Feels like this should be moved somewhere else. 
+ # # Load the OIDC provider metadatas, if OIDC is enabled. if hs.config.oidc.oidc_enabled: oidc = hs.get_oidc_handler() @@ -423,21 +446,31 @@ def setup( await _base.start(hs, freeze) + # TODO: This should be moved to `SynapseHomeServer.start_background_tasks` (not + # `HomeServer.start_background_tasks`) (this way it matches the behavior of only + # running on `main`) hs.get_datastores().main.db_pool.updates.start_doing_background_updates() - register_start(hs, start) - - return hs + # Register a callback to be invoked once the reactor is running + register_start(hs, _start_when_reactor_running) -def run(hs: HomeServer) -> None: +def start_reactor( + config: HomeServerConfig, +) -> None: + """ + Start the reactor (Twisted event-loop). + + Args: + config: The configuration for the homeserver. + """ _base.start_reactor( "synapse-homeserver", - soft_file_limit=hs.config.server.soft_file_limit, - gc_thresholds=hs.config.server.gc_thresholds, - pid_file=hs.config.server.pid_file, - daemonize=hs.config.server.daemonize, - print_pidfile=hs.config.server.print_pidfile, + soft_file_limit=config.server.soft_file_limit, + gc_thresholds=config.server.gc_thresholds, + pid_file=config.server.pid_file, + daemonize=config.server.daemonize, + print_pidfile=config.server.print_pidfile, logger=logger, ) @@ -448,13 +481,14 @@ def main() -> None: with LoggingContext(name="main", server_name=homeserver_config.server.server_name): # check base requirements check_requirements() - hs = setup(homeserver_config) + hs = create_homeserver(homeserver_config) + setup(hs) # redirect stdio to the logs, if configured. if not hs.config.logging.no_redirect_stdio: redirect_stdio_to_logs() - run(hs) + start_reactor(homeserver_config) if __name__ == "__main__": diff --git a/tests/app/test_homeserver_start.py b/tests/app/test_homeserver_start.py index 0d257c98aa..36a7170d12 100644 --- a/tests/app/test_homeserver_start.py +++ b/tests/app/test_homeserver_start.py @@ -37,7 +37,13 @@ class HomeserverAppStartTestCase(ConfigFileTestCase): self.add_lines_to_config([" main:", " host: 127.0.0.1", " port: 1234"]) # Ensure that starting master process with worker config raises an exception with self.assertRaises(ConfigError): + # Do a normal homeserver creation and setup homeserver_config = synapse.app.homeserver.load_or_generate_config( ["-c", self.config_file] ) - synapse.app.homeserver.setup(homeserver_config) + # XXX: The error will be raised at this point + hs = synapse.app.homeserver.create_homeserver(homeserver_config) + # Continue with the setup. We don't expect this to run because we raised + # earlier, but in the future, the code could be refactored to raise the + # error in a different place. + synapse.app.homeserver.setup(hs) diff --git a/tests/config/test_registration_config.py b/tests/config/test_registration_config.py index a8520c91d1..9da0a3f426 100644 --- a/tests/config/test_registration_config.py +++ b/tests/config/test_registration_config.py @@ -112,7 +112,13 @@ class RegistrationConfigTestCase(ConfigFileTestCase): # Test that allowing open registration without verification raises an error with self.assertRaises(ConfigError): + # Do a normal homeserver creation and setup homeserver_config = synapse.app.homeserver.load_or_generate_config( ["-c", self.config_file] ) - synapse.app.homeserver.setup(homeserver_config) + # XXX: The error will be raised at this point + hs = synapse.app.homeserver.create_homeserver(homeserver_config) + # Continue with the setup. 
We don't expect this to run because we raised + # earlier, but in the future, the code could be refactored to raise the + # error in a different place. + synapse.app.homeserver.setup(hs) From 47fb4b43ca464f4a0b2e14530a3be83aa6119d2d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 9 Oct 2025 14:56:22 -0500 Subject: [PATCH 042/149] Introduce `RootConfig.validate_config()` which can be subclassed in `HomeServerConfig` to do cross-config class validation (#19027) This means we can move the open registration config validation from `setup()` to `HomeServerConfig.validate_config()` (much more sane). Spawning from looking at this area of code in https://github.com/element-hq/synapse/pull/19015 --- changelog.d/19027.misc | 1 + synapse/app/homeserver.py | 18 +---- synapse/config/_base.py | 40 +++++++--- synapse/config/_base.pyi | 6 +- synapse/config/homeserver.py | 26 ++++++- tests/config/test_load.py | 9 ++- tests/config/test_registration_config.py | 97 +++++++++++++++++++++--- 7 files changed, 155 insertions(+), 42 deletions(-) create mode 100644 changelog.d/19027.misc diff --git a/changelog.d/19027.misc b/changelog.d/19027.misc new file mode 100644 index 0000000000..727f3ee5ff --- /dev/null +++ b/changelog.d/19027.misc @@ -0,0 +1 @@ +Introduce `RootConfig.validate_config()` which can be subclassed in `HomeServerConfig` to do cross-config class validation. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 6ea412b9f4..8d9b76e083 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -367,7 +367,7 @@ def create_homeserver( if config.worker.worker_app: raise ConfigError( - "You have specified `worker_app` in the config but are attempting to start a non-worker " + "You have specified `worker_app` in the config but are attempting to setup a non-worker " "instance. Please use `python -m synapse.app.generic_worker` instead (or remove the option if this is the main process)." ) @@ -377,22 +377,6 @@ def create_homeserver( if config.server.gc_seconds: synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds - if ( - config.registration.enable_registration - and not config.registration.enable_registration_without_verification - ): - if ( - not config.captcha.enable_registration_captcha - and not config.registration.registrations_require_3pid - and not config.registration.registration_requires_token - ): - raise ConfigError( - "You have enabled open registration without any verification. This is a known vector for " - "spam and abuse. If you would like to allow public registration, please consider adding email, " - "captcha, or token-based verification. Otherwise this check can be removed by setting the " - "`enable_registration_without_verification` config option to `true`." - ) - hs = SynapseHomeServer( hostname=config.server.server_name, config=config, diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 6de4c12c96..5d0560e0f2 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -545,18 +545,22 @@ class RootConfig: @classmethod def load_config( - cls: Type[TRootConfig], description: str, argv: List[str] + cls: Type[TRootConfig], description: str, argv_options: List[str] ) -> TRootConfig: """Parse the commandline and config files Doesn't support config-file-generation: used by the worker apps. + Args: + description: TODO + argv_options: The options passed to Synapse. Usually `sys.argv[1:]`. + Returns: Config object. 
""" config_parser = argparse.ArgumentParser(description=description) cls.add_arguments_to_parser(config_parser) - obj, _ = cls.load_config_with_parser(config_parser, argv) + obj, _ = cls.load_config_with_parser(config_parser, argv_options) return obj @@ -609,6 +613,10 @@ class RootConfig: Used for workers where we want to add extra flags/subcommands. + Note: This is the common denominator for loading config and is also used by + `load_config` and `load_or_generate_config`. Which is why we call + `validate_config()` here. + Args: parser argv_options: The options passed to Synapse. Usually `sys.argv[1:]`. @@ -642,6 +650,10 @@ class RootConfig: obj.invoke_all("read_arguments", config_args) + # Now that we finally have the full config sections parsed, allow subclasses to + # do some extra validation across the entire config. + obj.validate_config() + return obj, config_args @classmethod @@ -842,15 +854,7 @@ class RootConfig: ): return None - obj.parse_config_dict( - config_dict, - config_dir_path=config_dir_path, - data_dir_path=data_dir_path, - allow_secrets_in_config=config_args.secrets_in_config, - ) - obj.invoke_all("read_arguments", config_args) - - return obj + return cls.load_config(description, argv_options) def parse_config_dict( self, @@ -911,6 +915,20 @@ class RootConfig: existing_config.root = None return existing_config + def validate_config(self) -> None: + """ + Additional config validation across all config sections. + + Override this in subclasses to add extra validation. This is called once all + config option values have been populated. + + XXX: This should only validate, not modify the configuration, as the final + config state is required for proper validation across all config sections. + + Raises: + ConfigError: if the config is invalid. + """ + def read_config_files(config_files: Iterable[str]) -> Dict[str, Any]: """Read the config files and shallowly merge them into a dict. diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index ed16d5b313..02543da388 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -158,11 +158,11 @@ class RootConfig: ) -> str: ... @classmethod def load_or_generate_config( - cls: Type[TRootConfig], description: str, argv: List[str] + cls: Type[TRootConfig], description: str, argv_options: List[str] ) -> Optional[TRootConfig]: ... @classmethod def load_config( - cls: Type[TRootConfig], description: str, argv: List[str] + cls: Type[TRootConfig], description: str, argv_options: List[str] ) -> TRootConfig: ... @classmethod def add_arguments_to_parser( @@ -170,7 +170,7 @@ class RootConfig: ) -> None: ... @classmethod def load_config_with_parser( - cls: Type[TRootConfig], parser: argparse.ArgumentParser, argv: List[str] + cls: Type[TRootConfig], parser: argparse.ArgumentParser, argv_options: List[str] ) -> Tuple[TRootConfig, argparse.Namespace]: ... 
def generate_missing_files( self, config_dict: dict, config_dir_path: str diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index f46f41da31..94ebe583a4 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -18,7 +18,8 @@ # [This file includes modifications made by New Vector Limited] # # -from ._base import RootConfig + +from ._base import ConfigError, RootConfig from .account_validity import AccountValidityConfig from .api import ApiConfig from .appservice import AppServiceConfig @@ -67,6 +68,10 @@ from .workers import WorkerConfig class HomeServerConfig(RootConfig): + """ + Top-level config object for Synapse homeserver (main process and workers). + """ + config_classes = [ ModulesConfig, ServerConfig, @@ -115,3 +120,22 @@ class HomeServerConfig(RootConfig): # This must be last, as it checks for conflicts with other config options. MasConfig, ] + + def validate_config( + self, + ) -> None: + if ( + self.registration.enable_registration + and not self.registration.enable_registration_without_verification + ): + if ( + not self.captcha.enable_registration_captcha + and not self.registration.registrations_require_3pid + and not self.registration.registration_requires_token + ): + raise ConfigError( + "You have enabled open registration without any verification. This is a known vector for " + "spam and abuse. If you would like to allow public registration, please consider adding email, " + "captcha, or token-based verification. Otherwise this check can be removed by setting the " + "`enable_registration_without_verification` config option to `true`." + ) diff --git a/tests/config/test_load.py b/tests/config/test_load.py index b72365b6e3..c1b787346e 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -99,7 +99,14 @@ class ConfigLoadingFileTestCase(ConfigFileTestCase): def test_disable_registration(self) -> None: self.generate_config() self.add_lines_to_config( - ["enable_registration: true", "disable_registration: true"] + [ + "enable_registration: true", + "disable_registration: true", + # We're not worried about open registration in this test. This test is + # focused on making sure that enable/disable_registration properly + # override each other. + "enable_registration_without_verification: true", + ] ) # Check that disable_registration clobbers enable_registration. config = HomeServerConfig.load_config("", ["-c", self.config_file]) diff --git a/tests/config/test_registration_config.py b/tests/config/test_registration_config.py index 9da0a3f426..9eb5323a24 100644 --- a/tests/config/test_registration_config.py +++ b/tests/config/test_registration_config.py @@ -19,6 +19,8 @@ # # +import argparse + import synapse.app.homeserver from synapse.config import ConfigError from synapse.config.homeserver import HomeServerConfig @@ -99,6 +101,39 @@ class RegistrationConfigTestCase(ConfigFileTestCase): ) def test_refuse_to_start_if_open_registration_and_no_verification(self) -> None: + """ + Test that our utilities to start the main Synapse homeserver process refuses + to start if we detect open registration. 
+ """ + self.generate_config() + self.add_lines_to_config( + [ + " ", + "enable_registration: true", + "registrations_require_3pid: []", + "enable_registration_captcha: false", + "registration_requires_token: false", + ] + ) + + # Test that allowing open registration without verification raises an error + with self.assertRaises(SystemExit): + # Do a normal homeserver creation and setup + homeserver_config = synapse.app.homeserver.load_or_generate_config( + ["-c", self.config_file] + ) + # XXX: The error will be raised at this point + hs = synapse.app.homeserver.create_homeserver(homeserver_config) + # Continue with the setup. We don't expect this to run because we raised + # earlier, but in the future, the code could be refactored to raise the + # error in a different place. + synapse.app.homeserver.setup(hs) + + def test_load_config_error_if_open_registration_and_no_verification(self) -> None: + """ + Test that `HomeServerConfig.load_config(...)` raises an exception when we detect open + registration. + """ self.generate_config() self.add_lines_to_config( [ @@ -112,13 +147,57 @@ class RegistrationConfigTestCase(ConfigFileTestCase): # Test that allowing open registration without verification raises an error with self.assertRaises(ConfigError): - # Do a normal homeserver creation and setup - homeserver_config = synapse.app.homeserver.load_or_generate_config( - ["-c", self.config_file] + _homeserver_config = HomeServerConfig.load_config( + description="test", argv_options=["-c", self.config_file] + ) + + def test_load_or_generate_config_error_if_open_registration_and_no_verification( + self, + ) -> None: + """ + Test that `HomeServerConfig.load_or_generate_config(...)` raises an exception when we detect open + registration. + """ + self.generate_config() + self.add_lines_to_config( + [ + " ", + "enable_registration: true", + "registrations_require_3pid: []", + "enable_registration_captcha: false", + "registration_requires_token: false", + ] + ) + + # Test that allowing open registration without verification raises an error + with self.assertRaises(ConfigError): + _homeserver_config = HomeServerConfig.load_or_generate_config( + description="test", argv_options=["-c", self.config_file] + ) + + def test_load_config_with_parser_error_if_open_registration_and_no_verification( + self, + ) -> None: + """ + Test that `HomeServerConfig.load_config_with_parser(...)` raises an exception when we detect open + registration. + """ + self.generate_config() + self.add_lines_to_config( + [ + " ", + "enable_registration: true", + "registrations_require_3pid: []", + "enable_registration_captcha: false", + "registration_requires_token: false", + ] + ) + + # Test that allowing open registration without verification raises an error + with self.assertRaises(ConfigError): + config_parser = argparse.ArgumentParser(description="test") + HomeServerConfig.add_arguments_to_parser(config_parser) + + _homeserver_config = HomeServerConfig.load_config_with_parser( + parser=config_parser, argv_options=["-c", self.config_file] ) - # XXX: The error will be raised at this point - hs = synapse.app.homeserver.create_homeserver(homeserver_config) - # Continue with the setup. We don't expect this to run because we raised - # earlier, but in the future, the code could be refactored to raise the - # error in a different place. 
- synapse.app.homeserver.setup(hs) From 627be7e0a7db61342810e5a12f9906e7c8802879 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 10 Oct 2025 11:20:04 +0100 Subject: [PATCH 043/149] Add 'Fetch Event' Admin API page to the docs SUMMARY.md Otherwise it won't appear on the documentation website's sidebar. --- docs/SUMMARY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 52f827c8df..64869eca8e 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -60,6 +60,7 @@ - [Admin API](usage/administration/admin_api/README.md) - [Account Validity](admin_api/account_validity.md) - [Background Updates](usage/administration/admin_api/background_updates.md) + - [Fetch Event](admin_api/fetch_event.md) - [Event Reports](admin_api/event_reports.md) - [Experimental Features](admin_api/experimental_features.md) - [Media](admin_api/media_admin_api.md) From 8390138fa47601647fb2aa5de24777ed5523b1f5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 10 Oct 2025 11:20:04 +0100 Subject: [PATCH 044/149] Add 'Fetch Event' Admin API page to the docs SUMMARY.md Otherwise it won't appear on the documentation website's sidebar. --- docs/SUMMARY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 52f827c8df..64869eca8e 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -60,6 +60,7 @@ - [Admin API](usage/administration/admin_api/README.md) - [Account Validity](admin_api/account_validity.md) - [Background Updates](usage/administration/admin_api/background_updates.md) + - [Fetch Event](admin_api/fetch_event.md) - [Event Reports](admin_api/event_reports.md) - [Experimental Features](admin_api/experimental_features.md) - [Media](admin_api/media_admin_api.md) From c0d6998dea7f0ae4ffebcb2f9a7c4f15bf4a2558 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 10 Oct 2025 11:24:27 +0100 Subject: [PATCH 045/149] 1.140.0rc1 --- CHANGES.md | 81 +++++++++++++++++++++++++++++++ changelog.d/17097.misc | 1 - changelog.d/18721.bugfix | 1 - changelog.d/18767.misc | 1 - changelog.d/18828.feature | 1 - changelog.d/18868.misc | 1 - changelog.d/18903.misc | 1 - changelog.d/18911.feature | 2 - changelog.d/18913.misc | 1 - changelog.d/18914.doc | 1 - changelog.d/18932.misc | 1 - changelog.d/18934.feature | 1 - changelog.d/18939.misc | 1 - changelog.d/18947.misc | 1 - changelog.d/18948.bugfix | 1 - changelog.d/18959.misc | 1 - changelog.d/18963.feature | 1 - changelog.d/18964.misc | 1 - changelog.d/18966.misc | 1 - changelog.d/18967.feature | 1 - changelog.d/18971.misc | 1 - changelog.d/18973.misc | 1 - changelog.d/18974.misc | 1 - changelog.d/18988.misc | 1 - changelog.d/18989.removal | 1 - changelog.d/18990.misc | 1 - changelog.d/18992.misc | 1 - changelog.d/18996.removal | 1 - changelog.d/18998.doc | 1 - changelog.d/19002.bugfix | 1 - changelog.d/19007.misc | 1 - changelog.d/19011.bugfix | 1 - changelog.d/19012.misc | 1 - changelog.d/19013.misc | 1 - changelog.d/19015.misc | 1 - changelog.d/19023.bugfix | 1 - changelog.d/19025.misc | 1 - changelog.d/19027.misc | 1 - changelog.d/19032.feature | 1 - changelog.d/19035.misc | 1 - debian/changelog | 6 +++ pyproject.toml | 2 +- schema/synapse-config.schema.yaml | 2 +- 43 files changed, 89 insertions(+), 42 deletions(-) delete mode 100644 changelog.d/17097.misc delete mode 100644 changelog.d/18721.bugfix delete mode 100644 changelog.d/18767.misc delete mode 100644 changelog.d/18828.feature delete mode 100644 changelog.d/18868.misc delete mode 100644 changelog.d/18903.misc delete mode 100644 
changelog.d/18911.feature delete mode 100644 changelog.d/18913.misc delete mode 100644 changelog.d/18914.doc delete mode 100644 changelog.d/18932.misc delete mode 100644 changelog.d/18934.feature delete mode 100644 changelog.d/18939.misc delete mode 100644 changelog.d/18947.misc delete mode 100644 changelog.d/18948.bugfix delete mode 100644 changelog.d/18959.misc delete mode 100644 changelog.d/18963.feature delete mode 100644 changelog.d/18964.misc delete mode 100644 changelog.d/18966.misc delete mode 100644 changelog.d/18967.feature delete mode 100644 changelog.d/18971.misc delete mode 100644 changelog.d/18973.misc delete mode 100644 changelog.d/18974.misc delete mode 100644 changelog.d/18988.misc delete mode 100644 changelog.d/18989.removal delete mode 100644 changelog.d/18990.misc delete mode 100644 changelog.d/18992.misc delete mode 100644 changelog.d/18996.removal delete mode 100644 changelog.d/18998.doc delete mode 100644 changelog.d/19002.bugfix delete mode 100644 changelog.d/19007.misc delete mode 100644 changelog.d/19011.bugfix delete mode 100644 changelog.d/19012.misc delete mode 100644 changelog.d/19013.misc delete mode 100644 changelog.d/19015.misc delete mode 100644 changelog.d/19023.bugfix delete mode 100644 changelog.d/19025.misc delete mode 100644 changelog.d/19027.misc delete mode 100644 changelog.d/19032.feature delete mode 100644 changelog.d/19035.misc diff --git a/CHANGES.md b/CHANGES.md index e62856255d..8229657866 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,84 @@ +# Synapse 1.140.0rc1 (2025-10-10) + +## Features + +- Cleanly shutdown `SynapseHomeServer` object, allowing artifacts of embedded small hosts to be properly garbage collected. ([\#18828](https://github.com/element-hq/synapse/issues/18828)) +- Add [a new Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/media_admin_api.html#query-a-piece-of-media-by-id) that allows server admins to query and investigate the metadata of local or cached remote media via +- Add [a new Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/fetch_events.html) to fetch an event by ID. ([\#18963](https://github.com/element-hq/synapse/issues/18963)) + the `origin/media_id` identifier found in a [Matrix Content URI](https://spec.matrix.org/v1.14/client-server-api/#matrix-content-mxc-uris). ([\#18911](https://github.com/element-hq/synapse/issues/18911)) +- Update [MSC4284: Policy Servers](https://github.com/matrix-org/matrix-spec-proposals/pull/4284) implementation to support signatures when available. ([\#18934](https://github.com/element-hq/synapse/issues/18934)) +- Add experimental implementation of the `GET /_matrix/client/v1/rtc/transports` endpoint for the latest draft of [MSC4143: MatrixRTC](https://github.com/matrix-org/matrix-spec-proposals/pull/4143). ([\#18967](https://github.com/element-hq/synapse/issues/18967)) +- Expose a `defer_to_threadpool` function in the Synapse Module API that allows modules to run a function on a separate thread in a custom threadpool. ([\#19032](https://github.com/element-hq/synapse/issues/19032)) + +## Bugfixes + +- Fix room upgrade `room_config` argument and documentation for `user_may_create_room` spam-checker callback. ([\#18721](https://github.com/element-hq/synapse/issues/18721)) +- Compute a user's last seen timestamp from their devices' last seen timestamps instead of IPs, because the latter are automatically cleared according to `user_ips_max_age`. 
([\#18948](https://github.com/element-hq/synapse/issues/18948)) +- Fix bug where ephemeral events were not filtered by room ID. Contributed by @frastefanini. ([\#19002](https://github.com/element-hq/synapse/issues/19002)) +- Update Synapse main process version string to include git info. ([\#19011](https://github.com/element-hq/synapse/issues/19011)) + +## Improved Documentation + +- Explain how Deferred callbacks interact with logcontexts. ([\#18914](https://github.com/element-hq/synapse/issues/18914)) +- Fix documentation for `rc_room_creation` and `rc_reports` to clarify that a `per_user` rate limit is not supported. ([\#18998](https://github.com/element-hq/synapse/issues/18998)) + +## Deprecations and Removals + +- Remove deprecated `LoggingContext.set_current_context`/`LoggingContext.current_context` methods which already have equivalent bare methods in `synapse.logging.context`. ([\#18989](https://github.com/element-hq/synapse/issues/18989)) +- Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. ([\#18996](https://github.com/element-hq/synapse/issues/18996)) + +## Internal Changes + +- Update OEmbed providers to use 'X' instead of 'Twitter' in URL previews, following a rebrand. Contributed by @HammyHavoc. ([\#18767](https://github.com/element-hq/synapse/issues/18767)) +- Fix `server_name` in logging context for multiple Synapse instances in one process. ([\#18868](https://github.com/element-hq/synapse/issues/18868)) +- Wrap the Rust HTTP client with `make_deferred_yieldable` so it follows Synapse logcontext rules. ([\#18903](https://github.com/element-hq/synapse/issues/18903)) +- Fix the GitHub Actions workflow that moves issues labeled "X-Needs-Info" to the "Needs info" column on the team's internal triage board. ([\#18913](https://github.com/element-hq/synapse/issues/18913)) +- Disconnect background process work from request trace. ([\#18932](https://github.com/element-hq/synapse/issues/18932)) +- Reduce overall number of calls to `_get_e2e_cross_signing_signatures_for_devices` by increasing the batch size of devices the query is called with, reducing DB load. ([\#18939](https://github.com/element-hq/synapse/issues/18939)) +- Update error code used when an appservice tries to masquerade as an unknown device using [MSC4326](https://github.com/matrix-org/matrix-spec-proposals/pull/4326). Contributed by @tulir @ Beeper. ([\#18947](https://github.com/element-hq/synapse/issues/18947)) +- Fix `no active span when trying to log` tracing error on startup (when OpenTracing is enabled). ([\#18959](https://github.com/element-hq/synapse/issues/18959)) +- Fix `run_coroutine_in_background(...)` incorrectly handling logcontext. ([\#18964](https://github.com/element-hq/synapse/issues/18964)) +- Add debug logs wherever we change current logcontext. ([\#18966](https://github.com/element-hq/synapse/issues/18966)) +- Update dockerfile metadata to fix broken link; point to documentation website. ([\#18971](https://github.com/element-hq/synapse/issues/18971)) +- Note that the code is additionally licensed under the [Element Commercial license](https://github.com/element-hq/synapse/blob/develop/LICENSE-COMMERCIAL) in SPDX expression field configs. ([\#18973](https://github.com/element-hq/synapse/issues/18973)) +- Fix logcontext handling in `timeout_deferred` tests. 
([\#18974](https://github.com/element-hq/synapse/issues/18974)) +- Remove internal `ReplicationUploadKeysForUserRestServlet` as a follow-up to the work in https://github.com/element-hq/synapse/pull/18581 that moved device changes off the main process. ([\#18988](https://github.com/element-hq/synapse/issues/18988)) +- Switch task scheduler from raw logcontext manipulation to using the dedicated logcontext utils. ([\#18990](https://github.com/element-hq/synapse/issues/18990)) +- Remove `MockClock()` in tests. ([\#18992](https://github.com/element-hq/synapse/issues/18992)) +- Switch back to our own custom `LogContextScopeManager` instead of OpenTracing's `ContextVarsScopeManager` which was causing problems when using the experimental `SYNAPSE_ASYNC_IO_REACTOR` option with tracing enabled. ([\#19007](https://github.com/element-hq/synapse/issues/19007)) +- Remove `version_string` argument from `HomeServer` since it's always the same. ([\#19012](https://github.com/element-hq/synapse/issues/19012)) +- Remove duplicate call to `hs.start_background_tasks()` introduced from a bad merge. ([\#19013](https://github.com/element-hq/synapse/issues/19013)) +- Split homeserver creation (`create_homeserver`) and setup (`setup`). ([\#19015](https://github.com/element-hq/synapse/issues/19015)) +- Swap near-end-of-life `macos-13` GitHub Actions runner for the `macos-15-intel` variant. ([\#19025](https://github.com/element-hq/synapse/issues/19025)) +- Introduce `RootConfig.validate_config()` which can be subclassed in `HomeServerConfig` to do cross-config class validation. ([\#19027](https://github.com/element-hq/synapse/issues/19027)) +- Allow any command of the `release.py` script to accept a `--gh-token` argument. ([\#19035](https://github.com/element-hq/synapse/issues/19035)) + + + +### Updates to locked dependencies + +* Bump Swatinem/rust-cache from 2.8.0 to 2.8.1. ([\#18949](https://github.com/element-hq/synapse/issues/18949)) +* Bump actions/cache from 4.2.4 to 4.3.0. ([\#18983](https://github.com/element-hq/synapse/issues/18983)) +* Bump anyhow from 1.0.99 to 1.0.100. ([\#18950](https://github.com/element-hq/synapse/issues/18950)) +* Bump authlib from 1.6.3 to 1.6.4. ([\#18957](https://github.com/element-hq/synapse/issues/18957)) +* Bump authlib from 1.6.4 to 1.6.5. ([\#19019](https://github.com/element-hq/synapse/issues/19019)) +* Bump bcrypt from 4.3.0 to 5.0.0. ([\#18984](https://github.com/element-hq/synapse/issues/18984)) +* Bump docker/login-action from 3.5.0 to 3.6.0. ([\#18978](https://github.com/element-hq/synapse/issues/18978)) +* Bump lxml from 6.0.0 to 6.0.2. ([\#18979](https://github.com/element-hq/synapse/issues/18979)) +* Bump phonenumbers from 9.0.13 to 9.0.14. ([\#18954](https://github.com/element-hq/synapse/issues/18954)) +* Bump phonenumbers from 9.0.14 to 9.0.15. ([\#18991](https://github.com/element-hq/synapse/issues/18991)) +* Bump prometheus-client from 0.22.1 to 0.23.1. ([\#19016](https://github.com/element-hq/synapse/issues/19016)) +* Bump pydantic from 2.11.9 to 2.11.10. ([\#19017](https://github.com/element-hq/synapse/issues/19017)) +* Bump pygithub from 2.7.0 to 2.8.1. ([\#18952](https://github.com/element-hq/synapse/issues/18952)) +* Bump regex from 1.11.2 to 1.11.3. ([\#18981](https://github.com/element-hq/synapse/issues/18981)) +* Bump serde from 1.0.224 to 1.0.226. ([\#18953](https://github.com/element-hq/synapse/issues/18953)) +* Bump serde from 1.0.226 to 1.0.228. 
([\#18982](https://github.com/element-hq/synapse/issues/18982)) +* Bump setuptools-rust from 1.11.1 to 1.12.0. ([\#18980](https://github.com/element-hq/synapse/issues/18980)) +* Bump twine from 6.1.0 to 6.2.0. ([\#18985](https://github.com/element-hq/synapse/issues/18985)) +* Bump types-pyyaml from 6.0.12.20250809 to 6.0.12.20250915. ([\#19018](https://github.com/element-hq/synapse/issues/19018)) +* Bump types-requests from 2.32.4.20250809 to 2.32.4.20250913. ([\#18951](https://github.com/element-hq/synapse/issues/18951)) +* Bump typing-extensions from 4.14.1 to 4.15.0. ([\#18956](https://github.com/element-hq/synapse/issues/18956)) + # Synapse 1.139.2 (2025-10-07) ## Bugfixes diff --git a/changelog.d/17097.misc b/changelog.d/17097.misc deleted file mode 100644 index 42792e5f38..0000000000 --- a/changelog.d/17097.misc +++ /dev/null @@ -1 +0,0 @@ -Extend validation of uploaded device keys. \ No newline at end of file diff --git a/changelog.d/18721.bugfix b/changelog.d/18721.bugfix deleted file mode 100644 index 0aa0b3962d..0000000000 --- a/changelog.d/18721.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix room upgrade `room_config` argument and documentation for `user_may_create_room` spam-checker callback. diff --git a/changelog.d/18767.misc b/changelog.d/18767.misc deleted file mode 100644 index 5fa32a9f97..0000000000 --- a/changelog.d/18767.misc +++ /dev/null @@ -1 +0,0 @@ -Update OEmbed providers to use 'X' instead of 'Twitter' in URL previews, following a rebrand. Contributed by @HammyHavoc. diff --git a/changelog.d/18828.feature b/changelog.d/18828.feature deleted file mode 100644 index e7f3541de4..0000000000 --- a/changelog.d/18828.feature +++ /dev/null @@ -1 +0,0 @@ -Cleanly shutdown `SynapseHomeServer` object. diff --git a/changelog.d/18868.misc b/changelog.d/18868.misc deleted file mode 100644 index a9251f9da0..0000000000 --- a/changelog.d/18868.misc +++ /dev/null @@ -1 +0,0 @@ -Fix `server_name` in logging context for multiple Synapse instances in one process. diff --git a/changelog.d/18903.misc b/changelog.d/18903.misc deleted file mode 100644 index bafa7dad5c..0000000000 --- a/changelog.d/18903.misc +++ /dev/null @@ -1 +0,0 @@ -Wrap the Rust HTTP client with `make_deferred_yieldable` so it follows Synapse logcontext rules. diff --git a/changelog.d/18911.feature b/changelog.d/18911.feature deleted file mode 100644 index ac576e2913..0000000000 --- a/changelog.d/18911.feature +++ /dev/null @@ -1,2 +0,0 @@ -Add an Admin API that allows server admins to to query and investigate the metadata of local or cached remote media via -the `origin/media_id` identifier found in a [Matrix Content URI](https://spec.matrix.org/v1.14/client-server-api/#matrix-content-mxc-uris). \ No newline at end of file diff --git a/changelog.d/18913.misc b/changelog.d/18913.misc deleted file mode 100644 index e9093cb567..0000000000 --- a/changelog.d/18913.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the GitHub Actions workflow that moves issues labeled "X-Needs-Info" to the "Needs info" column on the team's internal triage board. \ No newline at end of file diff --git a/changelog.d/18914.doc b/changelog.d/18914.doc deleted file mode 100644 index 9d4f03ade7..0000000000 --- a/changelog.d/18914.doc +++ /dev/null @@ -1 +0,0 @@ -Explain how Deferred callbacks interact with logcontexts. diff --git a/changelog.d/18932.misc b/changelog.d/18932.misc deleted file mode 100644 index 675412ddb9..0000000000 --- a/changelog.d/18932.misc +++ /dev/null @@ -1 +0,0 @@ -Disconnect background process work from request trace. 
diff --git a/changelog.d/18934.feature b/changelog.d/18934.feature deleted file mode 100644 index e24b7a7e34..0000000000 --- a/changelog.d/18934.feature +++ /dev/null @@ -1 +0,0 @@ -Update [MSC4284: Policy Servers](https://github.com/matrix-org/matrix-spec-proposals/pull/4284) implementation to support signatures when available. \ No newline at end of file diff --git a/changelog.d/18939.misc b/changelog.d/18939.misc deleted file mode 100644 index 6cfae2ceaf..0000000000 --- a/changelog.d/18939.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce overall number of calls to `_get_e2e_cross_signing_signatures_for_devices` by increasing the batch size of devices the query is called with, reducing DB load. diff --git a/changelog.d/18947.misc b/changelog.d/18947.misc deleted file mode 100644 index 51c100b6dc..0000000000 --- a/changelog.d/18947.misc +++ /dev/null @@ -1 +0,0 @@ -Update error code used when an appservice tries to masquerade as an unknown device using [MSC4326](https://github.com/matrix-org/matrix-spec-proposals/pull/4326). Contributed by @tulir @ Beeper. diff --git a/changelog.d/18948.bugfix b/changelog.d/18948.bugfix deleted file mode 100644 index 7a8af0a286..0000000000 --- a/changelog.d/18948.bugfix +++ /dev/null @@ -1 +0,0 @@ -Compute a user's last seen timestamp from their devices' last seen timestamps instead of IPs, because the latter are automatically cleared according to `user_ips_max_age`. diff --git a/changelog.d/18959.misc b/changelog.d/18959.misc deleted file mode 100644 index e97475eec2..0000000000 --- a/changelog.d/18959.misc +++ /dev/null @@ -1 +0,0 @@ -Fix `no active span when trying to log` tracing error on startup (when OpenTracing is enabled). diff --git a/changelog.d/18963.feature b/changelog.d/18963.feature deleted file mode 100644 index 2cb0d57995..0000000000 --- a/changelog.d/18963.feature +++ /dev/null @@ -1 +0,0 @@ -Add an Admin API to fetch an event by ID. diff --git a/changelog.d/18964.misc b/changelog.d/18964.misc deleted file mode 100644 index 69be53ad27..0000000000 --- a/changelog.d/18964.misc +++ /dev/null @@ -1 +0,0 @@ -Fix `run_coroutine_in_background(...)` incorrectly handling logcontext. diff --git a/changelog.d/18966.misc b/changelog.d/18966.misc deleted file mode 100644 index 42c8782a42..0000000000 --- a/changelog.d/18966.misc +++ /dev/null @@ -1 +0,0 @@ -Add debug logs wherever we change current logcontext. diff --git a/changelog.d/18967.feature b/changelog.d/18967.feature deleted file mode 100644 index 58337d9e5d..0000000000 --- a/changelog.d/18967.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental implementation for the latest draft of [MSC4143](https://github.com/matrix-org/matrix-spec-proposals/pull/4143). \ No newline at end of file diff --git a/changelog.d/18971.misc b/changelog.d/18971.misc deleted file mode 100644 index 2d417d1319..0000000000 --- a/changelog.d/18971.misc +++ /dev/null @@ -1 +0,0 @@ -Update dockerfile metadata to fix broken link; point to documentation website. \ No newline at end of file diff --git a/changelog.d/18973.misc b/changelog.d/18973.misc deleted file mode 100644 index e88fd74795..0000000000 --- a/changelog.d/18973.misc +++ /dev/null @@ -1 +0,0 @@ -Note that the code is additionally licensed under the [Element Commercial license](https://github.com/element-hq/synapse/blob/develop/LICENSE-COMMERCIAL) in SPDX expression field configs. 
\ No newline at end of file diff --git a/changelog.d/18974.misc b/changelog.d/18974.misc deleted file mode 100644 index ca300a17a5..0000000000 --- a/changelog.d/18974.misc +++ /dev/null @@ -1 +0,0 @@ -Fix logcontext handling in `timeout_deferred` tests. diff --git a/changelog.d/18988.misc b/changelog.d/18988.misc deleted file mode 100644 index 14dbe92812..0000000000 --- a/changelog.d/18988.misc +++ /dev/null @@ -1 +0,0 @@ -Remove internal `ReplicationUploadKeysForUserRestServlet` as a follow-up to the work in https://github.com/element-hq/synapse/pull/18581 that moved device changes off the main process. \ No newline at end of file diff --git a/changelog.d/18989.removal b/changelog.d/18989.removal deleted file mode 100644 index 356b9ffe3a..0000000000 --- a/changelog.d/18989.removal +++ /dev/null @@ -1 +0,0 @@ -Remove deprecated `LoggingContext.set_current_context`/`LoggingContext.current_context` methods which already have equivalent bare methods in `synapse.logging.context`. diff --git a/changelog.d/18990.misc b/changelog.d/18990.misc deleted file mode 100644 index f7f8ac5ffd..0000000000 --- a/changelog.d/18990.misc +++ /dev/null @@ -1 +0,0 @@ -Switch task scheduler from raw logcontext manipulation to using the dedicated logcontext utils. diff --git a/changelog.d/18992.misc b/changelog.d/18992.misc deleted file mode 100644 index ba4470bff1..0000000000 --- a/changelog.d/18992.misc +++ /dev/null @@ -1 +0,0 @@ -Remove `MockClock()` in tests. diff --git a/changelog.d/18996.removal b/changelog.d/18996.removal deleted file mode 100644 index fa06fcc929..0000000000 --- a/changelog.d/18996.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. \ No newline at end of file diff --git a/changelog.d/18998.doc b/changelog.d/18998.doc deleted file mode 100644 index 9ddc2d41c0..0000000000 --- a/changelog.d/18998.doc +++ /dev/null @@ -1 +0,0 @@ -Fix documentation for `rc_room_creation` and `rc_reports` to clarify that a `per_user` rate limit is not supported. diff --git a/changelog.d/19002.bugfix b/changelog.d/19002.bugfix deleted file mode 100644 index d54c7f0e87..0000000000 --- a/changelog.d/19002.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where ephemeral events were not filtered by room ID. Contributed by @frastefanini. diff --git a/changelog.d/19007.misc b/changelog.d/19007.misc deleted file mode 100644 index 720623e98e..0000000000 --- a/changelog.d/19007.misc +++ /dev/null @@ -1 +0,0 @@ -Switch back to our own custom `LogContextScopeManager` instead of OpenTracing's `ContextVarsScopeManager` which was causing problems when using the experimental `SYNAPSE_ASYNC_IO_REACTOR` option with tracing enabled. diff --git a/changelog.d/19011.bugfix b/changelog.d/19011.bugfix deleted file mode 100644 index 460c71856e..0000000000 --- a/changelog.d/19011.bugfix +++ /dev/null @@ -1 +0,0 @@ -Update Synapse main process version string to include git info. diff --git a/changelog.d/19012.misc b/changelog.d/19012.misc deleted file mode 100644 index 2677ca1432..0000000000 --- a/changelog.d/19012.misc +++ /dev/null @@ -1 +0,0 @@ -Remove `version_string` argument from `HomeServer` since it's always the same. diff --git a/changelog.d/19013.misc b/changelog.d/19013.misc deleted file mode 100644 index 626a6e3db4..0000000000 --- a/changelog.d/19013.misc +++ /dev/null @@ -1 +0,0 @@ -Remove duplicate call to `hs.start_background_tasks()` introduced from a bad merge. 
diff --git a/changelog.d/19015.misc b/changelog.d/19015.misc deleted file mode 100644 index cabc453469..0000000000 --- a/changelog.d/19015.misc +++ /dev/null @@ -1 +0,0 @@ -Split homeserver creation (`create_homeserver`) and setup (`setup`). diff --git a/changelog.d/19023.bugfix b/changelog.d/19023.bugfix deleted file mode 100644 index 816336080e..0000000000 --- a/changelog.d/19023.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.139.1 where a client could receive an Internal Server Error if they set `device_keys: null` in the request to [`POST /_matrix/client/v3/keys/upload`](https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload). \ No newline at end of file diff --git a/changelog.d/19025.misc b/changelog.d/19025.misc deleted file mode 100644 index 4c0c5d4bce..0000000000 --- a/changelog.d/19025.misc +++ /dev/null @@ -1 +0,0 @@ -Swap near-end-of-life `macos-13` GitHub Actions runner for the `macos-15-intel` variant. \ No newline at end of file diff --git a/changelog.d/19027.misc b/changelog.d/19027.misc deleted file mode 100644 index 727f3ee5ff..0000000000 --- a/changelog.d/19027.misc +++ /dev/null @@ -1 +0,0 @@ -Introduce `RootConfig.validate_config()` which can be subclassed in `HomeServerConfig` to do cross-config class validation. diff --git a/changelog.d/19032.feature b/changelog.d/19032.feature deleted file mode 100644 index 2e3bdbe391..0000000000 --- a/changelog.d/19032.feature +++ /dev/null @@ -1 +0,0 @@ -Expose a `defer_to_threadpool` function in the Synapse Module API that allows modules to run a function on a separate thread in a custom threadpool. \ No newline at end of file diff --git a/changelog.d/19035.misc b/changelog.d/19035.misc deleted file mode 100644 index a667919045..0000000000 --- a/changelog.d/19035.misc +++ /dev/null @@ -1 +0,0 @@ -Allow any command of the `release.py` to accept a `--gh-token` argument. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index e08883042f..d3d7db39c3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.140.0~rc1) stable; urgency=medium + + * New Synapse release 1.140.0rc1. + + -- Synapse Packaging team Fri, 10 Oct 2025 10:56:51 +0100 + matrix-synapse-py3 (1.139.2) stable; urgency=medium * New Synapse release 1.139.2. 
diff --git a/pyproject.toml b/pyproject.toml index 2a5fb41ade..009d1553e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.139.2" +version = "1.140.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial" diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml index 285df53afe..c4a98065d0 100644 --- a/schema/synapse-config.schema.yaml +++ b/schema/synapse-config.schema.yaml @@ -1,5 +1,5 @@ $schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json -$id: https://element-hq.github.io/synapse/schema/synapse/v1.139/synapse-config.schema.json +$id: https://element-hq.github.io/synapse/schema/synapse/v1.140/synapse-config.schema.json type: object properties: modules: From 07cfb697782f554dbb80022e27f63122a0c3ccbf Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 10 Oct 2025 11:28:56 +0100 Subject: [PATCH 046/149] Changelog updates --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8229657866..37850b7b14 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,9 +3,9 @@ ## Features - Cleanly shutdown `SynapseHomeServer` object, allowing artifacts of embedded small hosts to be properly garbage collected. ([\#18828](https://github.com/element-hq/synapse/issues/18828)) -- Add [a new Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/media_admin_api.html#query-a-piece-of-media-by-id) that allows server admins to query and investigate the metadata of local or cached remote media via -- Add [a new Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/fetch_events.html) to fetch an event by ID. ([\#18963](https://github.com/element-hq/synapse/issues/18963)) +- Add [a new Media Query by ID Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/media_admin_api.html#query-a-piece-of-media-by-id) that allows server admins to query and investigate the metadata of local or cached remote media via the `origin/media_id` identifier found in a [Matrix Content URI](https://spec.matrix.org/v1.14/client-server-api/#matrix-content-mxc-uris). ([\#18911](https://github.com/element-hq/synapse/issues/18911)) +- Add [a new Event Fetch Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/fetch_events.html) to fetch an event by ID. ([\#18963](https://github.com/element-hq/synapse/issues/18963)) - Update [MSC4284: Policy Servers](https://github.com/matrix-org/matrix-spec-proposals/pull/4284) implementation to support signatures when available. ([\#18934](https://github.com/element-hq/synapse/issues/18934)) - Add experimental implementation of the `GET /_matrix/client/v1/rtc/transports` endpoint for the latest draft of [MSC4143: MatrixRTC](https://github.com/matrix-org/matrix-spec-proposals/pull/4143). ([\#18967](https://github.com/element-hq/synapse/issues/18967)) - Expose a `defer_to_threadpool` function in the Synapse Module API that allows modules to run a function on a separate thread in a custom threadpool. 
([\#19032](https://github.com/element-hq/synapse/issues/19032)) From be75de2cfcdba3d6424a29fed9e9b8f4beb0f1ee Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 10 Oct 2025 11:52:07 +0100 Subject: [PATCH 047/149] changelog updates --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 37850b7b14..4e2ed39487 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,10 +2,9 @@ ## Features -- Cleanly shutdown `SynapseHomeServer` object, allowing artifacts of embedded small hosts to be properly garbage collected. ([\#18828](https://github.com/element-hq/synapse/issues/18828)) - Add [a new Media Query by ID Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/media_admin_api.html#query-a-piece-of-media-by-id) that allows server admins to query and investigate the metadata of local or cached remote media via the `origin/media_id` identifier found in a [Matrix Content URI](https://spec.matrix.org/v1.14/client-server-api/#matrix-content-mxc-uris). ([\#18911](https://github.com/element-hq/synapse/issues/18911)) -- Add [a new Event Fetch Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/fetch_events.html) to fetch an event by ID. ([\#18963](https://github.com/element-hq/synapse/issues/18963)) +- Add [a new Fetch Event Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/fetch_event.html) to fetch an event by ID. ([\#18963](https://github.com/element-hq/synapse/issues/18963)) - Update [MSC4284: Policy Servers](https://github.com/matrix-org/matrix-spec-proposals/pull/4284) implementation to support signatures when available. ([\#18934](https://github.com/element-hq/synapse/issues/18934)) - Add experimental implementation of the `GET /_matrix/client/v1/rtc/transports` endpoint for the latest draft of [MSC4143: MatrixRTC](https://github.com/matrix-org/matrix-spec-proposals/pull/4143). ([\#18967](https://github.com/element-hq/synapse/issues/18967)) - Expose a `defer_to_threadpool` function in the Synapse Module API that allows modules to run a function on a separate thread in a custom threadpool. ([\#19032](https://github.com/element-hq/synapse/issues/19032)) @@ -19,7 +18,7 @@ ## Improved Documentation -- Explain how Deferred callbacks interact with logcontexts. ([\#18914](https://github.com/element-hq/synapse/issues/18914)) +- Explain how `Deferred` callbacks interact with logcontexts. ([\#18914](https://github.com/element-hq/synapse/issues/18914)) - Fix documentation for `rc_room_creation` and `rc_reports` to clarify that a `per_user` rate limit is not supported. ([\#18998](https://github.com/element-hq/synapse/issues/18998)) ## Deprecations and Removals @@ -29,6 +28,7 @@ ## Internal Changes +- Cleanly shutdown `SynapseHomeServer` object, allowing artifacts of embedded small hosts to be properly garbage collected. ([\#18828](https://github.com/element-hq/synapse/issues/18828)) - Update OEmbed providers to use 'X' instead of 'Twitter' in URL previews, following a rebrand. Contributed by @HammyHavoc. ([\#18767](https://github.com/element-hq/synapse/issues/18767)) - Fix `server_name` in logging context for multiple Synapse instances in one process. ([\#18868](https://github.com/element-hq/synapse/issues/18868)) - Wrap the Rust HTTP client with `make_deferred_yieldable` so it follows Synapse logcontext rules. 
([\#18903](https://github.com/element-hq/synapse/issues/18903)) From ef80338c2d49e72da3a8886a72649bc90ae9d4d7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 10 Oct 2025 12:09:14 +0100 Subject: [PATCH 048/149] Add s3 warning to changelog and upgrade notes --- CHANGES.md | 9 +++++++++ docs/upgrade.md | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 4e2ed39487..2f3926e106 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,14 @@ # Synapse 1.140.0rc1 (2025-10-10) +## Compatibility notice for users of `synapse-s3-storage-provider` + +Deployments that make use of the +[synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider) +module must upgrade to +[v1.6.0](https://github.com/matrix-org/synapse-s3-storage-provider/releases/tag/v1.6.0). +Using older versions of the module with this release of Synapse will prevent +users from being able to upload or download media. + ## Features - Add [a new Media Query by ID Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/media_admin_api.html#query-a-piece-of-media-by-id) that allows server admins to query and investigate the metadata of local or cached remote media via diff --git a/docs/upgrade.md b/docs/upgrade.md index 5f998e9708..c049a50984 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -117,6 +117,16 @@ each upgrade are complete before moving on to the next upgrade, to avoid stacking them up. You can monitor the currently running background updates with [the Admin API](usage/administration/admin_api/background_updates.html#status). +# Upgrading to v1.140.0 + +## Users of `synapse-s3-storage-provider` must update the module to `v1.6.0` + +Deployments that make use of the +[synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider/) +module must update it to +[v1.6.0](https://github.com/matrix-org/synapse-s3-storage-provider/releases/tag/v1.6.0), +otherwise users will be unable to upload or download media. + # Upgrading to v1.139.0 ## `/register` requests from old application service implementations may break when using MAS From d399d7649aedaeb3cd9edbc258917c407893c172 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 10 Oct 2025 14:30:38 -0500 Subject: [PATCH 049/149] Move `start_doing_background_updates()` to `SynapseHomeServer.start_background_tasks()` (#19036) (more sane standard location for this sort of thing) The one difference here is that previously, `start_doing_background_updates ()` only ran on the main Synapse instance. But since it now lives in `start_background_tasks()`, it will run on the worker that supposed to `run_background_tasks`. Doesn't seem like a problem though. --- changelog.d/19036.misc | 1 + synapse/app/homeserver.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/19036.misc diff --git a/changelog.d/19036.misc b/changelog.d/19036.misc new file mode 100644 index 0000000000..95b8daab9b --- /dev/null +++ b/changelog.d/19036.misc @@ -0,0 +1 @@ +Move `start_doing_background_updates()` to `SynapseHomeServer.start_background_tasks()`. 
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8d9b76e083..b9ac86c2fc 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -317,6 +317,11 @@ class SynapseHomeServer(HomeServer): # during parsing logger.warning("Unrecognized listener type: %s", listener.type) + def start_background_tasks(self) -> None: + super().start_background_tasks() + + self.get_datastores().main.db_pool.updates.start_doing_background_updates() + def load_or_generate_config(argv_options: List[str]) -> HomeServerConfig: """ @@ -430,11 +435,6 @@ def setup( await _base.start(hs, freeze) - # TODO: This should be moved to `SynapseHomeServer.start_background_tasks` (not - # `HomeServer.start_background_tasks`) (this way it matches the behavior of only - # running on `main`) - hs.get_datastores().main.db_pool.updates.start_doing_background_updates() - # Register a callback to be invoked once the reactor is running register_start(hs, _start_when_reactor_running) From 690b3a4fcce3a58b2b09533c3e3173f5ecfffbc4 Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Mon, 13 Oct 2025 16:07:11 +0300 Subject: [PATCH 050/149] Allow using MSC4190 features without opt-in (#19031) --- changelog.d/19031.feature | 1 + synapse/rest/client/devices.py | 6 +++--- synapse/rest/client/keys.py | 8 ++------ tests/rest/client/test_devices.py | 10 ---------- 4 files changed, 6 insertions(+), 19 deletions(-) create mode 100644 changelog.d/19031.feature diff --git a/changelog.d/19031.feature b/changelog.d/19031.feature new file mode 100644 index 0000000000..711664499b --- /dev/null +++ b/changelog.d/19031.feature @@ -0,0 +1 @@ +Allow using [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190) behavior without the opt-in registration flag. Contributed by @tulir @ Beeper. diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 0777abde7f..37bc9ae513 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -112,7 +112,7 @@ class DeleteDevicesRestServlet(RestServlet): else: raise e - if requester.app_service and requester.app_service.msc4190_device_management: + if requester.app_service: # MSC4190 can skip UIA for this endpoint pass else: @@ -192,7 +192,7 @@ class DeviceRestServlet(RestServlet): else: raise - if requester.app_service and requester.app_service.msc4190_device_management: + if requester.app_service: # MSC4190 allows appservices to delete devices through this endpoint without UIA # It's also allowed with MSC3861 enabled pass @@ -227,7 +227,7 @@ class DeviceRestServlet(RestServlet): body = parse_and_validate_json_object_from_request(request, self.PutBody) # MSC4190 allows appservices to create devices through this endpoint - if requester.app_service and requester.app_service.msc4190_device_management: + if requester.app_service: created = await self.device_handler.upsert_device( user_id=requester.user.to_string(), device_id=device_id, diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 55922b97d4..f8974e34a8 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -543,15 +543,11 @@ class SigningKeyUploadServlet(RestServlet): if not keys_are_different: return 200, {} - # MSC4190 can skip UIA for replacing cross-signing keys as well. - is_appservice_with_msc4190 = ( - requester.app_service and requester.app_service.msc4190_device_management - ) - # The keys are different; is x-signing set up? If no, then this is first-time # setup, and that is allowed without UIA, per MSC3967. 
# If yes, then we need to authenticate the change. - if is_cross_signing_setup and not is_appservice_with_msc4190: + # MSC4190 can skip UIA for replacing cross-signing keys as well. + if is_cross_signing_setup and not requester.app_service: # With MSC3861, UIA is not possible. Instead, the auth service has to # explicitly mark the master key as replaceable. if self.hs.config.mas.enabled: diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py index de80b7c186..93dff77d80 100644 --- a/tests/rest/client/test_devices.py +++ b/tests/rest/client/test_devices.py @@ -533,16 +533,6 @@ class MSC4190AppserviceDevicesTestCase(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 200, channel.json_body) - # On the regular service, that API should not allow for the - # creation of new devices. - channel = self.make_request( - "PUT", - "/_matrix/client/v3/devices/AABBCCDD?user_id=@bob:test", - content={"display_name": "Bob's device"}, - access_token=self.pre_msc_service.token, - ) - self.assertEqual(channel.code, 404, channel.json_body) - def test_DELETE_device(self) -> None: self.register_appservice_user( "alice", self.msc4190_service.token, inhibit_login=True From a7303c53115f9f2da4d0f9cf80bfbf169f5169ed Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:31:09 +0200 Subject: [PATCH 051/149] Fix deprecated token field in release script (#19039) --- changelog.d/19039.misc | 1 + poetry.lock | 69 +++++++++++++++--------------------------- pyproject.toml | 2 +- scripts-dev/release.py | 3 +- 4 files changed, 28 insertions(+), 47 deletions(-) create mode 100644 changelog.d/19039.misc diff --git a/changelog.d/19039.misc b/changelog.d/19039.misc new file mode 100644 index 0000000000..1cd6b4d83c --- /dev/null +++ b/changelog.d/19039.misc @@ -0,0 +1 @@ +Drop a deprecated field of the `PyGitHub` dependency in the release script and raise the dependency's minimum version to `1.59.0`. \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 4445bbe256..1a26e23fad 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.2.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -39,7 +39,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\"" +markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\"" files = [ {file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"}, {file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"}, @@ -447,7 +447,7 @@ description = "XML bomb protection for Python stdlib modules" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -472,7 +472,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"}, {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"}, @@ -523,7 +523,7 @@ description = "Python wrapper for hiredis" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"all\" or extra == \"redis\"" +markers = "extra == \"redis\" or extra == \"all\"" files = [ {file = "hiredis-3.2.1-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:add17efcbae46c5a6a13b244ff0b4a8fa079602ceb62290095c941b42e9d5dec"}, {file = "hiredis-3.2.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:5fe955cc4f66c57df1ae8e5caf4de2925d43b5efab4e40859662311d1bcc5f54"}, @@ -860,7 +860,7 @@ description = "Jaeger Python OpenTracing Tracer implementation" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, ] @@ -998,7 +998,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" +markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" files = [ {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, @@ -1014,7 +1014,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"all\" or extra == \"url-preview\"" +markers = "extra == \"url-preview\" or extra == \"all\"" files = [ {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"}, {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"}, @@ -1301,7 +1301,7 @@ description = "An LDAP3 auth provider for Synapse" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" +markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" files = [ {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, @@ -1540,7 +1540,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, ] @@ -1609,8 +1609,6 @@ groups = ["main"] files = [ {file = "pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860"}, {file = "pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad"}, - {file = "pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0"}, - {file = "pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b"}, {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50"}, {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae"}, {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9"}, @@ -1620,8 +1618,6 @@ files = [ {file = "pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f"}, {file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"}, {file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"}, - {file = "pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d"}, - {file = "pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494"}, {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"}, {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"}, {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"}, @@ -1631,8 +1627,6 @@ files = [ {file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"}, {file = "pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4"}, {file = "pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69"}, - {file = "pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d"}, - {file = "pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6"}, {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7"}, {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024"}, {file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809"}, @@ -1645,8 +1639,6 @@ files = [ {file = "pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f"}, {file = "pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c"}, {file = "pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd"}, - {file = "pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e"}, - {file = "pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1"}, {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805"}, {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8"}, {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2"}, @@ -1656,8 +1648,6 @@ files = [ {file = "pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580"}, {file = "pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e"}, {file = "pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d"}, - {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced"}, - {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c"}, {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8"}, {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59"}, {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe"}, @@ -1667,8 +1657,6 @@ files = [ {file = "pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e"}, {file = "pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12"}, {file = "pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a"}, - {file = "pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632"}, - {file = "pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673"}, {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027"}, {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77"}, {file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874"}, @@ -1678,8 +1666,6 @@ files = [ {file = "pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6"}, {file = "pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae"}, {file = "pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653"}, - {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6"}, - {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36"}, {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b"}, {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477"}, {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50"}, @@ -1689,8 +1675,6 @@ files = [ {file = "pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa"}, {file = "pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f"}, {file = "pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081"}, - {file = "pillow-11.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4"}, - {file = "pillow-11.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc"}, {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06"}, {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a"}, {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978"}, @@ -1700,15 +1684,11 @@ files = [ {file = "pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe"}, - {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c"}, - {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"}, - {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3"}, - {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"}, @@ -1746,7 +1726,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"all\" or extra == \"postgres\"" +markers = "extra == \"postgres\" or extra == \"all\"" files = [ {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = 
"sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, @@ -1754,7 +1734,6 @@ files = [ {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, - {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"}, {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, @@ -1767,7 +1746,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" files = [ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, ] @@ -1783,7 +1762,7 @@ description = "A Simple library to enable psycopg2 compatability" optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" files = [ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, ] @@ -2042,7 +2021,7 @@ description = "A development tool to measure, monitor and analyze the memory beh optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"all\" or extra == \"cache-memory\"" +markers = "extra == \"cache-memory\" or extra == \"all\"" files = [ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, @@ -2102,7 +2081,7 @@ description = "Python implementation of SAML Version 2 Standard" optional = true python-versions = ">=3.9,<4.0" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"}, {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"}, @@ -2127,7 +2106,7 @@ description = "Extensions to the standard Python datetime module" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = 
"sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, @@ -2155,7 +2134,7 @@ description = "World timezone definitions, modern and historical" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"}, {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, @@ -2521,7 +2500,7 @@ description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"all\" or extra == \"sentry\"" +markers = "extra == \"sentry\" or extra == \"all\"" files = [ {file = "sentry_sdk-2.34.1-py2.py3-none-any.whl", hash = "sha256:b7a072e1cdc5abc48101d5146e1ae680fa81fe886d8d95aaa25a0b450c818d32"}, {file = "sentry_sdk-2.34.1.tar.gz", hash = "sha256:69274eb8c5c38562a544c3e9f68b5be0a43be4b697f5fd385bf98e4fbe672687"}, @@ -2709,7 +2688,7 @@ description = "Tornado IOLoop Backed Concurrent Futures" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, @@ -2725,7 +2704,7 @@ description = "Python bindings for the Apache Thrift RPC system" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, ] @@ -2787,7 +2766,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"}, {file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"}, @@ -2924,7 +2903,7 @@ description = "non-blocking redis client for python" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"redis\"" +markers = "extra == \"redis\" or extra == \"all\"" files = [ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"}, {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"}, @@ -3170,7 +3149,7 @@ description = "An XML Schema validator and decoder" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "xmlschema-2.4.0-py3-none-any.whl", hash = 
"sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"}, {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"}, @@ -3314,4 +3293,4 @@ url-preview = ["lxml"] [metadata] lock-version = "2.1" python-versions = "^3.9.0" -content-hash = "2e8ea085e1a0c6f0ac051d4bc457a96827d01f621b1827086de01a5ffa98cf79" +content-hash = "0058b93ca13a3f2a0cfc28485ddd8202c42d0015dbaf3b9692e43f37fe2a0be6" diff --git a/pyproject.toml b/pyproject.toml index 009d1553e5..b237ff87ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -356,7 +356,7 @@ click = ">=8.1.3" # GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints. GitPython = ">=3.1.20" markdown-it-py = ">=3.0.0" -pygithub = ">=1.55" +pygithub = ">=1.59" # The following are executed as commands by the release script. twine = "*" # Towncrier min version comes from https://github.com/matrix-org/synapse/pull/3425. Rationale unclear. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index a7e967116e..391881797e 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -37,6 +37,7 @@ from typing import Any, List, Match, Optional, Union import attr import click import git +import github from click.exceptions import ClickException from git import GitCommandError, Repo from github import BadCredentialsException, Github @@ -397,7 +398,7 @@ def _tag(gh_token: Optional[str]) -> None: return # Create a new draft release - gh = Github(gh_token) + gh = Github(auth=github.Auth.Token(token=gh_token)) gh_repo = gh.get_repo("element-hq/synapse") release = gh_repo.create_git_release( tag=tag_name, From 2d07bd7fd231536175b0d5cd086896366d83a3a3 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 13 Oct 2025 10:15:47 -0500 Subject: [PATCH 052/149] Update TODO list of conflicting areas where we encounter metrics being clobbered (`ApplicationService`) (#19040) These errors are harmless and are a long-standing issue that is just now being logged, see https://github.com/element-hq/synapse/issues/19042 ``` 2025-10-10 15:30:00,026 - synapse.util.metrics - 330 - ERROR - notify_interested_services-0 - Metric named cache_lru_cache__matches_user_in_member_list_example.com already registered for server example.com 2025-10-10 16:30:00.167 2025-10-10 15:30:00,026 - synapse.util.metrics - 330 - ERROR - notify_interested_services-0 - Metric named cache_lru_cache_is_interested_in_room_example.com already registered for server example.com 2025-10-10 16:30:00.167 2025-10-10 15:30:00,025 - synapse.util.metrics - 330 - ERROR - notify_interested_services-0 - Metric named cache_lru_cache_is_interested_in_event_example.com already registered for server example.com 2025-10-10 16:29:15.560 2025-10-10 15:29:15,449 - synapse.util.metrics - 330 - ERROR - notify_interested_services_ephemeral-0 - Metric named cache_lru_cache__matches_user_in_member_list_example.com already registered for server example.com 2025-10-10 16:29:15.560 2025-10-10 15:29:15,449 - synapse.util.metrics - 330 - ERROR - notify_interested_services_ephemeral-0 - Metric named cache_lru_cache_is_interested_in_room_example.com already registered for server example.com ``` --- changelog.d/19040.misc | 1 + synapse/util/metrics.py | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 changelog.d/19040.misc diff --git a/changelog.d/19040.misc b/changelog.d/19040.misc new file mode 100644 index 0000000000..9af18fc50e --- /dev/null +++ b/changelog.d/19040.misc @@ -0,0 +1 @@ +Update TODO list of 
conflicting areas where we encounter metrics being clobbered (`ApplicationService`). diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 7b6ad0e459..f71380d689 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -323,9 +323,13 @@ class DynamicCollectorRegistry(CollectorRegistry): if server_hooks.get(metric_name) is not None: # TODO: This should be an `assert` since registering the same metric name # multiple times will clobber the old metric. - # We currently rely on this behaviour as we instantiate multiple - # `SyncRestServlet`, one per listener, and in the `__init__` we setup a new - # LruCache. + # + # We currently rely on this behaviour in a few places: + # - We instantiate multiple `SyncRestServlet`, one per listener, and in the + # `__init__` we setup a new `LruCache`. + # - We instantiate multiple `ApplicationService` (one per configured + # application service) which use the `@cached` decorator on some methods. + # # Once the above behaviour is changed, this should be changed to an `assert`. logger.error( "Metric named %s already registered for server %s", From d2c582ef3ce963e01ed0f545962ec18577ed1f14 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 13 Oct 2025 10:19:09 -0500 Subject: [PATCH 053/149] Move unique snowflake homeserver background tasks to `start_background_tasks` (#19037) (the standard pattern for this kind of thing) --- changelog.d/19037.misc | 1 + synapse/app/_base.py | 10 -- synapse/metrics/common_usage_metrics.py | 2 +- synapse/server.py | 3 + tests/replication/_base.py | 7 +- .../tcp/streams/test_account_data.py | 49 ++++-- tests/replication/tcp/streams/test_events.py | 125 +++++++++------ .../tcp/streams/test_federation.py | 58 +++++-- .../replication/tcp/streams/test_receipts.py | 50 +++--- .../tcp/streams/test_thread_subscriptions.py | 26 ++-- .../replication/tcp/streams/test_to_device.py | 28 +++- tests/replication/tcp/streams/test_typing.py | 142 +++++++++++------- tests/storage/test_monthly_active_users.py | 10 +- tests/test_phone_home.py | 2 +- 14 files changed, 331 insertions(+), 182 deletions(-) create mode 100644 changelog.d/19037.misc diff --git a/changelog.d/19037.misc b/changelog.d/19037.misc new file mode 100644 index 0000000000..763050067e --- /dev/null +++ b/changelog.d/19037.misc @@ -0,0 +1 @@ +Move unique snowflake homeserver background tasks to `start_background_tasks` (the standard pattern for this kind of thing). diff --git a/synapse/app/_base.py b/synapse/app/_base.py index a3e4b4ea4b..b416b66ac6 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -64,7 +64,6 @@ from twisted.web.resource import Resource import synapse.util.caches from synapse.api.constants import MAX_PDU_SIZE from synapse.app import check_bind_error -from synapse.app.phone_stats_home import start_phone_stats_home from synapse.config import ConfigError from synapse.config._base import format_config_error from synapse.config.homeserver import HomeServerConfig @@ -683,15 +682,6 @@ async def start(hs: "HomeServer", freeze: bool = True) -> None: if hs.config.worker.run_background_tasks: hs.start_background_tasks() - # TODO: This should be moved to same pattern we use for other background tasks: - # Add to `REQUIRED_ON_BACKGROUND_TASK_STARTUP` and rely on - # `start_background_tasks` to start it. 
- await hs.get_common_usage_metrics_manager().setup() - - # TODO: This feels like another pattern that should refactored as one of the - # `REQUIRED_ON_BACKGROUND_TASK_STARTUP` - start_phone_stats_home(hs) - if freeze: # We now freeze all allocated objects in the hopes that (almost) # everything currently allocated are things that will be used for the diff --git a/synapse/metrics/common_usage_metrics.py b/synapse/metrics/common_usage_metrics.py index 43e0913d27..3f38412fa7 100644 --- a/synapse/metrics/common_usage_metrics.py +++ b/synapse/metrics/common_usage_metrics.py @@ -62,7 +62,7 @@ class CommonUsageMetricsManager: """ return await self._collect() - async def setup(self) -> None: + def setup(self) -> None: """Keep the gauges for common usage metrics up to date.""" self._hs.run_as_background_process( desc="common_usage_metrics_update_gauges", diff --git a/synapse/server.py b/synapse/server.py index 1316249dda..b63a11273a 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -62,6 +62,7 @@ from synapse.api.auth_blocking import AuthBlocking from synapse.api.filtering import Filtering from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter from synapse.app._base import unregister_sighups +from synapse.app.phone_stats_home import start_phone_stats_home from synapse.appservice.api import ApplicationServiceApi from synapse.appservice.scheduler import ApplicationServiceScheduler from synapse.config.homeserver import HomeServerConfig @@ -643,6 +644,8 @@ class HomeServer(metaclass=abc.ABCMeta): for i in self.REQUIRED_ON_BACKGROUND_TASK_STARTUP: getattr(self, "get_" + i + "_handler")() self.get_task_scheduler() + self.get_common_usage_metrics_manager().setup() + start_phone_stats_home(self) def get_reactor(self) -> ISynapseReactor: """ diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 1a2dab4c7d..8a6394e9ef 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -214,7 +214,12 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): client_to_server_transport.loseConnection() # there should have been exactly one request - self.assertEqual(len(requests), 1) + self.assertEqual( + len(requests), + 1, + "Expected to handle exactly one HTTP replication request but saw %d - requests=%s" + % (len(requests), requests), + ) return requests[0] diff --git a/tests/replication/tcp/streams/test_account_data.py b/tests/replication/tcp/streams/test_account_data.py index 6dea29ae15..d0c189230c 100644 --- a/tests/replication/tcp/streams/test_account_data.py +++ b/tests/replication/tcp/streams/test_account_data.py @@ -46,28 +46,39 @@ class AccountDataStreamTestCase(BaseStreamTestCase): # check we're testing what we think we are: no rows should yet have been # received - self.assertEqual([], self.test_handler.received_rdata_rows) + received_account_data_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == AccountDataStream.NAME + ] + self.assertEqual([], received_account_data_rows) # now reconnect to pull the updates self.reconnect() self.replicate() - # we should have received all the expected rows in the right order - received_rows = self.test_handler.received_rdata_rows + # We should have received all the expected rows in the right order + # + # Filter the updates to only include account data changes + received_account_data_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == AccountDataStream.NAME + ] for t in updates: - (stream_name, token, row) = received_rows.pop(0) + (stream_name, token, 
row) = received_account_data_rows.pop(0) self.assertEqual(stream_name, AccountDataStream.NAME) self.assertIsInstance(row, AccountDataStream.AccountDataStreamRow) self.assertEqual(row.data_type, t) self.assertEqual(row.room_id, "test_room") - (stream_name, token, row) = received_rows.pop(0) + (stream_name, token, row) = received_account_data_rows.pop(0) self.assertIsInstance(row, AccountDataStream.AccountDataStreamRow) self.assertEqual(row.data_type, "m.global") self.assertIsNone(row.room_id) - self.assertEqual([], received_rows) + self.assertEqual([], received_account_data_rows) def test_update_function_global_account_data_limit(self) -> None: """Test replication with many global account data updates""" @@ -85,32 +96,38 @@ class AccountDataStreamTestCase(BaseStreamTestCase): store.add_account_data_to_room("test_user", "test_room", "m.per_room", {}) ) - # tell the notifier to catch up to avoid duplicate rows. - # workaround for https://github.com/matrix-org/synapse/issues/7360 - # FIXME remove this when the above is fixed - self.replicate() - # check we're testing what we think we are: no rows should yet have been # received - self.assertEqual([], self.test_handler.received_rdata_rows) + received_account_data_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == AccountDataStream.NAME + ] + self.assertEqual([], received_account_data_rows) # now reconnect to pull the updates self.reconnect() self.replicate() # we should have received all the expected rows in the right order - received_rows = self.test_handler.received_rdata_rows + # + # Filter the updates to only include typing changes + received_account_data_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == AccountDataStream.NAME + ] for t in updates: - (stream_name, token, row) = received_rows.pop(0) + (stream_name, token, row) = received_account_data_rows.pop(0) self.assertEqual(stream_name, AccountDataStream.NAME) self.assertIsInstance(row, AccountDataStream.AccountDataStreamRow) self.assertEqual(row.data_type, t) self.assertIsNone(row.room_id) - (stream_name, token, row) = received_rows.pop(0) + (stream_name, token, row) = received_account_data_rows.pop(0) self.assertIsInstance(row, AccountDataStream.AccountDataStreamRow) self.assertEqual(row.data_type, "m.per_room") self.assertEqual(row.room_id, "test_room") - self.assertEqual([], received_rows) + self.assertEqual([], received_account_data_rows) diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 782dad39f5..452032205f 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -30,6 +30,7 @@ from synapse.replication.tcp.commands import RdataCommand from synapse.replication.tcp.streams._base import _STREAM_UPDATE_TARGET_ROW_COUNT from synapse.replication.tcp.streams.events import ( _MAX_STATE_UPDATES_PER_ROOM, + EventsStream, EventsStreamAllStateRow, EventsStreamCurrentStateRow, EventsStreamEventRow, @@ -82,7 +83,12 @@ class EventsStreamTestCase(BaseStreamTestCase): # check we're testing what we think we are: no rows should yet have been # received - self.assertEqual([], self.test_handler.received_rdata_rows) + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME + ] + self.assertEqual([], received_event_rows) # now reconnect to pull the updates self.reconnect() @@ -90,31 +96,34 @@ class EventsStreamTestCase(BaseStreamTestCase): # we should have received all 
the expected rows in the right order (as # well as various cache invalidation updates which we ignore) - received_rows = [ - row for row in self.test_handler.received_rdata_rows if row[0] == "events" + # + # Filter the updates to only include event changes + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME ] - for event in events: - stream_name, token, row = received_rows.pop(0) - self.assertEqual("events", stream_name) + stream_name, token, row = received_event_rows.pop(0) + self.assertEqual(EventsStream.NAME, stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, event.event_id) - stream_name, token, row = received_rows.pop(0) + stream_name, token, row = received_event_rows.pop(0) self.assertIsInstance(row, EventsStreamRow) self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, state_event.event_id) - stream_name, token, row = received_rows.pop(0) + stream_name, token, row = received_event_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) self.assertEqual(row.data.event_id, state_event.event_id) - self.assertEqual([], received_rows) + self.assertEqual([], received_event_rows) @parameterized.expand( [(_STREAM_UPDATE_TARGET_ROW_COUNT, False), (_MAX_STATE_UPDATES_PER_ROOM, True)] @@ -170,9 +179,12 @@ class EventsStreamTestCase(BaseStreamTestCase): self.replicate() # all those events and state changes should have landed - self.assertGreaterEqual( - len(self.test_handler.received_rdata_rows), 2 * len(events) - ) + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME + ] + self.assertGreaterEqual(len(received_event_rows), 2 * len(events)) # disconnect, so that we can stack up the changes self.disconnect() @@ -202,7 +214,12 @@ class EventsStreamTestCase(BaseStreamTestCase): # check we're testing what we think we are: no rows should yet have been # received - self.assertEqual([], self.test_handler.received_rdata_rows) + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME + ] + self.assertEqual([], received_event_rows) # now reconnect to pull the updates self.reconnect() @@ -218,33 +235,34 @@ class EventsStreamTestCase(BaseStreamTestCase): # of the states that got reverted. # - two rows for state2 - received_rows = [ - row for row in self.test_handler.received_rdata_rows if row[0] == "events" + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME ] - # first check the first two rows, which should be the state1 event. 
- stream_name, token, row = received_rows.pop(0) + stream_name, token, row = received_event_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, state1.event_id) - stream_name, token, row = received_rows.pop(0) + stream_name, token, row = received_event_rows.pop(0) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) self.assertEqual(row.data.event_id, state1.event_id) # now the last two rows, which should be the state2 event. - stream_name, token, row = received_rows.pop(-2) + stream_name, token, row = received_event_rows.pop(-2) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, state2.event_id) - stream_name, token, row = received_rows.pop(-1) + stream_name, token, row = received_event_rows.pop(-1) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) @@ -254,16 +272,16 @@ class EventsStreamTestCase(BaseStreamTestCase): if collapse_state_changes: # that should leave us with the rows for the PL event, the state changes # get collapsed into a single row. - self.assertEqual(len(received_rows), 2) + self.assertEqual(len(received_event_rows), 2) - stream_name, token, row = received_rows.pop(0) + stream_name, token, row = received_event_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, pl_event.event_id) - stream_name, token, row = received_rows.pop(0) + stream_name, token, row = received_event_rows.pop(0) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state-all") self.assertIsInstance(row.data, EventsStreamAllStateRow) @@ -271,9 +289,9 @@ class EventsStreamTestCase(BaseStreamTestCase): else: # that should leave us with the rows for the PL event - self.assertEqual(len(received_rows), len(events) + 2) + self.assertEqual(len(received_event_rows), len(events) + 2) - stream_name, token, row = received_rows.pop(0) + stream_name, token, row = received_event_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") @@ -282,7 +300,7 @@ class EventsStreamTestCase(BaseStreamTestCase): # the state rows are unsorted state_rows: List[EventsStreamCurrentStateRow] = [] - for stream_name, _, row in received_rows: + for stream_name, _, row in received_event_rows: self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") @@ -346,9 +364,12 @@ class EventsStreamTestCase(BaseStreamTestCase): self.replicate() # all those events and state changes should have landed - self.assertGreaterEqual( - len(self.test_handler.received_rdata_rows), 2 * len(events) - ) + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME + ] + self.assertGreaterEqual(len(received_event_rows), 2 * len(events)) # disconnect, so that we can stack up the changes self.disconnect() @@ -375,7 +396,12 @@ class EventsStreamTestCase(BaseStreamTestCase): # check we're testing what we 
think we are: no rows should yet have been # received - self.assertEqual([], self.test_handler.received_rdata_rows) + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME + ] + self.assertEqual([], received_event_rows) # now reconnect to pull the updates self.reconnect() @@ -383,14 +409,16 @@ class EventsStreamTestCase(BaseStreamTestCase): # we should have received all the expected rows in the right order (as # well as various cache invalidation updates which we ignore) - received_rows = [ - row for row in self.test_handler.received_rdata_rows if row[0] == "events" + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME ] - self.assertGreaterEqual(len(received_rows), len(events)) + self.assertGreaterEqual(len(received_event_rows), len(events)) for i in range(NUM_USERS): # for each user, we expect the PL event row, followed by state rows for # the PL event and each of the states that got reverted. - stream_name, token, row = received_rows.pop(0) + stream_name, token, row = received_event_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") @@ -400,7 +428,7 @@ class EventsStreamTestCase(BaseStreamTestCase): # the state rows are unsorted state_rows: List[EventsStreamCurrentStateRow] = [] for _ in range(STATES_PER_USER + 1): - stream_name, token, row = received_rows.pop(0) + stream_name, token, row = received_event_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") @@ -417,7 +445,7 @@ class EventsStreamTestCase(BaseStreamTestCase): # "None" indicates the state has been deleted self.assertIsNone(sr.event_id) - self.assertEqual([], received_rows) + self.assertEqual([], received_event_rows) def test_backwards_stream_id(self) -> None: """ @@ -432,7 +460,12 @@ class EventsStreamTestCase(BaseStreamTestCase): # check we're testing what we think we are: no rows should yet have been # received - self.assertEqual([], self.test_handler.received_rdata_rows) + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME + ] + self.assertEqual([], received_event_rows) # now reconnect to pull the updates self.reconnect() @@ -440,14 +473,16 @@ class EventsStreamTestCase(BaseStreamTestCase): # We should have received the expected single row (as well as various # cache invalidation updates which we ignore). - received_rows = [ - row for row in self.test_handler.received_rdata_rows if row[0] == "events" + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME ] # There should be a single received row. - self.assertEqual(len(received_rows), 1) + self.assertEqual(len(received_event_rows), 1) - stream_name, token, row = received_rows[0] + stream_name, token, row = received_event_rows[0] self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") @@ -468,10 +503,12 @@ class EventsStreamTestCase(BaseStreamTestCase): ) # No updates have been received (because it was discard as old). 
- received_rows = [ - row for row in self.test_handler.received_rdata_rows if row[0] == "events" + received_event_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == EventsStream.NAME ] - self.assertEqual(len(received_rows), 0) + self.assertEqual(len(received_event_rows), 0) # Ensure the stream has not gone backwards. current_token = worker_events_stream.current_token("master") diff --git a/tests/replication/tcp/streams/test_federation.py b/tests/replication/tcp/streams/test_federation.py index fd81e0dc17..172968c108 100644 --- a/tests/replication/tcp/streams/test_federation.py +++ b/tests/replication/tcp/streams/test_federation.py @@ -38,24 +38,45 @@ class FederationStreamTestCase(BaseStreamTestCase): Makes sure that updates sent while we are offline are received later. """ fed_sender = self.hs.get_federation_sender() - received_rows = self.test_handler.received_rdata_rows + # Send an update before we connect fed_sender.build_and_send_edu("testdest", "m.test_edu", {"a": "b"}) + # Now reconnect and pull the updates self.reconnect() + # FIXME: This seems odd, why aren't we calling `self.replicate()` here? but also + # doing so, causes other assumptions to fail (multiple HTTP replication attempts + # are made). self.reactor.advance(0) - # check we're testing what we think we are: no rows should yet have been + # Check we're testing what we think we are: no rows should yet have been # received - self.assertEqual(received_rows, []) + # + # Filter the updates to only include typing changes + received_federation_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == FederationStream.NAME + ] + self.assertEqual(received_federation_rows, []) # We should now see an attempt to connect to the master request = self.handle_http_replication_attempt() - self.assert_request_is_get_repl_stream_updates(request, "federation") + self.assert_request_is_get_repl_stream_updates(request, FederationStream.NAME) # we should have received an update row - stream_name, token, row = received_rows.pop() - self.assertEqual(stream_name, "federation") + received_federation_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == FederationStream.NAME + ] + self.assertEqual( + len(received_federation_rows), + 1, + "Expected exactly one row for the federation stream", + ) + (stream_name, token, row) = received_federation_rows[0] + self.assertEqual(stream_name, FederationStream.NAME) self.assertIsInstance(row, FederationStream.FederationStreamRow) self.assertEqual(row.type, EduRow.TypeId) edurow = EduRow.from_data(row.data) @@ -63,19 +84,30 @@ class FederationStreamTestCase(BaseStreamTestCase): self.assertEqual(edurow.edu.origin, self.hs.hostname) self.assertEqual(edurow.edu.destination, "testdest") self.assertEqual(edurow.edu.content, {"a": "b"}) - - self.assertEqual(received_rows, []) + # Clear out the received rows that we've checked so we can check for new ones later + self.test_handler.received_rdata_rows.clear() # additional updates should be transferred without an HTTP hit fed_sender.build_and_send_edu("testdest", "m.test1", {"c": "d"}) - self.reactor.advance(0) + # Pull in the updates + self.replicate() + # there should be no http hit self.assertEqual(len(self.reactor.tcpClients), 0) - # ... but we should have a row - self.assertEqual(len(received_rows), 1) - stream_name, token, row = received_rows.pop() - self.assertEqual(stream_name, "federation") + # ... 
but we should have a row + received_federation_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == FederationStream.NAME + ] + self.assertEqual( + len(received_federation_rows), + 1, + "Expected exactly one row for the federation stream", + ) + (stream_name, token, row) = received_federation_rows[0] + self.assertEqual(stream_name, FederationStream.NAME) self.assertIsInstance(row, FederationStream.FederationStreamRow) self.assertEqual(row.type, EduRow.TypeId) edurow = EduRow.from_data(row.data) diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py index c2f1f8dc4a..c5332f6b5f 100644 --- a/tests/replication/tcp/streams/test_receipts.py +++ b/tests/replication/tcp/streams/test_receipts.py @@ -20,7 +20,6 @@ # type: ignore -from unittest.mock import Mock from synapse.replication.tcp.streams._base import ReceiptsStream @@ -30,9 +29,6 @@ USER_ID = "@feeling:blue" class ReceiptsStreamTestCase(BaseStreamTestCase): - def _build_replication_data_handler(self): - return Mock(wraps=super()._build_replication_data_handler()) - def test_receipt(self): self.reconnect() @@ -50,23 +46,30 @@ class ReceiptsStreamTestCase(BaseStreamTestCase): self.replicate() # there should be one RDATA command - self.test_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0] - self.assertEqual(stream_name, "receipts") - self.assertEqual(1, len(rdata_rows)) - row: ReceiptsStream.ReceiptsStreamRow = rdata_rows[0] + received_receipt_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == ReceiptsStream.NAME + ] + self.assertEqual( + len(received_receipt_rows), + 1, + "Expected exactly one row for the receipts stream", + ) + (stream_name, token, row) = received_receipt_rows[0] + self.assertEqual(stream_name, ReceiptsStream.NAME) self.assertEqual("!room:blue", row.room_id) self.assertEqual("m.read", row.receipt_type) self.assertEqual(USER_ID, row.user_id) self.assertEqual("$event:blue", row.event_id) self.assertIsNone(row.thread_id) self.assertEqual({"a": 1}, row.data) + # Clear out the received rows that we've checked so we can check for new ones later + self.test_handler.received_rdata_rows.clear() # Now let's disconnect and insert some data. 
self.disconnect() - self.test_handler.on_rdata.reset_mock() - self.get_success( self.hs.get_datastores().main.insert_receipt( "!room2:blue", @@ -79,20 +82,27 @@ class ReceiptsStreamTestCase(BaseStreamTestCase): ) self.replicate() - # Nothing should have happened as we are disconnected - self.test_handler.on_rdata.assert_not_called() + # Not yet connected: no rows should yet have been received + self.assertEqual([], self.test_handler.received_rdata_rows) + # Now reconnect and pull the updates self.reconnect() - self.pump(0.1) + self.replicate() # We should now have caught up and get the missing data - self.test_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0] - self.assertEqual(stream_name, "receipts") + received_receipt_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == ReceiptsStream.NAME + ] + self.assertEqual( + len(received_receipt_rows), + 1, + "Expected exactly one row for the receipts stream", + ) + (stream_name, token, row) = received_receipt_rows[0] + self.assertEqual(stream_name, ReceiptsStream.NAME) self.assertEqual(token, 3) - self.assertEqual(1, len(rdata_rows)) - - row: ReceiptsStream.ReceiptsStreamRow = rdata_rows[0] self.assertEqual("!room2:blue", row.room_id) self.assertEqual("m.read", row.receipt_type) self.assertEqual(USER_ID, row.user_id) diff --git a/tests/replication/tcp/streams/test_thread_subscriptions.py b/tests/replication/tcp/streams/test_thread_subscriptions.py index 04e46b9d93..5405316048 100644 --- a/tests/replication/tcp/streams/test_thread_subscriptions.py +++ b/tests/replication/tcp/streams/test_thread_subscriptions.py @@ -88,15 +88,15 @@ class ThreadSubscriptionsStreamTestCase(BaseStreamTestCase): # We should have received all the expected rows in the right order # Filter the updates to only include thread subscription changes - received_rows = [ - upd - for upd in self.test_handler.received_rdata_rows - if upd[0] == ThreadSubscriptionsStream.NAME + received_thread_subscription_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == ThreadSubscriptionsStream.NAME ] # Verify all the thread subscription updates for thread_id in updates: - (stream_name, token, row) = received_rows.pop(0) + (stream_name, token, row) = received_thread_subscription_rows.pop(0) self.assertEqual(stream_name, ThreadSubscriptionsStream.NAME) self.assertIsInstance(row, ThreadSubscriptionsStream.ROW_TYPE) self.assertEqual(row.user_id, "@test_user:example.org") @@ -104,14 +104,14 @@ class ThreadSubscriptionsStreamTestCase(BaseStreamTestCase): self.assertEqual(row.event_id, thread_id) # Verify the last update in the different room - (stream_name, token, row) = received_rows.pop(0) + (stream_name, token, row) = received_thread_subscription_rows.pop(0) self.assertEqual(stream_name, ThreadSubscriptionsStream.NAME) self.assertIsInstance(row, ThreadSubscriptionsStream.ROW_TYPE) self.assertEqual(row.user_id, "@test_user:example.org") self.assertEqual(row.room_id, other_room_id) self.assertEqual(row.event_id, other_thread_root_id) - self.assertEqual([], received_rows) + self.assertEqual([], received_thread_subscription_rows) def test_multiple_users_thread_subscription_updates(self) -> None: """Test replication with thread subscription updates for multiple users""" @@ -138,18 +138,18 @@ class ThreadSubscriptionsStreamTestCase(BaseStreamTestCase): # We should have received all the expected rows # Filter the updates to only include thread subscription changes - received_rows = 
[ - upd - for upd in self.test_handler.received_rdata_rows - if upd[0] == ThreadSubscriptionsStream.NAME + received_thread_subscription_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == ThreadSubscriptionsStream.NAME ] # Should have one update per user - self.assertEqual(len(received_rows), len(users)) + self.assertEqual(len(received_thread_subscription_rows), len(users)) # Verify all updates for i, user_id in enumerate(users): - (stream_name, token, row) = received_rows[i] + (stream_name, token, row) = received_thread_subscription_rows[i] self.assertEqual(stream_name, ThreadSubscriptionsStream.NAME) self.assertIsInstance(row, ThreadSubscriptionsStream.ROW_TYPE) self.assertEqual(row.user_id, user_id) diff --git a/tests/replication/tcp/streams/test_to_device.py b/tests/replication/tcp/streams/test_to_device.py index cb07e93d6b..d6fd9f91ed 100644 --- a/tests/replication/tcp/streams/test_to_device.py +++ b/tests/replication/tcp/streams/test_to_device.py @@ -21,7 +21,10 @@ import logging import synapse -from synapse.replication.tcp.streams._base import _STREAM_UPDATE_TARGET_ROW_COUNT +from synapse.replication.tcp.streams._base import ( + _STREAM_UPDATE_TARGET_ROW_COUNT, + ToDeviceStream, +) from synapse.types import JsonDict from tests.replication._base import BaseStreamTestCase @@ -82,7 +85,12 @@ class ToDeviceStreamTestCase(BaseStreamTestCase): ) # replication is disconnected so we shouldn't get any updates yet - self.assertEqual([], self.test_handler.received_rdata_rows) + received_to_device_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == ToDeviceStream.NAME + ] + self.assertEqual([], received_to_device_rows) # now reconnect to pull the updates self.reconnect() @@ -90,7 +98,15 @@ class ToDeviceStreamTestCase(BaseStreamTestCase): # we should receive the fact that we have to_device updates # for user1 and user2 - received_rows = self.test_handler.received_rdata_rows - self.assertEqual(len(received_rows), 2) - self.assertEqual(received_rows[0][2].entity, user1) - self.assertEqual(received_rows[1][2].entity, user2) + received_to_device_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == ToDeviceStream.NAME + ] + self.assertEqual( + len(received_to_device_rows), + 2, + "Expected two rows in the to_device stream", + ) + self.assertEqual(received_to_device_rows[0][2].entity, user1) + self.assertEqual(received_to_device_rows[1][2].entity, user2) diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py index e2b2299106..df91416b9b 100644 --- a/tests/replication/tcp/streams/test_typing.py +++ b/tests/replication/tcp/streams/test_typing.py @@ -19,7 +19,6 @@ # # import logging -from unittest.mock import Mock from synapse.handlers.typing import RoomMember, TypingWriterHandler from synapse.replication.tcp.streams import TypingStream @@ -27,6 +26,8 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache from tests.replication._base import BaseStreamTestCase +logger = logging.getLogger(__name__) + USER_ID = "@feeling:blue" USER_ID_2 = "@da-ba-dee:blue" @@ -35,10 +36,6 @@ ROOM_ID_2 = "!foo:blue" class TypingStreamTestCase(BaseStreamTestCase): - def _build_replication_data_handler(self) -> Mock: - self.mock_handler = Mock(wraps=super()._build_replication_data_handler()) - return self.mock_handler - def test_typing(self) -> None: typing = self.hs.get_typing_handler() assert isinstance(typing, TypingWriterHandler) @@ -47,51 +44,74 @@ class 
TypingStreamTestCase(BaseStreamTestCase): # update to fetch. typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True) + # Not yet connected: no rows should yet have been received + self.assertEqual([], self.test_handler.received_rdata_rows) + + # Reconnect self.reconnect() typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True) - - self.reactor.advance(0) + # Pull in the updates + self.replicate() # We should now see an attempt to connect to the master request = self.handle_http_replication_attempt() - self.assert_request_is_get_repl_stream_updates(request, "typing") + self.assert_request_is_get_repl_stream_updates(request, TypingStream.NAME) - self.mock_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.mock_handler.on_rdata.call_args[0] - self.assertEqual(stream_name, "typing") - self.assertEqual(1, len(rdata_rows)) - row: TypingStream.TypingStreamRow = rdata_rows[0] - self.assertEqual(ROOM_ID, row.room_id) - self.assertEqual([USER_ID], row.user_ids) + # Filter the updates to only include typing changes + received_typing_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == TypingStream.NAME + ] + self.assertEqual( + len(received_typing_rows), + 1, + "Expected exactly one row for the typing stream", + ) + (stream_name, token, row) = received_typing_rows[0] + self.assertEqual(stream_name, TypingStream.NAME) + self.assertIsInstance(row, TypingStream.ROW_TYPE) + self.assertEqual(row.room_id, ROOM_ID) + self.assertEqual(row.user_ids, [USER_ID]) + # Clear out the received rows that we've checked so we can check for new ones later + self.test_handler.received_rdata_rows.clear() # Now let's disconnect and insert some data. self.disconnect() - self.mock_handler.on_rdata.reset_mock() - typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=False) - self.mock_handler.on_rdata.assert_not_called() + # Not yet connected: no rows should yet have been received + self.assertEqual([], self.test_handler.received_rdata_rows) + # Now reconnect and pull the updates self.reconnect() - self.pump(0.1) + self.replicate() # We should now see an attempt to connect to the master request = self.handle_http_replication_attempt() - self.assert_request_is_get_repl_stream_updates(request, "typing") + self.assert_request_is_get_repl_stream_updates(request, TypingStream.NAME) # The from token should be the token from the last RDATA we got. assert request.args is not None self.assertEqual(int(request.args[b"from_token"][0]), token) - self.mock_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.mock_handler.on_rdata.call_args[0] - self.assertEqual(stream_name, "typing") - self.assertEqual(1, len(rdata_rows)) - row = rdata_rows[0] - self.assertEqual(ROOM_ID, row.room_id) - self.assertEqual([], row.user_ids) + received_typing_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == TypingStream.NAME + ] + self.assertEqual( + len(received_typing_rows), + 1, + "Expected exactly one row for the typing stream", + ) + (stream_name, token, row) = received_typing_rows[0] + self.assertEqual(stream_name, TypingStream.NAME) + self.assertIsInstance(row, TypingStream.ROW_TYPE) + self.assertEqual(row.room_id, ROOM_ID) + self.assertEqual(row.user_ids, []) def test_reset(self) -> None: """ @@ -116,33 +136,47 @@ class TypingStreamTestCase(BaseStreamTestCase): # update to fetch. 
typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True) + # Not yet connected: no rows should yet have been received + self.assertEqual([], self.test_handler.received_rdata_rows) + + # Now reconnect to pull the updates self.reconnect() typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True) - - self.reactor.advance(0) + # Pull in the updates + self.replicate() # We should now see an attempt to connect to the master request = self.handle_http_replication_attempt() self.assert_request_is_get_repl_stream_updates(request, "typing") - self.mock_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.mock_handler.on_rdata.call_args[0] - self.assertEqual(stream_name, "typing") - self.assertEqual(1, len(rdata_rows)) - row: TypingStream.TypingStreamRow = rdata_rows[0] - self.assertEqual(ROOM_ID, row.room_id) - self.assertEqual([USER_ID], row.user_ids) + received_typing_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == TypingStream.NAME + ] + self.assertEqual( + len(received_typing_rows), + 1, + "Expected exactly one row for the typing stream", + ) + (stream_name, token, row) = received_typing_rows[0] + self.assertEqual(stream_name, TypingStream.NAME) + self.assertIsInstance(row, TypingStream.ROW_TYPE) + self.assertEqual(row.room_id, ROOM_ID) + self.assertEqual(row.user_ids, [USER_ID]) # Push the stream forward a bunch so it can be reset. for i in range(100): typing._push_update( member=RoomMember(ROOM_ID, "@test%s:blue" % i), typing=True ) - self.reactor.advance(0) + # Pull in the updates + self.replicate() # Disconnect. self.disconnect() + self.test_handler.received_rdata_rows.clear() # Reset the typing handler self.hs.get_replication_streams()["typing"].last_token = 0 @@ -155,30 +189,34 @@ class TypingStreamTestCase(BaseStreamTestCase): ) typing._reset() - # Reconnect. + # Now reconnect and pull the updates self.reconnect() - self.pump(0.1) + self.replicate() # We should now see an attempt to connect to the master request = self.handle_http_replication_attempt() self.assert_request_is_get_repl_stream_updates(request, "typing") - # Reset the test code. - self.mock_handler.on_rdata.reset_mock() - self.mock_handler.on_rdata.assert_not_called() - # Push additional data. typing._push_update(member=RoomMember(ROOM_ID_2, USER_ID_2), typing=False) - self.reactor.advance(0) - - self.mock_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.mock_handler.on_rdata.call_args[0] - self.assertEqual(stream_name, "typing") - self.assertEqual(1, len(rdata_rows)) - row = rdata_rows[0] - self.assertEqual(ROOM_ID_2, row.room_id) - self.assertEqual([], row.user_ids) + # Pull the updates + self.replicate() + received_typing_rows = [ + row + for row in self.test_handler.received_rdata_rows + if row[0] == TypingStream.NAME + ] + self.assertEqual( + len(received_typing_rows), + 1, + "Expected exactly one row for the typing stream", + ) + (stream_name, token, row) = received_typing_rows[0] + self.assertEqual(stream_name, TypingStream.NAME) + self.assertIsInstance(row, TypingStream.ROW_TYPE) + self.assertEqual(row.room_id, ROOM_ID_2) + self.assertEqual(row.user_ids, []) # The token should have been reset. 
self.assertEqual(token, 1) finally: diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index e684c6c161..9a3b44219d 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -110,13 +110,13 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.assertGreater(timestamp, 0) # Test that users with reserved 3pids are not removed from the MAU table - # XXX some of this is redundant. poking things into the config shouldn't - # work, and in any case it's not obvious what we expect to happen when - # we advance the reactor. - self.hs.config.server.max_mau_value = 0 + # + # The `start_phone_stats_home()` looping call will cause us to run + # `reap_monthly_active_users` after the time has advanced self.reactor.advance(FORTY_DAYS) - self.hs.config.server.max_mau_value = 5 + # I guess we call this one more time for good measure? Perhaps because + # previously, the phone home stats weren't running in tests? self.get_success(self.store.reap_monthly_active_users()) active_count = self.get_success(self.store.get_monthly_active_count()) diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py index ab21a5dde4..1d450f8251 100644 --- a/tests/test_phone_home.py +++ b/tests/test_phone_home.py @@ -75,7 +75,7 @@ class CommonMetricsTestCase(HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.metrics_manager = hs.get_common_usage_metrics_manager() - self.get_success(self.metrics_manager.setup()) + self.metrics_manager.setup() def test_dau(self) -> None: """Tests that the daily active users count is correctly updated.""" From ec7554b7686d148cda6bf9ef4639f79692170716 Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Mon, 13 Oct 2025 19:13:07 +0300 Subject: [PATCH 054/149] Stabilize support for MSC4326: Device masquerading for appservices (#19033) Note: the code references MSC3202, which is what MSC4326 was split off from. Only MSC4326 was accepted, MSC3202 wasn't yet. --- changelog.d/19033.feature | 1 + synapse/api/auth/base.py | 22 ++++++++++------------ synapse/config/experimental.py | 5 ----- tests/api/test_auth.py | 7 ++----- 4 files changed, 13 insertions(+), 22 deletions(-) create mode 100644 changelog.d/19033.feature diff --git a/changelog.d/19033.feature b/changelog.d/19033.feature new file mode 100644 index 0000000000..74042d9823 --- /dev/null +++ b/changelog.d/19033.feature @@ -0,0 +1 @@ +Stabilized support for [MSC4326](https://github.com/matrix-org/matrix-spec-proposals/pull/4326): Device masquerading for appservices. Contributed by @tulir @ Beeper. diff --git a/synapse/api/auth/base.py b/synapse/api/auth/base.py index 76c8c71628..fd7d761f7d 100644 --- a/synapse/api/auth/base.py +++ b/synapse/api/auth/base.py @@ -302,12 +302,9 @@ class BaseAuth: (the user_id URI parameter allows an application service to masquerade any applicable user in its namespace) - what device the application service should be treated as controlling - (the device_id[^1] URI parameter allows an application service to masquerade + (the device_id URI parameter allows an application service to masquerade as any device that exists for the relevant user) - [^1] Unstable and provided by MSC3202. - Must use `org.matrix.msc3202.device_id` in place of `device_id` for now. 
- Returns: the application service `Requester` of that request @@ -319,7 +316,8 @@ class BaseAuth: - The returned device ID, if present, has been checked to be a valid device ID for the returned user ID. """ - DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id" + # TODO: We can drop unstable support after 2026-01-01 (couple months after stable support) + UNSTABLE_DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id" app_service = self.store.get_app_service_by_token(access_token) if app_service is None: @@ -341,13 +339,11 @@ class BaseAuth: else: effective_user_id = app_service.sender - effective_device_id: Optional[str] = None - - if ( - self.hs.config.experimental.msc3202_device_masquerading_enabled - and DEVICE_ID_ARG_NAME in request.args - ): - effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8") + effective_device_id_args = request.args.get( + b"device_id", request.args.get(UNSTABLE_DEVICE_ID_ARG_NAME) + ) + if effective_device_id_args: + effective_device_id = effective_device_id_args[0].decode("utf8") # We only just set this so it can't be None! assert effective_device_id is not None device_opt = await self.store.get_device( @@ -359,6 +355,8 @@ class BaseAuth: f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})", Codes.UNKNOWN_DEVICE, ) + else: + effective_device_id = None return create_requester( effective_user_id, app_service=app_service, device_id=effective_device_id diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 04ca6e3c51..f82e8572f2 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -412,11 +412,6 @@ class ExperimentalConfig(Config): "msc2409_to_device_messages_enabled", False ) - # The portion of MSC3202 which is related to device masquerading. - self.msc3202_device_masquerading_enabled: bool = experimental.get( - "msc3202_device_masquerading", False - ) - # The portion of MSC3202 related to transaction extensions: # sending device list changes, one-time key counts and fallback key # usage to application services. 
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index e7fcd928d7..f7905ced7e 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -42,7 +42,6 @@ from synapse.types import Requester, UserID from synapse.util.clock import Clock from tests import unittest -from tests.unittest import override_config from tests.utils import mock_getRawHeaders @@ -237,7 +236,6 @@ class AuthTestCase(unittest.HomeserverTestCase): request.requestHeaders.getRawHeaders = mock_getRawHeaders() self.get_failure(self.auth.get_user_by_req(request), AuthError) - @override_config({"experimental_features": {"msc3202_device_masquerading": True}}) def test_get_user_by_req_appservice_valid_token_valid_device_id(self) -> None: """ Tests that when an application service passes the device_id URL parameter @@ -264,7 +262,7 @@ class AuthTestCase(unittest.HomeserverTestCase): request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] request.args[b"user_id"] = [masquerading_user_id] - request.args[b"org.matrix.msc3202.device_id"] = [masquerading_device_id] + request.args[b"device_id"] = [masquerading_device_id] request.requestHeaders.getRawHeaders = mock_getRawHeaders() requester = self.get_success(self.auth.get_user_by_req(request)) self.assertEqual( @@ -272,7 +270,6 @@ class AuthTestCase(unittest.HomeserverTestCase): ) self.assertEqual(requester.device_id, masquerading_device_id.decode("utf8")) - @override_config({"experimental_features": {"msc3202_device_masquerading": True}}) def test_get_user_by_req_appservice_valid_token_invalid_device_id(self) -> None: """ Tests that when an application service passes the device_id URL parameter @@ -299,7 +296,7 @@ class AuthTestCase(unittest.HomeserverTestCase): request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] request.args[b"user_id"] = [masquerading_user_id] - request.args[b"org.matrix.msc3202.device_id"] = [masquerading_device_id] + request.args[b"device_id"] = [masquerading_device_id] request.requestHeaders.getRawHeaders = mock_getRawHeaders() failure = self.get_failure(self.auth.get_user_by_req(request), AuthError) From a4f92741072368ad5a5689a2e28b3c034f3da4c4 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 14 Oct 2025 16:10:48 +0200 Subject: [PATCH 055/149] Fix indentation of sighup handler calling code (#19060) --- changelog.d/19060.bugfix | 1 + synapse/app/_base.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/19060.bugfix diff --git a/changelog.d/19060.bugfix b/changelog.d/19060.bugfix new file mode 100644 index 0000000000..81a6e54567 --- /dev/null +++ b/changelog.d/19060.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.136.0 that would prevent Synapse from being able to be `reload`-ed more than once when running under systemd. \ No newline at end of file diff --git a/synapse/app/_base.py b/synapse/app/_base.py index b416b66ac6..e30151dfb4 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -591,9 +591,9 @@ async def start(hs: "HomeServer", freeze: bool = True) -> None: # we're not using systemd. 
sdnotify(b"RELOADING=1") - for sighup_callbacks in _instance_id_to_sighup_callbacks_map.values(): - for func, args, kwargs in sighup_callbacks: - func(*args, **kwargs) + for sighup_callbacks in _instance_id_to_sighup_callbacks_map.values(): + for func, args, kwargs in sighup_callbacks: + func(*args, **kwargs) sdnotify(b"READY=1") From ecc90593cb1c764dee3763bf6a7062dd59f87a80 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 14 Oct 2025 15:26:15 +0100 Subject: [PATCH 056/149] 1.140.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 2f3926e106..a75ce73d87 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.140.0 (2025-10-14) + +No significant changes since 1.140.0rc1. + + + + # Synapse 1.140.0rc1 (2025-10-10) ## Compatibility notice for users of `synapse-s3-storage-provider` diff --git a/debian/changelog b/debian/changelog index d3d7db39c3..8cf346ef54 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.140.0) stable; urgency=medium + + * New Synapse release 1.140.0. + + -- Synapse Packaging team Tue, 14 Oct 2025 15:22:36 +0100 + matrix-synapse-py3 (1.140.0~rc1) stable; urgency=medium * New Synapse release 1.140.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 009d1553e5..6fb3e20c50 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.140.0rc1" +version = "1.140.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial" From b8f6ad27368755f62375a25829e788c43d91cb94 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 14 Oct 2025 15:27:34 +0100 Subject: [PATCH 057/149] Move storage provider compatibility notice to the top of the changelog --- CHANGES.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index a75ce73d87..4d5a4ceec9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,12 +1,5 @@ # Synapse 1.140.0 (2025-10-14) -No significant changes since 1.140.0rc1. - - - - -# Synapse 1.140.0rc1 (2025-10-10) - ## Compatibility notice for users of `synapse-s3-storage-provider` Deployments that make use of the @@ -16,6 +9,14 @@ module must upgrade to Using older versions of the module with this release of Synapse will prevent users from being able to upload or download media. + +No significant changes since 1.140.0rc1. 
+ + + + +# Synapse 1.140.0rc1 (2025-10-10) + ## Features - Add [a new Media Query by ID Admin API](https://element-hq.github.io/synapse/v1.140/admin_api/media_admin_api.html#query-a-piece-of-media-by-id) that allows server admins to query and investigate the metadata of local or cached remote media via From bf594a28a86556296f9e09efa926fe4f44b8c43a Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Thu, 16 Oct 2025 17:37:01 -0600 Subject: [PATCH 058/149] Move constants to designated file --- synapse/api/constants.py | 10 ++++++++++ synapse/handlers/sliding_sync/extensions.py | 14 ++++++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 7a8f546d6b..86620bda33 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -272,6 +272,9 @@ class EventContentFields: M_TOPIC: Final = "m.topic" M_TEXT: Final = "m.text" + # Event relations + RELATIONS: Final = "m.relates_to" + class EventUnsignedContentFields: """Fields found inside the 'unsigned' data on events""" @@ -360,3 +363,10 @@ class Direction(enum.Enum): class ProfileFields: DISPLAYNAME: Final = "displayname" AVATAR_URL: Final = "avatar_url" + + +class MRelatesToFields: + """Fields found inside m.relates_to content blocks.""" + + EVENT_ID: Final = "event_id" + REL_TYPE: Final = "rel_type" diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 2c39838fa8..995c54dd6f 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -30,7 +30,13 @@ from typing import ( from typing_extensions import TypeAlias, assert_never -from synapse.api.constants import AccountDataTypes, EduTypes, RelationTypes +from synapse.api.constants import ( + AccountDataTypes, + EduTypes, + EventContentFields, + MRelatesToFields, + RelationTypes, +) from synapse.handlers.receipts import ReceiptEventSource from synapse.logging.opentracing import trace from synapse.storage.databases.main.receipts import ReceiptInRoom @@ -1041,15 +1047,15 @@ class SlidingSyncExtensionHandler: if room_result.timeline_events: for event in room_result.timeline_events: # Check if this event is part of a thread - relates_to = event.content.get("m.relates_to") + relates_to = event.content.get(EventContentFields.RELATIONS) if not isinstance(relates_to, dict): continue - rel_type = relates_to.get("rel_type") + rel_type = relates_to.get(MRelatesToFields.REL_TYPE) # If this is a thread reply, track the thread if rel_type == RelationTypes.THREAD: - thread_id = relates_to.get("event_id") + thread_id = relates_to.get(MRelatesToFields.EVENT_ID) if thread_id: threads_in_timeline.add((room_id, thread_id)) From a3c7b3ecb97da10e1efe5dcea269c2e8fe78acb2 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Thu, 16 Oct 2025 18:06:26 -0600 Subject: [PATCH 059/149] Don't fetch bundled aggregations if we don't have to --- synapse/handlers/sliding_sync/extensions.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 995c54dd6f..66663c5e68 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -24,7 +24,6 @@ from typing import ( Optional, Sequence, Set, - Tuple, cast, ) @@ -1041,9 +1040,9 @@ class SlidingSyncExtensionHandler: # since the client already sees the thread activity in the timeline. 
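# The extension code above keys off the `m.relates_to` block in event content
# (via the constants introduced in this patch) to find thread replies that are
# already visible in a room's timeline. For readers unfamiliar with the shape
# of a thread relation, here is a small standalone sketch of the same check;
# it uses plain string literals in place of the constants, and the sample
# events are invented for illustration.
from typing import Any


def thread_roots_in_timeline(timeline_contents: list[dict[str, Any]]) -> set[str]:
    """Collect the thread root event IDs referenced by a list of event contents."""
    threads: set[str] = set()
    for content in timeline_contents:
        relates_to = content.get("m.relates_to")
        if not isinstance(relates_to, dict):
            continue
        if relates_to.get("rel_type") != "m.thread":
            continue
        thread_id = relates_to.get("event_id")
        if thread_id:
            threads.add(thread_id)
    return threads


print(
    thread_roots_in_timeline(
        [
            {"body": "not part of a thread"},
            {"m.relates_to": {"rel_type": "m.thread", "event_id": "$root1"}},
        ]
    )
)  # {'$root1'}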
# If include_roots=True, we include all threads regardless, because the client # wants the thread root events. - threads_in_timeline: Set[Tuple[str, str]] = set() # (room_id, thread_id) + threads_in_timeline: Set[str] = set() # thread_id if not threads_request.include_roots: - for room_id, room_result in actual_room_response_map.items(): + for _, room_result in actual_room_response_map.items(): if room_result.timeline_events: for event in room_result.timeline_events: # Check if this event is part of a thread @@ -1057,14 +1056,17 @@ class SlidingSyncExtensionHandler: if rel_type == RelationTypes.THREAD: thread_id = relates_to.get(MRelatesToFields.EVENT_ID) if thread_id: - threads_in_timeline.add((room_id, thread_id)) + threads_in_timeline.add(thread_id) # Collect thread root events and get bundled aggregations. # Only fetch bundled aggregations if we have thread root events to attach them to. thread_root_events = [ update.thread_root_event for update in all_thread_updates + # Don't fetch bundled aggregations for threads with events already in the + # timeline response since they will get filtered out later anyway. if update.thread_root_event + and update.thread_root_event.event_id not in threads_in_timeline ] aggregations_map = {} if thread_root_events: @@ -1077,7 +1079,7 @@ class SlidingSyncExtensionHandler: for update in all_thread_updates: # Skip this thread if it already has events in the room timeline # (unless include_roots=True, in which case we always include it) - if (update.room_id, update.thread_id) in threads_in_timeline: + if update.thread_id in threads_in_timeline: continue # Only look up bundled aggregations if we have a thread root event From 67f22a200d0c4d2ea396e7052aba0ef86cc4527d Mon Sep 17 00:00:00 2001 From: Ben Banfield-Zanin Date: Mon, 20 Oct 2025 16:49:17 +0100 Subject: [PATCH 060/149] Update Docker images to use Debian trixie (13) and thus Python 3.13 (#19064) --- changelog.d/19064.docker | 1 + docker/Dockerfile | 11 +++-------- docker/Dockerfile-workers | 32 +++++++++++++++++++------------- docker/complement/Dockerfile | 10 +++++----- docker/editable.Dockerfile | 6 +++--- docs/upgrade.md | 8 ++++++++ 6 files changed, 39 insertions(+), 29 deletions(-) create mode 100644 changelog.d/19064.docker diff --git a/changelog.d/19064.docker b/changelog.d/19064.docker new file mode 100644 index 0000000000..cc220a8d49 --- /dev/null +++ b/changelog.d/19064.docker @@ -0,0 +1 @@ +Update docker image to use Debian trixie as the base and thus Python 3.13. diff --git a/docker/Dockerfile b/docker/Dockerfile index 727bc8bf5d..f83486036d 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -20,8 +20,8 @@ # `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in # in `poetry export` in the past. 
-ARG DEBIAN_VERSION=bookworm -ARG PYTHON_VERSION=3.12 +ARG DEBIAN_VERSION=trixie +ARG PYTHON_VERSION=3.13 ARG POETRY_VERSION=2.1.1 ### @@ -142,10 +142,10 @@ RUN \ libwebp7 \ xmlsec1 \ libjemalloc2 \ - libicu \ | grep '^\w' > /tmp/pkg-list && \ for arch in arm64 amd64; do \ mkdir -p /tmp/debs-${arch} && \ + chown _apt:root /tmp/debs-${arch} && \ cd /tmp/debs-${arch} && \ apt-get -o APT::Architecture="${arch}" download $(cat /tmp/pkg-list); \ done @@ -176,11 +176,6 @@ LABEL org.opencontainers.image.documentation='https://element-hq.github.io/synap LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git' LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later OR LicenseRef-Element-Commercial' -# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the -# libraries to the right place, else the `COPY` won't work. -# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is -# already present in the runtime image. -COPY --from=runtime-deps /install-${TARGETARCH}/lib /usr/lib COPY --from=runtime-deps /install-${TARGETARCH}/etc /etc COPY --from=runtime-deps /install-${TARGETARCH}/usr /usr COPY --from=runtime-deps /install-${TARGETARCH}/var /var diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index 6d0fc1440b..ba8bb3b753 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -1,9 +1,10 @@ -# syntax=docker/dockerfile:1 +# syntax=docker/dockerfile:1-labs ARG SYNAPSE_VERSION=latest ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION -ARG DEBIAN_VERSION=bookworm -ARG PYTHON_VERSION=3.12 +ARG DEBIAN_VERSION=trixie +ARG PYTHON_VERSION=3.13 +ARG REDIS_VERSION=7.2 # first of all, we create a base image with dependencies which we can copy into the # target image. For repeated rebuilds, this is much faster than apt installing @@ -11,15 +12,27 @@ ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base + ARG DEBIAN_VERSION + ARG REDIS_VERSION + # Tell apt to keep downloaded package files, as we're using cache mounts. RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache + # The upstream redis-server deb has fewer dynamic libraries than Debian's package which makes it easier to copy later on + RUN \ + curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \ + chmod 644 /usr/share/keyrings/redis-archive-keyring.gpg && \ + echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb ${DEBIAN_VERSION} main" | tee /etc/apt/sources.list.d/redis.list + RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update -qq && \ DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \ - nginx-light + nginx-light \ + redis-server="6:${REDIS_VERSION}.*" redis-tools="6:${REDIS_VERSION}.*" \ + # libicu is required by postgres, see `docker/complement/Dockerfile` + libicu76 RUN \ # remove default page @@ -35,19 +48,12 @@ FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base RUN mkdir -p /uv/etc/supervisor/conf.d -# Similarly, a base to copy the redis server from. 
-# -# The redis docker image has fewer dynamic libraries than the debian package, -# which makes it much easier to copy (but we need to make sure we use an image -# based on the same debian version as the synapse image, to make sure we get -# the expected version of libc. -FROM docker.io/library/redis:7-${DEBIAN_VERSION} AS redis_base - # now build the final image, based on the the regular Synapse docker image FROM $FROM # Copy over dependencies - COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin + COPY --from=deps_base --parents /usr/lib/*-linux-gnu/libicu* / + COPY --from=deps_base /usr/bin/redis-server /usr/local/bin COPY --from=deps_base /uv / COPY --from=deps_base /usr/sbin/nginx /usr/sbin COPY --from=deps_base /usr/share/nginx /usr/share/nginx diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index 6ed084fe5d..8766f14454 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -9,7 +9,7 @@ ARG SYNAPSE_VERSION=latest # This is an intermediate image, to be built locally (not pulled from a registry). ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION -ARG DEBIAN_VERSION=bookworm +ARG DEBIAN_VERSION=trixie FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres_base @@ -18,10 +18,10 @@ FROM $FROM # since for repeated rebuilds, this is much faster than apt installing # postgres each time. -# This trick only works because (a) the Synapse image happens to have all the -# shared libraries that postgres wants, (b) we use a postgres image based on -# the same debian version as Synapse's docker image (so the versions of the -# shared libraries match). +# This trick only works because we use a postgres image based on the same +# debian version as Synapse's docker image (so the versions of the shared +# libraries match). Any missing libraries need to be added to either the +# Synapse image or docker/Dockerfile-workers. RUN adduser --system --uid 999 postgres --home /var/lib/postgresql COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile index f18cf6a5d9..7e5da4e4f4 100644 --- a/docker/editable.Dockerfile +++ b/docker/editable.Dockerfile @@ -8,9 +8,9 @@ ARG PYTHON_VERSION=3.9 ### ### Stage 0: generate requirements.txt ### -# We hardcode the use of Debian bookworm here because this could change upstream -# and other Dockerfiles used for testing are expecting bookworm. -FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm +# We hardcode the use of Debian trixie here because this could change upstream +# and other Dockerfiles used for testing are expecting trixie. +FROM docker.io/library/python:${PYTHON_VERSION}-slim-trixie # Install Rust and other dependencies (stolen from normal Dockerfile) # install the OS build deps diff --git a/docs/upgrade.md b/docs/upgrade.md index c049a50984..63d567505f 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -117,6 +117,14 @@ each upgrade are complete before moving on to the next upgrade, to avoid stacking them up. You can monitor the currently running background updates with [the Admin API](usage/administration/admin_api/background_updates.html#status). +# Upgrading to v1.141.0 + +## Docker images now based on Debian `trixie` with Python 3.13 + +The Docker images are now based on Debian `trixie` and use Python 3.13. If you +are using the Docker images as a base image you may need to e.g. 
adjust the +paths you mount any additional Python packages at. + # Upgrading to v1.140.0 ## Users of `synapse-s3-storage-provider` must update the module to `v1.6.0` From eac862629f41d2ef62bddb027d20aba9aa32f5d3 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 20 Oct 2025 10:55:41 -0500 Subject: [PATCH 061/149] Revert "Move `start_doing_background_updates()` to `SynapseHomeServer.start_background_tasks()` (#19036)" (#19059) ### Why See https://github.com/element-hq/synapse/pull/19036#discussion_r2427070612 Revert while I figure out the tests in https://github.com/element-hq/synapse/pull/19057 --- changelog.d/19036.misc | 1 - synapse/app/homeserver.py | 8 +++----- 2 files changed, 3 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/19036.misc diff --git a/changelog.d/19036.misc b/changelog.d/19036.misc deleted file mode 100644 index 95b8daab9b..0000000000 --- a/changelog.d/19036.misc +++ /dev/null @@ -1 +0,0 @@ -Move `start_doing_background_updates()` to `SynapseHomeServer.start_background_tasks()`. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index b9ac86c2fc..e415d651bc 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -317,11 +317,6 @@ class SynapseHomeServer(HomeServer): # during parsing logger.warning("Unrecognized listener type: %s", listener.type) - def start_background_tasks(self) -> None: - super().start_background_tasks() - - self.get_datastores().main.db_pool.updates.start_doing_background_updates() - def load_or_generate_config(argv_options: List[str]) -> HomeServerConfig: """ @@ -435,6 +430,9 @@ def setup( await _base.start(hs, freeze) + # TODO: Feels like this should be moved somewhere else. + hs.get_datastores().main.db_pool.updates.start_doing_background_updates() + # Register a callback to be invoked once the reactor is running register_start(hs, _start_when_reactor_running) From 418c9f3fe5d8359f6c77813a2902e69a82800b71 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 21 Oct 2025 11:52:28 +0200 Subject: [PATCH 062/149] Prevent `bcrypt` from raising a `ValueError` and log (#19078) --- changelog.d/19078.bugfix | 1 + synapse/_scripts/hash_password.py | 12 +++++++++++- synapse/handlers/auth.py | 16 +++++++++++++++- 3 files changed, 27 insertions(+), 2 deletions(-) create mode 100644 changelog.d/19078.bugfix diff --git a/changelog.d/19078.bugfix b/changelog.d/19078.bugfix new file mode 100644 index 0000000000..0046afcccf --- /dev/null +++ b/changelog.d/19078.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.140.0 where an internal server error could be raised when hashing user passwords that are too long. \ No newline at end of file diff --git a/synapse/_scripts/hash_password.py b/synapse/_scripts/hash_password.py index 2b7d3585cb..6a87303fc9 100755 --- a/synapse/_scripts/hash_password.py +++ b/synapse/_scripts/hash_password.py @@ -73,8 +73,18 @@ def main() -> None: pw = unicodedata.normalize("NFKC", password) + bytes_to_hash = pw.encode("utf8") + password_pepper.encode("utf8") + if len(bytes_to_hash) > 72: + # bcrypt only looks at the first 72 bytes + print( + f"Password is too long ({len(bytes_to_hash)} bytes); truncating to 72 bytes for bcrypt. " + "This is expected behaviour and will not affect a user's ability to log in. 72 bytes is " + "sufficient entropy for a password." 
+ ) + bytes_to_hash = bytes_to_hash[:72] + hashed = bcrypt.hashpw( - pw.encode("utf8") + password_pepper.encode("utf8"), + bytes_to_hash, bcrypt.gensalt(bcrypt_rounds), ).decode("ascii") diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 2d1990cce5..f4583e33c3 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1683,8 +1683,22 @@ class AuthHandler: # Normalise the Unicode in the password pw = unicodedata.normalize("NFKC", password) + bytes_to_hash = pw.encode( + "utf8" + ) + self.hs.config.auth.password_pepper.encode("utf8") + if len(bytes_to_hash) > 72: + # bcrypt only looks at the first 72 bytes. + # + # Note: we explicitly DO NOT log the length of the user's password here. + logger.debug( + "Password is too long; truncating to 72 bytes for bcrypt. " + "This is expected behaviour and will not affect a user's ability to log in. 72 bytes is " + "sufficient entropy for a password." + ) + bytes_to_hash = bytes_to_hash[:72] + return bcrypt.hashpw( - pw.encode("utf8") + self.hs.config.auth.password_pepper.encode("utf8"), + bytes_to_hash, bcrypt.gensalt(self.bcrypt_rounds), ).decode("ascii") From 1271e896b5d92a0f4a0a718e30a48cb884a5e299 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 21 Oct 2025 11:12:59 +0100 Subject: [PATCH 063/149] 1.141.0rc1 --- CHANGES.md | 37 +++++++++++++++++++++++++++++++ changelog.d/19031.feature | 1 - changelog.d/19033.feature | 1 - changelog.d/19037.misc | 1 - changelog.d/19039.misc | 1 - changelog.d/19040.misc | 1 - changelog.d/19060.bugfix | 1 - changelog.d/19064.docker | 1 - changelog.d/19078.bugfix | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- schema/synapse-config.schema.yaml | 2 +- 12 files changed, 45 insertions(+), 10 deletions(-) delete mode 100644 changelog.d/19031.feature delete mode 100644 changelog.d/19033.feature delete mode 100644 changelog.d/19037.misc delete mode 100644 changelog.d/19039.misc delete mode 100644 changelog.d/19040.misc delete mode 100644 changelog.d/19060.bugfix delete mode 100644 changelog.d/19064.docker delete mode 100644 changelog.d/19078.bugfix diff --git a/CHANGES.md b/CHANGES.md index 4d5a4ceec9..a26f012c64 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,40 @@ +# Synapse 1.141.0rc1 (2025-10-21) + +## Deprecation of MacOS Python wheels + +The team has decided to deprecate and eventually stop publishing python wheels +for MacOS. This is a burden on the team, and we're not aware of any parties +that use them. Synapse docker images will continue to work on MacOS, as will +building Synapse from source (though note this requires a Rust compiler). + +Publishing MacOS Python wheels will continue for the next few releases. If you +do make use of these wheels downstream, please reach out to us in +[#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org). We'd +love to hear from you! + +## Features + +- Allow using [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190) behavior without the opt-in registration flag. Contributed by @tulir @ Beeper. ([\#19031](https://github.com/element-hq/synapse/issues/19031)) +- Stabilized support for [MSC4326](https://github.com/matrix-org/matrix-spec-proposals/pull/4326): Device masquerading for appservices. Contributed by @tulir @ Beeper. ([\#19033](https://github.com/element-hq/synapse/issues/19033)) + +## Bugfixes + +- Fix a bug introduced in 1.136.0 that would prevent Synapse from being able to be `reload`-ed more than once when running under systemd. 
([\#19060](https://github.com/element-hq/synapse/issues/19060)) +- Fix a bug introduced in 1.140.0 where an internal server error could be raised when hashing user passwords that are too long. ([\#19078](https://github.com/element-hq/synapse/issues/19078)) + +## Updates to the Docker image + +- Update docker image to use Debian trixie as the base and thus Python 3.13. ([\#19064](https://github.com/element-hq/synapse/issues/19064)) + +## Internal Changes + +- Move unique snowflake homeserver background tasks to `start_background_tasks` (the standard pattern for this kind of thing). ([\#19037](https://github.com/element-hq/synapse/issues/19037)) +- Drop a deprecated field of the `PyGitHub` dependency in the release script and raise the dependency's minimum version to `1.59.0`. ([\#19039](https://github.com/element-hq/synapse/issues/19039)) +- Update TODO list of conflicting areas where we encounter metrics being clobbered (`ApplicationService`). ([\#19040](https://github.com/element-hq/synapse/issues/19040)) + + + + # Synapse 1.140.0 (2025-10-14) ## Compatibility notice for users of `synapse-s3-storage-provider` diff --git a/changelog.d/19031.feature b/changelog.d/19031.feature deleted file mode 100644 index 711664499b..0000000000 --- a/changelog.d/19031.feature +++ /dev/null @@ -1 +0,0 @@ -Allow using [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190) behavior without the opt-in registration flag. Contributed by @tulir @ Beeper. diff --git a/changelog.d/19033.feature b/changelog.d/19033.feature deleted file mode 100644 index 74042d9823..0000000000 --- a/changelog.d/19033.feature +++ /dev/null @@ -1 +0,0 @@ -Stabilized support for [MSC4326](https://github.com/matrix-org/matrix-spec-proposals/pull/4326): Device masquerading for appservices. Contributed by @tulir @ Beeper. diff --git a/changelog.d/19037.misc b/changelog.d/19037.misc deleted file mode 100644 index 763050067e..0000000000 --- a/changelog.d/19037.misc +++ /dev/null @@ -1 +0,0 @@ -Move unique snowflake homeserver background tasks to `start_background_tasks` (the standard pattern for this kind of thing). diff --git a/changelog.d/19039.misc b/changelog.d/19039.misc deleted file mode 100644 index 1cd6b4d83c..0000000000 --- a/changelog.d/19039.misc +++ /dev/null @@ -1 +0,0 @@ -Drop a deprecated field of the `PyGitHub` dependency in the release script and raise the dependency's minimum version to `1.59.0`. \ No newline at end of file diff --git a/changelog.d/19040.misc b/changelog.d/19040.misc deleted file mode 100644 index 9af18fc50e..0000000000 --- a/changelog.d/19040.misc +++ /dev/null @@ -1 +0,0 @@ -Update TODO list of conflicting areas where we encounter metrics being clobbered (`ApplicationService`). diff --git a/changelog.d/19060.bugfix b/changelog.d/19060.bugfix deleted file mode 100644 index 81a6e54567..0000000000 --- a/changelog.d/19060.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.136.0 that would prevent Synapse from being able to be `reload`-ed more than once when running under systemd. \ No newline at end of file diff --git a/changelog.d/19064.docker b/changelog.d/19064.docker deleted file mode 100644 index cc220a8d49..0000000000 --- a/changelog.d/19064.docker +++ /dev/null @@ -1 +0,0 @@ -Update docker image to use Debian trixie as the base and thus Python 3.13. 
diff --git a/changelog.d/19078.bugfix b/changelog.d/19078.bugfix deleted file mode 100644 index 0046afcccf..0000000000 --- a/changelog.d/19078.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.140.0 where an internal server error could be raised when hashing user passwords that are too long. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 8cf346ef54..0f61e38b1f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.141.0~rc1) stable; urgency=medium + + * New Synapse release 1.141.0rc1. + + -- Synapse Packaging team Tue, 21 Oct 2025 11:01:44 +0100 + matrix-synapse-py3 (1.140.0) stable; urgency=medium * New Synapse release 1.140.0. diff --git a/pyproject.toml b/pyproject.toml index 2ac9a25569..ee7016b1d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.140.0" +version = "1.141.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial" diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml index c4a98065d0..419a0ab91b 100644 --- a/schema/synapse-config.schema.yaml +++ b/schema/synapse-config.schema.yaml @@ -1,5 +1,5 @@ $schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json -$id: https://element-hq.github.io/synapse/schema/synapse/v1.140/synapse-config.schema.json +$id: https://element-hq.github.io/synapse/schema/synapse/v1.141/synapse-config.schema.json type: object properties: modules: From 2f65b9e001eeef0f2494cb1d1cc958a91c74d190 Mon Sep 17 00:00:00 2001 From: Kieran Lane Date: Tue, 21 Oct 2025 13:35:55 +0100 Subject: [PATCH 064/149] Update `oidc_session_no_samesite` cookie to be `Secure` (#19079) --- changelog.d/19079.bugfix | 1 + synapse/handlers/oidc.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19079.bugfix diff --git a/changelog.d/19079.bugfix b/changelog.d/19079.bugfix new file mode 100644 index 0000000000..a7d9800d1d --- /dev/null +++ b/changelog.d/19079.bugfix @@ -0,0 +1 @@ +Fix the `oidc_session_no_samesite` cookie to have the `Secure` attribute, so the only difference between it and the paired `oidc_session` cookie, is the configuration of the `SameSite` attribute as described in the comments / cookie names. Contributed by @kieranlane. \ No newline at end of file diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index fc93c6b2a8..39505463bb 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -96,7 +96,7 @@ logger = logging.getLogger(__name__) # Here we have the names of the cookies, and the options we use to set them. _SESSION_COOKIES = [ (b"oidc_session", b"HttpOnly; Secure; SameSite=None"), - (b"oidc_session_no_samesite", b"HttpOnly"), + (b"oidc_session_no_samesite", b"HttpOnly; Secure"), ] From 44279083402fb1a046216bfe716fc8689e80f21e Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 21 Oct 2025 14:17:53 +0100 Subject: [PATCH 065/149] newsfile --- changelog.d/19081.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/19081.misc diff --git a/changelog.d/19081.misc b/changelog.d/19081.misc new file mode 100644 index 0000000000..8518840fb6 --- /dev/null +++ b/changelog.d/19081.misc @@ -0,0 +1 @@ +Update the deprecated poetry development dependencies group name in `pyproject.toml`. 
\ No newline at end of file From 6c16734cf34f9229fc1f552bf8aa8b9f2e7a53ad Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 21 Oct 2025 14:18:40 +0100 Subject: [PATCH 066/149] Revert "newsfile" This reverts commit 44279083402fb1a046216bfe716fc8689e80f21e. This should not have been committed to `develop`. --- changelog.d/19081.misc | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/19081.misc diff --git a/changelog.d/19081.misc b/changelog.d/19081.misc deleted file mode 100644 index 8518840fb6..0000000000 --- a/changelog.d/19081.misc +++ /dev/null @@ -1 +0,0 @@ -Update the deprecated poetry development dependencies group name in `pyproject.toml`. \ No newline at end of file From ff242faad0ce3f6a53c365f1470f782aeee19963 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 21 Oct 2025 16:40:26 +0100 Subject: [PATCH 067/149] Don't exit the release script if there are uncommitted changes Instead, all the user to fix them and retry. --- scripts-dev/release.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 391881797e..fafa55c770 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -820,8 +820,10 @@ def get_repo_and_check_clean_checkout( raise click.ClickException( f"{path} is not a git repository (expecting a {name} repository)." ) - if repo.is_dirty(): - raise click.ClickException(f"Uncommitted changes exist in {path}.") + while repo.is_dirty(): + if not click.confirm(f"Uncommitted changes exist in {path}. Commit or stash them. Ready to continue?"): + raise click.ClickException("Aborted.") + return repo From cba3a814c63ad877c482df3bc75570e4a7d61ddb Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 22 Oct 2025 10:39:04 -0500 Subject: [PATCH 068/149] Fix lints on `develop` (#19092) Snuck in with https://github.com/element-hq/synapse/commit/ff242faad0ce3f6a53c365f1470f782aeee19963 --- changelog.d/19092.misc | 1 + scripts-dev/release.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19092.misc diff --git a/changelog.d/19092.misc b/changelog.d/19092.misc new file mode 100644 index 0000000000..c5060c1c8b --- /dev/null +++ b/changelog.d/19092.misc @@ -0,0 +1 @@ +Fix lints on main branch. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index fafa55c770..c5c72156cc 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -821,7 +821,9 @@ def get_repo_and_check_clean_checkout( f"{path} is not a git repository (expecting a {name} repository)." ) while repo.is_dirty(): - if not click.confirm(f"Uncommitted changes exist in {path}. Commit or stash them. Ready to continue?"): + if not click.confirm( + f"Uncommitted changes exist in {path}. Commit or stash them. Ready to continue?" 
+ ): raise click.ClickException("Aborted.") return repo From fc244bb592aa481faf28214a2e2ce3bb4e95d990 Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Wed, 22 Oct 2025 17:48:19 -0400 Subject: [PATCH 069/149] Use type hinting generics in standard collections (#19046) aka PEP 585, added in Python 3.9 - https://peps.python.org/pep-0585/ - https://docs.astral.sh/ruff/rules/non-pep585-annotation/ --- build_rust.py | 4 +- changelog.d/19046.misc | 1 + contrib/graph/graph.py | 5 +- docker/configure_workers_and_start.py | 55 ++--- docker/start.py | 6 +- pyproject.toml | 6 + scripts-dev/build_debian_packages.py | 4 +- scripts-dev/check_locked_deps_have_sdists.py | 3 +- scripts-dev/check_pydantic_models.py | 28 +-- scripts-dev/check_schema_delta.py | 8 +- scripts-dev/federation_client.py | 8 +- scripts-dev/mypy_synapse_plugin.py | 10 +- scripts-dev/release.py | 4 +- scripts-dev/schema_versions.py | 6 +- stubs/sortedcontainers/sorteddict.pyi | 32 ++- stubs/sortedcontainers/sortedlist.pyi | 19 +- stubs/sortedcontainers/sortedset.pyi | 12 +- stubs/txredisapi.pyi | 8 +- synapse/__init__.py | 4 +- synapse/_scripts/generate_workers_map.py | 30 +-- synapse/_scripts/register_new_matrix_user.py | 6 +- synapse/_scripts/review_recent_signups.py | 11 +- synapse/_scripts/synapse_port_db.py | 51 ++-- synapse/api/auth/__init__.py | 6 +- synapse/api/auth/base.py | 6 +- synapse/api/auth/mas.py | 4 +- synapse/api/auth/msc3861_delegated.py | 14 +- synapse/api/errors.py | 16 +- synapse/api/filtering.py | 23 +- synapse/api/ratelimiting.py | 8 +- synapse/api/room_versions.py | 6 +- synapse/app/_base.py | 23 +- synapse/app/admin_cmd.py | 6 +- synapse/app/complement_fork_starter.py | 10 +- synapse/app/generic_worker.py | 5 +- synapse/app/homeserver.py | 10 +- synapse/app/phone_stats_home.py | 8 +- synapse/appservice/__init__.py | 16 +- synapse/appservice/api.py | 31 ++- synapse/appservice/scheduler.py | 26 +- synapse/config/__main__.py | 3 +- synapse/config/_base.py | 30 +-- synapse/config/_base.pyi | 30 +-- synapse/config/_util.py | 8 +- synapse/config/api.py | 4 +- synapse/config/appservice.py | 10 +- synapse/config/cache.py | 6 +- synapse/config/cas.py | 4 +- synapse/config/database.py | 4 +- synapse/config/key.py | 14 +- synapse/config/logger.py | 4 +- synapse/config/modules.py | 4 +- synapse/config/oembed.py | 12 +- synapse/config/oidc.py | 10 +- synapse/config/password_auth_providers.py | 4 +- synapse/config/ratelimiting.py | 8 +- synapse/config/registration.py | 4 +- synapse/config/repository.py | 12 +- synapse/config/retention.py | 4 +- synapse/config/saml2.py | 11 +- synapse/config/server.py | 28 +-- synapse/config/spam_checker.py | 4 +- synapse/config/sso.py | 6 +- synapse/config/tls.py | 4 +- synapse/config/tracer.py | 6 +- synapse/config/user_types.py | 6 +- synapse/config/workers.py | 36 +-- synapse/crypto/event_signing.py | 10 +- synapse/crypto/keyring.py | 72 +++--- synapse/event_auth.py | 12 +- synapse/events/__init__.py | 30 +-- synapse/events/auto_accept_invites.py | 4 +- synapse/events/builder.py | 12 +- synapse/events/presence_router.py | 25 +- synapse/events/snapshot.py | 26 +- synapse/events/utils.py | 26 +- synapse/events/validator.py | 6 +- synapse/federation/federation_base.py | 4 +- synapse/federation/federation_client.py | 59 +++-- synapse/federation/federation_server.py | 89 ++++--- synapse/federation/persistence.py | 4 +- synapse/federation/send_queue.py | 34 ++- synapse/federation/sender/__init__.py | 13 +- .../sender/per_destination_queue.py | 26 +- 
.../federation/sender/transaction_manager.py | 6 +- synapse/federation/transport/client.py | 43 ++-- .../federation/transport/server/__init__.py | 18 +- synapse/federation/transport/server/_base.py | 12 +- .../federation/transport/server/federation.py | 122 +++++----- synapse/federation/units.py | 12 +- synapse/handlers/account.py | 16 +- synapse/handlers/account_data.py | 6 +- synapse/handlers/account_validity.py | 6 +- synapse/handlers/admin.py | 12 +- synapse/handlers/appservice.py | 55 ++--- synapse/handlers/auth.py | 106 ++++----- synapse/handlers/cas.py | 10 +- synapse/handlers/delayed_events.py | 10 +- synapse/handlers/device.py | 56 ++--- synapse/handlers/devicemessage.py | 8 +- synapse/handlers/directory.py | 4 +- synapse/handlers/e2e_keys.py | 76 +++--- synapse/handlers/e2e_room_keys.py | 6 +- synapse/handlers/event_auth.py | 4 +- synapse/handlers/events.py | 4 +- synapse/handlers/federation.py | 48 ++-- synapse/handlers/federation_event.py | 24 +- synapse/handlers/identity.py | 8 +- synapse/handlers/initial_sync.py | 8 +- synapse/handlers/jwt.py | 4 +- synapse/handlers/message.py | 50 ++-- synapse/handlers/oidc.py | 29 ++- synapse/handlers/pagination.py | 14 +- synapse/handlers/presence.py | 101 ++++---- synapse/handlers/profile.py | 4 +- synapse/handlers/push_rules.py | 6 +- synapse/handlers/receipts.py | 14 +- synapse/handlers/register.py | 8 +- synapse/handlers/relations.py | 21 +- synapse/handlers/room.py | 59 +++-- synapse/handlers/room_list.py | 10 +- synapse/handlers/room_member.py | 72 +++--- synapse/handlers/room_member_worker.py | 14 +- synapse/handlers/room_summary.py | 32 +-- synapse/handlers/saml.py | 8 +- synapse/handlers/search.py | 34 +-- synapse/handlers/send_email.py | 4 +- synapse/handlers/sliding_sync/__init__.py | 32 +-- synapse/handlers/sliding_sync/extensions.py | 26 +- synapse/handlers/sliding_sync/room_lists.py | 108 ++++----- synapse/handlers/sso.py | 13 +- synapse/handlers/stats.py | 10 +- synapse/handlers/sync.py | 127 +++++----- synapse/handlers/typing.py | 24 +- synapse/handlers/ui_auth/checkers.py | 4 +- synapse/handlers/user_directory.py | 8 +- synapse/handlers/worker_lock.py | 15 +- synapse/http/additional_resource.py | 6 +- synapse/http/client.py | 21 +- .../federation/matrix_federation_agent.py | 4 +- synapse/http/federation/srv_resolver.py | 12 +- .../http/federation/well_known_resolver.py | 8 +- synapse/http/matrixfederationclient.py | 19 +- synapse/http/proxy.py | 8 +- synapse/http/proxyagent.py | 12 +- synapse/http/replicationagent.py | 6 +- synapse/http/request_metrics.py | 8 +- synapse/http/server.py | 15 +- synapse/http/servlet.py | 33 ++- synapse/http/site.py | 6 +- synapse/logging/_remote.py | 6 +- synapse/logging/context.py | 10 +- synapse/logging/formatter.py | 6 +- synapse/logging/opentracing.py | 37 ++- synapse/media/_base.py | 10 +- synapse/media/filepath.py | 10 +- synapse/media/media_repository.py | 30 +-- synapse/media/media_storage.py | 9 +- synapse/media/oembed.py | 10 +- synapse/media/preview_html.py | 29 +-- synapse/media/thumbnailer.py | 24 +- synapse/media/url_previewer.py | 4 +- synapse/metrics/__init__.py | 24 +- synapse/metrics/background_process_metrics.py | 9 +- synapse/module_api/__init__.py | 43 ++-- .../callbacks/account_validity_callbacks.py | 10 +- .../callbacks/media_repository_callbacks.py | 16 +- .../callbacks/ratelimit_callbacks.py | 4 +- .../callbacks/spamchecker_callbacks.py | 88 ++++--- .../third_party_event_rules_callbacks.py | 32 +-- synapse/notifier.py | 44 ++-- synapse/push/__init__.py | 4 +- 
synapse/push/bulk_push_rule_evaluator.py | 30 +-- synapse/push/clientformat.py | 14 +- synapse/push/emailpusher.py | 6 +- synapse/push/httppusher.py | 8 +- synapse/push/mailer.py | 24 +- synapse/push/presentable_names.py | 6 +- synapse/push/push_tools.py | 5 +- synapse/push/push_types.py | 8 +- synapse/push/pusher.py | 6 +- synapse/push/pusherpool.py | 4 +- synapse/replication/http/_base.py | 10 +- synapse/replication/http/account_data.py | 14 +- .../replication/http/deactivate_account.py | 4 +- synapse/replication/http/delayed_events.py | 4 +- synapse/replication/http/devices.py | 20 +- synapse/replication/http/federation.py | 14 +- synapse/replication/http/login.py | 4 +- synapse/replication/http/membership.py | 16 +- synapse/replication/http/presence.py | 6 +- synapse/replication/http/push.py | 8 +- synapse/replication/http/register.py | 6 +- synapse/replication/http/send_events.py | 8 +- synapse/replication/http/state.py | 4 +- synapse/replication/http/streams.py | 4 +- synapse/replication/tcp/client.py | 10 +- synapse/replication/tcp/commands.py | 26 +- synapse/replication/tcp/handler.py | 32 ++- synapse/replication/tcp/protocol.py | 6 +- synapse/replication/tcp/redis.py | 10 +- synapse/replication/tcp/resource.py | 8 +- synapse/replication/tcp/streams/_base.py | 12 +- synapse/replication/tcp/streams/events.py | 16 +- synapse/replication/tcp/streams/federation.py | 6 +- synapse/rest/__init__.py | 8 +- synapse/rest/admin/__init__.py | 8 +- synapse/rest/admin/background_updates.py | 10 +- synapse/rest/admin/devices.py | 14 +- synapse/rest/admin/event_reports.py | 8 +- synapse/rest/admin/events.py | 4 +- synapse/rest/admin/experimental_features.py | 6 +- synapse/rest/admin/federation.py | 10 +- synapse/rest/admin/media.py | 28 +-- synapse/rest/admin/registration_tokens.py | 12 +- synapse/rest/admin/rooms.py | 40 ++-- synapse/rest/admin/scheduled_tasks.py | 4 +- synapse/rest/admin/server_notice_servlet.py | 8 +- synapse/rest/admin/statistics.py | 6 +- synapse/rest/admin/username_available.py | 4 +- synapse/rest/admin/users.py | 70 +++--- synapse/rest/client/_base.py | 6 +- synapse/rest/client/account.py | 32 +-- synapse/rest/client/account_data.py | 14 +- synapse/rest/client/account_validity.py | 4 +- synapse/rest/client/appservice_ping.py | 6 +- synapse/rest/client/auth_metadata.py | 6 +- synapse/rest/client/capabilities.py | 4 +- synapse/rest/client/delayed_events.py | 6 +- synapse/rest/client/devices.py | 28 +-- synapse/rest/client/directory.py | 20 +- synapse/rest/client/events.py | 8 +- synapse/rest/client/filter.py | 6 +- synapse/rest/client/initial_sync.py | 6 +- synapse/rest/client/keys.py | 22 +- synapse/rest/client/knock.py | 6 +- synapse/rest/client/login.py | 19 +- synapse/rest/client/login_token_request.py | 4 +- synapse/rest/client/logout.py | 6 +- synapse/rest/client/matrixrtc.py | 4 +- synapse/rest/client/mutual_rooms.py | 6 +- synapse/rest/client/notifications.py | 4 +- synapse/rest/client/openid.py | 4 +- synapse/rest/client/password_policy.py | 4 +- synapse/rest/client/presence.py | 6 +- synapse/rest/client/profile.py | 10 +- synapse/rest/client/push_rule.py | 18 +- synapse/rest/client/pusher.py | 6 +- synapse/rest/client/read_marker.py | 4 +- synapse/rest/client/receipts.py | 4 +- synapse/rest/client/register.py | 18 +- synapse/rest/client/relations.py | 6 +- synapse/rest/client/reporting.py | 8 +- synapse/rest/client/room.py | 84 +++---- synapse/rest/client/room_keys.py | 18 +- .../rest/client/room_upgrade_rest_servlet.py | 4 +- synapse/rest/client/sendtodevice.py | 6 
+- synapse/rest/client/sync.py | 22 +- synapse/rest/client/tags.py | 8 +- synapse/rest/client/thirdparty.py | 14 +- synapse/rest/client/thread_subscriptions.py | 14 +- synapse/rest/client/transactions.py | 10 +- synapse/rest/client/user_directory.py | 4 +- synapse/rest/client/versions.py | 4 +- synapse/rest/client/voip.py | 4 +- synapse/rest/consent/consent_resource.py | 6 +- synapse/rest/key/v2/local_key_resource.py | 4 +- synapse/rest/key/v2/remote_key_resource.py | 16 +- synapse/rest/media/upload_resource.py | 6 +- .../synapse/client/federation_whitelist.py | 4 +- synapse/rest/synapse/client/jwks.py | 4 +- synapse/rest/synapse/client/password_reset.py | 6 +- synapse/rest/synapse/client/pick_username.py | 6 +- synapse/rest/synapse/client/rendezvous.py | 8 +- synapse/rest/synapse/mas/devices.py | 10 +- synapse/rest/synapse/mas/users.py | 18 +- synapse/rest/well_known.py | 4 +- synapse/server.py | 20 +- .../server_notices/consent_server_notices.py | 4 +- .../resource_limits_server_notices.py | 8 +- synapse/state/__init__.py | 28 +-- synapse/state/v1.py | 30 +-- synapse/state/v2.py | 86 ++++--- synapse/storage/_base.py | 4 +- synapse/storage/background_updates.py | 18 +- synapse/storage/controllers/persist_events.py | 59 +++-- synapse/storage/controllers/purge_events.py | 5 +- synapse/storage/controllers/state.py | 28 +-- synapse/storage/controllers/stats.py | 4 +- synapse/storage/database.py | 202 ++++++++-------- synapse/storage/databases/__init__.py | 6 +- synapse/storage/databases/main/__init__.py | 22 +- .../storage/databases/main/account_data.py | 46 ++-- synapse/storage/databases/main/appservice.py | 20 +- synapse/storage/databases/main/cache.py | 14 +- synapse/storage/databases/main/client_ips.py | 37 ++- .../storage/databases/main/delayed_events.py | 18 +- synapse/storage/databases/main/deviceinbox.py | 54 ++--- synapse/storage/databases/main/devices.py | 126 +++++----- synapse/storage/databases/main/directory.py | 6 +- .../storage/databases/main/e2e_room_keys.py | 27 +-- .../storage/databases/main/end_to_end_keys.py | 134 +++++------ .../databases/main/event_federation.py | 225 +++++++++--------- .../databases/main/event_push_actions.py | 69 +++--- synapse/storage/databases/main/events.py | 160 ++++++------- .../databases/main/events_bg_updates.py | 64 ++--- .../main/events_forward_extremities.py | 8 +- .../storage/databases/main/events_worker.py | 150 ++++++------ .../databases/main/experimental_features.py | 8 +- synapse/storage/databases/main/filtering.py | 4 +- synapse/storage/databases/main/keys.py | 20 +- synapse/storage/databases/main/lock.py | 14 +- .../databases/main/media_repository.py | 40 ++-- synapse/storage/databases/main/metrics.py | 26 +- .../databases/main/monthly_active_users.py | 20 +- synapse/storage/databases/main/presence.py | 27 +-- synapse/storage/databases/main/profile.py | 10 +- .../storage/databases/main/purge_events.py | 10 +- synapse/storage/databases/main/push_rule.py | 35 ++- synapse/storage/databases/main/pusher.py | 29 +-- synapse/storage/databases/main/receipts.py | 68 +++--- .../storage/databases/main/registration.py | 68 +++--- synapse/storage/databases/main/relations.py | 77 +++--- synapse/storage/databases/main/room.py | 98 ++++---- synapse/storage/databases/main/roommember.py | 109 ++++----- synapse/storage/databases/main/search.py | 35 ++- synapse/storage/databases/main/signatures.py | 6 +- .../storage/databases/main/sliding_sync.py | 18 +- synapse/storage/databases/main/state.py | 31 +-- .../storage/databases/main/state_deltas.py | 14 +- 
synapse/storage/databases/main/stats.py | 47 ++-- synapse/storage/databases/main/stream.py | 118 +++++---- synapse/storage/databases/main/tags.py | 20 +- .../storage/databases/main/task_scheduler.py | 18 +- .../databases/main/thread_subscriptions.py | 15 +- .../storage/databases/main/transactions.py | 42 ++-- synapse/storage/databases/main/ui_auth.py | 16 +- .../storage/databases/main/user_directory.py | 51 ++-- .../databases/main/user_erasure_store.py | 4 +- synapse/storage/databases/state/bg_updates.py | 17 +- synapse/storage/databases/state/deletion.py | 10 +- synapse/storage/databases/state/store.py | 38 ++- synapse/storage/engines/postgres.py | 6 +- synapse/storage/engines/sqlite.py | 4 +- synapse/storage/prepare_database.py | 8 +- synapse/storage/roommember.py | 4 +- .../storage/schema/main/delta/30/as_users.py | 6 +- synapse/storage/types.py | 33 ++- synapse/storage/util/id_generators.py | 43 ++-- .../util/partial_state_events_tracker.py | 8 +- synapse/storage/util/sequence.py | 8 +- synapse/streams/__init__.py | 4 +- synapse/streams/events.py | 4 +- synapse/synapse_rust/acl.pyi | 4 +- synapse/synapse_rust/events.pyi | 4 +- synapse/synapse_rust/push.pyi | 8 +- synapse/synapse_rust/segmenter.pyi | 4 +- synapse/types/__init__.py | 43 ++-- synapse/types/handlers/__init__.py | 8 +- synapse/types/handlers/sliding_sync.py | 24 +- synapse/types/rest/client/__init__.py | 42 ++-- synapse/types/state.py | 42 ++-- synapse/util/__init__.py | 6 +- synapse/util/async_helpers.py | 72 +++--- synapse/util/batching_queue.py | 10 +- synapse/util/caches/__init__.py | 6 +- synapse/util/caches/deferred_cache.py | 15 +- synapse/util/caches/descriptors.py | 28 +-- synapse/util/caches/dictionary_cache.py | 15 +- synapse/util/caches/lrucache.py | 23 +- synapse/util/caches/response_cache.py | 3 +- synapse/util/caches/stream_change_cache.py | 14 +- synapse/util/caches/ttlcache.py | 6 +- synapse/util/clock.py | 6 +- synapse/util/daemonize.py | 4 +- synapse/util/distributor.py | 10 +- synapse/util/events.py | 6 +- synapse/util/gai_resolver.py | 11 +- synapse/util/httpresourcetree.py | 5 +- synapse/util/iterutils.py | 12 +- synapse/util/json.py | 3 +- synapse/util/linked_list.py | 6 +- synapse/util/manhole.py | 4 +- synapse/util/metrics.py | 8 +- synapse/util/module_loader.py | 4 +- synapse/util/patch_inline_callbacks.py | 6 +- synapse/util/ratelimitutils.py | 17 +- synapse/util/retryutils.py | 4 +- synapse/util/stringutils.py | 8 +- synapse/util/task_scheduler.py | 16 +- synapse/util/wheel_timer.py | 10 +- synapse/visibility.py | 27 +-- synmark/__main__.py | 4 +- tests/api/test_filtering.py | 11 +- tests/app/test_openid_listener.py | 5 +- tests/appservice/test_api.py | 6 +- tests/appservice/test_scheduler.py | 8 +- tests/config/utils.py | 3 +- tests/crypto/test_keyring.py | 30 +-- tests/events/test_auto_accept_invites.py | 8 +- tests/events/test_presence_router.py | 14 +- tests/events/test_utils.py | 4 +- tests/federation/test_federation_catch_up.py | 12 +- .../test_federation_out_of_band_membership.py | 8 +- tests/federation/test_federation_sender.py | 10 +- .../federation/transport/server/test__base.py | 9 +- tests/federation/transport/test_client.py | 4 +- tests/federation/transport/test_knocking.py | 6 +- tests/handlers/test_appservice.py | 10 +- tests/handlers/test_cas.py | 4 +- tests/handlers/test_directory.py | 8 +- tests/handlers/test_e2e_keys.py | 6 +- tests/handlers/test_message.py | 5 +- tests/handlers/test_oauth_delegation.py | 10 +- tests/handlers/test_oidc.py | 8 +- 
tests/handlers/test_password_providers.py | 10 +- tests/handlers/test_profile.py | 6 +- tests/handlers/test_receipts.py | 3 +- tests/handlers/test_register.py | 20 +- tests/handlers/test_room_list.py | 6 +- tests/handlers/test_room_summary.py | 24 +- tests/handlers/test_saml.py | 8 +- tests/handlers/test_send_email.py | 14 +- tests/handlers/test_sliding_sync.py | 16 +- tests/handlers/test_sso.py | 4 +- tests/handlers/test_stats.py | 8 +- tests/handlers/test_sync.py | 6 +- tests/handlers/test_typing.py | 9 +- tests/handlers/test_user_directory.py | 4 +- tests/http/__init__.py | 7 +- .../test_matrix_federation_agent.py | 4 +- tests/http/federation/test_srv_resolver.py | 28 +-- tests/http/server/_base.py | 20 +- tests/http/test_client.py | 6 +- tests/http/test_matrixfederationclient.py | 4 +- tests/http/test_proxy.py | 3 +- tests/http/test_proxyagent.py | 4 +- tests/http/test_servlet.py | 6 +- tests/logging/test_remote_handler.py | 3 +- tests/media/test_media_storage.py | 36 +-- tests/metrics/test_metrics.py | 10 +- tests/module_api/test_api.py | 4 +- tests/push/test_email.py | 8 +- tests/push/test_http.py | 10 +- tests/push/test_presentable_names.py | 16 +- tests/push/test_push_rule_evaluator.py | 10 +- tests/replication/_base.py | 12 +- tests/replication/http/test__base.py | 5 +- tests/replication/storage/test_events.py | 10 +- tests/replication/tcp/streams/test_events.py | 8 +- tests/replication/test_multi_media_repo.py | 6 +- tests/rest/admin/test_admin.py | 4 +- tests/rest/admin/test_event_reports.py | 3 +- tests/rest/admin/test_federation.py | 10 +- tests/rest/admin/test_jwks.py | 3 +- tests/rest/admin/test_media.py | 3 +- tests/rest/admin/test_room.py | 4 +- tests/rest/admin/test_scheduled_tasks.py | 8 +- tests/rest/admin/test_server_notice.py | 4 +- tests/rest/admin/test_statistics.py | 8 +- tests/rest/admin/test_user.py | 18 +- .../test_extension_thread_subscriptions.py | 6 +- .../sliding_sync/test_extension_to_device.py | 3 +- .../sliding_sync/test_rooms_timeline.py | 10 +- .../client/sliding_sync/test_sliding_sync.py | 8 +- tests/rest/client/test_account.py | 14 +- tests/rest/client/test_auth.py | 20 +- tests/rest/client/test_delayed_events.py | 3 +- tests/rest/client/test_login.py | 37 ++- tests/rest/client/test_media.py | 38 +-- tests/rest/client/test_notifications.py | 4 +- tests/rest/client/test_profile.py | 4 +- tests/rest/client/test_redactions.py | 6 +- tests/rest/client/test_register.py | 10 +- tests/rest/client/test_relations.py | 22 +- tests/rest/client/test_rendezvous.py | 3 +- tests/rest/client/test_retention.py | 4 +- tests/rest/client/test_rooms.py | 30 +-- tests/rest/client/test_sync.py | 3 +- tests/rest/client/test_third_party_rules.py | 22 +- tests/rest/client/test_transactions.py | 8 +- tests/rest/client/utils.py | 32 ++- tests/rest/key/v2/test_remote_key_resource.py | 4 +- tests/rest/media/test_domain_blocking.py | 3 +- tests/rest/media/test_url_preview.py | 10 +- .../client/test_federation_whitelist.py | 3 +- tests/scripts/test_new_matrix_user.py | 14 +- tests/server.py | 29 +-- .../test_resource_limits_server_notices.py | 3 +- tests/state/test_v2.py | 36 ++- tests/state/test_v21.py | 16 +- .../databases/main/test_end_to_end_keys.py | 4 +- .../databases/main/test_events_worker.py | 10 +- tests/storage/databases/main/test_receipts.py | 6 +- tests/storage/test__base.py | 6 +- tests/storage/test_account_data.py | 6 +- tests/storage/test_appservice.py | 10 +- tests/storage/test_background_update.py | 6 +- tests/storage/test_client_ips.py | 22 +- 
tests/storage/test_database.py | 4 +- tests/storage/test_devices.py | 6 +- tests/storage/test_event_chain.py | 14 +- tests/storage/test_event_federation.py | 51 ++-- tests/storage/test_event_push_actions.py | 4 +- tests/storage/test_events.py | 8 +- tests/storage/test_events_bg_updates.py | 5 +- tests/storage/test_id_generators.py | 8 +- tests/storage/test_monthly_active_users.py | 6 +- tests/storage/test_redaction.py | 6 +- tests/storage/test_rollback_worker.py | 3 +- tests/storage/test_room_search.py | 3 +- tests/storage/test_roommember.py | 8 +- tests/storage/test_sliding_sync_tables.py | 16 +- tests/storage/test_state.py | 8 +- tests/storage/test_stream.py | 5 +- tests/storage/test_user_directory.py | 22 +- .../util/test_partial_state_events_tracker.py | 12 +- tests/test_event_auth.py | 12 +- tests/test_mau.py | 4 +- tests/test_server.py | 14 +- tests/test_state.py | 42 ++-- tests/test_types.py | 3 +- tests/test_utils/__init__.py | 4 +- tests/test_utils/event_injection.py | 8 +- tests/test_utils/html_parsers.py | 10 +- tests/test_utils/oidc.py | 16 +- tests/unittest.py | 28 +-- tests/util/caches/test_deferred_cache.py | 5 +- tests/util/caches/test_descriptors.py | 17 +- tests/util/test_async_helpers.py | 8 +- tests/util/test_batching_queue.py | 5 +- tests/util/test_expiring_cache.py | 3 +- tests/util/test_itertools.py | 34 +-- tests/util/test_linearizer.py | 4 +- tests/util/test_lrucache.py | 9 +- tests/util/test_mutable_overlay_mapping.py | 3 +- tests/util/test_rwlock.py | 12 +- tests/util/test_task_scheduler.py | 14 +- tests/utils.py | 10 +- 539 files changed, 4599 insertions(+), 5066 deletions(-) create mode 100644 changelog.d/19046.misc diff --git a/build_rust.py b/build_rust.py index 5c796af461..af7bd2fdc5 100644 --- a/build_rust.py +++ b/build_rust.py @@ -2,13 +2,13 @@ import itertools import os -from typing import Any, Dict +from typing import Any from packaging.specifiers import SpecifierSet from setuptools_rust import Binding, RustExtension -def build(setup_kwargs: Dict[str, Any]) -> None: +def build(setup_kwargs: dict[str, Any]) -> None: original_project_dir = os.path.dirname(os.path.realpath(__file__)) cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml") diff --git a/changelog.d/19046.misc b/changelog.d/19046.misc new file mode 100644 index 0000000000..4013804f7f --- /dev/null +++ b/changelog.d/19046.misc @@ -0,0 +1 @@ +Use type hinting generics in standard collections, as per PEP 585, added in Python 3.9. diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py index 9d5f3c7f4f..2898bb3448 100644 --- a/contrib/graph/graph.py +++ b/contrib/graph/graph.py @@ -24,7 +24,6 @@ import datetime import html import json import urllib.request -from typing import List import pydot @@ -33,7 +32,7 @@ def make_name(pdu_id: str, origin: str) -> str: return f"{pdu_id}@{origin}" -def make_graph(pdus: List[dict], filename_prefix: str) -> None: +def make_graph(pdus: list[dict], filename_prefix: str) -> None: """ Generate a dot and SVG file for a graph of events in the room based on the topological ordering by querying a homeserver. 
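# An illustrative sketch (not part of this patch, hypothetical function names): the
# changelog entry above refers to PEP 585, under which the built-in collection types
# accept subscripts directly on Python 3.9+, so the typing.List/Dict/Set/Tuple
# aliases become unnecessary. A minimal before/after comparison:
from typing import Dict, List


def count_tokens_old(lines: List[str]) -> Dict[str, int]:
    # Pre-PEP 585 style: generic aliases imported from typing.
    counts: Dict[str, int] = {}
    for line in lines:
        for token in line.split():
            counts[token] = counts.get(token, 0) + 1
    return counts


def count_tokens_new(lines: list[str]) -> dict[str, int]:
    # PEP 585 style: subscript the built-in types themselves.
    counts: dict[str, int] = {}
    for line in lines:
        for token in line.split():
            counts[token] = counts.get(token, 0) + 1
    return counts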
@@ -127,7 +126,7 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None: graph.write_svg("%s.svg" % filename_prefix, prog="dot") -def get_pdus(host: str, room: str) -> List[dict]: +def get_pdus(host: str, room: str) -> list[dict]: transaction = json.loads( urllib.request.urlopen( f"http://{host}/_matrix/federation/v1/context/{room}/" diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 6f25653bb7..2451d1f300 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -65,13 +65,10 @@ from itertools import chain from pathlib import Path from typing import ( Any, - Dict, - List, Mapping, MutableMapping, NoReturn, Optional, - Set, SupportsIndex, ) @@ -96,7 +93,7 @@ WORKER_PLACEHOLDER_NAME = "placeholder_name" # Watching /_matrix/media and related needs a "media" listener # Stream Writers require "client" and "replication" listeners because they # have to attach by instance_map to the master process and have client endpoints. -WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { +WORKERS_CONFIG: dict[str, dict[str, Any]] = { "pusher": { "app": "synapse.app.generic_worker", "listener_resources": [], @@ -408,7 +405,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None: def add_worker_roles_to_shared_config( shared_config: dict, - worker_types_set: Set[str], + worker_types_set: set[str], worker_name: str, worker_port: int, ) -> None: @@ -471,9 +468,9 @@ def add_worker_roles_to_shared_config( def merge_worker_template_configs( - existing_dict: Optional[Dict[str, Any]], - to_be_merged_dict: Dict[str, Any], -) -> Dict[str, Any]: + existing_dict: Optional[dict[str, Any]], + to_be_merged_dict: dict[str, Any], +) -> dict[str, Any]: """When given an existing dict of worker template configuration consisting with both dicts and lists, merge new template data from WORKERS_CONFIG(or create) and return new dict. @@ -484,7 +481,7 @@ def merge_worker_template_configs( existing_dict. Returns: The newly merged together dict values. """ - new_dict: Dict[str, Any] = {} + new_dict: dict[str, Any] = {} if not existing_dict: # It doesn't exist yet, just use the new dict(but take a copy not a reference) new_dict = to_be_merged_dict.copy() @@ -509,8 +506,8 @@ def merge_worker_template_configs( def insert_worker_name_for_worker_config( - existing_dict: Dict[str, Any], worker_name: str -) -> Dict[str, Any]: + existing_dict: dict[str, Any], worker_name: str +) -> dict[str, Any]: """Insert a given worker name into the worker's configuration dict. Args: @@ -526,7 +523,7 @@ def insert_worker_name_for_worker_config( return dict_to_edit -def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]: +def apply_requested_multiplier_for_worker(worker_types: list[str]) -> list[str]: """ Apply multiplier(if found) by returning a new expanded list with some basic error checking. @@ -587,7 +584,7 @@ def is_sharding_allowed_for_worker_type(worker_type: str) -> bool: def split_and_strip_string( given_string: str, split_char: str, max_split: SupportsIndex = -1 -) -> List[str]: +) -> list[str]: """ Helper to split a string on split_char and strip whitespace from each end of each element. 
@@ -616,8 +613,8 @@ def generate_base_homeserver_config() -> None: def parse_worker_types( - requested_worker_types: List[str], -) -> Dict[str, Set[str]]: + requested_worker_types: list[str], +) -> dict[str, set[str]]: """Read the desired list of requested workers and prepare the data for use in generating worker config files while also checking for potential gotchas. @@ -633,14 +630,14 @@ def parse_worker_types( # A counter of worker_base_name -> int. Used for determining the name for a given # worker when generating its config file, as each worker's name is just # worker_base_name followed by instance number - worker_base_name_counter: Dict[str, int] = defaultdict(int) + worker_base_name_counter: dict[str, int] = defaultdict(int) # Similar to above, but more finely grained. This is used to determine we don't have # more than a single worker for cases where multiples would be bad(e.g. presence). - worker_type_shard_counter: Dict[str, int] = defaultdict(int) + worker_type_shard_counter: dict[str, int] = defaultdict(int) # The final result of all this processing - dict_to_return: Dict[str, Set[str]] = {} + dict_to_return: dict[str, set[str]] = {} # Handle any multipliers requested for given workers. multiple_processed_worker_types = apply_requested_multiplier_for_worker( @@ -684,7 +681,7 @@ def parse_worker_types( # Split the worker_type_string on "+", remove whitespace from ends then make # the list a set so it's deduplicated. - worker_types_set: Set[str] = set( + worker_types_set: set[str] = set( split_and_strip_string(worker_type_string, "+") ) @@ -743,7 +740,7 @@ def generate_worker_files( environ: Mapping[str, str], config_path: str, data_dir: str, - requested_worker_types: Dict[str, Set[str]], + requested_worker_types: dict[str, set[str]], ) -> None: """Read the desired workers(if any) that is passed in and generate shared homeserver, nginx and supervisord configs. @@ -764,7 +761,7 @@ def generate_worker_files( # First read the original config file and extract the listeners block. Then we'll # add another listener for replication. Later we'll write out the result to the # shared config file. - listeners: List[Any] + listeners: list[Any] if using_unix_sockets: listeners = [ { @@ -792,12 +789,12 @@ def generate_worker_files( # base shared worker jinja2 template. This config file will be passed to all # workers, included Synapse's main process. It is intended mainly for disabling # functionality when certain workers are spun up, and adding a replication listener. - shared_config: Dict[str, Any] = {"listeners": listeners} + shared_config: dict[str, Any] = {"listeners": listeners} # List of dicts that describe workers. # We pass this to the Supervisor template later to generate the appropriate # program blocks. - worker_descriptors: List[Dict[str, Any]] = [] + worker_descriptors: list[dict[str, Any]] = [] # Upstreams for load-balancing purposes. This dict takes the form of the worker # type to the ports of each worker. For example: @@ -805,14 +802,14 @@ def generate_worker_files( # worker_type: {1234, 1235, ...}} # } # and will be used to construct 'upstream' nginx directives. - nginx_upstreams: Dict[str, Set[int]] = {} + nginx_upstreams: dict[str, set[int]] = {} # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what # will be placed after the proxy_pass directive. The main benefit to representing # this data as a dict over a str is that we can easily deduplicate endpoints # across multiple instances of the same worker. 
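# An illustrative, standalone sketch (my own, not the patch's code) of the behaviour
# that the split_and_strip_string docstring above describes: split on a separator,
# then trim whitespace from each resulting element.
def split_and_strip(given_string: str, split_char: str, max_split: int = -1) -> list[str]:
    return [piece.strip() for piece in given_string.split(split_char, max_split)]


# Example: split_and_strip(" pusher, federation_sender , synchrotron ", ",")
# returns ["pusher", "federation_sender", "synchrotron"].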
The final rendering will be combined # with nginx_upstreams and placed in /etc/nginx/conf.d. - nginx_locations: Dict[str, str] = {} + nginx_locations: dict[str, str] = {} # Create the worker configuration directory if it doesn't already exist os.makedirs("/conf/workers", exist_ok=True) @@ -846,7 +843,7 @@ def generate_worker_files( # yaml config file for worker_name, worker_types_set in requested_worker_types.items(): # The collected and processed data will live here. - worker_config: Dict[str, Any] = {} + worker_config: dict[str, Any] = {} # Merge all worker config templates for this worker into a single config for worker_type in worker_types_set: @@ -1029,7 +1026,7 @@ def generate_worker_log_config( Returns: the path to the generated file """ # Check whether we should write worker logs to disk, in addition to the console - extra_log_template_args: Dict[str, Optional[str]] = {} + extra_log_template_args: dict[str, Optional[str]] = {} if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"): extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log" @@ -1053,7 +1050,7 @@ def generate_worker_log_config( return log_config_filepath -def main(args: List[str], environ: MutableMapping[str, str]) -> None: +def main(args: list[str], environ: MutableMapping[str, str]) -> None: parser = ArgumentParser() parser.add_argument( "--generate-only", @@ -1087,7 +1084,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None: if not worker_types_env: # No workers, just the main process worker_types = [] - requested_worker_types: Dict[str, Any] = {} + requested_worker_types: dict[str, Any] = {} else: # Split type names by comma, ignoring whitespace. worker_types = split_and_strip_string(worker_types_env, ",") diff --git a/docker/start.py b/docker/start.py index 0be9976a0c..daa041d463 100755 --- a/docker/start.py +++ b/docker/start.py @@ -6,7 +6,7 @@ import os import platform import subprocess import sys -from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional +from typing import Any, Mapping, MutableMapping, NoReturn, Optional import jinja2 @@ -69,7 +69,7 @@ def generate_config_from_template( ) # populate some params from data files (if they exist, else create new ones) - environ: Dict[str, Any] = dict(os_environ) + environ: dict[str, Any] = dict(os_environ) secrets = { "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET", "macaroon": "SYNAPSE_MACAROON_SECRET_KEY", @@ -200,7 +200,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> subprocess.run(args, check=True) -def main(args: List[str], environ: MutableMapping[str, str]) -> None: +def main(args: list[str], environ: MutableMapping[str, str]) -> None: mode = args[1] if len(args) > 1 else "run" # if we were given an explicit user to switch to, do so diff --git a/pyproject.toml b/pyproject.toml index ee7016b1d0..b0cb355c52 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,6 +78,12 @@ select = [ "LOG", # flake8-logging-format "G", + # pyupgrade + "UP006", +] +extend-safe-fixes = [ + # pyupgrade + "UP006" ] [tool.ruff.lint.isort] diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index 6150dc15a3..f94c5a37fc 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -18,7 +18,7 @@ import sys import threading from concurrent.futures import ThreadPoolExecutor from types import FrameType -from typing import Collection, Optional, Sequence, Set +from typing import Collection, Optional, Sequence # 
These are expanded inside the dockerfile to be a fully qualified image name. # e.g. docker.io/library/debian:bullseye @@ -54,7 +54,7 @@ class Builder: ): self.redirect_stdout = redirect_stdout self._docker_build_args = tuple(docker_build_args or ()) - self.active_containers: Set[str] = set() + self.active_containers: set[str] = set() self._lock = threading.Lock() self._failed = False diff --git a/scripts-dev/check_locked_deps_have_sdists.py b/scripts-dev/check_locked_deps_have_sdists.py index cabe3b8de1..f035ecb644 100755 --- a/scripts-dev/check_locked_deps_have_sdists.py +++ b/scripts-dev/check_locked_deps_have_sdists.py @@ -21,7 +21,6 @@ # import sys from pathlib import Path -from typing import Dict, List import tomli @@ -33,7 +32,7 @@ def main() -> None: # Poetry 1.3+ lockfile format: # There's a `files` inline table in each [[package]] - packages_to_assets: Dict[str, List[Dict[str, str]]] = { + packages_to_assets: dict[str, list[dict[str, str]]] = { package["name"]: package["files"] for package in lockfile_content["package"] } diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py index 26a473a61b..69c49e258d 100755 --- a/scripts-dev/check_pydantic_models.py +++ b/scripts-dev/check_pydantic_models.py @@ -47,11 +47,7 @@ from contextlib import contextmanager from typing import ( Any, Callable, - Dict, Generator, - List, - Set, - Type, TypeVar, ) @@ -69,7 +65,7 @@ from synapse._pydantic_compat import ( logger = logging.getLogger(__name__) -CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: List[Callable] = [ +CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: list[Callable] = [ constr, conbytes, conint, @@ -145,7 +141,7 @@ class PatchedBaseModel(PydanticBaseModel): """ @classmethod - def __init_subclass__(cls: Type[PydanticBaseModel], **kwargs: object): + def __init_subclass__(cls: type[PydanticBaseModel], **kwargs: object): for field in cls.__fields__.values(): # Note that field.type_ and field.outer_type are computed based on the # annotation type, see pydantic.fields.ModelField._type_analysis @@ -212,7 +208,7 @@ def lint() -> int: return os.EX_DATAERR if failures else os.EX_OK -def do_lint() -> Set[str]: +def do_lint() -> set[str]: """Try to import all of Synapse and see if we spot any Pydantic type coercions.""" failures = set() @@ -258,8 +254,8 @@ def run_test_snippet(source: str) -> None: # > Remember that at the module level, globals and locals are the same dictionary. # > If exec gets two separate objects as globals and locals, the code will be # > executed as if it were embedded in a class definition. 
- globals_: Dict[str, object] - locals_: Dict[str, object] + globals_: dict[str, object] + locals_: dict[str, object] globals_ = locals_ = {} exec(textwrap.dedent(source), globals_, locals_) @@ -394,10 +390,10 @@ class TestFieldTypeInspection(unittest.TestCase): ("bool"), ("Optional[str]",), ("Union[None, str]",), - ("List[str]",), - ("List[List[str]]",), - ("Dict[StrictStr, str]",), - ("Dict[str, StrictStr]",), + ("list[str]",), + ("list[list[str]]",), + ("dict[StrictStr, str]",), + ("dict[str, StrictStr]",), ("TypedDict('D', x=int)",), ] ) @@ -425,9 +421,9 @@ class TestFieldTypeInspection(unittest.TestCase): ("constr(strict=True, min_length=10)",), ("Optional[StrictStr]",), ("Union[None, StrictStr]",), - ("List[StrictStr]",), - ("List[List[StrictStr]]",), - ("Dict[StrictStr, StrictStr]",), + ("list[StrictStr]",), + ("list[list[StrictStr]]",), + ("dict[StrictStr, StrictStr]",), ("TypedDict('D', x=StrictInt)",), ] ) diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py index 454784c3ae..7b2dec25d4 100755 --- a/scripts-dev/check_schema_delta.py +++ b/scripts-dev/check_schema_delta.py @@ -5,7 +5,7 @@ # Also checks that schema deltas do not try and create or drop indices. import re -from typing import Any, Dict, List +from typing import Any import click import git @@ -48,16 +48,16 @@ def main(force_colors: bool) -> None: r = repo.git.show(f"origin/{DEVELOP_BRANCH}:synapse/storage/schema/__init__.py") - locals: Dict[str, Any] = {} + locals: dict[str, Any] = {} exec(r, locals) current_schema_version = locals["SCHEMA_VERSION"] - diffs: List[git.Diff] = repo.remote().refs[DEVELOP_BRANCH].commit.diff(None) + diffs: list[git.Diff] = repo.remote().refs[DEVELOP_BRANCH].commit.diff(None) # Get the schema version of the local file to check against current schema on develop with open("synapse/storage/schema/__init__.py") as file: local_schema = file.read() - new_locals: Dict[str, Any] = {} + new_locals: dict[str, Any] = {} exec(local_schema, new_locals) local_schema_version = new_locals["SCHEMA_VERSION"] diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index fb879ef555..db8655c1ce 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -43,7 +43,7 @@ import argparse import base64 import json import sys -from typing import Any, Dict, Mapping, Optional, Tuple, Union +from typing import Any, Mapping, Optional, Union from urllib import parse as urlparse import requests @@ -147,7 +147,7 @@ def request( s = requests.Session() s.mount("matrix-federation://", MatrixConnectionAdapter()) - headers: Dict[str, str] = { + headers: dict[str, str] = { "Authorization": authorization_headers[0], } @@ -303,7 +303,7 @@ class MatrixConnectionAdapter(HTTPAdapter): request: PreparedRequest, verify: Optional[Union[bool, str]], proxies: Optional[Mapping[str, str]] = None, - cert: Optional[Union[Tuple[str, str], str]] = None, + cert: Optional[Union[tuple[str, str], str]] = None, ) -> HTTPConnectionPool: # overrides the get_connection_with_tls_context() method in the base class parsed = urlparse.urlsplit(request.url) @@ -326,7 +326,7 @@ class MatrixConnectionAdapter(HTTPAdapter): ) @staticmethod - def _lookup(server_name: str) -> Tuple[str, int, str]: + def _lookup(server_name: str) -> tuple[str, int, str]: """ Do an SRV lookup on a server name and return the host:port to connect to Given the server_name (after any .well-known lookup), return the host, port and diff --git a/scripts-dev/mypy_synapse_plugin.py 
b/scripts-dev/mypy_synapse_plugin.py index 0b854cdba5..830c4ac4ab 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -24,7 +24,7 @@ can crop up, e.g the cache descriptors. """ import enum -from typing import Callable, Mapping, Optional, Tuple, Type, Union +from typing import Callable, Mapping, Optional, Union import attr import mypy.types @@ -184,8 +184,8 @@ should be in the source code. # Unbound at this point because we don't know the mypy version yet. # This is set in the `plugin(...)` function below. -MypyPydanticPluginClass: Type[Plugin] -MypyZopePluginClass: Type[Plugin] +MypyPydanticPluginClass: type[Plugin] +MypyZopePluginClass: type[Plugin] class SynapsePlugin(Plugin): @@ -795,7 +795,7 @@ AT_CACHED_MUTABLE_RETURN = ErrorCode( def is_cacheable( rt: mypy.types.Type, signature: CallableType, verbose: bool -) -> Tuple[bool, Optional[str]]: +) -> tuple[bool, Optional[str]]: """ Check if a particular type is cachable. @@ -905,7 +905,7 @@ def is_cacheable( return False, f"Don't know how to handle {type(rt).__qualname__} return type" -def plugin(version: str) -> Type[SynapsePlugin]: +def plugin(version: str) -> type[SynapsePlugin]: global MypyPydanticPluginClass, MypyZopePluginClass # This is the entry point of the plugin, and lets us deal with the fact # that the mypy plugin interface is *not* stable by looking at the version diff --git a/scripts-dev/release.py b/scripts-dev/release.py index c5c72156cc..16f1fc5f2a 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -32,7 +32,7 @@ import time import urllib.request from os import path from tempfile import TemporaryDirectory -from typing import Any, List, Match, Optional, Union +from typing import Any, Match, Optional, Union import attr import click @@ -884,7 +884,7 @@ def get_changes_for_version(wanted_version: version.Version) -> str: start_line: int end_line: Optional[int] = None # Is none if its the last entry - headings: List[VersionSection] = [] + headings: list[VersionSection] = [] for i, token in enumerate(tokens): # We look for level 1 headings (h1 tags). if token.type != "heading_open" or token.tag != "h1": diff --git a/scripts-dev/schema_versions.py b/scripts-dev/schema_versions.py index 5a79a43355..cec58e177f 100755 --- a/scripts-dev/schema_versions.py +++ b/scripts-dev/schema_versions.py @@ -38,7 +38,7 @@ import io import json import sys from collections import defaultdict -from typing import Any, Dict, Iterator, Optional, Tuple +from typing import Any, Iterator, Optional import git from packaging import version @@ -57,7 +57,7 @@ SCHEMA_VERSION_FILES = ( OLDEST_SHOWN_VERSION = version.parse("v1.0") -def get_schema_versions(tag: git.Tag) -> Tuple[Optional[int], Optional[int]]: +def get_schema_versions(tag: git.Tag) -> tuple[Optional[int], Optional[int]]: """Get the schema and schema compat versions for a tag.""" schema_version = None schema_compat_version = None @@ -81,7 +81,7 @@ def get_schema_versions(tag: git.Tag) -> Tuple[Optional[int], Optional[int]]: # SCHEMA_COMPAT_VERSION is sometimes across multiple lines, the easist # thing to do is exec the code. Luckily it has only ever existed in # a file which imports nothing else from Synapse. 
- locals: Dict[str, Any] = {} + locals: dict[str, Any] = {} exec(schema_file.data_stream.read().decode("utf-8"), {}, locals) schema_version = locals["SCHEMA_VERSION"] schema_compat_version = locals.get("SCHEMA_COMPAT_VERSION") diff --git a/stubs/sortedcontainers/sorteddict.pyi b/stubs/sortedcontainers/sorteddict.pyi index 81f581b034..a0be3e6349 100644 --- a/stubs/sortedcontainers/sorteddict.pyi +++ b/stubs/sortedcontainers/sorteddict.pyi @@ -7,18 +7,14 @@ from __future__ import annotations from typing import ( Any, Callable, - Dict, Hashable, ItemsView, Iterable, Iterator, KeysView, - List, Mapping, Optional, Sequence, - Tuple, - Type, TypeVar, Union, ValuesView, @@ -35,14 +31,14 @@ _VT_co = TypeVar("_VT_co", covariant=True) _SD = TypeVar("_SD", bound=SortedDict) _Key = Callable[[_T], Any] -class SortedDict(Dict[_KT, _VT]): +class SortedDict(dict[_KT, _VT]): @overload def __init__(self, **kwargs: _VT) -> None: ... @overload def __init__(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... @overload def __init__( - self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT + self, __iterable: Iterable[tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... @overload def __init__(self, __key: _Key[_KT], **kwargs: _VT) -> None: ... @@ -52,7 +48,7 @@ class SortedDict(Dict[_KT, _VT]): ) -> None: ... @overload def __init__( - self, __key: _Key[_KT], __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT + self, __key: _Key[_KT], __iterable: Iterable[tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... @property def key(self) -> Optional[_Key[_KT]]: ... @@ -84,8 +80,8 @@ class SortedDict(Dict[_KT, _VT]): def pop(self, key: _KT) -> _VT: ... @overload def pop(self, key: _KT, default: _T = ...) -> Union[_VT, _T]: ... - def popitem(self, index: int = ...) -> Tuple[_KT, _VT]: ... - def peekitem(self, index: int = ...) -> Tuple[_KT, _VT]: ... + def popitem(self, index: int = ...) -> tuple[_KT, _VT]: ... + def peekitem(self, index: int = ...) -> tuple[_KT, _VT]: ... def setdefault(self, key: _KT, default: Optional[_VT] = ...) -> _VT: ... # Mypy now reports the first overload as an error, because typeshed widened the type # of `__map` to its internal `_typeshed.SupportsKeysAndGetItem` type in @@ -102,9 +98,9 @@ class SortedDict(Dict[_KT, _VT]): # def update(self, **kwargs: _VT) -> None: ... def __reduce__( self, - ) -> Tuple[ - Type[SortedDict[_KT, _VT]], - Tuple[Callable[[_KT], Any], List[Tuple[_KT, _VT]]], + ) -> tuple[ + type[SortedDict[_KT, _VT]], + tuple[Callable[[_KT], Any], list[tuple[_KT, _VT]]], ]: ... def __repr__(self) -> str: ... def _check(self) -> None: ... @@ -121,20 +117,20 @@ class SortedKeysView(KeysView[_KT_co], Sequence[_KT_co]): @overload def __getitem__(self, index: int) -> _KT_co: ... @overload - def __getitem__(self, index: slice) -> List[_KT_co]: ... + def __getitem__(self, index: slice) -> list[_KT_co]: ... def __delitem__(self, index: Union[int, slice]) -> None: ... -class SortedItemsView(ItemsView[_KT_co, _VT_co], Sequence[Tuple[_KT_co, _VT_co]]): - def __iter__(self) -> Iterator[Tuple[_KT_co, _VT_co]]: ... +class SortedItemsView(ItemsView[_KT_co, _VT_co], Sequence[tuple[_KT_co, _VT_co]]): + def __iter__(self) -> Iterator[tuple[_KT_co, _VT_co]]: ... @overload - def __getitem__(self, index: int) -> Tuple[_KT_co, _VT_co]: ... + def __getitem__(self, index: int) -> tuple[_KT_co, _VT_co]: ... @overload - def __getitem__(self, index: slice) -> List[Tuple[_KT_co, _VT_co]]: ... + def __getitem__(self, index: slice) -> list[tuple[_KT_co, _VT_co]]: ... 
def __delitem__(self, index: Union[int, slice]) -> None: ... class SortedValuesView(ValuesView[_VT_co], Sequence[_VT_co]): @overload def __getitem__(self, index: int) -> _VT_co: ... @overload - def __getitem__(self, index: slice) -> List[_VT_co]: ... + def __getitem__(self, index: slice) -> list[_VT_co]: ... def __delitem__(self, index: Union[int, slice]) -> None: ... diff --git a/stubs/sortedcontainers/sortedlist.pyi b/stubs/sortedcontainers/sortedlist.pyi index 0e745c0a79..25ceb74cc9 100644 --- a/stubs/sortedcontainers/sortedlist.pyi +++ b/stubs/sortedcontainers/sortedlist.pyi @@ -9,12 +9,9 @@ from typing import ( Callable, Iterable, Iterator, - List, MutableSequence, Optional, Sequence, - Tuple, - Type, TypeVar, Union, overload, @@ -37,11 +34,11 @@ class SortedList(MutableSequence[_T]): ): ... # NB: currently mypy does not honour return type, see mypy #3307 @overload - def __new__(cls: Type[_SL], iterable: None, key: None) -> _SL: ... + def __new__(cls: type[_SL], iterable: None, key: None) -> _SL: ... @overload - def __new__(cls: Type[_SL], iterable: None, key: _Key[_T]) -> SortedKeyList[_T]: ... + def __new__(cls: type[_SL], iterable: None, key: _Key[_T]) -> SortedKeyList[_T]: ... @overload - def __new__(cls: Type[_SL], iterable: Iterable[_T], key: None) -> _SL: ... + def __new__(cls: type[_SL], iterable: Iterable[_T], key: None) -> _SL: ... @overload def __new__(cls, iterable: Iterable[_T], key: _Key[_T]) -> SortedKeyList[_T]: ... @property @@ -64,11 +61,11 @@ class SortedList(MutableSequence[_T]): @overload def __getitem__(self, index: int) -> _T: ... @overload - def __getitem__(self, index: slice) -> List[_T]: ... + def __getitem__(self, index: slice) -> list[_T]: ... @overload def _getitem(self, index: int) -> _T: ... @overload - def _getitem(self, index: slice) -> List[_T]: ... + def _getitem(self, index: slice) -> list[_T]: ... @overload def __setitem__(self, index: int, value: _T) -> None: ... @overload @@ -95,7 +92,7 @@ class SortedList(MutableSequence[_T]): self, minimum: Optional[int] = ..., maximum: Optional[int] = ..., - inclusive: Tuple[bool, bool] = ..., + inclusive: tuple[bool, bool] = ..., reverse: bool = ..., ) -> Iterator[_T]: ... def bisect_left(self, value: _T) -> int: ... @@ -151,14 +148,14 @@ class SortedKeyList(SortedList[_T]): self, minimum: Optional[int] = ..., maximum: Optional[int] = ..., - inclusive: Tuple[bool, bool] = ..., + inclusive: tuple[bool, bool] = ..., reverse: bool = ..., ) -> Iterator[_T]: ... def irange_key( self, min_key: Optional[Any] = ..., max_key: Optional[Any] = ..., - inclusive: Tuple[bool, bool] = ..., + inclusive: tuple[bool, bool] = ..., reserve: bool = ..., ) -> Iterator[_T]: ... def bisect_left(self, value: _T) -> int: ... diff --git a/stubs/sortedcontainers/sortedset.pyi b/stubs/sortedcontainers/sortedset.pyi index 6db11eacbe..a3593ca579 100644 --- a/stubs/sortedcontainers/sortedset.pyi +++ b/stubs/sortedcontainers/sortedset.pyi @@ -10,13 +10,9 @@ from typing import ( Hashable, Iterable, Iterator, - List, MutableSet, Optional, Sequence, - Set, - Tuple, - Type, TypeVar, Union, overload, @@ -37,7 +33,7 @@ class SortedSet(MutableSet[_T], Sequence[_T]): ) -> None: ... @classmethod def _fromset( - cls, values: Set[_T], key: Optional[_Key[_T]] = ... + cls, values: set[_T], key: Optional[_Key[_T]] = ... ) -> SortedSet[_T]: ... @property def key(self) -> Optional[_Key[_T]]: ... @@ -45,7 +41,7 @@ class SortedSet(MutableSet[_T], Sequence[_T]): @overload def __getitem__(self, index: int) -> _T: ... 
@overload - def __getitem__(self, index: slice) -> List[_T]: ... + def __getitem__(self, index: slice) -> list[_T]: ... def __delitem__(self, index: Union[int, slice]) -> None: ... def __eq__(self, other: Any) -> bool: ... def __ne__(self, other: Any) -> bool: ... @@ -94,7 +90,7 @@ class SortedSet(MutableSet[_T], Sequence[_T]): def _update(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... def __reduce__( self, - ) -> Tuple[Type[SortedSet[_T]], Set[_T], Callable[[_T], Any]]: ... + ) -> tuple[type[SortedSet[_T]], set[_T], Callable[[_T], Any]]: ... def __repr__(self) -> str: ... def _check(self) -> None: ... def bisect_left(self, value: _T) -> int: ... @@ -109,7 +105,7 @@ class SortedSet(MutableSet[_T], Sequence[_T]): self, minimum: Optional[_T] = ..., maximum: Optional[_T] = ..., - inclusive: Tuple[bool, bool] = ..., + inclusive: tuple[bool, bool] = ..., reverse: bool = ..., ) -> Iterator[_T]: ... def index( diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index c9a4114b1e..d2539aa37d 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -15,7 +15,7 @@ """Contains *incomplete* type hints for txredisapi.""" -from typing import Any, List, Optional, Type, Union +from typing import Any, Optional, Union from twisted.internet import protocol from twisted.internet.defer import Deferred @@ -39,7 +39,7 @@ class RedisProtocol(protocol.Protocol): class SubscriberProtocol(RedisProtocol): def __init__(self, *args: object, **kwargs: object): ... password: Optional[str] - def subscribe(self, channels: Union[str, List[str]]) -> "Deferred[None]": ... + def subscribe(self, channels: Union[str, list[str]]) -> "Deferred[None]": ... def connectionMade(self) -> None: ... # type-ignore: twisted.internet.protocol.Protocol provides a default argument for # `reason`. txredisapi's LineReceiver Protocol doesn't. But that's fine: it's what's @@ -69,7 +69,7 @@ class UnixConnectionHandler(ConnectionHandler): ... 
class RedisFactory(protocol.ReconnectingClientFactory): continueTrying: bool handler: ConnectionHandler - pool: List[RedisProtocol] + pool: list[RedisProtocol] replyTimeout: Optional[int] def __init__( self, @@ -77,7 +77,7 @@ class RedisFactory(protocol.ReconnectingClientFactory): dbid: Optional[int], poolsize: int, isLazy: bool = False, - handler: Type = ConnectionHandler, + handler: type = ConnectionHandler, charset: str = "utf-8", password: Optional[str] = None, replyTimeout: Optional[int] = None, diff --git a/synapse/__init__.py b/synapse/__init__.py index 3bd1b3307e..d1c306b8f3 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -24,7 +24,7 @@ import os import sys -from typing import Any, Dict +from typing import Any from PIL import ImageFile @@ -70,7 +70,7 @@ try: from canonicaljson import register_preserialisation_callback from immutabledict import immutabledict - def _immutabledict_cb(d: immutabledict) -> Dict[str, Any]: + def _immutabledict_cb(d: immutabledict) -> dict[str, Any]: try: return d._dict except Exception: diff --git a/synapse/_scripts/generate_workers_map.py b/synapse/_scripts/generate_workers_map.py index f66c01040c..e669f6902d 100755 --- a/synapse/_scripts/generate_workers_map.py +++ b/synapse/_scripts/generate_workers_map.py @@ -25,7 +25,7 @@ import logging import re from collections import defaultdict from dataclasses import dataclass -from typing import Dict, Iterable, Optional, Pattern, Set, Tuple +from typing import Iterable, Optional, Pattern import yaml @@ -81,7 +81,7 @@ class EnumerationResource(HttpServer): """ def __init__(self, is_worker: bool) -> None: - self.registrations: Dict[Tuple[str, str], EndpointDescription] = {} + self.registrations: dict[tuple[str, str], EndpointDescription] = {} self._is_worker = is_worker def register_paths( @@ -115,7 +115,7 @@ class EnumerationResource(HttpServer): def get_registered_paths_for_hs( hs: HomeServer, -) -> Dict[Tuple[str, str], EndpointDescription]: +) -> dict[tuple[str, str], EndpointDescription]: """ Given a homeserver, get all registered endpoints and their descriptions. """ @@ -142,7 +142,7 @@ def get_registered_paths_for_hs( def get_registered_paths_for_default( worker_app: Optional[str], base_config: HomeServerConfig -) -> Dict[Tuple[str, str], EndpointDescription]: +) -> dict[tuple[str, str], EndpointDescription]: """ Given the name of a worker application and a base homeserver configuration, returns: @@ -168,9 +168,9 @@ def get_registered_paths_for_default( def elide_http_methods_if_unconflicting( - registrations: Dict[Tuple[str, str], EndpointDescription], - all_possible_registrations: Dict[Tuple[str, str], EndpointDescription], -) -> Dict[Tuple[str, str], EndpointDescription]: + registrations: dict[tuple[str, str], EndpointDescription], + all_possible_registrations: dict[tuple[str, str], EndpointDescription], +) -> dict[tuple[str, str], EndpointDescription]: """ Elides HTTP methods (by replacing them with `*`) if all possible registered methods can be handled by the worker whose registration map is `registrations`. @@ -180,13 +180,13 @@ def elide_http_methods_if_unconflicting( """ def paths_to_methods_dict( - methods_and_paths: Iterable[Tuple[str, str]], - ) -> Dict[str, Set[str]]: + methods_and_paths: Iterable[tuple[str, str]], + ) -> dict[str, set[str]]: """ Given (method, path) pairs, produces a dict from path to set of methods available at that path. 
""" - result: Dict[str, Set[str]] = {} + result: dict[str, set[str]] = {} for method, path in methods_and_paths: result.setdefault(path, set()).add(method) return result @@ -210,8 +210,8 @@ def elide_http_methods_if_unconflicting( def simplify_path_regexes( - registrations: Dict[Tuple[str, str], EndpointDescription], -) -> Dict[Tuple[str, str], EndpointDescription]: + registrations: dict[tuple[str, str], EndpointDescription], +) -> dict[tuple[str, str], EndpointDescription]: """ Simplify all the path regexes for the dict of endpoint descriptions, so that we don't use the Python-specific regex extensions @@ -270,8 +270,8 @@ def main() -> None: # TODO SSO endpoints (pick_idp etc) NOT REGISTERED BY THIS SCRIPT - categories_to_methods_and_paths: Dict[ - Optional[str], Dict[Tuple[str, str], EndpointDescription] + categories_to_methods_and_paths: dict[ + Optional[str], dict[tuple[str, str], EndpointDescription] ] = defaultdict(dict) for (method, path), desc in elided_worker_paths.items(): @@ -283,7 +283,7 @@ def main() -> None: def print_category( category_name: Optional[str], - elided_worker_paths: Dict[Tuple[str, str], EndpointDescription], + elided_worker_paths: dict[tuple[str, str], EndpointDescription], ) -> None: """ Prints out a category, in documentation page style. diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 4897fa94b0..3fe2f33e52 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -26,7 +26,7 @@ import hashlib import hmac import logging import sys -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, Optional import requests import yaml @@ -262,7 +262,7 @@ def main() -> None: args = parser.parse_args() - config: Optional[Dict[str, Any]] = None + config: Optional[dict[str, Any]] = None if "config" in args and args.config: config = yaml.safe_load(args.config) @@ -350,7 +350,7 @@ def _read_file(file_path: Any, config_path: str) -> str: sys.exit(1) -def _find_client_listener(config: Dict[str, Any]) -> Optional[str]: +def _find_client_listener(config: dict[str, Any]) -> Optional[str]: # try to find a listener in the config. 
Returns a host:port pair for listener in config.get("listeners", []): if listener.get("type") != "http" or listener.get("tls", False): diff --git a/synapse/_scripts/review_recent_signups.py b/synapse/_scripts/review_recent_signups.py index 0ff7fae567..d760a84bf2 100644 --- a/synapse/_scripts/review_recent_signups.py +++ b/synapse/_scripts/review_recent_signups.py @@ -23,7 +23,6 @@ import argparse import sys import time from datetime import datetime -from typing import List import attr @@ -50,15 +49,15 @@ class ReviewConfig(RootConfig): class UserInfo: user_id: str creation_ts: int - emails: List[str] = attr.Factory(list) - private_rooms: List[str] = attr.Factory(list) - public_rooms: List[str] = attr.Factory(list) - ips: List[str] = attr.Factory(list) + emails: list[str] = attr.Factory(list) + private_rooms: list[str] = attr.Factory(list) + public_rooms: list[str] = attr.Factory(list) + ips: list[str] = attr.Factory(list) def get_recent_users( txn: LoggingTransaction, since_ms: int, exclude_app_service: bool -) -> List[UserInfo]: +) -> list[UserInfo]: """Fetches recently registered users and some info on them.""" sql = """ diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 3c79919fea..e83c0de5a4 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -33,15 +33,10 @@ from typing import ( Any, Awaitable, Callable, - Dict, Generator, Iterable, - List, NoReturn, Optional, - Set, - Tuple, - Type, TypedDict, TypeVar, cast, @@ -244,7 +239,7 @@ end_error: Optional[str] = None # not the error then the script will show nothing outside of what's printed in the run # function. If both are defined, the script will print both the error and the stacktrace. end_error_exec_info: Optional[ - Tuple[Type[BaseException], BaseException, TracebackType] + tuple[type[BaseException], BaseException, TracebackType] ] = None R = TypeVar("R") @@ -281,8 +276,8 @@ class Store( def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) - def execute_sql(self, sql: str, *args: object) -> Awaitable[List[Tuple]]: - def r(txn: LoggingTransaction) -> List[Tuple]: + def execute_sql(self, sql: str, *args: object) -> Awaitable[list[tuple]]: + def r(txn: LoggingTransaction) -> list[tuple]: txn.execute(sql, args) return txn.fetchall() @@ -292,8 +287,8 @@ class Store( self, txn: LoggingTransaction, table: str, - headers: List[str], - rows: List[Tuple], + headers: list[str], + rows: list[tuple], override_system_value: bool = False, ) -> None: sql = "INSERT INTO %s (%s) %s VALUES (%s)" % ( @@ -330,7 +325,7 @@ class MockHomeserver(HomeServer): class Porter: def __init__( self, - sqlite_config: Dict[str, Any], + sqlite_config: dict[str, Any], progress: "Progress", batch_size: int, hs: HomeServer, @@ -340,7 +335,7 @@ class Porter: self.batch_size = batch_size self.hs = hs - async def setup_table(self, table: str) -> Tuple[str, int, int, int, int]: + async def setup_table(self, table: str) -> tuple[str, int, int, int, int]: if table in APPEND_ONLY_TABLES: # It's safe to just carry on inserting. 
row = await self.postgres_store.db_pool.simple_select_one( @@ -403,10 +398,10 @@ class Porter: return table, already_ported, total_to_port, forward_chunk, backward_chunk - async def get_table_constraints(self) -> Dict[str, Set[str]]: + async def get_table_constraints(self) -> dict[str, set[str]]: """Returns a map of tables that have foreign key constraints to tables they depend on.""" - def _get_constraints(txn: LoggingTransaction) -> Dict[str, Set[str]]: + def _get_constraints(txn: LoggingTransaction) -> dict[str, set[str]]: # We can pull the information about foreign key constraints out from # the postgres schema tables. sql = """ @@ -422,7 +417,7 @@ class Porter: """ txn.execute(sql) - results: Dict[str, Set[str]] = {} + results: dict[str, set[str]] = {} for table, foreign_table in txn: results.setdefault(table, set()).add(foreign_table) return results @@ -490,7 +485,7 @@ class Porter: def r( txn: LoggingTransaction, - ) -> Tuple[Optional[List[str]], List[Tuple], List[Tuple]]: + ) -> tuple[Optional[list[str]], list[tuple], list[tuple]]: forward_rows = [] backward_rows = [] if do_forward[0]: @@ -507,7 +502,7 @@ class Porter: if forward_rows or backward_rows: assert txn.description is not None - headers: Optional[List[str]] = [ + headers: Optional[list[str]] = [ column[0] for column in txn.description ] else: @@ -574,7 +569,7 @@ class Porter: while True: - def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]: + def r(txn: LoggingTransaction) -> tuple[list[str], list[tuple]]: txn.execute(select, (forward_chunk, self.batch_size)) rows = txn.fetchall() assert txn.description is not None @@ -956,7 +951,7 @@ class Porter: self.progress.set_state("Copying to postgres") constraints = await self.get_table_constraints() - tables_ported = set() # type: Set[str] + tables_ported = set() # type: set[str] while tables_to_port_info_map: # Pulls out all tables that are still to be ported and which @@ -995,8 +990,8 @@ class Porter: reactor.stop() def _convert_rows( - self, table: str, headers: List[str], rows: List[Tuple] - ) -> List[Tuple]: + self, table: str, headers: list[str], rows: list[tuple] + ) -> list[tuple]: bool_col_names = BOOLEAN_COLUMNS.get(table, []) bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names] @@ -1030,7 +1025,7 @@ class Porter: return outrows - async def _setup_sent_transactions(self) -> Tuple[int, int, int]: + async def _setup_sent_transactions(self) -> tuple[int, int, int]: # Only save things from the last day yesterday = int(time.time() * 1000) - 86400000 @@ -1042,7 +1037,7 @@ class Porter: ")" ) - def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]: + def r(txn: LoggingTransaction) -> tuple[list[str], list[tuple]]: txn.execute(select) rows = txn.fetchall() assert txn.description is not None @@ -1112,14 +1107,14 @@ class Porter: self, table: str, forward_chunk: int, backward_chunk: int ) -> int: frows = cast( - List[Tuple[int]], + list[tuple[int]], await self.sqlite_store.execute_sql( "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk ), ) brows = cast( - List[Tuple[int]], + list[tuple[int]], await self.sqlite_store.execute_sql( "SELECT count(*) FROM %s WHERE rowid <= ?" 
% (table,), backward_chunk ), @@ -1136,7 +1131,7 @@ class Porter: async def _get_total_count_to_port( self, table: str, forward_chunk: int, backward_chunk: int - ) -> Tuple[int, int]: + ) -> tuple[int, int]: remaining, done = await make_deferred_yieldable( defer.gatherResults( [ @@ -1221,7 +1216,7 @@ class Porter: async def _setup_sequence( self, sequence_name: str, - stream_id_tables: Iterable[Tuple[str, str]], + stream_id_tables: Iterable[tuple[str, str]], ) -> None: """Set a sequence to the correct value.""" current_stream_ids = [] @@ -1331,7 +1326,7 @@ class Progress: """Used to report progress of the port""" def __init__(self) -> None: - self.tables: Dict[str, TableProgress] = {} + self.tables: dict[str, TableProgress] = {} self.start_time = int(time.time()) diff --git a/synapse/api/auth/__init__.py b/synapse/api/auth/__init__.py index d253938329..cc0c0d4601 100644 --- a/synapse/api/auth/__init__.py +++ b/synapse/api/auth/__init__.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Optional, Protocol, Tuple +from typing import TYPE_CHECKING, Optional, Protocol from prometheus_client import Histogram @@ -51,7 +51,7 @@ class Auth(Protocol): room_id: str, requester: Requester, allow_departed_users: bool = False, - ) -> Tuple[str, Optional[str]]: + ) -> tuple[str, Optional[str]]: """Check if the user is in the room, or was at some point. Args: room_id: The room to check. @@ -190,7 +190,7 @@ class Auth(Protocol): async def check_user_in_room_or_world_readable( self, room_id: str, requester: Requester, allow_departed_users: bool = False - ) -> Tuple[str, Optional[str]]: + ) -> tuple[str, Optional[str]]: """Checks that the user is or was in the room or the room is world readable. If it isn't then an exception is raised. diff --git a/synapse/api/auth/base.py b/synapse/api/auth/base.py index fd7d761f7d..d5635e588f 100644 --- a/synapse/api/auth/base.py +++ b/synapse/api/auth/base.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from netaddr import IPAddress @@ -64,7 +64,7 @@ class BaseAuth: room_id: str, requester: Requester, allow_departed_users: bool = False, - ) -> Tuple[str, Optional[str]]: + ) -> tuple[str, Optional[str]]: """Check if the user is in the room, or was at some point. Args: room_id: The room to check. @@ -114,7 +114,7 @@ class BaseAuth: @trace async def check_user_in_room_or_world_readable( self, room_id: str, requester: Requester, allow_departed_users: bool = False - ) -> Tuple[str, Optional[str]]: + ) -> tuple[str, Optional[str]]: """Checks that the user is or was in the room or the room is world readable. If it isn't then an exception is raised. diff --git a/synapse/api/auth/mas.py b/synapse/api/auth/mas.py index baa6b27336..325d264161 100644 --- a/synapse/api/auth/mas.py +++ b/synapse/api/auth/mas.py @@ -13,7 +13,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional, Set +from typing import TYPE_CHECKING, Optional from urllib.parse import urlencode from synapse._pydantic_compat import ( @@ -369,7 +369,7 @@ class MasDelegatedAuth(BaseAuth): # We only allow a single device_id in the scope, so we find them all in the # scope list, and raise if there are more than one. The OIDC server should be # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope. 
- device_ids: Set[str] = set() + device_ids: set[str] = set() for tok in scope: if tok.startswith(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX): device_ids.add(tok[len(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX) :]) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index b6adcc83dc..48b32aa04a 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -20,7 +20,7 @@ # import logging from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set +from typing import TYPE_CHECKING, Any, Callable, Optional from urllib.parse import urlencode from authlib.oauth2 import ClientAuth @@ -70,7 +70,7 @@ STABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:client:device:" SCOPE_SYNAPSE_ADMIN = "urn:synapse:admin:*" -def scope_to_list(scope: str) -> List[str]: +def scope_to_list(scope: str) -> list[str]: """Convert a scope string to a list of scope tokens""" return scope.strip().split(" ") @@ -96,7 +96,7 @@ class IntrospectionResult: absolute_expiry_ms = expires_in * 1000 + self.retrieved_at_ms return now_ms < absolute_expiry_ms - def get_scope_list(self) -> List[str]: + def get_scope_list(self) -> list[str]: value = self._inner.get("scope") if not isinstance(value, str): return [] @@ -264,7 +264,7 @@ class MSC3861DelegatedAuth(BaseAuth): logger.warning("Failed to load metadata:", exc_info=True) return None - async def auth_metadata(self) -> Dict[str, Any]: + async def auth_metadata(self) -> dict[str, Any]: """ Returns the auth metadata dict """ @@ -303,7 +303,7 @@ class MSC3861DelegatedAuth(BaseAuth): # By default, we shouldn't cache the result unless we know it's valid cache_context.should_cache = False introspection_endpoint = await self._introspection_endpoint() - raw_headers: Dict[str, str] = { + raw_headers: dict[str, str] = { "Content-Type": "application/x-www-form-urlencoded", "Accept": "application/json", # Tell MAS that we support reading the device ID as an explicit @@ -520,7 +520,7 @@ class MSC3861DelegatedAuth(BaseAuth): raise InvalidClientTokenError("Token is not active") # Let's look at the scope - scope: List[str] = introspection_result.get_scope_list() + scope: list[str] = introspection_result.get_scope_list() # Determine type of user based on presence of particular scopes has_user_scope = ( @@ -575,7 +575,7 @@ class MSC3861DelegatedAuth(BaseAuth): # We only allow a single device_id in the scope, so we find them all in the # scope list, and raise if there are more than one. The OIDC server should be # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope. 
- device_ids: Set[str] = set() + device_ids: set[str] = set() for tok in scope: if tok.startswith(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX): device_ids.add(tok[len(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX) :]) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index fb6721c0ee..f75b34ef69 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -26,7 +26,7 @@ import math import typing from enum import Enum from http import HTTPStatus -from typing import Any, Dict, List, Optional, Union +from typing import Any, Optional, Union from twisted.web import http @@ -166,7 +166,7 @@ class CodeMessageException(RuntimeError): self, code: Union[int, HTTPStatus], msg: str, - headers: Optional[Dict[str, str]] = None, + headers: Optional[dict[str, str]] = None, ): super().__init__("%d: %s" % (code, msg)) @@ -201,7 +201,7 @@ class RedirectException(CodeMessageException): super().__init__(code=http_code, msg=msg) self.location = location - self.cookies: List[bytes] = [] + self.cookies: list[bytes] = [] class SynapseError(CodeMessageException): @@ -223,8 +223,8 @@ class SynapseError(CodeMessageException): code: int, msg: str, errcode: str = Codes.UNKNOWN, - additional_fields: Optional[Dict] = None, - headers: Optional[Dict[str, str]] = None, + additional_fields: Optional[dict] = None, + headers: Optional[dict[str, str]] = None, ): """Constructs a synapse error. @@ -236,7 +236,7 @@ class SynapseError(CodeMessageException): super().__init__(code, msg, headers) self.errcode = errcode if additional_fields is None: - self._additional_fields: Dict = {} + self._additional_fields: dict = {} else: self._additional_fields = dict(additional_fields) @@ -276,7 +276,7 @@ class ProxiedRequestError(SynapseError): code: int, msg: str, errcode: str = Codes.UNKNOWN, - additional_fields: Optional[Dict] = None, + additional_fields: Optional[dict] = None, ): super().__init__(code, msg, errcode, additional_fields) @@ -409,7 +409,7 @@ class OAuthInsufficientScopeError(SynapseError): def __init__( self, - required_scopes: List[str], + required_scopes: list[str], ): headers = { "WWW-Authenticate": 'Bearer error="insufficient_scope", scope="%s"' diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 34dd12368a..e31bec1a00 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -26,12 +26,9 @@ from typing import ( Awaitable, Callable, Collection, - Dict, Iterable, - List, Mapping, Optional, - Set, TypeVar, Union, ) @@ -248,34 +245,34 @@ class FilterCollection: async def filter_presence( self, presence_states: Iterable[UserPresenceState] - ) -> List[UserPresenceState]: + ) -> list[UserPresenceState]: return await self._presence_filter.filter(presence_states) async def filter_global_account_data( self, events: Iterable[JsonDict] - ) -> List[JsonDict]: + ) -> list[JsonDict]: return await self._global_account_data_filter.filter(events) - async def filter_room_state(self, events: Iterable[EventBase]) -> List[EventBase]: + async def filter_room_state(self, events: Iterable[EventBase]) -> list[EventBase]: return await self._room_state_filter.filter( await self._room_filter.filter(events) ) async def filter_room_timeline( self, events: Iterable[EventBase] - ) -> List[EventBase]: + ) -> list[EventBase]: return await self._room_timeline_filter.filter( await self._room_filter.filter(events) ) - async def filter_room_ephemeral(self, events: Iterable[JsonDict]) -> List[JsonDict]: + async def filter_room_ephemeral(self, events: Iterable[JsonDict]) -> list[JsonDict]: return await 
self._room_ephemeral_filter.filter( await self._room_filter.filter(events) ) async def filter_room_account_data( self, events: Iterable[JsonDict] - ) -> List[JsonDict]: + ) -> list[JsonDict]: return await self._room_account_data_filter.filter( await self._room_filter.filter(events) ) @@ -440,7 +437,7 @@ class Filter: return True - def _check_fields(self, field_matchers: Dict[str, Callable[[str], bool]]) -> bool: + def _check_fields(self, field_matchers: dict[str, Callable[[str], bool]]) -> bool: """Checks whether the filter matches the given event fields. Args: @@ -474,7 +471,7 @@ class Filter: # Otherwise, accept it. return True - def filter_rooms(self, room_ids: Iterable[str]) -> Set[str]: + def filter_rooms(self, room_ids: Iterable[str]) -> set[str]: """Apply the 'rooms' filter to a given list of rooms. Args: @@ -496,7 +493,7 @@ class Filter: async def _check_event_relations( self, events: Collection[FilterEvent] - ) -> List[FilterEvent]: + ) -> list[FilterEvent]: # The event IDs to check, mypy doesn't understand the isinstance check. event_ids = [event.event_id for event in events if isinstance(event, EventBase)] # type: ignore[attr-defined] event_ids_to_keep = set( @@ -511,7 +508,7 @@ class Filter: if not isinstance(event, EventBase) or event.event_id in event_ids_to_keep ] - async def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]: + async def filter(self, events: Iterable[FilterEvent]) -> list[FilterEvent]: result = [event for event in events if self._check(event)] if self.related_by_senders or self.related_by_rel_types: diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 9d1c7801bc..1a43bdff23 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -20,7 +20,7 @@ # # -from typing import TYPE_CHECKING, Dict, Hashable, Optional, Tuple +from typing import TYPE_CHECKING, Hashable, Optional from synapse.api.errors import LimitExceededError from synapse.config.ratelimiting import RatelimitSettings @@ -92,7 +92,7 @@ class Ratelimiter: # * The number of tokens currently in the bucket, # * The time point when the bucket was last completely empty, and # * The rate_hz (leak rate) of this particular bucket. - self.actions: Dict[Hashable, Tuple[float, float, float]] = {} + self.actions: dict[Hashable, tuple[float, float, float]] = {} self.clock.looping_call(self._prune_message_counts, 60 * 1000) @@ -109,7 +109,7 @@ class Ratelimiter: def _get_action_counts( self, key: Hashable, time_now_s: float - ) -> Tuple[float, float, float]: + ) -> tuple[float, float, float]: """Retrieve the action counts, with a fallback representing an empty bucket.""" return self.actions.get(key, (0.0, time_now_s, 0.0)) @@ -122,7 +122,7 @@ class Ratelimiter: update: bool = True, n_actions: int = 1, _time_now_s: Optional[float] = None, - ) -> Tuple[bool, float]: + ) -> tuple[bool, float]: """Can the entity (e.g. user or IP address) perform the action? Checks if the user has ratelimiting disabled in the database by looking diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 71ef5952c3..b6e76379f1 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -18,7 +18,7 @@ # # -from typing import Callable, Dict, Optional, Tuple +from typing import Callable, Optional import attr @@ -109,7 +109,7 @@ class RoomVersion: # is not enough to mark it "supported": the push rule evaluator also needs to # support the flag. Unknown flags are ignored by the evaluator, making conditions # fail if used. 
- msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag + msc3931_push_features: tuple[str, ...] # values from PushRuleRoomFlag # MSC3757: Restricting who can overwrite a state event msc3757_enabled: bool # MSC4289: Creator power enabled @@ -476,7 +476,7 @@ class RoomVersions: ) -KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { +KNOWN_ROOM_VERSIONS: dict[str, RoomVersion] = { v.identifier: v for v in ( RoomVersions.V1, diff --git a/synapse/app/_base.py b/synapse/app/_base.py index e30151dfb4..1954dbc1a0 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -34,11 +34,8 @@ from typing import ( Any, Awaitable, Callable, - Dict, - List, NoReturn, Optional, - Tuple, cast, ) from wsgiref.simple_server import WSGIServer @@ -98,8 +95,8 @@ reactor = cast(ISynapseReactor, _reactor) logger = logging.getLogger(__name__) -_instance_id_to_sighup_callbacks_map: Dict[ - str, List[Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]] +_instance_id_to_sighup_callbacks_map: dict[ + str, list[tuple[Callable[..., None], tuple[object, ...], dict[str, object]]] ] = {} """ Map from homeserver instance_id to a list of callbacks. @@ -176,7 +173,7 @@ def start_worker_reactor( def start_reactor( appname: str, soft_file_limit: int, - gc_thresholds: Optional[Tuple[int, int, int]], + gc_thresholds: Optional[tuple[int, int, int]], pid_file: Optional[str], daemonize: bool, print_pidfile: bool, @@ -309,7 +306,7 @@ def register_start( def listen_metrics( bind_addresses: StrCollection, port: int -) -> List[Tuple[WSGIServer, Thread]]: +) -> list[tuple[WSGIServer, Thread]]: """ Start Prometheus metrics server. @@ -330,7 +327,7 @@ def listen_metrics( from synapse.metrics import RegistryProxy - servers: List[Tuple[WSGIServer, Thread]] = [] + servers: list[tuple[WSGIServer, Thread]] = [] for host in bind_addresses: logger.info("Starting metrics listener on %s:%d", host, port) server, thread = start_http_server_prometheus( @@ -345,7 +342,7 @@ def listen_manhole( port: int, manhole_settings: ManholeConfig, manhole_globals: dict, -) -> List[Port]: +) -> list[Port]: # twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing # warning. It's fixed by https://github.com/twisted/twisted/pull/1522), so # suppress the warning for now. 
@@ -370,7 +367,7 @@ def listen_tcp( factory: ServerFactory, reactor: IReactorTCP = reactor, backlog: int = 50, -) -> List[Port]: +) -> list[Port]: """ Create a TCP socket for a port and several addresses @@ -395,7 +392,7 @@ def listen_unix( factory: ServerFactory, reactor: IReactorUNIX = reactor, backlog: int = 50, -) -> List[Port]: +) -> list[Port]: """ Create a UNIX socket for a given path and 'mode' permission @@ -419,7 +416,7 @@ def listen_http( max_request_body_size: int, context_factory: Optional[IOpenSSLContextFactory], reactor: ISynapseReactor = reactor, -) -> List[Port]: +) -> list[Port]: """ Args: listener_config: TODO @@ -489,7 +486,7 @@ def listen_ssl( context_factory: IOpenSSLContextFactory, reactor: IReactorSSL = reactor, backlog: int = 50, -) -> List[Port]: +) -> list[Port]: """ Create an TLS-over-TCP socket for a port and several addresses diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index bafeb46971..b5b1edac0a 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -24,7 +24,7 @@ import logging import os import sys import tempfile -from typing import List, Mapping, Optional, Sequence, Tuple +from typing import Mapping, Optional, Sequence from twisted.internet import defer, task @@ -150,7 +150,7 @@ class FileExfiltrationWriter(ExfiltrationWriter): if list(os.listdir(self.base_directory)): raise Exception("Directory must be empty") - def write_events(self, room_id: str, events: List[EventBase]) -> None: + def write_events(self, room_id: str, events: list[EventBase]) -> None: room_directory = os.path.join(self.base_directory, "rooms", room_id) os.makedirs(room_directory, exist_ok=True) events_file = os.path.join(room_directory, "events") @@ -255,7 +255,7 @@ class FileExfiltrationWriter(ExfiltrationWriter): return self.base_directory -def load_config(argv_options: List[str]) -> Tuple[HomeServerConfig, argparse.Namespace]: +def load_config(argv_options: list[str]) -> tuple[HomeServerConfig, argparse.Namespace]: parser = argparse.ArgumentParser(description="Synapse Admin Command") HomeServerConfig.add_arguments_to_parser(parser) diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py index b981a7631b..73e33d77a5 100644 --- a/synapse/app/complement_fork_starter.py +++ b/synapse/app/complement_fork_starter.py @@ -26,13 +26,13 @@ import os import signal import sys from types import FrameType -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable, Optional from twisted.internet.main import installReactor # a list of the original signal handlers, before we installed our custom ones. # We restore these in our child processes. -_original_signal_handlers: Dict[int, Any] = {} +_original_signal_handlers: dict[int, Any] = {} class ProxiedReactor: @@ -72,7 +72,7 @@ class ProxiedReactor: def _worker_entrypoint( - func: Callable[[], None], proxy_reactor: ProxiedReactor, args: List[str] + func: Callable[[], None], proxy_reactor: ProxiedReactor, args: list[str] ) -> None: """ Entrypoint for a forked worker process. @@ -128,7 +128,7 @@ def main() -> None: # Split up the subsequent arguments into each workers' arguments; # `--` is our delimiter of choice. 
- args_by_worker: List[List[str]] = [ + args_by_worker: list[list[str]] = [ list(args) for cond, args in itertools.groupby(ns.args, lambda ele: ele != "--") if cond and args @@ -167,7 +167,7 @@ def main() -> None: update_proc.join() print("===== PREPARED DATABASE =====", file=sys.stderr) - processes: List[multiprocessing.Process] = [] + processes: list[multiprocessing.Process] = [] # Install signal handlers to propagate signals to all our children, so that they # shut down cleanly. This also inhibits our own exit, but that's good: we want to diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 7518661265..8f512c1577 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -21,7 +21,6 @@ # import logging import sys -from typing import Dict, List from twisted.web.resource import Resource @@ -181,7 +180,7 @@ class GenericWorkerServer(HomeServer): # We always include an admin resource that we populate with servlets as needed admin_resource = JsonResource(self, canonical_json=False) - resources: Dict[str, Resource] = { + resources: dict[str, Resource] = { # We always include a health resource. "/health": HealthResource(), "/_synapse/admin": admin_resource, @@ -314,7 +313,7 @@ class GenericWorkerServer(HomeServer): self.get_replication_command_handler().start_replication(self) -def load_config(argv_options: List[str]) -> HomeServerConfig: +def load_config(argv_options: list[str]) -> HomeServerConfig: """ Parse the commandline and config files (does not generate config) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e415d651bc..023a0d877f 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -22,7 +22,7 @@ import logging import os import sys -from typing import Dict, Iterable, List, Optional +from typing import Iterable, Optional from twisted.internet.tcp import Port from twisted.web.resource import EncodingResourceWrapper, Resource @@ -99,7 +99,7 @@ class SynapseHomeServer(HomeServer): site_tag = listener_config.get_site_tag() # We always include a health resource. 
- resources: Dict[str, Resource] = {"/health": HealthResource()} + resources: dict[str, Resource] = {"/health": HealthResource()} for res in listener_config.http_options.resources: for name in res.names: @@ -170,7 +170,7 @@ class SynapseHomeServer(HomeServer): def _configure_named_resource( self, name: str, compress: bool = False - ) -> Dict[str, Resource]: + ) -> dict[str, Resource]: """Build a resource map for a named resource Args: @@ -180,7 +180,7 @@ class SynapseHomeServer(HomeServer): Returns: map from path to HTTP resource """ - resources: Dict[str, Resource] = {} + resources: dict[str, Resource] = {} if name == "client": client_resource: Resource = ClientRestResource(self) if compress: @@ -318,7 +318,7 @@ class SynapseHomeServer(HomeServer): logger.warning("Unrecognized listener type: %s", listener.type) -def load_or_generate_config(argv_options: List[str]) -> HomeServerConfig: +def load_or_generate_config(argv_options: list[str]) -> HomeServerConfig: """ Parse the commandline and config files diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 4bbc33cba2..13a0e3db7c 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -22,7 +22,7 @@ import logging import math import resource import sys -from typing import TYPE_CHECKING, List, Mapping, Sized, Tuple +from typing import TYPE_CHECKING, Mapping, Sized from prometheus_client import Gauge @@ -54,7 +54,7 @@ Phone home stats are sent every 3 hours # Contains the list of processes we will be monitoring # currently either 0 or 1 -_stats_process: List[Tuple[int, "resource.struct_rusage"]] = [] +_stats_process: list[tuple[int, "resource.struct_rusage"]] = [] # Gauges to expose monthly active user control metrics current_mau_gauge = Gauge( @@ -82,12 +82,12 @@ registered_reserved_users_mau_gauge = Gauge( def phone_stats_home( hs: "HomeServer", stats: JsonDict, - stats_process: List[Tuple[int, "resource.struct_rusage"]] = _stats_process, + stats_process: list[tuple[int, "resource.struct_rusage"]] = _stats_process, ) -> "defer.Deferred[None]": async def _phone_stats_home( hs: "HomeServer", stats: JsonDict, - stats_process: List[Tuple[int, "resource.struct_rusage"]] = _stats_process, + stats_process: list[tuple[int, "resource.struct_rusage"]] = _stats_process, ) -> None: """Collect usage statistics and send them to the configured endpoint. 
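
[Review note, not part of the patch] The hunks above and below all perform the
same mechanical substitution: the deprecated typing.Dict/List/Tuple/Set/Type
aliases are replaced by the built-in generics from PEP 585, which have been
subscriptable at runtime since Python 3.9. A minimal sketch of the equivalence,
using hypothetical names rather than Synapse code:

    from typing import Dict, List, Tuple  # deprecated aliases since Python 3.9

    def room_versions_old(room_ids: List[str]) -> Dict[str, Tuple[int, ...]]:
        return {room_id: (1, 2) for room_id in room_ids}

    def room_versions_new(room_ids: list[str]) -> dict[str, tuple[int, ...]]:
        return {room_id: (1, 2) for room_id in room_ids}

Both spellings describe identical types, so the rewrite should be behaviour
preserving; the few runtime uses touched in this series (for example cast(...)
and attr.validators.instance_of(...)) should behave identically, since the
typing aliases are thin wrappers around the built-in classes.
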
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 1d0735ca1d..e91fa3a624 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -25,9 +25,7 @@ import re from enum import Enum from typing import ( TYPE_CHECKING, - Dict, Iterable, - List, Optional, Pattern, Sequence, @@ -59,11 +57,11 @@ logger = logging.getLogger(__name__) # Type for the `device_one_time_keys_count` field in an appservice transaction # user ID -> {device ID -> {algorithm -> count}} -TransactionOneTimeKeysCount = Dict[str, Dict[str, Dict[str, int]]] +TransactionOneTimeKeysCount = dict[str, dict[str, dict[str, int]]] # Type for the `device_unused_fallback_key_types` field in an appservice transaction # user ID -> {device ID -> [algorithm]} -TransactionUnusedFallbackKeys = Dict[str, Dict[str, List[str]]] +TransactionUnusedFallbackKeys = dict[str, dict[str, list[str]]] class ApplicationServiceState(Enum): @@ -145,7 +143,7 @@ class ApplicationService: def _check_namespaces( self, namespaces: Optional[JsonDict] - ) -> Dict[str, List[Namespace]]: + ) -> dict[str, list[Namespace]]: # Sanity check that it is of the form: # { # users: [ {regex: "[A-z]+.*", exclusive: true}, ...], @@ -155,7 +153,7 @@ class ApplicationService: if namespaces is None: namespaces = {} - result: Dict[str, List[Namespace]] = {} + result: dict[str, list[Namespace]] = {} for ns in ApplicationService.NS_LIST: result[ns] = [] @@ -388,7 +386,7 @@ class ApplicationService: def is_exclusive_room(self, room_id: str) -> bool: return self._is_exclusive(ApplicationService.NS_ROOMS, room_id) - def get_exclusive_user_regexes(self) -> List[Pattern[str]]: + def get_exclusive_user_regexes(self) -> list[Pattern[str]]: """Get the list of regexes used to determine if a user is exclusively registered by the AS """ @@ -417,8 +415,8 @@ class AppServiceTransaction: service: ApplicationService, id: int, events: Sequence[EventBase], - ephemeral: List[JsonMapping], - to_device_messages: List[JsonMapping], + ephemeral: list[JsonMapping], + to_device_messages: list[JsonMapping], one_time_keys_count: TransactionOneTimeKeysCount, unused_fallback_keys: TransactionUnusedFallbackKeys, device_list_summary: DeviceListUpdates, diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 55069cc5d3..f08a921998 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -23,13 +23,10 @@ import logging import urllib.parse from typing import ( TYPE_CHECKING, - Dict, Iterable, - List, Mapping, Optional, Sequence, - Tuple, TypeVar, Union, ) @@ -133,14 +130,14 @@ class ApplicationServiceApi(SimpleHttpClient): self.clock = hs.get_clock() self.config = hs.config.appservice - self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache( + self.protocol_meta_cache: ResponseCache[tuple[str, str]] = ResponseCache( clock=hs.get_clock(), name="as_protocol_meta", server_name=self.server_name, timeout_ms=HOUR_IN_MS, ) - def _get_headers(self, service: "ApplicationService") -> Dict[bytes, List[bytes]]: + def _get_headers(self, service: "ApplicationService") -> dict[bytes, list[bytes]]: """This makes sure we have always the auth header and opentracing headers set.""" # This is also ensured before in the functions. 
However this is needed to please @@ -210,8 +207,8 @@ class ApplicationServiceApi(SimpleHttpClient): service: "ApplicationService", kind: str, protocol: str, - fields: Dict[bytes, List[bytes]], - ) -> List[JsonDict]: + fields: dict[bytes, list[bytes]], + ) -> list[JsonDict]: if kind == ThirdPartyEntityKind.USER: required_field = "userid" elif kind == ThirdPartyEntityKind.LOCATION: @@ -225,7 +222,7 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None try: - args: Mapping[bytes, Union[List[bytes], str]] = fields + args: Mapping[bytes, Union[list[bytes], str]] = fields if self.config.use_appservice_legacy_authorization: args = { **fields, @@ -320,8 +317,8 @@ class ApplicationServiceApi(SimpleHttpClient): self, service: "ApplicationService", events: Sequence[EventBase], - ephemeral: List[JsonMapping], - to_device_messages: List[JsonMapping], + ephemeral: list[JsonMapping], + to_device_messages: list[JsonMapping], one_time_keys_count: TransactionOneTimeKeysCount, unused_fallback_keys: TransactionUnusedFallbackKeys, device_list_summary: DeviceListUpdates, @@ -429,9 +426,9 @@ class ApplicationServiceApi(SimpleHttpClient): return False async def claim_client_keys( - self, service: "ApplicationService", query: List[Tuple[str, str, str, int]] - ) -> Tuple[ - Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]] + self, service: "ApplicationService", query: list[tuple[str, str, str, int]] + ) -> tuple[ + dict[str, dict[str, dict[str, JsonDict]]], list[tuple[str, str, str, int]] ]: """Claim one time keys from an application service. @@ -457,7 +454,7 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None # Create the expected payload shape. - body: Dict[str, Dict[str, List[str]]] = {} + body: dict[str, dict[str, list[str]]] = {} for user_id, device, algorithm, count in query: body.setdefault(user_id, {}).setdefault(device, []).extend( [algorithm] * count @@ -502,8 +499,8 @@ class ApplicationServiceApi(SimpleHttpClient): return response, missing async def query_keys( - self, service: "ApplicationService", query: Dict[str, List[str]] - ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + self, service: "ApplicationService", query: dict[str, list[str]] + ) -> dict[str, dict[str, dict[str, JsonDict]]]: """Query the application service for keys. 
Note that any error (including a timeout) is treated as the application @@ -545,7 +542,7 @@ class ApplicationServiceApi(SimpleHttpClient): def _serialize( self, service: "ApplicationService", events: Iterable[EventBase] - ) -> List[JsonDict]: + ) -> list[JsonDict]: time_now = self.clock.time_msec() return [ serialize_event( diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index b4de759b67..b5fab5f50d 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -61,13 +61,9 @@ from typing import ( Awaitable, Callable, Collection, - Dict, Iterable, - List, Optional, Sequence, - Set, - Tuple, ) from twisted.internet.interfaces import IDelayedCall @@ -183,16 +179,16 @@ class _ServiceQueuer: def __init__(self, txn_ctrl: "_TransactionController", hs: "HomeServer"): # dict of {service_id: [events]} - self.queued_events: Dict[str, List[EventBase]] = {} + self.queued_events: dict[str, list[EventBase]] = {} # dict of {service_id: [events]} - self.queued_ephemeral: Dict[str, List[JsonMapping]] = {} + self.queued_ephemeral: dict[str, list[JsonMapping]] = {} # dict of {service_id: [to_device_message_json]} - self.queued_to_device_messages: Dict[str, List[JsonMapping]] = {} + self.queued_to_device_messages: dict[str, list[JsonMapping]] = {} # dict of {service_id: [device_list_summary]} - self.queued_device_list_summaries: Dict[str, List[DeviceListUpdates]] = {} + self.queued_device_list_summaries: dict[str, list[DeviceListUpdates]] = {} # the appservices which currently have a transaction in flight - self.requests_in_flight: Set[str] = set() + self.requests_in_flight: set[str] = set() self.txn_ctrl = txn_ctrl self._msc3202_transaction_extensions_enabled: bool = ( hs.config.experimental.msc3202_transaction_extensions @@ -302,7 +298,7 @@ class _ServiceQueuer: events: Iterable[EventBase], ephemerals: Iterable[JsonMapping], to_device_messages: Iterable[JsonMapping], - ) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]: + ) -> tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]: """ Given a list of the events, ephemeral messages and to-device messages, - first computes a list of application services users that may have @@ -313,14 +309,14 @@ class _ServiceQueuer: """ # Set of 'interesting' users who may have updates - users: Set[str] = set() + users: set[str] = set() # The sender is always included users.add(service.sender.to_string()) # All AS users that would receive the PDUs or EDUs sent to these rooms # are classed as 'interesting'. 
- rooms_of_interesting_users: Set[str] = set() + rooms_of_interesting_users: set[str] = set() # PDUs rooms_of_interesting_users.update(event.room_id for event in events) # EDUs @@ -364,7 +360,7 @@ class _TransactionController: self.as_api = hs.get_application_service_api() # map from service id to recoverer instance - self.recoverers: Dict[str, "_Recoverer"] = {} + self.recoverers: dict[str, "_Recoverer"] = {} # for UTs self.RECOVERER_CLASS = _Recoverer @@ -373,8 +369,8 @@ class _TransactionController: self, service: ApplicationService, events: Sequence[EventBase], - ephemeral: Optional[List[JsonMapping]] = None, - to_device_messages: Optional[List[JsonMapping]] = None, + ephemeral: Optional[list[JsonMapping]] = None, + to_device_messages: Optional[list[JsonMapping]] = None, one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None, unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None, device_list_summary: Optional[DeviceListUpdates] = None, diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py index ef9d36b507..9169b062bf 100644 --- a/synapse/config/__main__.py +++ b/synapse/config/__main__.py @@ -20,13 +20,12 @@ # # import sys -from typing import List from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig -def main(args: List[str]) -> None: +def main(args: list[str]) -> None: action = args[1] if len(args) > 1 and args[1] == "read" else None # If we're reading a key in the config file, then `args[1]` will be `read` and `args[2]` # will be the key to read. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 5d0560e0f2..ce06905390 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -33,14 +33,10 @@ from textwrap import dedent from typing import ( Any, ClassVar, - Dict, Iterable, Iterator, - List, MutableMapping, Optional, - Tuple, - Type, TypeVar, Union, ) @@ -321,9 +317,9 @@ class Config: def read_templates( self, - filenames: List[str], + filenames: list[str], custom_template_directories: Optional[Iterable[str]] = None, - ) -> List[jinja2.Template]: + ) -> list[jinja2.Template]: """Load a list of template files from disk using the given variables. This function will attempt to load the given templates from the default Synapse @@ -402,7 +398,7 @@ class RootConfig: class, lower-cased and with "Config" removed. """ - config_classes: List[Type[Config]] = [] + config_classes: list[type[Config]] = [] def __init__(self, config_files: StrSequence = ()): # Capture absolute paths here, so we can reload config after we daemonize. 
@@ -471,7 +467,7 @@ class RootConfig: generate_secrets: bool = False, report_stats: Optional[bool] = None, open_private_ports: bool = False, - listeners: Optional[List[dict]] = None, + listeners: Optional[list[dict]] = None, tls_certificate_path: Optional[str] = None, tls_private_key_path: Optional[str] = None, ) -> str: @@ -545,7 +541,7 @@ class RootConfig: @classmethod def load_config( - cls: Type[TRootConfig], description: str, argv_options: List[str] + cls: type[TRootConfig], description: str, argv_options: list[str] ) -> TRootConfig: """Parse the commandline and config files @@ -605,8 +601,8 @@ class RootConfig: @classmethod def load_config_with_parser( - cls: Type[TRootConfig], parser: argparse.ArgumentParser, argv_options: List[str] - ) -> Tuple[TRootConfig, argparse.Namespace]: + cls: type[TRootConfig], parser: argparse.ArgumentParser, argv_options: list[str] + ) -> tuple[TRootConfig, argparse.Namespace]: """Parse the commandline and config files with the given parser Doesn't support config-file-generation: used by the worker apps. @@ -658,7 +654,7 @@ class RootConfig: @classmethod def load_or_generate_config( - cls: Type[TRootConfig], description: str, argv_options: List[str] + cls: type[TRootConfig], description: str, argv_options: list[str] ) -> Optional[TRootConfig]: """Parse the commandline and config files @@ -858,7 +854,7 @@ class RootConfig: def parse_config_dict( self, - config_dict: Dict[str, Any], + config_dict: dict[str, Any], config_dir_path: str, data_dir_path: str, allow_secrets_in_config: bool = True, @@ -883,7 +879,7 @@ class RootConfig: ) def generate_missing_files( - self, config_dict: Dict[str, Any], config_dir_path: str + self, config_dict: dict[str, Any], config_dir_path: str ) -> None: self.invoke_all("generate_files", config_dict, config_dir_path) @@ -930,7 +926,7 @@ class RootConfig: """ -def read_config_files(config_files: Iterable[str]) -> Dict[str, Any]: +def read_config_files(config_files: Iterable[str]) -> dict[str, Any]: """Read the config files and shallowly merge them into a dict. Successive configurations are shallowly merged into ones provided earlier, @@ -964,7 +960,7 @@ def read_config_files(config_files: Iterable[str]) -> Dict[str, Any]: return specified_config -def find_config_files(search_paths: List[str]) -> List[str]: +def find_config_files(search_paths: list[str]) -> list[str]: """Finds config files using a list of search paths. If a path is a file then that file path is added to the list. If a search path is a directory then all the "*.yaml" files in that directory are added to the list in @@ -1018,7 +1014,7 @@ class ShardedWorkerHandlingConfig: below). """ - instances: List[str] + instances: list[str] def should_handle(self, instance_name: str, key: str) -> bool: """Whether this instance is responsible for handling the given key.""" diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index 02543da388..1a9cb7db47 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -2,15 +2,11 @@ import argparse from typing import ( Any, Collection, - Dict, Iterable, Iterator, - List, Literal, MutableMapping, Optional, - Tuple, - Type, TypeVar, Union, overload, @@ -129,8 +125,8 @@ class RootConfig: mas: mas.MasConfig matrix_rtc: matrixrtc.MatrixRtcConfig - config_classes: List[Type["Config"]] = ... - config_files: List[str] + config_classes: list[type["Config"]] = ... + config_files: list[str] def __init__(self, config_files: Collection[str] = ...) -> None: ... 
def invoke_all( self, func_name: str, *args: Any, **kwargs: Any @@ -139,7 +135,7 @@ class RootConfig: def invoke_all_static(cls, func_name: str, *args: Any, **kwargs: Any) -> None: ... def parse_config_dict( self, - config_dict: Dict[str, Any], + config_dict: dict[str, Any], config_dir_path: str, data_dir_path: str, allow_secrets_in_config: bool = ..., @@ -158,11 +154,11 @@ class RootConfig: ) -> str: ... @classmethod def load_or_generate_config( - cls: Type[TRootConfig], description: str, argv_options: List[str] + cls: type[TRootConfig], description: str, argv_options: list[str] ) -> Optional[TRootConfig]: ... @classmethod def load_config( - cls: Type[TRootConfig], description: str, argv_options: List[str] + cls: type[TRootConfig], description: str, argv_options: list[str] ) -> TRootConfig: ... @classmethod def add_arguments_to_parser( @@ -170,8 +166,8 @@ class RootConfig: ) -> None: ... @classmethod def load_config_with_parser( - cls: Type[TRootConfig], parser: argparse.ArgumentParser, argv_options: List[str] - ) -> Tuple[TRootConfig, argparse.Namespace]: ... + cls: type[TRootConfig], parser: argparse.ArgumentParser, argv_options: list[str] + ) -> tuple[TRootConfig, argparse.Namespace]: ... def generate_missing_files( self, config_dict: dict, config_dir_path: str ) -> None: ... @@ -203,16 +199,16 @@ class Config: def read_template(self, filenames: str) -> jinja2.Template: ... def read_templates( self, - filenames: List[str], + filenames: list[str], custom_template_directories: Optional[Iterable[str]] = None, - ) -> List[jinja2.Template]: ... + ) -> list[jinja2.Template]: ... -def read_config_files(config_files: Iterable[str]) -> Dict[str, Any]: ... -def find_config_files(search_paths: List[str]) -> List[str]: ... +def read_config_files(config_files: Iterable[str]) -> dict[str, Any]: ... +def find_config_files(search_paths: list[str]) -> list[str]: ... class ShardedWorkerHandlingConfig: - instances: List[str] - def __init__(self, instances: List[str]) -> None: ... + instances: list[str] + def __init__(self, instances: list[str]) -> None: ... def should_handle(self, instance_name: str, key: str) -> bool: ... # noqa: F811 class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig): diff --git a/synapse/config/_util.py b/synapse/config/_util.py index 731b60a840..3e239c525e 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Dict, Type, TypeVar +from typing import Any, TypeVar import jsonschema @@ -79,8 +79,8 @@ Model = TypeVar("Model", bound=BaseModel) def parse_and_validate_mapping( config: Any, - model_type: Type[Model], -) -> Dict[str, Model]: + model_type: type[Model], +) -> dict[str, Model]: """Parse `config` as a mapping from strings to a given `Model` type. Args: config: The configuration data to check @@ -93,7 +93,7 @@ def parse_and_validate_mapping( try: # type-ignore: mypy doesn't like constructing `Dict[str, model_type]` because # `model_type` is a runtime variable. Pydantic is fine with this. 
- instances = parse_obj_as(Dict[str, model_type], config) # type: ignore[valid-type] + instances = parse_obj_as(dict[str, model_type], config) # type: ignore[valid-type] except ValidationError as e: raise ConfigError(str(e)) from e return instances diff --git a/synapse/config/api.py b/synapse/config/api.py index 0bb99d4228..e32e03e55e 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -20,7 +20,7 @@ # import logging -from typing import Any, Iterable, Optional, Tuple +from typing import Any, Iterable, Optional from synapse.api.constants import EventTypes from synapse.config._base import Config, ConfigError @@ -46,7 +46,7 @@ class ApiConfig(Config): def _get_prejoin_state_entries( self, config: JsonDict - ) -> Iterable[Tuple[str, Optional[str]]]: + ) -> Iterable[tuple[str, Optional[str]]]: """Get the event types and state keys to include in the prejoin state.""" room_prejoin_state_config = config.get("room_prejoin_state") or {} diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 81dbd330cc..b9ed1a702c 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -21,7 +21,7 @@ # import logging -from typing import Any, Dict, List +from typing import Any from urllib import parse as urlparse import yaml @@ -61,13 +61,13 @@ class AppServiceConfig(Config): def load_appservices( - hostname: str, config_files: List[str] -) -> List[ApplicationService]: + hostname: str, config_files: list[str] +) -> list[ApplicationService]: """Returns a list of Application Services from the config files.""" # Dicts of value -> filename - seen_as_tokens: Dict[str, str] = {} - seen_ids: Dict[str, str] = {} + seen_as_tokens: dict[str, str] = {} + seen_ids: dict[str, str] = {} appservices = [] diff --git a/synapse/config/cache.py b/synapse/config/cache.py index 35a052b254..e51efc3dbd 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -23,7 +23,7 @@ import logging import os import re import threading -from typing import Any, Callable, Dict, Mapping, Optional +from typing import Any, Callable, Mapping, Optional import attr @@ -38,7 +38,7 @@ logger = logging.getLogger(__name__) _CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR" # Map from canonicalised cache name to cache. 
-_CACHES: Dict[str, Callable[[float], None]] = {} +_CACHES: dict[str, Callable[[float], None]] = {} # a lock on the contents of _CACHES _CACHES_LOCK = threading.Lock() @@ -104,7 +104,7 @@ class CacheConfig(Config): _environ: Mapping[str, str] = os.environ event_cache_size: int - cache_factors: Dict[str, float] + cache_factors: dict[str, float] global_factor: float track_memory_usage: bool expiry_time_msec: Optional[int] diff --git a/synapse/config/cas.py b/synapse/config/cas.py index 60d66d7019..e6e869bb16 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -20,7 +20,7 @@ # # -from typing import Any, List, Optional +from typing import Any, Optional from synapse.config.sso import SsoAttributeRequirement from synapse.types import JsonDict @@ -107,7 +107,7 @@ REQUIRED_ATTRIBUTES_SCHEMA = { def _parsed_required_attributes_def( required_attributes: Any, -) -> List[SsoAttributeRequirement]: +) -> list[SsoAttributeRequirement]: validate_config( REQUIRED_ATTRIBUTES_SCHEMA, required_attributes, diff --git a/synapse/config/database.py b/synapse/config/database.py index c4ca63a1fa..8e9d253820 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -22,7 +22,7 @@ import argparse import logging import os -from typing import Any, List +from typing import Any from synapse.config._base import Config, ConfigError from synapse.types import JsonDict @@ -83,7 +83,7 @@ class DatabaseConfig(Config): def __init__(self, *args: Any): super().__init__(*args) - self.databases: List[DatabaseConnectionConfig] = [] + self.databases: list[DatabaseConnectionConfig] = [] def read_config(self, config: JsonDict, **kwargs: Any) -> None: # We *experimentally* support specifying multiple databases via the diff --git a/synapse/config/key.py b/synapse/config/key.py index f78ff5114f..3e832b4946 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -23,7 +23,7 @@ import hashlib import logging import os -from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional +from typing import TYPE_CHECKING, Any, Iterator, Optional import attr import jsonschema @@ -110,7 +110,7 @@ class TrustedKeyServer: server_name: str # map from key id to key object, or None to disable signature verification. - verify_keys: Optional[Dict[str, VerifyKey]] = None + verify_keys: Optional[dict[str, VerifyKey]] = None class KeyConfig(Config): @@ -250,7 +250,7 @@ class KeyConfig(Config): - server_name: "matrix.org" """ % locals() - def read_signing_keys(self, signing_key_path: str, name: str) -> List[SigningKey]: + def read_signing_keys(self, signing_key_path: str, name: str) -> list[SigningKey]: """Read the signing keys in the given path. 
Args: @@ -280,7 +280,7 @@ class KeyConfig(Config): def read_old_signing_keys( self, old_signing_keys: Optional[JsonDict] - ) -> Dict[str, "VerifyKeyWithExpiry"]: + ) -> dict[str, "VerifyKeyWithExpiry"]: if old_signing_keys is None: return {} keys = {} @@ -299,7 +299,7 @@ class KeyConfig(Config): ) return keys - def generate_files(self, config: Dict[str, Any], config_dir_path: str) -> None: + def generate_files(self, config: dict[str, Any], config_dir_path: str) -> None: if "signing_key" in config: return @@ -393,7 +393,7 @@ TRUSTED_KEY_SERVERS_SCHEMA = { def _parse_key_servers( - key_servers: List[Any], federation_verify_certificates: bool + key_servers: list[Any], federation_verify_certificates: bool ) -> Iterator[TrustedKeyServer]: try: jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA) @@ -408,7 +408,7 @@ def _parse_key_servers( server_name = server["server_name"] result = TrustedKeyServer(server_name=server_name) - verify_keys: Optional[Dict[str, str]] = server.get("verify_keys") + verify_keys: Optional[dict[str, str]] = server.get("verify_keys") if verify_keys is not None: result.verify_keys = {} for key_id, key_base64 in verify_keys.items(): diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 9dde4c4003..8e355035a9 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -26,7 +26,7 @@ import os import sys import threading from string import Template -from typing import TYPE_CHECKING, Any, Dict, Optional +from typing import TYPE_CHECKING, Any, Optional import yaml from zope.interface import implementer @@ -186,7 +186,7 @@ class LoggingConfig(Config): help=argparse.SUPPRESS, ) - def generate_files(self, config: Dict[str, Any], config_dir_path: str) -> None: + def generate_files(self, config: dict[str, Any], config_dir_path: str) -> None: log_config = config.get("log_config") if log_config and not os.path.exists(log_config): log_file = self.abspath("homeserver.log") diff --git a/synapse/config/modules.py b/synapse/config/modules.py index 37dc26e130..17319c9e37 100644 --- a/synapse/config/modules.py +++ b/synapse/config/modules.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Dict, List, Tuple +from typing import Any from synapse.config._base import Config, ConfigError from synapse.types import JsonDict @@ -29,7 +29,7 @@ class ModulesConfig(Config): section = "modules" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - self.loaded_modules: List[Tuple[Any, Dict]] = [] + self.loaded_modules: list[tuple[Any, dict]] = [] configured_modules = config.get("modules") or [] for i, module in enumerate(configured_modules): diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py index 1b6c521087..a4a192302c 100644 --- a/synapse/config/oembed.py +++ b/synapse/config/oembed.py @@ -21,7 +21,7 @@ import importlib.resources as importlib_resources import json import re -from typing import Any, Dict, Iterable, List, Optional, Pattern +from typing import Any, Iterable, Optional, Pattern from urllib import parse as urlparse import attr @@ -37,9 +37,9 @@ class OEmbedEndpointConfig: # The API endpoint to fetch. api_endpoint: str # The patterns to match. - url_patterns: List[Pattern[str]] + url_patterns: list[Pattern[str]] # The supported formats. 
- formats: Optional[List[str]] + formats: Optional[list[str]] class OembedConfig(Config): @@ -48,10 +48,10 @@ class OembedConfig(Config): section = "oembed" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - oembed_config: Dict[str, Any] = config.get("oembed") or {} + oembed_config: dict[str, Any] = config.get("oembed") or {} # A list of patterns which will be used. - self.oembed_patterns: List[OEmbedEndpointConfig] = list( + self.oembed_patterns: list[OEmbedEndpointConfig] = list( self._parse_and_validate_providers(oembed_config) ) @@ -92,7 +92,7 @@ class OembedConfig(Config): ) def _parse_and_validate_provider( - self, providers: List[JsonDict], config_path: StrSequence + self, providers: list[JsonDict], config_path: StrSequence ) -> Iterable[OEmbedEndpointConfig]: # Ensure it is the proper form. validate_config( diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index 3ddf65a3e9..ada89bb8bc 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -21,7 +21,7 @@ # from collections import Counter -from typing import Any, Collection, Iterable, List, Mapping, Optional, Tuple, Type +from typing import Any, Collection, Iterable, Mapping, Optional import attr @@ -213,7 +213,7 @@ def _parse_oidc_provider_configs(config: JsonDict) -> Iterable["OidcProviderConf def _parse_oidc_config_dict( - oidc_config: JsonDict, config_path: Tuple[str, ...] + oidc_config: JsonDict, config_path: tuple[str, ...] ) -> "OidcProviderConfig": """Take the configuration dict and parse it into an OidcProviderConfig @@ -416,7 +416,7 @@ class OidcProviderConfig: # Valid values are 'auto', 'always', and 'never'. pkce_method: str - id_token_signing_alg_values_supported: Optional[List[str]] + id_token_signing_alg_values_supported: Optional[list[str]] """ List of the JWS signing algorithms (`alg` values) that are supported for signing the `id_token`. @@ -491,13 +491,13 @@ class OidcProviderConfig: allow_existing_users: bool # the class of the user mapping provider - user_mapping_provider_class: Type + user_mapping_provider_class: type # the config of the user mapping provider user_mapping_provider_config: Any # required attributes to require in userinfo to allow login/registration - attribute_requirements: List[SsoAttributeRequirement] + attribute_requirements: list[SsoAttributeRequirement] # Whether automatic registrations are enabled in the ODIC flow. Defaults to True enable_registration: bool diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py index b2b624aea2..c2894f58dc 100644 --- a/synapse/config/password_auth_providers.py +++ b/synapse/config/password_auth_providers.py @@ -19,7 +19,7 @@ # # -from typing import Any, List, Tuple, Type +from typing import Any from synapse.types import JsonDict from synapse.util.module_loader import load_module @@ -56,7 +56,7 @@ class PasswordAuthProviderConfig(Config): for backwards compatibility. 
""" - self.password_providers: List[Tuple[Type, Any]] = [] + self.password_providers: list[tuple[type, Any]] = [] providers = [] # We want to be backwards compatible with the old `ldap_config` diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index b082daa8f7..be2f49f87c 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -19,7 +19,7 @@ # # -from typing import Any, Dict, Optional, cast +from typing import Any, Optional, cast import attr @@ -37,9 +37,9 @@ class RatelimitSettings: @classmethod def parse( cls, - config: Dict[str, Any], + config: dict[str, Any], key: str, - defaults: Optional[Dict[str, float]] = None, + defaults: Optional[dict[str, float]] = None, ) -> "RatelimitSettings": """Parse config[key] as a new-style rate limiter config. @@ -62,7 +62,7 @@ class RatelimitSettings: # By this point we should have hit the rate limiter parameters. # We don't actually check this though! - rl_config = cast(Dict[str, float], rl_config) + rl_config = cast(dict[str, float], rl_config) return cls( key=key, diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 283199aa11..c0e7316bc3 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -20,7 +20,7 @@ # # import argparse -from typing import Any, Dict, Optional +from typing import Any, Optional from synapse.api.constants import RoomCreationPreset from synapse.config._base import Config, ConfigError, read_file @@ -266,7 +266,7 @@ class RegistrationConfig(Config): else: return "" - def generate_files(self, config: Dict[str, Any], config_dir_path: str) -> None: + def generate_files(self, config: dict[str, Any], config_dir_path: str) -> None: # if 'registration_shared_secret_path' is specified, and the target file # does not exist, generate it. registration_shared_secret_path = config.get("registration_shared_secret_path") diff --git a/synapse/config/repository.py b/synapse/config/repository.py index e7d23740f9..221130b0cd 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -21,7 +21,7 @@ import logging import os -from typing import Any, Dict, List, Tuple +from typing import Any import attr @@ -80,8 +80,8 @@ class MediaStorageProviderConfig: def parse_thumbnail_requirements( - thumbnail_sizes: List[JsonDict], -) -> Dict[str, Tuple[ThumbnailRequirement, ...]]: + thumbnail_sizes: list[JsonDict], +) -> dict[str, tuple[ThumbnailRequirement, ...]]: """Takes a list of dictionaries with "width", "height", and "method" keys and creates a map from image media types to the thumbnail size, thumbnailing method, and thumbnail media type to precalculate @@ -92,7 +92,7 @@ def parse_thumbnail_requirements( Returns: Dictionary mapping from media type string to list of ThumbnailRequirement. """ - requirements: Dict[str, List[ThumbnailRequirement]] = {} + requirements: dict[str, list[ThumbnailRequirement]] = {} for size in thumbnail_sizes: width = size["width"] height = size["height"] @@ -206,7 +206,7 @@ class ContentRepositoryConfig(Config): # # We don't create the storage providers here as not all workers need # them to be started. 
- self.media_storage_providers: List[tuple] = [] + self.media_storage_providers: list[tuple] = [] for i, provider_config in enumerate(storage_providers): # We special case the module "file_system" so as not to need to @@ -298,7 +298,7 @@ class ContentRepositoryConfig(Config): self.enable_authenticated_media = config.get("enable_authenticated_media", True) - self.media_upload_limits: List[MediaUploadLimit] = [] + self.media_upload_limits: list[MediaUploadLimit] = [] for limit_config in config.get("media_upload_limits", []): time_period_ms = self.parse_duration(limit_config["time_period"]) max_bytes = self.parse_size(limit_config["max_size"]) diff --git a/synapse/config/retention.py b/synapse/config/retention.py index 7e329c7f42..9d34f1e241 100644 --- a/synapse/config/retention.py +++ b/synapse/config/retention.py @@ -20,7 +20,7 @@ # import logging -from typing import Any, List, Optional +from typing import Any, Optional import attr @@ -119,7 +119,7 @@ class RetentionConfig(Config): " greater than 'allowed_lifetime_max'" ) - self.retention_purge_jobs: List[RetentionPurgeJob] = [] + self.retention_purge_jobs: list[RetentionPurgeJob] = [] for purge_job_config in retention_config.get("purge_jobs", []): interval_config = purge_job_config.get("interval") diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py index 9d7ef94507..acba63ee9b 100644 --- a/synapse/config/saml2.py +++ b/synapse/config/saml2.py @@ -20,7 +20,7 @@ # import logging -from typing import Any, List, Set +from typing import Any from synapse.config.sso import SsoAttributeRequirement from synapse.types import JsonDict @@ -160,8 +160,11 @@ class SAML2Config(Config): ) # Get the desired saml auth response attributes from the module + # type-ignore: the provider class was already checked for having the method being called + # with the runtime checks above, which mypy is not aware of, and treats as an error + # ever since the typehint of provider class was changed from "typing.Type" to "type" saml2_config_dict = self._default_saml_config_dict( - *self.saml2_user_mapping_provider_class.get_saml_attributes( + *self.saml2_user_mapping_provider_class.get_saml_attributes( # type: ignore[attr-defined] self.saml2_user_mapping_provider_config ) ) @@ -191,7 +194,7 @@ class SAML2Config(Config): ) def _default_saml_config_dict( - self, required_attributes: Set[str], optional_attributes: Set[str] + self, required_attributes: set[str], optional_attributes: set[str] ) -> JsonDict: """Generate a configuration dictionary with required and optional attributes that will be needed to process new user registration @@ -239,7 +242,7 @@ ATTRIBUTE_REQUIREMENTS_SCHEMA = { def _parse_attribute_requirements_def( attribute_requirements: Any, -) -> List[SsoAttributeRequirement]: +) -> list[SsoAttributeRequirement]: validate_config( ATTRIBUTE_REQUIREMENTS_SCHEMA, attribute_requirements, diff --git a/synapse/config/server.py b/synapse/config/server.py index e15bceb296..662ed24a13 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -25,7 +25,7 @@ import logging import os.path import urllib.parse from textwrap import indent -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, TypedDict, Union +from typing import Any, Iterable, Optional, TypedDict, Union from urllib.request import getproxies_environment import attr @@ -213,7 +213,7 @@ KNOWN_RESOURCES = { @attr.s(frozen=True) class HttpResourceConfig: - names: List[str] = attr.ib( + names: list[str] = attr.ib( factory=list, 
validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)), ) @@ -228,8 +228,8 @@ class HttpListenerConfig: """Object describing the http-specific parts of the config of a listener""" x_forwarded: bool = False - resources: List[HttpResourceConfig] = attr.Factory(list) - additional_resources: Dict[str, dict] = attr.Factory(dict) + resources: list[HttpResourceConfig] = attr.Factory(list) + additional_resources: dict[str, dict] = attr.Factory(dict) tag: Optional[str] = None request_id_header: Optional[str] = None @@ -239,7 +239,7 @@ class TCPListenerConfig: """Object describing the configuration of a single TCP listener.""" port: int = attr.ib(validator=attr.validators.instance_of(int)) - bind_addresses: List[str] = attr.ib(validator=attr.validators.instance_of(List)) + bind_addresses: list[str] = attr.ib(validator=attr.validators.instance_of(list)) type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES)) tls: bool = False @@ -344,7 +344,7 @@ class ProxyConfig: """ Proxy server to use for HTTPS requests. """ - no_proxy_hosts: Optional[List[str]] + no_proxy_hosts: Optional[list[str]] """ List of hosts, IP addresses, or IP ranges in CIDR format which should not use the proxy. Synapse will directly connect to these hosts. @@ -864,11 +864,11 @@ class ServerConfig(Config): ) # Whitelist of domain names that given next_link parameters must have - next_link_domain_whitelist: Optional[List[str]] = config.get( + next_link_domain_whitelist: Optional[list[str]] = config.get( "next_link_domain_whitelist" ) - self.next_link_domain_whitelist: Optional[Set[str]] = None + self.next_link_domain_whitelist: Optional[set[str]] = None if next_link_domain_whitelist is not None: if not isinstance(next_link_domain_whitelist, list): raise ConfigError("'next_link_domain_whitelist' must be a list") @@ -892,7 +892,7 @@ class ServerConfig(Config): config.get("use_account_validity_in_account_status") or False ) - self.rooms_to_exclude_from_sync: List[str] = ( + self.rooms_to_exclude_from_sync: list[str] = ( config.get("exclude_rooms_from_sync") or [] ) @@ -927,7 +927,7 @@ class ServerConfig(Config): data_dir_path: str, server_name: str, open_private_ports: bool, - listeners: Optional[List[dict]], + listeners: Optional[list[dict]], **kwargs: Any, ) -> str: _, bind_port = parse_and_validate_server_name(server_name) @@ -1028,7 +1028,7 @@ class ServerConfig(Config): help="Turn on the twisted telnet manhole service on the given port.", ) - def read_gc_intervals(self, durations: Any) -> Optional[Tuple[float, float, float]]: + def read_gc_intervals(self, durations: Any) -> Optional[tuple[float, float, float]]: """Reads the three durations for the GC min interval option, returning seconds.""" if durations is None: return None @@ -1048,7 +1048,7 @@ class ServerConfig(Config): def is_threepid_reserved( - reserved_threepids: List[JsonDict], threepid: JsonDict + reserved_threepids: list[JsonDict], threepid: JsonDict ) -> bool: """Check the threepid against the reserved threepid config Args: @@ -1066,8 +1066,8 @@ def is_threepid_reserved( def read_gc_thresholds( - thresholds: Optional[List[Any]], -) -> Optional[Tuple[int, int, int]]: + thresholds: Optional[list[Any]], +) -> Optional[tuple[int, int, int]]: """Reads the three integer thresholds for garbage collection. Ensures that the thresholds are integers if thresholds are supplied. 
""" diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py index 014c55d702..02d7cee88f 100644 --- a/synapse/config/spam_checker.py +++ b/synapse/config/spam_checker.py @@ -19,7 +19,7 @@ # import logging -from typing import Any, Dict, List, Tuple +from typing import Any from synapse.config import ConfigError from synapse.types import JsonDict @@ -41,7 +41,7 @@ class SpamCheckerConfig(Config): section = "spamchecker" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - self.spam_checkers: List[Tuple[Any, Dict]] = [] + self.spam_checkers: list[tuple[Any, dict]] = [] spam_checkers = config.get("spam_checker") or [] if isinstance(spam_checkers, dict): diff --git a/synapse/config/sso.py b/synapse/config/sso.py index cf27a7ee13..facb418510 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -19,7 +19,7 @@ # # import logging -from typing import Any, Dict, List, Optional +from typing import Any, Optional import attr @@ -45,7 +45,7 @@ class SsoAttributeRequirement: attribute: str # If neither `value` nor `one_of` is given, the attribute must simply exist. value: Optional[str] = None - one_of: Optional[List[str]] = None + one_of: Optional[list[str]] = None JSON_SCHEMA = { "type": "object", @@ -64,7 +64,7 @@ class SSOConfig(Config): section = "sso" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - sso_config: Dict[str, Any] = config.get("sso") or {} + sso_config: dict[str, Any] = config.get("sso") or {} # The sso-specific template_dir self.sso_template_dir = sso_config.get("template_dir") diff --git a/synapse/config/tls.py b/synapse/config/tls.py index a48d81fdc3..d03a77d9d2 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -20,7 +20,7 @@ # import logging -from typing import Any, List, Optional, Pattern +from typing import Any, Optional, Pattern from matrix_common.regex import glob_to_regex @@ -84,7 +84,7 @@ class TlsConfig(Config): fed_whitelist_entries = [] # Support globs (*) in whitelist values - self.federation_certificate_verification_whitelist: List[Pattern] = [] + self.federation_certificate_verification_whitelist: list[Pattern] = [] for entry in fed_whitelist_entries: try: entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii")) diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index d31fd41082..ccfeed4d07 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -19,7 +19,7 @@ # # -from typing import Any, List, Set +from typing import Any from synapse.types import JsonDict from synapse.util.check_dependencies import check_requirements @@ -42,7 +42,7 @@ class TracerConfig(Config): {"sampler": {"type": "const", "param": 1}, "logging": False}, ) - self.force_tracing_for_users: Set[str] = set() + self.force_tracing_for_users: set[str] = set() if not self.opentracer_enabled: return @@ -51,7 +51,7 @@ class TracerConfig(Config): # The tracer is enabled so sanitize the config - self.opentracer_whitelist: List[str] = opentracing_config.get( + self.opentracer_whitelist: list[str] = opentracing_config.get( "homeserver_whitelist", [] ) if not isinstance(self.opentracer_whitelist, list): diff --git a/synapse/config/user_types.py b/synapse/config/user_types.py index 2d9c9f7afb..dd64425d6c 100644 --- a/synapse/config/user_types.py +++ b/synapse/config/user_types.py @@ -12,7 +12,7 @@ # . 
# -from typing import Any, List, Optional +from typing import Any, Optional from synapse.api.constants import UserTypes from synapse.types import JsonDict @@ -29,9 +29,9 @@ class UserTypesConfig(Config): self.default_user_type: Optional[str] = user_types.get( "default_user_type", None ) - self.extra_user_types: List[str] = user_types.get("extra_user_types", []) + self.extra_user_types: list[str] = user_types.get("extra_user_types", []) - all_user_types: List[str] = [] + all_user_types: list[str] = [] all_user_types.extend(UserTypes.ALL_BUILTIN_USER_TYPES) all_user_types.extend(self.extra_user_types) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 825ba78482..da7148b3a1 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -22,7 +22,7 @@ import argparse import logging -from typing import Any, Dict, List, Optional, Union +from typing import Any, Optional, Union import attr @@ -79,7 +79,7 @@ MAIN_PROCESS_INSTANCE_MAP_NAME = "main" logger = logging.getLogger(__name__) -def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]: +def _instance_to_list_converter(obj: Union[str, list[str]]) -> list[str]: """Helper for allowing parsing a string or list of strings to a config option expecting a list of strings. """ @@ -142,39 +142,39 @@ class WriterLocations: device_lists: The instances that write to the device list stream. """ - events: List[str] = attr.ib( + events: list[str] = attr.ib( default=[MAIN_PROCESS_INSTANCE_NAME], converter=_instance_to_list_converter, ) - typing: List[str] = attr.ib( + typing: list[str] = attr.ib( default=[MAIN_PROCESS_INSTANCE_NAME], converter=_instance_to_list_converter, ) - to_device: List[str] = attr.ib( + to_device: list[str] = attr.ib( default=[MAIN_PROCESS_INSTANCE_NAME], converter=_instance_to_list_converter, ) - account_data: List[str] = attr.ib( + account_data: list[str] = attr.ib( default=[MAIN_PROCESS_INSTANCE_NAME], converter=_instance_to_list_converter, ) - receipts: List[str] = attr.ib( + receipts: list[str] = attr.ib( default=[MAIN_PROCESS_INSTANCE_NAME], converter=_instance_to_list_converter, ) - presence: List[str] = attr.ib( + presence: list[str] = attr.ib( default=[MAIN_PROCESS_INSTANCE_NAME], converter=_instance_to_list_converter, ) - push_rules: List[str] = attr.ib( + push_rules: list[str] = attr.ib( default=[MAIN_PROCESS_INSTANCE_NAME], converter=_instance_to_list_converter, ) - device_lists: List[str] = attr.ib( + device_lists: list[str] = attr.ib( default=[MAIN_PROCESS_INSTANCE_NAME], converter=_instance_to_list_converter, ) - thread_subscriptions: List[str] = attr.ib( + thread_subscriptions: list[str] = attr.ib( default=["master"], converter=_instance_to_list_converter, ) @@ -190,8 +190,8 @@ class OutboundFederationRestrictedTo: locations: list of instance locations to connect to proxy via. """ - instances: Optional[List[str]] - locations: List[InstanceLocationConfig] = attr.Factory(list) + instances: Optional[list[str]] + locations: list[InstanceLocationConfig] = attr.Factory(list) def __contains__(self, instance: str) -> bool: # It feels a bit dirty to return `True` if `instances` is `None`, but it makes @@ -295,7 +295,7 @@ class WorkerConfig(Config): # A map from instance name to host/port of their HTTP replication endpoint. # Check if the main process is declared. The main process itself doesn't need # this data as it would never have to talk to itself. 
- instance_map: Dict[str, Any] = config.get("instance_map", {}) + instance_map: dict[str, Any] = config.get("instance_map", {}) if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME: # TODO: The next 3 condition blocks can be deleted after some time has @@ -342,7 +342,7 @@ class WorkerConfig(Config): ) # type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently - self.instance_map: Dict[str, InstanceLocationConfig] = ( + self.instance_map: dict[str, InstanceLocationConfig] = ( parse_and_validate_mapping( instance_map, InstanceLocationConfig, # type: ignore[arg-type] @@ -481,7 +481,7 @@ class WorkerConfig(Config): def _should_this_worker_perform_duty( self, - config: Dict[str, Any], + config: dict[str, Any], legacy_master_option_name: str, legacy_worker_app_name: str, new_option_name: str, @@ -574,11 +574,11 @@ class WorkerConfig(Config): def _worker_names_performing_this_duty( self, - config: Dict[str, Any], + config: dict[str, Any], legacy_option_name: str, legacy_app_name: str, modern_instance_list_name: str, - ) -> List[str]: + ) -> list[str]: """ Retrieves the names of the workers handling a given duty, by either legacy option or instance list. diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index c36398cec0..d13d5d04c3 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -23,7 +23,7 @@ import collections.abc import hashlib import logging -from typing import Any, Callable, Dict, Tuple +from typing import Any, Callable from canonicaljson import encode_canonical_json from signedjson.sign import sign_json @@ -80,8 +80,8 @@ def check_event_content_hash( def compute_content_hash( - event_dict: Dict[str, Any], hash_algorithm: Hasher -) -> Tuple[str, bytes]: + event_dict: dict[str, Any], hash_algorithm: Hasher +) -> tuple[str, bytes]: """Compute the content hash of an event, which is the hash of the unredacted event. @@ -112,7 +112,7 @@ def compute_content_hash( def compute_event_reference_hash( event: EventBase, hash_algorithm: Hasher = hashlib.sha256 -) -> Tuple[str, bytes]: +) -> tuple[str, bytes]: """Computes the event reference hash. This is the hash of the redacted event. @@ -139,7 +139,7 @@ def compute_event_signature( event_dict: JsonDict, signature_name: str, signing_key: SigningKey, -) -> Dict[str, Dict[str, str]]: +) -> dict[str, dict[str, str]]: """Compute the signature of the event for the given name and key. Args: diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 258bc29357..24a693fdb1 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -21,7 +21,7 @@ import abc import logging -from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Callable, Iterable, Optional import attr from signedjson.key import ( @@ -82,7 +82,7 @@ class VerifyJsonRequest: server_name: str get_json_object: Callable[[], JsonDict] minimum_valid_until_ts: int - key_ids: List[str] + key_ids: list[str] @staticmethod def from_json_object( @@ -141,7 +141,7 @@ class _FetchKeyRequest: server_name: str minimum_valid_until_ts: int - key_ids: List[str] + key_ids: list[str] class Keyring: @@ -156,7 +156,7 @@ class Keyring: if key_fetchers is None: # Always fetch keys from the database. - mutable_key_fetchers: List[KeyFetcher] = [StoreKeyFetcher(hs)] + mutable_key_fetchers: list[KeyFetcher] = [StoreKeyFetcher(hs)] # Fetch keys from configured trusted key servers, if any exist. 
key_servers = hs.config.key.key_servers if key_servers: @@ -169,7 +169,7 @@ class Keyring: self._key_fetchers = key_fetchers self._fetch_keys_queue: BatchingQueue[ - _FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]] + _FetchKeyRequest, dict[str, dict[str, FetchKeyResult]] ] = BatchingQueue( name="keyring_server", hs=hs, @@ -182,7 +182,7 @@ class Keyring: # build a FetchKeyResult for each of our own keys, to shortcircuit the # fetcher. - self._local_verify_keys: Dict[str, FetchKeyResult] = {} + self._local_verify_keys: dict[str, FetchKeyResult] = {} for key_id, key in hs.config.key.old_signing_keys.items(): self._local_verify_keys[key_id] = FetchKeyResult( verify_key=key, valid_until_ts=key.expired @@ -229,8 +229,8 @@ class Keyring: return await self.process_request(request) def verify_json_objects_for_server( - self, server_and_json: Iterable[Tuple[str, dict, int]] - ) -> List["defer.Deferred[None]"]: + self, server_and_json: Iterable[tuple[str, dict, int]] + ) -> list["defer.Deferred[None]"]: """Bulk verifies signatures of json objects, bulk fetching keys as necessary. @@ -286,7 +286,7 @@ class Keyring: Codes.UNAUTHORIZED, ) - found_keys: Dict[str, FetchKeyResult] = {} + found_keys: dict[str, FetchKeyResult] = {} # If we are the originating server, short-circuit the key-fetch for any keys # we already have @@ -368,8 +368,8 @@ class Keyring: ) async def _inner_fetch_key_requests( - self, requests: List[_FetchKeyRequest] - ) -> Dict[str, Dict[str, FetchKeyResult]]: + self, requests: list[_FetchKeyRequest] + ) -> dict[str, dict[str, FetchKeyResult]]: """Processing function for the queue of `_FetchKeyRequest`. Takes a list of key fetch requests, de-duplicates them and then carries out @@ -387,7 +387,7 @@ class Keyring: # First we need to deduplicate requests for the same key. We do this by # taking the *maximum* requested `minimum_valid_until_ts` for each pair # of server name/key ID. - server_to_key_to_ts: Dict[str, Dict[str, int]] = {} + server_to_key_to_ts: dict[str, dict[str, int]] = {} for request in requests: by_server = server_to_key_to_ts.setdefault(request.server_name, {}) for key_id in request.key_ids: @@ -412,7 +412,7 @@ class Keyring: # We now convert the returned list of results into a map from server # name to key ID to FetchKeyResult, to return. - to_return: Dict[str, Dict[str, FetchKeyResult]] = {} + to_return: dict[str, dict[str, FetchKeyResult]] = {} for request, results in zip(deduped_requests, results_per_request): to_return_by_server = to_return.setdefault(request.server_name, {}) for key_id, key_result in results.items(): @@ -424,7 +424,7 @@ class Keyring: async def _inner_fetch_key_request( self, verify_request: _FetchKeyRequest - ) -> Dict[str, FetchKeyResult]: + ) -> dict[str, FetchKeyResult]: """Attempt to fetch the given key by calling each key fetcher one by one. 
If a key is found, check whether its `valid_until_ts` attribute satisfies the @@ -445,7 +445,7 @@ class Keyring: """ logger.debug("Starting fetch for %s", verify_request) - found_keys: Dict[str, FetchKeyResult] = {} + found_keys: dict[str, FetchKeyResult] = {} missing_key_ids = set(verify_request.key_ids) for fetcher in self._key_fetchers: @@ -499,8 +499,8 @@ class KeyFetcher(metaclass=abc.ABCMeta): self._queue.shutdown() async def get_keys( - self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int - ) -> Dict[str, FetchKeyResult]: + self, server_name: str, key_ids: list[str], minimum_valid_until_ts: int + ) -> dict[str, FetchKeyResult]: results = await self._queue.add_to_queue( _FetchKeyRequest( server_name=server_name, @@ -512,8 +512,8 @@ class KeyFetcher(metaclass=abc.ABCMeta): @abc.abstractmethod async def _fetch_keys( - self, keys_to_fetch: List[_FetchKeyRequest] - ) -> Dict[str, Dict[str, FetchKeyResult]]: + self, keys_to_fetch: list[_FetchKeyRequest] + ) -> dict[str, dict[str, FetchKeyResult]]: pass @@ -526,8 +526,8 @@ class StoreKeyFetcher(KeyFetcher): self.store = hs.get_datastores().main async def _fetch_keys( - self, keys_to_fetch: List[_FetchKeyRequest] - ) -> Dict[str, Dict[str, FetchKeyResult]]: + self, keys_to_fetch: list[_FetchKeyRequest] + ) -> dict[str, dict[str, FetchKeyResult]]: key_ids_to_fetch = ( (queue_value.server_name, key_id) for queue_value in keys_to_fetch @@ -535,7 +535,7 @@ class StoreKeyFetcher(KeyFetcher): ) res = await self.store.get_server_keys_json(key_ids_to_fetch) - keys: Dict[str, Dict[str, FetchKeyResult]] = {} + keys: dict[str, dict[str, FetchKeyResult]] = {} for (server_name, key_id), key in res.items(): keys.setdefault(server_name, {})[key_id] = key return keys @@ -549,7 +549,7 @@ class BaseV2KeyFetcher(KeyFetcher): async def process_v2_response( self, from_server: str, response_json: JsonDict, time_added_ms: int - ) -> Dict[str, FetchKeyResult]: + ) -> dict[str, FetchKeyResult]: """Parse a 'Server Keys' structure from the result of a /key request This is used to parse either the entirety of the response from @@ -640,11 +640,11 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): self.key_servers = hs.config.key.key_servers async def _fetch_keys( - self, keys_to_fetch: List[_FetchKeyRequest] - ) -> Dict[str, Dict[str, FetchKeyResult]]: + self, keys_to_fetch: list[_FetchKeyRequest] + ) -> dict[str, dict[str, FetchKeyResult]]: """see KeyFetcher._fetch_keys""" - async def get_key(key_server: TrustedKeyServer) -> Dict: + async def get_key(key_server: TrustedKeyServer) -> dict: try: return await self.get_server_verify_key_v2_indirect( keys_to_fetch, key_server @@ -670,7 +670,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): ).addErrback(unwrapFirstError) ) - union_of_keys: Dict[str, Dict[str, FetchKeyResult]] = {} + union_of_keys: dict[str, dict[str, FetchKeyResult]] = {} for result in results: for server_name, keys in result.items(): union_of_keys.setdefault(server_name, {}).update(keys) @@ -678,8 +678,8 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): return union_of_keys async def get_server_verify_key_v2_indirect( - self, keys_to_fetch: List[_FetchKeyRequest], key_server: TrustedKeyServer - ) -> Dict[str, Dict[str, FetchKeyResult]]: + self, keys_to_fetch: list[_FetchKeyRequest], key_server: TrustedKeyServer + ) -> dict[str, dict[str, FetchKeyResult]]: """ Args: keys_to_fetch: @@ -731,8 +731,8 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): "Response from notary server %s: %s", perspective_name, query_response ) - keys: 
Dict[str, Dict[str, FetchKeyResult]] = {} - added_keys: Dict[Tuple[str, str], FetchKeyResult] = {} + keys: dict[str, dict[str, FetchKeyResult]] = {} + added_keys: dict[tuple[str, str], FetchKeyResult] = {} time_now_ms = self.clock.time_msec() @@ -836,8 +836,8 @@ class ServerKeyFetcher(BaseV2KeyFetcher): self.client = hs.get_federation_http_client() async def get_keys( - self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int - ) -> Dict[str, FetchKeyResult]: + self, server_name: str, key_ids: list[str], minimum_valid_until_ts: int + ) -> dict[str, FetchKeyResult]: results = await self._queue.add_to_queue( _FetchKeyRequest( server_name=server_name, @@ -849,8 +849,8 @@ class ServerKeyFetcher(BaseV2KeyFetcher): return results.get(server_name, {}) async def _fetch_keys( - self, keys_to_fetch: List[_FetchKeyRequest] - ) -> Dict[str, Dict[str, FetchKeyResult]]: + self, keys_to_fetch: list[_FetchKeyRequest] + ) -> dict[str, dict[str, FetchKeyResult]]: """ Args: keys_to_fetch: @@ -879,7 +879,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): async def get_server_verify_keys_v2_direct( self, server_name: str - ) -> Dict[str, FetchKeyResult]: + ) -> dict[str, FetchKeyResult]: """ Args: diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 64de3f7ef8..5d927a925a 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -26,15 +26,11 @@ import typing from typing import ( Any, ChainMap, - Dict, Iterable, - List, Mapping, MutableMapping, Optional, Protocol, - Set, - Tuple, Union, cast, ) @@ -91,7 +87,7 @@ class _EventSourceStore(Protocol): redact_behaviour: EventRedactBehaviour, get_prev_content: bool = False, allow_rejected: bool = False, - ) -> Dict[str, "EventBase"]: ... + ) -> dict[str, "EventBase"]: ... def validate_event_for_room_version(event: "EventBase") -> None: @@ -993,7 +989,7 @@ def _check_power_levels( user_level = get_user_power_level(event.user_id, auth_events) # Check other levels: - levels_to_check: List[Tuple[str, Optional[str]]] = [ + levels_to_check: list[tuple[str, Optional[str]]] = [ ("users_default", None), ("events_default", None), ("state_default", None), @@ -1191,7 +1187,7 @@ def _verify_third_party_invite( return False -def get_public_keys(invite_event: "EventBase") -> List[Dict[str, Any]]: +def get_public_keys(invite_event: "EventBase") -> list[dict[str, Any]]: public_keys = [] if "public_key" in invite_event.content: o = {"public_key": invite_event.content["public_key"]} @@ -1204,7 +1200,7 @@ def get_public_keys(invite_event: "EventBase") -> List[Dict[str, Any]]: def auth_types_for_event( room_version: RoomVersion, event: Union["EventBase", "EventBuilder"] -) -> Set[Tuple[str, str]]: +) -> set[tuple[str, str]]: """Given an event, return a list of (EventType, StateKey) that may be needed to auth the event. The returned list may be a superset of what would actually be required depending on the full state of the room. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index db38754280..a353076e0d 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -25,14 +25,10 @@ import collections.abc from typing import ( TYPE_CHECKING, Any, - Dict, Generic, Iterable, - List, Literal, Optional, - Tuple, - Type, TypeVar, Union, overload, @@ -94,20 +90,20 @@ class DictProperty(Generic[T]): def __get__( self, instance: Literal[None], - owner: Optional[Type[_DictPropertyInstance]] = None, + owner: Optional[type[_DictPropertyInstance]] = None, ) -> "DictProperty": ... 
@overload def __get__( self, instance: _DictPropertyInstance, - owner: Optional[Type[_DictPropertyInstance]] = None, + owner: Optional[type[_DictPropertyInstance]] = None, ) -> T: ... def __get__( self, instance: Optional[_DictPropertyInstance], - owner: Optional[Type[_DictPropertyInstance]] = None, + owner: Optional[type[_DictPropertyInstance]] = None, ) -> Union[T, "DictProperty"]: # if the property is accessed as a class property rather than an instance # property, return the property itself rather than the value @@ -160,20 +156,20 @@ class DefaultDictProperty(DictProperty, Generic[T]): def __get__( self, instance: Literal[None], - owner: Optional[Type[_DictPropertyInstance]] = None, + owner: Optional[type[_DictPropertyInstance]] = None, ) -> "DefaultDictProperty": ... @overload def __get__( self, instance: _DictPropertyInstance, - owner: Optional[Type[_DictPropertyInstance]] = None, + owner: Optional[type[_DictPropertyInstance]] = None, ) -> T: ... def __get__( self, instance: Optional[_DictPropertyInstance], - owner: Optional[Type[_DictPropertyInstance]] = None, + owner: Optional[type[_DictPropertyInstance]] = None, ) -> Union[T, "DefaultDictProperty"]: if instance is None: return self @@ -192,7 +188,7 @@ class EventBase(metaclass=abc.ABCMeta): self, event_dict: JsonDict, room_version: RoomVersion, - signatures: Dict[str, Dict[str, str]], + signatures: dict[str, dict[str, str]], unsigned: JsonDict, internal_metadata_dict: JsonDict, rejected_reason: Optional[str], @@ -210,7 +206,7 @@ class EventBase(metaclass=abc.ABCMeta): depth: DictProperty[int] = DictProperty("depth") content: DictProperty[JsonDict] = DictProperty("content") - hashes: DictProperty[Dict[str, str]] = DictProperty("hashes") + hashes: DictProperty[dict[str, str]] = DictProperty("hashes") origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts") sender: DictProperty[str] = DictProperty("sender") # TODO state_key should be Optional[str]. This is generally asserted in Synapse @@ -293,13 +289,13 @@ class EventBase(metaclass=abc.ABCMeta): def __contains__(self, field: str) -> bool: return field in self._dict - def items(self) -> List[Tuple[str, Optional[Any]]]: + def items(self) -> list[tuple[str, Optional[Any]]]: return list(self._dict.items()) def keys(self) -> Iterable[str]: return self._dict.keys() - def prev_event_ids(self) -> List[str]: + def prev_event_ids(self) -> list[str]: """Returns the list of prev event IDs. The order matches the order specified in the event, though there is no meaning to it. @@ -457,7 +453,7 @@ class FrozenEventV2(EventBase): def room_id(self) -> str: return self._dict["room_id"] - def prev_event_ids(self) -> List[str]: + def prev_event_ids(self) -> list[str]: """Returns the list of prev event IDs. The order matches the order specified in the event, though there is no meaning to it. @@ -558,7 +554,7 @@ class FrozenEventV4(FrozenEventV3): def _event_type_from_format_version( format_version: int, -) -> Type[Union[FrozenEvent, FrozenEventV2, FrozenEventV3]]: +) -> type[Union[FrozenEvent, FrozenEventV2, FrozenEventV3]]: """Returns the python type to use to construct an Event object for the given event format version. 
@@ -669,4 +665,4 @@ class StrippedStateEvent: type: str state_key: str sender: str - content: Dict[str, Any] + content: dict[str, Any] diff --git a/synapse/events/auto_accept_invites.py b/synapse/events/auto_accept_invites.py index 9e17edd227..4c59f0dffe 100644 --- a/synapse/events/auto_accept_invites.py +++ b/synapse/events/auto_accept_invites.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import Any, Dict, Tuple +from typing import Any from synapse.api.constants import AccountDataTypes, EventTypes, Membership from synapse.api.errors import SynapseError @@ -146,7 +146,7 @@ class InviteAutoAccepter: # Be careful: we convert the outer frozendict into a dict here, # but the contents of the dict are still frozen (tuples in lieu of lists, # etc.) - dm_map: Dict[str, Tuple[str, ...]] = dict( + dm_map: dict[str, tuple[str, ...]] = dict( await self._api.account_data_manager.get_global( user_id, AccountDataTypes.DIRECT ) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 1c9f78c7ca..a57303c999 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Optional, Union import attr from signedjson.types import SigningKey @@ -125,8 +125,8 @@ class EventBuilder: async def build( self, - prev_event_ids: List[str], - auth_event_ids: Optional[List[str]], + prev_event_ids: list[str], + auth_event_ids: Optional[list[str]], depth: Optional[int] = None, ) -> EventBase: """Transform into a fully signed and hashed event @@ -205,8 +205,8 @@ class EventBuilder: format_version = self.room_version.event_format # The types of auth/prev events changes between event versions. - prev_events: Union[StrCollection, List[Tuple[str, Dict[str, str]]]] - auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]] + prev_events: Union[StrCollection, list[tuple[str, dict[str, str]]]] + auth_events: Union[list[str], list[tuple[str, dict[str, str]]]] if format_version == EventFormatVersions.ROOM_V1_V2: auth_events = await self._store.add_event_hashes(auth_event_ids) prev_events = await self._store.add_event_hashes(prev_event_ids) @@ -228,7 +228,7 @@ class EventBuilder: # the db) depth = min(depth, MAX_DEPTH) - event_dict: Dict[str, Any] = { + event_dict: dict[str, Any] = { "auth_events": auth_events, "prev_events": prev_events, "type": self.type, diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py index 9713b141bc..39dd7ee2b3 100644 --- a/synapse/events/presence_router.py +++ b/synapse/events/presence_router.py @@ -24,11 +24,8 @@ from typing import ( Any, Awaitable, Callable, - Dict, Iterable, - List, Optional, - Set, TypeVar, Union, ) @@ -44,10 +41,10 @@ if TYPE_CHECKING: from synapse.server import HomeServer GET_USERS_FOR_STATES_CALLBACK = Callable[ - [Iterable[UserPresenceState]], Awaitable[Dict[str, Set[UserPresenceState]]] + [Iterable[UserPresenceState]], Awaitable[dict[str, set[UserPresenceState]]] ] # This must either return a set of strings or the constant PresenceRouter.ALL_USERS. -GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]] +GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[set[str], str]]] logger = logging.getLogger(__name__) @@ -98,7 +95,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None: return run # Register the hooks through the module API. 
- hooks: Dict[str, Optional[Callable[..., Any]]] = { + hooks: dict[str, Optional[Callable[..., Any]]] = { hook: async_wrapper(getattr(presence_router, hook, None)) for hook in presence_router_methods } @@ -116,8 +113,8 @@ class PresenceRouter: def __init__(self, hs: "HomeServer"): # Initially there are no callbacks - self._get_users_for_states_callbacks: List[GET_USERS_FOR_STATES_CALLBACK] = [] - self._get_interested_users_callbacks: List[GET_INTERESTED_USERS_CALLBACK] = [] + self._get_users_for_states_callbacks: list[GET_USERS_FOR_STATES_CALLBACK] = [] + self._get_interested_users_callbacks: list[GET_INTERESTED_USERS_CALLBACK] = [] def register_presence_router_callbacks( self, @@ -143,7 +140,7 @@ class PresenceRouter: async def get_users_for_states( self, state_updates: Iterable[UserPresenceState], - ) -> Dict[str, Set[UserPresenceState]]: + ) -> dict[str, set[UserPresenceState]]: """ Given an iterable of user presence updates, determine where each one needs to go. @@ -161,7 +158,7 @@ class PresenceRouter: # Don't include any extra destinations for presence updates return {} - users_for_states: Dict[str, Set[UserPresenceState]] = {} + users_for_states: dict[str, set[UserPresenceState]] = {} # run all the callbacks for get_users_for_states and combine the results for callback in self._get_users_for_states_callbacks: try: @@ -174,7 +171,7 @@ class PresenceRouter: logger.warning("Failed to run module API callback %s: %s", callback, e) continue - if not isinstance(result, Dict): + if not isinstance(result, dict): logger.warning( "Wrong type returned by module API callback %s: %s, expected Dict", callback, @@ -183,7 +180,7 @@ class PresenceRouter: continue for key, new_entries in result.items(): - if not isinstance(new_entries, Set): + if not isinstance(new_entries, set): logger.warning( "Wrong type returned by module API callback %s: %s, expected Set", callback, @@ -194,7 +191,7 @@ class PresenceRouter: return users_for_states - async def get_interested_users(self, user_id: str) -> Union[Set[str], str]: + async def get_interested_users(self, user_id: str) -> Union[set[str], str]: """ Retrieve a list of users that `user_id` is interested in receiving the presence of. This will be in addition to those they share a room with. 
@@ -234,7 +231,7 @@ class PresenceRouter: if result == PresenceRouter.ALL_USERS: return PresenceRouter.ALL_USERS - if not isinstance(result, Set): + if not isinstance(result, set): logger.warning( "Wrong type returned by module API callback %s: %s, expected set", callback, diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 63551143d8..764d31ee66 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -19,7 +19,7 @@ # # from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional import attr from immutabledict import immutabledict @@ -133,7 +133,7 @@ class EventContext(UnpersistedEventContextBase): """ _storage: "StorageControllers" - state_group_deltas: Dict[Tuple[int, int], StateMap[str]] + state_group_deltas: dict[tuple[int, int], StateMap[str]] rejected: Optional[str] = None _state_group: Optional[int] = None state_group_before_event: Optional[int] = None @@ -149,7 +149,7 @@ class EventContext(UnpersistedEventContextBase): state_group_before_event: Optional[int], state_delta_due_to_event: Optional[StateMap[str]], partial_state: bool, - state_group_deltas: Dict[Tuple[int, int], StateMap[str]], + state_group_deltas: dict[tuple[int, int], StateMap[str]], ) -> "EventContext": return EventContext( storage=storage, @@ -306,7 +306,7 @@ class EventContext(UnpersistedEventContextBase): ) -EventPersistencePair = Tuple[EventBase, EventContext] +EventPersistencePair = tuple[EventBase, EventContext] """ The combination of an event to be persisted and its context. """ @@ -365,11 +365,11 @@ class UnpersistedEventContext(UnpersistedEventContextBase): @classmethod async def batch_persist_unpersisted_contexts( cls, - events_and_context: List[Tuple[EventBase, "UnpersistedEventContextBase"]], + events_and_context: list[tuple[EventBase, "UnpersistedEventContextBase"]], room_id: str, last_known_state_group: int, datastore: "StateGroupDataStore", - ) -> List[EventPersistencePair]: + ) -> list[EventPersistencePair]: """ Takes a list of events and their associated unpersisted contexts and persists the unpersisted contexts, returning a list of events and persisted contexts. 
@@ -472,7 +472,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase): partial_state=self.partial_state, ) - def _build_state_group_deltas(self) -> Dict[Tuple[int, int], StateMap]: + def _build_state_group_deltas(self) -> dict[tuple[int, int], StateMap]: """ Collect deltas between the state groups associated with this context """ @@ -510,8 +510,8 @@ class UnpersistedEventContext(UnpersistedEventContextBase): def _encode_state_group_delta( - state_group_delta: Dict[Tuple[int, int], StateMap[str]], -) -> List[Tuple[int, int, Optional[List[Tuple[str, str, str]]]]]: + state_group_delta: dict[tuple[int, int], StateMap[str]], +) -> list[tuple[int, int, Optional[list[tuple[str, str, str]]]]]: if not state_group_delta: return [] @@ -523,8 +523,8 @@ def _encode_state_group_delta( def _decode_state_group_delta( - input: List[Tuple[int, int, List[Tuple[str, str, str]]]], -) -> Dict[Tuple[int, int], StateMap[str]]: + input: list[tuple[int, int, list[tuple[str, str, str]]]], +) -> dict[tuple[int, int], StateMap[str]]: if not input: return {} @@ -539,7 +539,7 @@ def _decode_state_group_delta( def _encode_state_dict( state_dict: Optional[StateMap[str]], -) -> Optional[List[Tuple[str, str, str]]]: +) -> Optional[list[tuple[str, str, str]]]: """Since dicts of (type, state_key) -> event_id cannot be serialized in JSON we need to convert them to a form that can. """ @@ -550,7 +550,7 @@ def _encode_state_dict( def _decode_state_dict( - input: Optional[List[Tuple[str, str, str]]], + input: Optional[list[tuple[str, str, str]]], ) -> Optional[StateMap[str]]: """Decodes a state dict encoded using `_encode_state_dict` above""" if input is None: diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 942072cf84..9fa251abd8 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -27,8 +27,6 @@ from typing import ( Awaitable, Callable, Collection, - Dict, - List, Mapping, Match, MutableMapping, @@ -239,7 +237,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic return allowed_fields -def _copy_field(src: JsonDict, dst: JsonDict, field: List[str]) -> None: +def _copy_field(src: JsonDict, dst: JsonDict, field: list[str]) -> None: """Copy the field in 'src' to 'dst'. For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"] @@ -292,7 +290,7 @@ def _escape_slash(m: Match[str]) -> str: return m.group(0) -def _split_field(field: str) -> List[str]: +def _split_field(field: str) -> list[str]: """ Splits strings on unescaped dots and removes escaping. @@ -333,7 +331,7 @@ def _split_field(field: str) -> List[str]: return result -def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: +def only_fields(dictionary: JsonDict, fields: list[str]) -> JsonDict: """Return a new dict with only the fields in 'dictionary' which are present in 'fields'. @@ -419,7 +417,7 @@ class SerializeEventConfig: # the transaction_id in the unsigned section of the event. requester: Optional[Requester] = None # List of event fields to include. If empty, all fields will be returned. - only_event_fields: Optional[List[str]] = None + only_event_fields: Optional[list[str]] = None # Some events can have stripped room state stored in the `unsigned` field. # This is required for invite and knock functionality. If this option is # False, that state will be removed from the event before it is returned. 
@@ -573,7 +571,7 @@ class EventClientSerializer: def __init__(self, hs: "HomeServer") -> None: self._store = hs.get_datastores().main self._auth = hs.get_auth() - self._add_extra_fields_to_unsigned_client_event_callbacks: List[ + self._add_extra_fields_to_unsigned_client_event_callbacks: list[ ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK ] = [] @@ -583,7 +581,7 @@ class EventClientSerializer: time_now: int, *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, - bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None, + bundle_aggregations: Optional[dict[str, "BundledAggregations"]] = None, ) -> JsonDict: """Serializes a single event. @@ -641,7 +639,7 @@ class EventClientSerializer: event: EventBase, time_now: int, config: SerializeEventConfig, - bundled_aggregations: Dict[str, "BundledAggregations"], + bundled_aggregations: dict[str, "BundledAggregations"], serialized_event: JsonDict, ) -> None: """Potentially injects bundled aggregations into the unsigned portion of the serialized event. @@ -718,8 +716,8 @@ class EventClientSerializer: time_now: int, *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, - bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None, - ) -> List[JsonDict]: + bundle_aggregations: Optional[dict[str, "BundledAggregations"]] = None, + ) -> list[JsonDict]: """Serializes multiple events. Args: @@ -763,7 +761,7 @@ PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]] def copy_and_fixup_power_levels_contents( old_power_levels: PowerLevelsContent, -) -> Dict[str, Union[int, Dict[str, int]]]: +) -> dict[str, Union[int, dict[str, int]]]: """Copy the content of a power_levels event, unfreezing immutabledicts along the way. We accept as input power level values which are strings, provided they represent an @@ -779,11 +777,11 @@ def copy_and_fixup_power_levels_contents( if not isinstance(old_power_levels, collections.abc.Mapping): raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,)) - power_levels: Dict[str, Union[int, Dict[str, int]]] = {} + power_levels: dict[str, Union[int, dict[str, int]]] = {} for k, v in old_power_levels.items(): if isinstance(v, collections.abc.Mapping): - h: Dict[str, int] = {} + h: dict[str, int] = {} power_levels[k] = h for k1, v1 in v.items(): _copy_power_level_value_as_integer(v1, h, k1) diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 4d9ba15829..6fb52f82c1 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -19,7 +19,7 @@ # # import collections.abc -from typing import List, Type, Union, cast +from typing import Union, cast import jsonschema @@ -283,13 +283,13 @@ POWER_LEVELS_SCHEMA = { class Mentions(RequestBodyModel): - user_ids: List[StrictStr] = Field(default_factory=list) + user_ids: list[StrictStr] = Field(default_factory=list) room: StrictBool = False # This could return something newer than Draft 7, but that's the current "latest" # validator. 
-def _create_validator(schema: JsonDict) -> Type[jsonschema.Draft7Validator]: +def _create_validator(schema: JsonDict) -> type[jsonschema.Draft7Validator]: validator = jsonschema.validators.validator_for(schema) # by default jsonschema does not consider a immutabledict to be an object so diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index a1c9c286ac..13e445456a 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Sequence +from typing import TYPE_CHECKING, Awaitable, Callable, Optional, Sequence from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership from synapse.api.errors import Codes, SynapseError @@ -305,7 +305,7 @@ def _is_invite_via_3pid(event: EventBase) -> bool: def parse_events_from_pdu_json( pdus_json: Sequence[JsonDict], room_version: RoomVersion -) -> List[EventBase]: +) -> list[EventBase]: return [ event_from_pdu_json(pdu_json, room_version) for pdu_json in filter_pdus_for_valid_depth(pdus_json) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8c91336dbc..cb2fa59f54 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -32,13 +32,10 @@ from typing import ( Callable, Collection, Container, - Dict, Iterable, - List, Mapping, Optional, Sequence, - Tuple, TypeVar, Union, ) @@ -120,8 +117,8 @@ class SendJoinResult: event: EventBase # A string giving the server the event was sent to. origin: str - state: List[EventBase] - auth_chain: List[EventBase] + state: list[EventBase] + auth_chain: list[EventBase] # True if 'state' elides non-critical membership events partial_state: bool @@ -135,7 +132,7 @@ class FederationClient(FederationBase): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.pdu_destination_tried: Dict[str, Dict[str, int]] = {} + self.pdu_destination_tried: dict[str, dict[str, int]] = {} self._clock.looping_call(self._clear_tried_cache, 60 * 1000) self.state = hs.get_state_handler() self.transport_layer = hs.get_federation_transport_client() @@ -145,7 +142,7 @@ class FederationClient(FederationBase): # Cache mapping `event_id` to a tuple of the event itself and the `pull_origin` # (which server we pulled the event from) - self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache( + self._get_pdu_cache: ExpiringCache[str, tuple[EventBase, str]] = ExpiringCache( cache_name="get_pdu_cache", server_name=self.server_name, hs=self.hs, @@ -163,8 +160,8 @@ class FederationClient(FederationBase): # It is a map of (room ID, suggested-only) -> the response of # get_room_hierarchy. self._get_room_hierarchy_cache: ExpiringCache[ - Tuple[str, bool], - Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]], + tuple[str, bool], + tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]], ] = ExpiringCache( cache_name="get_room_hierarchy_cache", server_name=self.server_name, @@ -265,7 +262,7 @@ class FederationClient(FederationBase): self, user: UserID, destination: str, - query: Dict[str, Dict[str, Dict[str, int]]], + query: dict[str, dict[str, dict[str, int]]], timeout: Optional[int], ) -> JsonDict: """Claims one-time keys for a device hosted on a remote server. 
@@ -285,8 +282,8 @@ class FederationClient(FederationBase): # Convert the query with counts into a stable and unstable query and check # if attempting to claim more than 1 OTK. - content: Dict[str, Dict[str, str]] = {} - unstable_content: Dict[str, Dict[str, List[str]]] = {} + content: dict[str, dict[str, str]] = {} + unstable_content: dict[str, dict[str, list[str]]] = {} use_unstable = False for user_id, one_time_keys in query.items(): for device_id, algorithms in one_time_keys.items(): @@ -337,7 +334,7 @@ class FederationClient(FederationBase): @tag_args async def backfill( self, dest: str, room_id: str, limit: int, extremities: Collection[str] - ) -> Optional[List[EventBase]]: + ) -> Optional[list[EventBase]]: """Requests some more historic PDUs for the given room from the given destination server. @@ -662,7 +659,7 @@ class FederationClient(FederationBase): @tag_args async def get_room_state_ids( self, destination: str, room_id: str, event_id: str - ) -> Tuple[List[str], List[str]]: + ) -> tuple[list[str], list[str]]: """Calls the /state_ids endpoint to fetch the state at a particular point in the room, and the auth events for the given event @@ -711,7 +708,7 @@ class FederationClient(FederationBase): room_id: str, event_id: str, room_version: RoomVersion, - ) -> Tuple[List[EventBase], List[EventBase]]: + ) -> tuple[list[EventBase], list[EventBase]]: """Calls the /state endpoint to fetch the state at a particular point in the room. @@ -772,7 +769,7 @@ class FederationClient(FederationBase): origin: str, pdus: Collection[EventBase], room_version: RoomVersion, - ) -> List[EventBase]: + ) -> list[EventBase]: """ Checks the signatures and hashes of a list of pulled events we got from federation and records any signature failures as failed pull attempts. @@ -806,7 +803,7 @@ class FederationClient(FederationBase): # We limit how many PDUs we check at once, as if we try to do hundreds # of thousands of PDUs at once we see large memory spikes. - valid_pdus: List[EventBase] = [] + valid_pdus: list[EventBase] = [] async def _record_failure_callback(event: EventBase, cause: str) -> None: await self.store.record_event_failed_pull_attempt( @@ -916,7 +913,7 @@ class FederationClient(FederationBase): async def get_event_auth( self, destination: str, room_id: str, event_id: str - ) -> List[EventBase]: + ) -> list[EventBase]: res = await self.transport_layer.get_event_auth(destination, room_id, event_id) room_version = await self.store.get_room_version(room_id) @@ -1050,7 +1047,7 @@ class FederationClient(FederationBase): membership: str, content: dict, params: Optional[Mapping[str, Union[str, Iterable[str]]]], - ) -> Tuple[str, EventBase, RoomVersion]: + ) -> tuple[str, EventBase, RoomVersion]: """ Creates an m.room.member event, with context, without participating in the room. @@ -1092,7 +1089,7 @@ class FederationClient(FederationBase): % (membership, ",".join(valid_memberships)) ) - async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]: + async def send_request(destination: str) -> tuple[str, EventBase, RoomVersion]: ret = await self.transport_layer.make_membership_event( destination, room_id, user_id, membership, params ) @@ -1237,7 +1234,7 @@ class FederationClient(FederationBase): # We now go and check the signatures and hashes for the event. Note # that we limit how many events we process at a time to keep the # memory overhead from exploding. 
- valid_pdus_map: Dict[str, EventBase] = {} + valid_pdus_map: dict[str, EventBase] = {} async def _execute(pdu: EventBase) -> None: valid_pdu = await self._check_sigs_and_hash_and_fetch_one( @@ -1507,7 +1504,7 @@ class FederationClient(FederationBase): # content. return resp[1] - async def send_knock(self, destinations: List[str], pdu: EventBase) -> JsonDict: + async def send_knock(self, destinations: list[str], pdu: EventBase) -> JsonDict: """Attempts to send a knock event to a given list of servers. Iterates through the list until one attempt succeeds. @@ -1568,7 +1565,7 @@ class FederationClient(FederationBase): remote_server: str, limit: Optional[int] = None, since_token: Optional[str] = None, - search_filter: Optional[Dict] = None, + search_filter: Optional[dict] = None, include_all_networks: bool = False, third_party_instance_id: Optional[str] = None, ) -> JsonDict: @@ -1612,7 +1609,7 @@ class FederationClient(FederationBase): limit: int, min_depth: int, timeout: int, - ) -> List[EventBase]: + ) -> list[EventBase]: """Tries to fetch events we are missing. This is called when we receive an event without having received all of its ancestors. @@ -1718,7 +1715,7 @@ class FederationClient(FederationBase): destinations: Iterable[str], room_id: str, suggested_only: bool, - ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]: + ) -> tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]: """ Call other servers to get a hierarchy of the given room. @@ -1749,7 +1746,7 @@ class FederationClient(FederationBase): async def send_request( destination: str, - ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]: + ) -> tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]: try: res = await self.transport_layer.get_room_hierarchy( destination=destination, @@ -1924,8 +1921,8 @@ class FederationClient(FederationBase): raise InvalidResponseError(str(e)) async def get_account_status( - self, destination: str, user_ids: List[str] - ) -> Tuple[JsonDict, List[str]]: + self, destination: str, user_ids: list[str] + ) -> tuple[JsonDict, list[str]]: """Retrieves account statuses for a given list of users on a given remote homeserver. @@ -1991,8 +1988,8 @@ class FederationClient(FederationBase): download_ratelimiter: Ratelimiter, ip_address: str, ) -> Union[ - Tuple[int, Dict[bytes, List[bytes]], bytes], - Tuple[int, Dict[bytes, List[bytes]]], + tuple[int, dict[bytes, list[bytes]], bytes], + tuple[int, dict[bytes, list[bytes]]], ]: try: return await self.transport_layer.federation_download_media( @@ -2036,7 +2033,7 @@ class FederationClient(FederationBase): max_timeout_ms: int, download_ratelimiter: Ratelimiter, ip_address: str, - ) -> Tuple[int, Dict[bytes, List[bytes]]]: + ) -> tuple[int, dict[bytes, list[bytes]]]: try: return await self.transport_layer.download_media_v3( destination, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index e7da8fda0d..6e14f4a049 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -27,11 +27,8 @@ from typing import ( Awaitable, Callable, Collection, - Dict, - List, Mapping, Optional, - Tuple, Union, ) @@ -163,10 +160,10 @@ class FederationServer(FederationBase): # origins that we are currently processing a transaction from. # a dict from origin to txn id. 
- self._active_transactions: Dict[str, str] = {} + self._active_transactions: dict[str, str] = {} # We cache results for transaction with the same ID - self._transaction_resp_cache: ResponseCache[Tuple[str, str]] = ResponseCache( + self._transaction_resp_cache: ResponseCache[tuple[str, str]] = ResponseCache( clock=hs.get_clock(), name="fed_txn_handler", server_name=self.server_name, @@ -179,7 +176,7 @@ class FederationServer(FederationBase): # We cache responses to state queries, as they take a while and often # come in waves. - self._state_resp_cache: ResponseCache[Tuple[str, Optional[str]]] = ( + self._state_resp_cache: ResponseCache[tuple[str, Optional[str]]] = ( ResponseCache( clock=hs.get_clock(), name="state_resp", @@ -187,7 +184,7 @@ class FederationServer(FederationBase): timeout_ms=30000, ) ) - self._state_ids_resp_cache: ResponseCache[Tuple[str, str]] = ResponseCache( + self._state_ids_resp_cache: ResponseCache[tuple[str, str]] = ResponseCache( clock=hs.get_clock(), name="state_ids_resp", server_name=self.server_name, @@ -236,8 +233,8 @@ class FederationServer(FederationBase): await self._clock.sleep(random.uniform(0, 0.1)) async def on_backfill_request( - self, origin: str, room_id: str, versions: List[str], limit: int - ) -> Tuple[int, Dict[str, Any]]: + self, origin: str, room_id: str, versions: list[str], limit: int + ) -> tuple[int, dict[str, Any]]: async with self._server_linearizer.queue((origin, room_id)): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) @@ -252,7 +249,7 @@ class FederationServer(FederationBase): async def on_timestamp_to_event_request( self, origin: str, room_id: str, timestamp: int, direction: Direction - ) -> Tuple[int, Dict[str, Any]]: + ) -> tuple[int, dict[str, Any]]: """When we receive a federated `/timestamp_to_event` request, handle all of the logic for validating and fetching the event. @@ -298,7 +295,7 @@ class FederationServer(FederationBase): transaction_id: str, destination: str, transaction_data: JsonDict, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: # If we receive a transaction we should make sure that kick off handling # any old events in the staging area. if not self._started_handling_of_staged_events: @@ -365,7 +362,7 @@ class FederationServer(FederationBase): async def _on_incoming_transaction_inner( self, origin: str, transaction: Transaction, request_time: int - ) -> Tuple[int, Dict[str, Any]]: + ) -> tuple[int, dict[str, Any]]: # CRITICAL SECTION: the first thing we must do (before awaiting) is # add an entry to _active_transactions. assert origin not in self._active_transactions @@ -381,7 +378,7 @@ class FederationServer(FederationBase): async def _handle_incoming_transaction( self, origin: str, transaction: Transaction, request_time: int - ) -> Tuple[int, Dict[str, Any]]: + ) -> tuple[int, dict[str, Any]]: """Process an incoming transaction and return the HTTP response Args: @@ -429,7 +426,7 @@ class FederationServer(FederationBase): async def _handle_pdus_in_txn( self, origin: str, transaction: Transaction, request_time: int - ) -> Dict[str, dict]: + ) -> dict[str, dict]: """Process the PDUs in a received transaction. 
Args: @@ -448,7 +445,7 @@ class FederationServer(FederationBase): origin_host, _ = parse_server_name(origin) - pdus_by_room: Dict[str, List[EventBase]] = {} + pdus_by_room: dict[str, list[EventBase]] = {} newest_pdu_ts = 0 @@ -601,7 +598,7 @@ class FederationServer(FederationBase): async def on_room_state_request( self, origin: str, room_id: str, event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self._event_auth_handler.assert_host_in_room(room_id, origin) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) @@ -625,7 +622,7 @@ class FederationServer(FederationBase): @tag_args async def on_state_ids_request( self, origin: str, room_id: str, event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if not event_id: raise NotImplementedError("Specify an event") @@ -653,7 +650,7 @@ class FederationServer(FederationBase): async def _on_context_state_request_compute( self, room_id: str, event_id: str - ) -> Dict[str, list]: + ) -> dict[str, list]: pdus: Collection[EventBase] event_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id) pdus = await self.store.get_events_as_list(event_ids) @@ -669,7 +666,7 @@ class FederationServer(FederationBase): async def on_pdu_request( self, origin: str, event_id: str - ) -> Tuple[int, Union[JsonDict, str]]: + ) -> tuple[int, Union[JsonDict, str]]: pdu = await self.handler.get_persisted_pdu(origin, event_id) if pdu: @@ -678,8 +675,8 @@ class FederationServer(FederationBase): return 404, "" async def on_query_request( - self, query_type: str, args: Dict[str, str] - ) -> Tuple[int, Dict[str, Any]]: + self, query_type: str, args: dict[str, str] + ) -> tuple[int, dict[str, Any]]: received_queries_counter.labels( type=query_type, **{SERVER_NAME_LABEL: self.server_name}, @@ -688,8 +685,8 @@ class FederationServer(FederationBase): return 200, resp async def on_make_join_request( - self, origin: str, room_id: str, user_id: str, supported_versions: List[str] - ) -> Dict[str, Any]: + self, origin: str, room_id: str, user_id: str, supported_versions: list[str] + ) -> dict[str, Any]: origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) @@ -714,7 +711,7 @@ class FederationServer(FederationBase): async def on_invite_request( self, origin: str, content: JsonDict, room_version_id: str - ) -> Dict[str, Any]: + ) -> dict[str, Any]: room_version = KNOWN_ROOM_VERSIONS.get(room_version_id) if not room_version: raise SynapseError( @@ -748,7 +745,7 @@ class FederationServer(FederationBase): content: JsonDict, room_id: str, caller_supports_partial_state: bool = False, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: set_tag( SynapseTags.SEND_JOIN_RESPONSE_IS_PARTIAL_STATE, caller_supports_partial_state, @@ -809,7 +806,7 @@ class FederationServer(FederationBase): async def on_make_leave_request( self, origin: str, room_id: str, user_id: str - ) -> Dict[str, Any]: + ) -> dict[str, Any]: origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) pdu = await self.handler.on_make_leave_request(origin, room_id, user_id) @@ -826,7 +823,7 @@ class FederationServer(FederationBase): return {} async def on_make_knock_request( - self, origin: str, room_id: str, user_id: str, supported_versions: List[str] + self, origin: str, room_id: str, user_id: str, supported_versions: list[str] ) -> JsonDict: """We've received a /make_knock/ request, so we create a partial knock event for the room and hand that back, 
along with the room version, to the knocking @@ -884,7 +881,7 @@ class FederationServer(FederationBase): origin: str, content: JsonDict, room_id: str, - ) -> Dict[str, List[JsonDict]]: + ) -> dict[str, list[JsonDict]]: """ We have received a knock event for a room. Verify and send the event into the room on the knocking homeserver's behalf. Then reply with some stripped state from the @@ -1034,7 +1031,7 @@ class FederationServer(FederationBase): async def on_event_auth( self, origin: str, room_id: str, event_id: str - ) -> Tuple[int, Dict[str, Any]]: + ) -> tuple[int, dict[str, Any]]: async with self._server_linearizer.queue((origin, room_id)): await self._event_auth_handler.assert_host_in_room(room_id, origin) origin_host, _ = parse_server_name(origin) @@ -1046,20 +1043,20 @@ class FederationServer(FederationBase): return 200, res async def on_query_client_keys( - self, origin: str, content: Dict[str, str] - ) -> Tuple[int, Dict[str, Any]]: + self, origin: str, content: dict[str, str] + ) -> tuple[int, dict[str, Any]]: return await self.on_query_request("client_keys", content) async def on_query_user_devices( self, origin: str, user_id: str - ) -> Tuple[int, Dict[str, Any]]: + ) -> tuple[int, dict[str, Any]]: keys = await self.device_handler.on_federation_query_user_devices(user_id) return 200, keys @trace async def on_claim_client_keys( - self, query: List[Tuple[str, str, str, int]], always_include_fallback_keys: bool - ) -> Dict[str, Any]: + self, query: list[tuple[str, str, str, int]], always_include_fallback_keys: bool + ) -> dict[str, Any]: if any( not self.hs.is_mine(UserID.from_string(user_id)) for user_id, _, _, _ in query @@ -1071,7 +1068,7 @@ class FederationServer(FederationBase): query, always_include_fallback_keys=always_include_fallback_keys ) - json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + json_result: dict[str, dict[str, dict[str, JsonDict]]] = {} for result in results: for user_id, device_keys in result.items(): for device_id, keys in device_keys.items(): @@ -1098,10 +1095,10 @@ class FederationServer(FederationBase): self, origin: str, room_id: str, - earliest_events: List[str], - latest_events: List[str], + earliest_events: list[str], + latest_events: list[str], limit: int, - ) -> Dict[str, list]: + ) -> dict[str, list]: async with self._server_linearizer.queue((origin, room_id)): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) @@ -1133,7 +1130,7 @@ class FederationServer(FederationBase): ts_now_ms = self._clock.time_msec() return await self.store.get_user_id_for_open_id_token(token, ts_now_ms) - def _transaction_dict_from_pdus(self, pdu_list: List[EventBase]) -> JsonDict: + def _transaction_dict_from_pdus(self, pdu_list: list[EventBase]) -> JsonDict: """Returns a new Transaction containing the given PDUs suitable for transmission. """ @@ -1208,7 +1205,7 @@ class FederationServer(FederationBase): async def _get_next_nonspam_staged_event_for_room( self, room_id: str, room_version: RoomVersion - ) -> Optional[Tuple[str, EventBase]]: + ) -> Optional[tuple[str, EventBase]]: """Fetch the first non-spam event from staging queue. 
Args: @@ -1363,13 +1360,13 @@ class FederationServer(FederationBase): lock = new_lock async def exchange_third_party_invite( - self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict + self, sender_user_id: str, target_user_id: str, room_id: str, signed: dict ) -> None: await self.handler.exchange_third_party_invite( sender_user_id, target_user_id, room_id, signed ) - async def on_exchange_third_party_invite_request(self, event_dict: Dict) -> None: + async def on_exchange_third_party_invite_request(self, event_dict: dict) -> None: await self.handler.on_exchange_third_party_invite_request(event_dict) async def check_server_matches_acl(self, server_name: str, room_id: str) -> None: @@ -1407,13 +1404,13 @@ class FederationHandlerRegistry: # the case. self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs) - self.edu_handlers: Dict[str, Callable[[str, dict], Awaitable[None]]] = {} - self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} + self.edu_handlers: dict[str, Callable[[str, dict], Awaitable[None]]] = {} + self.query_handlers: dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} # Map from type to instance names that we should route EDU handling to. # We randomly choose one instance from the list to route to for each new # EDU received. - self._edu_type_to_instance: Dict[str, List[str]] = {} + self._edu_type_to_instance: dict[str, list[str]] = {} def register_edu_handler( self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]] @@ -1455,7 +1452,7 @@ class FederationHandlerRegistry: self.query_handlers[query_type] = handler def register_instances_for_edu( - self, edu_type: str, instance_names: List[str] + self, edu_type: str, instance_names: list[str] ) -> None: """Register that the EDU handler is on multiple instances.""" self._edu_type_to_instance[edu_type] = instance_names diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 8340b48503..5628130429 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -27,7 +27,7 @@ These actions are mostly only used by the :py:mod:`.replication` module. """ import logging -from typing import Optional, Tuple +from typing import Optional from synapse.federation.units import Transaction from synapse.storage.databases.main import DataStore @@ -44,7 +44,7 @@ class TransactionActions: async def have_responded( self, origin: str, transaction: Transaction - ) -> Optional[Tuple[int, JsonDict]]: + ) -> Optional[tuple[int, JsonDict]]: """Have we already responded to a transaction with the same id and origin? diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 759df9836b..80f31798e8 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -40,14 +40,10 @@ import logging from enum import Enum from typing import ( TYPE_CHECKING, - Dict, Hashable, Iterable, - List, Optional, Sized, - Tuple, - Type, ) import attr @@ -77,7 +73,7 @@ class QueueNames(str, Enum): PRESENCE_DESTINATIONS = "presence_destinations" -queue_name_to_gauge_map: Dict[QueueNames, LaterGauge] = {} +queue_name_to_gauge_map: dict[QueueNames, LaterGauge] = {} for queue_name in QueueNames: queue_name_to_gauge_map[queue_name] = LaterGauge( @@ -100,23 +96,23 @@ class FederationRemoteSendQueue(AbstractFederationSender): # We may have multiple federation sender instances, so we need to track # their positions separately. 
self._sender_instances = hs.config.worker.federation_shard_config.instances - self._sender_positions: Dict[str, int] = {} + self._sender_positions: dict[str, int] = {} # Pending presence map user_id -> UserPresenceState - self.presence_map: Dict[str, UserPresenceState] = {} + self.presence_map: dict[str, UserPresenceState] = {} # Stores the destinations we need to explicitly send presence to about a # given user. # Stream position -> (user_id, destinations) - self.presence_destinations: SortedDict[int, Tuple[str, Iterable[str]]] = ( + self.presence_destinations: SortedDict[int, tuple[str, Iterable[str]]] = ( SortedDict() ) # (destination, key) -> EDU - self.keyed_edu: Dict[Tuple[str, tuple], Edu] = {} + self.keyed_edu: dict[tuple[str, tuple], Edu] = {} # stream position -> (destination, key) - self.keyed_edu_changed: SortedDict[int, Tuple[str, tuple]] = SortedDict() + self.keyed_edu_changed: SortedDict[int, tuple[str, tuple]] = SortedDict() self.edus: SortedDict[int, Edu] = SortedDict() @@ -295,7 +291,7 @@ class FederationRemoteSendQueue(AbstractFederationSender): async def get_replication_rows( self, instance_name: str, from_token: int, to_token: int, target_row_count: int - ) -> Tuple[List[Tuple[int, Tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: """Get rows to be sent over federation between the two tokens Args: @@ -318,7 +314,7 @@ class FederationRemoteSendQueue(AbstractFederationSender): # list of tuple(int, BaseFederationRow), where the first is the position # of the federation stream. - rows: List[Tuple[int, BaseFederationRow]] = [] + rows: list[tuple[int, BaseFederationRow]] = [] # Fetch presence to send to destinations i = self.presence_destinations.bisect_right(from_token) @@ -413,7 +409,7 @@ class BaseFederationRow: @attr.s(slots=True, frozen=True, auto_attribs=True) class PresenceDestinationsRow(BaseFederationRow): state: UserPresenceState - destinations: List[str] + destinations: list[str] TypeId = "pd" @@ -436,7 +432,7 @@ class KeyedEduRow(BaseFederationRow): typing EDUs clobber based on room_id. """ - key: Tuple[str, ...] # the edu key passed to send_edu + key: tuple[str, ...] # the edu key passed to send_edu edu: Edu TypeId = "k" @@ -471,7 +467,7 @@ class EduRow(BaseFederationRow): buff.edus.setdefault(self.edu.destination, []).append(self.edu) -_rowtypes: Tuple[Type[BaseFederationRow], ...] = ( +_rowtypes: tuple[type[BaseFederationRow], ...] = ( PresenceDestinationsRow, KeyedEduRow, EduRow, @@ -483,16 +479,16 @@ TypeToRow = {Row.TypeId: Row for Row in _rowtypes} @attr.s(slots=True, frozen=True, auto_attribs=True) class ParsedFederationStreamData: # list of tuples of UserPresenceState and destinations - presence_destinations: List[Tuple[UserPresenceState, List[str]]] + presence_destinations: list[tuple[UserPresenceState, list[str]]] # dict of destination -> { key -> Edu } - keyed_edus: Dict[str, Dict[Tuple[str, ...], Edu]] + keyed_edus: dict[str, dict[tuple[str, ...], Edu]] # dict of destination -> [Edu] - edus: Dict[str, List[Edu]] + edus: dict[str, list[Edu]] async def process_rows_for_federation( transaction_queue: FederationSender, - rows: List[FederationStream.FederationStreamRow], + rows: list[FederationStream.FederationStreamRow], ) -> None: """Parse a list of rows from the federation stream and put them in the transaction queue ready for sending to the relevant homeservers. 
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 4410ffc5c5..229ae647c0 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -135,13 +135,10 @@ from collections import OrderedDict from typing import ( TYPE_CHECKING, Collection, - Dict, Hashable, Iterable, - List, Literal, Optional, - Tuple, ) import attr @@ -312,7 +309,7 @@ class AbstractFederationSender(metaclass=abc.ABCMeta): @abc.abstractmethod async def get_replication_rows( self, instance_name: str, from_token: int, to_token: int, target_row_count: int - ) -> Tuple[List[Tuple[int, Tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: raise NotImplementedError() @@ -420,7 +417,7 @@ class FederationSender(AbstractFederationSender): self._federation_shard_config = hs.config.worker.federation_shard_config # map from destination to PerDestinationQueue - self._per_destination_queues: Dict[str, PerDestinationQueue] = {} + self._per_destination_queues: dict[str, PerDestinationQueue] = {} transaction_queue_pending_destinations_gauge.register_hook( homeserver_instance_id=hs.get_instance_id(), @@ -724,7 +721,7 @@ class FederationSender(AbstractFederationSender): **{SERVER_NAME_LABEL: self.server_name}, ).observe((now - ts) / 1000) - async def handle_room_events(events: List[EventBase]) -> None: + async def handle_room_events(events: list[EventBase]) -> None: logger.debug( "Handling %i events in room %s", len(events), events[0].room_id ) @@ -736,7 +733,7 @@ class FederationSender(AbstractFederationSender): for event in events: await handle_event(event) - events_by_room: Dict[str, List[EventBase]] = {} + events_by_room: dict[str, list[EventBase]] = {} for event_id in event_ids: # `event_entries` is unsorted, so we have to iterate over `event_ids` @@ -1124,7 +1121,7 @@ class FederationSender(AbstractFederationSender): @staticmethod async def get_replication_rows( instance_name: str, from_token: int, to_token: int, target_row_count: int - ) -> Tuple[List[Tuple[int, Tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: # Dummy implementation for case where federation sender isn't offloaded # to a worker. return [], 0, False diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 845af92fac..ecf4789d76 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -23,7 +23,7 @@ import datetime import logging from collections import OrderedDict from types import TracebackType -from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Type +from typing import TYPE_CHECKING, Hashable, Iterable, Optional import attr from prometheus_client import Counter @@ -145,16 +145,16 @@ class PerDestinationQueue: self._last_successful_stream_ordering: Optional[int] = None # a queue of pending PDUs - self._pending_pdus: List[EventBase] = [] + self._pending_pdus: list[EventBase] = [] # XXX this is never actually used: see # https://github.com/matrix-org/synapse/issues/7549 - self._pending_edus: List[Edu] = [] + self._pending_edus: list[Edu] = [] # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered # based on their key (e.g. 
typing events by room_id) # Map of (edu_type, key) -> Edu - self._pending_edus_keyed: Dict[Tuple[str, Hashable], Edu] = {} + self._pending_edus_keyed: dict[tuple[str, Hashable], Edu] = {} # Map of user_id -> UserPresenceState of pending presence to be sent to this # destination @@ -164,7 +164,7 @@ class PerDestinationQueue: # # Each receipt can only have a single receipt per # (room ID, receipt type, user ID, thread ID) tuple. - self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = [] + self._pending_receipt_edus: list[dict[str, dict[str, dict[str, dict]]]] = [] # stream_id of last successfully sent to-device message. # NB: may be a long or an int. @@ -340,7 +340,7 @@ class PerDestinationQueue: ) async def _transaction_transmission_loop(self) -> None: - pending_pdus: List[EventBase] = [] + pending_pdus: list[EventBase] = [] try: self.transmission_loop_running = True # This will throw if we wouldn't retry. We do this here so we fail @@ -665,12 +665,12 @@ class PerDestinationQueue: if not self._pending_receipt_edus: self._rrs_pending_flush = False - def _pop_pending_edus(self, limit: int) -> List[Edu]: + def _pop_pending_edus(self, limit: int) -> list[Edu]: pending_edus = self._pending_edus pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:] return pending_edus - async def _get_device_update_edus(self, limit: int) -> Tuple[List[Edu], int]: + async def _get_device_update_edus(self, limit: int) -> tuple[list[Edu], int]: last_device_list = self._last_device_list_stream_id # Retrieve list of new device updates to send to the destination @@ -691,7 +691,7 @@ class PerDestinationQueue: return edus, now_stream_id - async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]: + async def _get_to_device_message_edus(self, limit: int) -> tuple[list[Edu], int]: last_device_stream_id = self._last_device_stream_id to_device_stream_id = self._store.get_to_device_stream_token() contents, stream_id = await self._store.get_new_device_msgs_for_remote( @@ -745,9 +745,9 @@ class _TransactionQueueManager: _device_stream_id: Optional[int] = None _device_list_id: Optional[int] = None _last_stream_ordering: Optional[int] = None - _pdus: List[EventBase] = attr.Factory(list) + _pdus: list[EventBase] = attr.Factory(list) - async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]: + async def __aenter__(self) -> tuple[list[EventBase], list[Edu]]: # First we calculate the EDUs we want to send, if any. # There's a maximum number of EDUs that can be sent with a transaction, @@ -767,7 +767,7 @@ class _TransactionQueueManager: if self.queue._pending_presence: # Only send max 50 presence entries in the EDU, to bound the amount # of data we're sending. 
- presence_to_add: List[JsonDict] = [] + presence_to_add: list[JsonDict] = [] while ( self.queue._pending_presence and len(presence_to_add) < MAX_PRESENCE_STATES_PER_EDU @@ -845,7 +845,7 @@ class _TransactionQueueManager: async def __aexit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> None: diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index f47c011487..99aa05ebd6 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, List +from typing import TYPE_CHECKING from prometheus_client import Gauge @@ -82,8 +82,8 @@ class TransactionManager: async def send_new_transaction( self, destination: str, - pdus: List[EventBase], - edus: List[Edu], + pdus: list[EventBase], + edus: list[Edu], ) -> None: """ Args: diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 02e56e8e27..ee15b4804e 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -28,13 +28,10 @@ from typing import ( BinaryIO, Callable, Collection, - Dict, Generator, Iterable, - List, Mapping, Optional, - Tuple, Union, ) @@ -238,7 +235,7 @@ class TransportLayerClient: async def timestamp_to_event( self, destination: str, room_id: str, timestamp: int, direction: Direction - ) -> Union[JsonDict, List]: + ) -> Union[JsonDict, list]: """ Calls a remote federating server at `destination` asking for their closest event to the given timestamp in the given direction. @@ -428,7 +425,7 @@ class TransportLayerClient: omit_members: bool, ) -> "SendJoinResponse": path = _create_v2_path("/send_join/%s/%s", room_id, event_id) - query_params: Dict[str, str] = {} + query_params: dict[str, str] = {} # lazy-load state on join query_params["omit_members"] = "true" if omit_members else "false" @@ -442,7 +439,7 @@ class TransportLayerClient: async def send_leave_v1( self, destination: str, room_id: str, event_id: str, content: JsonDict - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: path = _create_v1_path("/send_leave/%s/%s", room_id, event_id) return await self.client.put_json( @@ -508,7 +505,7 @@ class TransportLayerClient: async def send_invite_v1( self, destination: str, room_id: str, event_id: str, content: JsonDict - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: path = _create_v1_path("/invite/%s/%s", room_id, event_id) return await self.client.put_json( @@ -533,7 +530,7 @@ class TransportLayerClient: remote_server: str, limit: Optional[int] = None, since_token: Optional[str] = None, - search_filter: Optional[Dict] = None, + search_filter: Optional[dict] = None, include_all_networks: bool = False, third_party_instance_id: Optional[str] = None, ) -> JsonDict: @@ -546,7 +543,7 @@ class TransportLayerClient: if search_filter: # this uses MSC2197 (Search Filtering over Federation) - data: Dict[str, Any] = {"include_all_networks": include_all_networks} + data: dict[str, Any] = {"include_all_networks": include_all_networks} if third_party_instance_id: data["third_party_instance_id"] = third_party_instance_id if limit: @@ -570,7 +567,7 @@ class TransportLayerClient: ) raise else: - args: Dict[str, Union[str, Iterable[str]]] = { + args: dict[str, Union[str, Iterable[str]]] = { "include_all_networks": "true" if include_all_networks else "false" } if 
third_party_instance_id: @@ -854,7 +851,7 @@ class TransportLayerClient: ) async def get_account_status( - self, destination: str, user_ids: List[str] + self, destination: str, user_ids: list[str] ) -> JsonDict: """ Args: @@ -878,7 +875,7 @@ class TransportLayerClient: max_timeout_ms: int, download_ratelimiter: Ratelimiter, ip_address: str, - ) -> Tuple[int, Dict[bytes, List[bytes]]]: + ) -> tuple[int, dict[bytes, list[bytes]]]: path = f"/_matrix/media/r0/download/{destination}/{media_id}" return await self.client.get_file( destination, @@ -905,7 +902,7 @@ class TransportLayerClient: max_timeout_ms: int, download_ratelimiter: Ratelimiter, ip_address: str, - ) -> Tuple[int, Dict[bytes, List[bytes]]]: + ) -> tuple[int, dict[bytes, list[bytes]]]: path = f"/_matrix/media/v3/download/{destination}/{media_id}" return await self.client.get_file( destination, @@ -936,7 +933,7 @@ class TransportLayerClient: max_timeout_ms: int, download_ratelimiter: Ratelimiter, ip_address: str, - ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]: + ) -> tuple[int, dict[bytes, list[bytes]], bytes]: path = f"/_matrix/federation/v1/media/download/{media_id}" return await self.client.federation_get_file( destination, @@ -993,9 +990,9 @@ class SendJoinResponse: """The parsed response of a `/send_join` request.""" # The list of auth events from the /send_join response. - auth_events: List[EventBase] + auth_events: list[EventBase] # The list of state from the /send_join response. - state: List[EventBase] + state: list[EventBase] # The raw join event from the /send_join response. event_dict: JsonDict # The parsed join event from the /send_join response. This will be None if @@ -1006,19 +1003,19 @@ class SendJoinResponse: members_omitted: bool = False # List of servers in the room - servers_in_room: Optional[List[str]] = None + servers_in_room: Optional[list[str]] = None @attr.s(slots=True, auto_attribs=True) class StateRequestResponse: """The parsed response of a `/state` request.""" - auth_events: List[EventBase] - state: List[EventBase] + auth_events: list[EventBase] + state: list[EventBase] @ijson.coroutine -def _event_parser(event_dict: JsonDict) -> Generator[None, Tuple[str, Any], None]: +def _event_parser(event_dict: JsonDict) -> Generator[None, tuple[str, Any], None]: """Helper function for use with `ijson.kvitems_coro` to parse key-value pairs to add them to a given dictionary. """ @@ -1030,7 +1027,7 @@ def _event_parser(event_dict: JsonDict) -> Generator[None, Tuple[str, Any], None @ijson.coroutine def _event_list_parser( - room_version: RoomVersion, events: List[EventBase] + room_version: RoomVersion, events: list[EventBase] ) -> Generator[None, JsonDict, None]: """Helper function for use with `ijson.items_coro` to parse an array of events and add them to the given list. @@ -1086,7 +1083,7 @@ class SendJoinParser(ByteParser[SendJoinResponse]): def __init__(self, room_version: RoomVersion, v1_api: bool): self._response = SendJoinResponse([], [], event_dict={}) self._room_version = room_version - self._coros: List[Generator[None, bytes, None]] = [] + self._coros: list[Generator[None, bytes, None]] = [] # The V1 API has the shape of `[200, {...}]`, which we handle by # prefixing with `item.*`. 
@@ -1159,7 +1156,7 @@ class _StateParser(ByteParser[StateRequestResponse]): def __init__(self, room_version: RoomVersion): self._response = StateRequestResponse([], []) self._room_version = room_version - self._coros: List[Generator[None, bytes, None]] = [ + self._coros: list[Generator[None, bytes, None]] = [ ijson.items_coro( _event_list_parser(room_version, self._response.state), "pdus.item", diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index c4905e63dd..d5f05f7290 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Literal, Optional, Tuple, Type +from typing import TYPE_CHECKING, Iterable, Literal, Optional from synapse.api.errors import FederationDeniedError, SynapseError from synapse.federation.transport.server._base import ( @@ -52,7 +52,7 @@ logger = logging.getLogger(__name__) class TransportLayerServer(JsonResource): """Handles incoming federation HTTP requests""" - def __init__(self, hs: "HomeServer", servlet_groups: Optional[List[str]] = None): + def __init__(self, hs: "HomeServer", servlet_groups: Optional[list[str]] = None): """Initialize the TransportLayerServer Will by default register all servlets. For custom behaviour, pass in @@ -130,8 +130,8 @@ class PublicRoomList(BaseFederationServlet): self.allow_access = hs.config.server.allow_public_rooms_over_federation async def on_GET( - self, origin: str, content: Literal[None], query: Dict[bytes, List[bytes]] - ) -> Tuple[int, JsonDict]: + self, origin: str, content: Literal[None], query: dict[bytes, list[bytes]] + ) -> tuple[int, JsonDict]: if not self.allow_access: raise FederationDeniedError(origin) @@ -164,8 +164,8 @@ class PublicRoomList(BaseFederationServlet): return 200, data async def on_POST( - self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] - ) -> Tuple[int, JsonDict]: + self, origin: str, content: JsonDict, query: dict[bytes, list[bytes]] + ) -> tuple[int, JsonDict]: # This implements MSC2197 (Search Filtering over Federation) if not self.allow_access: raise FederationDeniedError(origin) @@ -242,8 +242,8 @@ class OpenIdUserInfo(BaseFederationServlet): self, origin: Optional[str], content: Literal[None], - query: Dict[bytes, List[bytes]], - ) -> Tuple[int, JsonDict]: + query: dict[bytes, list[bytes]], + ) -> tuple[int, JsonDict]: token = parse_string_from_args(query, "access_token") if token is None: return ( @@ -265,7 +265,7 @@ class OpenIdUserInfo(BaseFederationServlet): return 200, {"sub": user_id} -SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { +SERVLET_GROUPS: dict[str, Iterable[type[BaseFederationServlet]]] = { "federation": FEDERATION_SERVLET_CLASSES, "room_list": (PublicRoomList,), "openid": (OpenIdUserInfo,), diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index cba309635b..146cbebb27 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -24,7 +24,7 @@ import logging import re import time from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tuple, cast +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, cast from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.urls import FEDERATION_V1_PREFIX @@ -165,7 +165,7 @@ class 
Authenticator: logger.exception("Error resetting retry timings on %s", origin) -def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str, Optional[str]]: +def _parse_auth_header(header_bytes: bytes) -> tuple[str, str, str, Optional[str]]: """Parse an X-Matrix auth header Args: @@ -185,7 +185,7 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str, Optional[str rf"{space_or_tab}*,{space_or_tab}*", re.split(r"^X-Matrix +", header_str, maxsplit=1)[1], ) - param_dict: Dict[str, str] = { + param_dict: dict[str, str] = { k.lower(): v for k, v in [param.split("=", maxsplit=1) for param in params] } @@ -252,7 +252,7 @@ class BaseFederationServlet: components as specified in the path match regexp. Returns: - Optional[Tuple[int, object]]: either (response code, response object) to + Optional[tuple[int, object]]: either (response code, response object) to return a JSON response, or None if the request has already been handled. Raises: @@ -282,14 +282,14 @@ class BaseFederationServlet: self.ratelimiter = ratelimiter self.server_name = server_name - def _wrap(self, func: Callable[..., Awaitable[Tuple[int, Any]]]) -> ServletCallback: + def _wrap(self, func: Callable[..., Awaitable[tuple[int, Any]]]) -> ServletCallback: authenticator = self.authenticator ratelimiter = self.ratelimiter @functools.wraps(func) async def new_func( request: SynapseRequest, *args: Any, **kwargs: str - ) -> Optional[Tuple[int, Any]]: + ) -> Optional[tuple[int, Any]]: """A callback which can be passed to HttpServer.RegisterPaths Args: diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index eb96ff27f9..54c7dac1b7 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -22,14 +22,10 @@ import logging from collections import Counter from typing import ( TYPE_CHECKING, - Dict, - List, Literal, Mapping, Optional, Sequence, - Tuple, - Type, Union, ) @@ -93,9 +89,9 @@ class FederationSendServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], transaction_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """Called on PUT /send// Args: @@ -158,9 +154,9 @@ class FederationEventServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], event_id: str, - ) -> Tuple[int, Union[JsonDict, str]]: + ) -> tuple[int, Union[JsonDict, str]]: return await self.handler.on_pdu_request(origin, event_id) @@ -173,9 +169,9 @@ class FederationStateV1Servlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: return await self.handler.on_room_state_request( origin, room_id, @@ -191,9 +187,9 @@ class FederationStateIdsServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: return await self.handler.on_state_ids_request( origin, room_id, @@ -209,9 +205,9 @@ class FederationBackfillServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, 
JsonDict]: versions = [x.decode("ascii") for x in query[b"v"]] limit = parse_integer_from_args(query, "limit", None) @@ -248,9 +244,9 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: timestamp = parse_integer_from_args(query, "ts", required=True) direction_str = parse_string_from_args( query, "dir", allowed_values=["f", "b"], required=True @@ -271,9 +267,9 @@ class FederationQueryServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], query_type: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()} args["origin"] = origin return await self.handler.on_query_request(query_type, args) @@ -287,10 +283,10 @@ class FederationMakeJoinServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, user_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """ Args: origin: The authenticated server_name of the calling server @@ -323,10 +319,10 @@ class FederationMakeLeaveServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, user_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: result = await self.handler.on_make_leave_request(origin, room_id, user_id) return 200, result @@ -339,10 +335,10 @@ class FederationV1SendLeaveServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, event_id: str, - ) -> Tuple[int, Tuple[int, JsonDict]]: + ) -> tuple[int, tuple[int, JsonDict]]: result = await self.handler.on_send_leave_request(origin, content, room_id) return 200, (200, result) @@ -357,10 +353,10 @@ class FederationV2SendLeaveServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, event_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: result = await self.handler.on_send_leave_request(origin, content, room_id) return 200, result @@ -373,10 +369,10 @@ class FederationMakeKnockServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, user_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: # Retrieve the room versions the remote homeserver claims to support supported_versions = parse_strings_from_args( query, "ver", required=True, encoding="utf-8" @@ -396,10 +392,10 @@ class FederationV1SendKnockServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, event_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: result = await self.handler.on_send_knock_request(origin, content, room_id) return 200, result @@ -412,10 +408,10 @@ class FederationEventAuthServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, event_id: str, - ) -> 
Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: return await self.handler.on_event_auth(origin, room_id, event_id) @@ -427,10 +423,10 @@ class FederationV1SendJoinServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, event_id: str, - ) -> Tuple[int, Tuple[int, JsonDict]]: + ) -> tuple[int, tuple[int, JsonDict]]: # TODO(paul): assert that event_id parsed from path actually # match those given in content result = await self.handler.on_send_join_request(origin, content, room_id) @@ -447,10 +443,10 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, event_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: # TODO(paul): assert that event_id parsed from path actually # match those given in content @@ -470,10 +466,10 @@ class FederationV1InviteServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, event_id: str, - ) -> Tuple[int, Tuple[int, JsonDict]]: + ) -> tuple[int, tuple[int, JsonDict]]: # We don't get a room version, so we have to assume its EITHER v1 or # v2. This is "fine" as the only difference between V1 and V2 is the # state resolution algorithm, and we don't use that for processing @@ -497,10 +493,10 @@ class FederationV2InviteServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, event_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: # TODO(paul): assert that room_id/event_id parsed from path actually # match those given in content @@ -535,9 +531,9 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self.handler.on_exchange_third_party_invite_request(content) return 200, {} @@ -547,8 +543,8 @@ class FederationClientKeysQueryServlet(BaseFederationServerServlet): CATEGORY = "Federation requests" async def on_POST( - self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] - ) -> Tuple[int, JsonDict]: + self, origin: str, content: JsonDict, query: dict[bytes, list[bytes]] + ) -> tuple[int, JsonDict]: return await self.handler.on_query_client_keys(origin, content) @@ -560,9 +556,9 @@ class FederationUserDevicesQueryServlet(BaseFederationServerServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], user_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: return await self.handler.on_query_user_devices(origin, user_id) @@ -571,10 +567,10 @@ class FederationClientKeysClaimServlet(BaseFederationServerServlet): CATEGORY = "Federation requests" async def on_POST( - self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] - ) -> Tuple[int, JsonDict]: + self, origin: str, content: JsonDict, query: dict[bytes, list[bytes]] + ) -> tuple[int, JsonDict]: # Generate a count for each algorithm, which is hard-coded to 1. 
- key_query: List[Tuple[str, str, str, int]] = [] + key_query: list[tuple[str, str, str, int]] = [] for user_id, device_keys in content.get("one_time_keys", {}).items(): for device_id, algorithm in device_keys.items(): key_query.append((user_id, device_id, algorithm, 1)) @@ -597,10 +593,10 @@ class FederationUnstableClientKeysClaimServlet(BaseFederationServerServlet): CATEGORY = "Federation requests" async def on_POST( - self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] - ) -> Tuple[int, JsonDict]: + self, origin: str, content: JsonDict, query: dict[bytes, list[bytes]] + ) -> tuple[int, JsonDict]: # Generate a count for each algorithm. - key_query: List[Tuple[str, str, str, int]] = [] + key_query: list[tuple[str, str, str, int]] = [] for user_id, device_keys in content.get("one_time_keys", {}).items(): for device_id, algorithms in device_keys.items(): counts = Counter(algorithms) @@ -621,9 +617,9 @@ class FederationGetMissingEventsServlet(BaseFederationServerServlet): self, origin: str, content: JsonDict, - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: limit = int(content.get("limit", 10)) earliest_events = content.get("earliest_events", []) latest_events = content.get("latest_events", []) @@ -646,8 +642,8 @@ class On3pidBindServlet(BaseFederationServerServlet): REQUIRE_AUTH = False async def on_POST( - self, origin: Optional[str], content: JsonDict, query: Dict[bytes, List[bytes]] - ) -> Tuple[int, JsonDict]: + self, origin: Optional[str], content: JsonDict, query: dict[bytes, list[bytes]] + ) -> tuple[int, JsonDict]: if "invites" in content: last_exception = None for invite in content["invites"]: @@ -682,8 +678,8 @@ class FederationVersionServlet(BaseFederationServlet): self, origin: Optional[str], content: Literal[None], - query: Dict[bytes, List[bytes]], - ) -> Tuple[int, JsonDict]: + query: dict[bytes, list[bytes]], + ) -> tuple[int, JsonDict]: return ( 200, { @@ -715,7 +711,7 @@ class FederationRoomHierarchyServlet(BaseFederationServlet): content: Literal[None], query: Mapping[bytes, Sequence[bytes]], room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: suggested_only = parse_boolean_from_args(query, "suggested_only", default=False) return 200, await self.handler.get_federation_hierarchy( origin, room_id, suggested_only @@ -746,9 +742,9 @@ class RoomComplexityServlet(BaseFederationServlet): self, origin: str, content: Literal[None], - query: Dict[bytes, List[bytes]], + query: dict[bytes, list[bytes]], room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: is_public = await self._store.is_room_world_readable_or_publicly_joinable( room_id ) @@ -780,7 +776,7 @@ class FederationAccountStatusServlet(BaseFederationServerServlet): content: JsonDict, query: Mapping[bytes, Sequence[bytes]], room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if "user_ids" not in content: raise SynapseError( 400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM @@ -882,7 +878,7 @@ class FederationMediaThumbnailServlet(BaseFederationServerServlet): self.media_repo.mark_recently_accessed(None, media_id) -FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = ( +FEDERATION_SERVLET_CLASSES: tuple[type[BaseFederationServlet], ...] 
= ( FederationSendServlet, FederationEventServlet, FederationStateV1Servlet, diff --git a/synapse/federation/units.py b/synapse/federation/units.py index 3bb5f824b7..bff45bc2a9 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -24,7 +24,7 @@ server protocol. """ import logging -from typing import List, Optional, Sequence +from typing import Optional, Sequence import attr @@ -70,7 +70,7 @@ class Edu: getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}" -def _none_to_list(edus: Optional[List[JsonDict]]) -> List[JsonDict]: +def _none_to_list(edus: Optional[list[JsonDict]]) -> list[JsonDict]: if edus is None: return [] return edus @@ -98,8 +98,8 @@ class Transaction: origin: str destination: str origin_server_ts: int - pdus: List[JsonDict] = attr.ib(factory=list, converter=_none_to_list) - edus: List[JsonDict] = attr.ib(factory=list, converter=_none_to_list) + pdus: list[JsonDict] = attr.ib(factory=list, converter=_none_to_list) + edus: list[JsonDict] = attr.ib(factory=list, converter=_none_to_list) def get_dict(self) -> JsonDict: """A JSON-ready dictionary of valid keys which aren't internal.""" @@ -113,7 +113,7 @@ class Transaction: return result -def filter_pdus_for_valid_depth(pdus: Sequence[JsonDict]) -> List[JsonDict]: +def filter_pdus_for_valid_depth(pdus: Sequence[JsonDict]) -> list[JsonDict]: filtered_pdus = [] for pdu in pdus: # Drop PDUs that have a depth that is outside of the range allowed @@ -129,5 +129,5 @@ def filter_pdus_for_valid_depth(pdus: Sequence[JsonDict]) -> List[JsonDict]: def serialize_and_filter_pdus( pdus: Sequence[EventBase], time_now: Optional[int] = None -) -> List[JsonDict]: +) -> list[JsonDict]: return filter_pdus_for_valid_depth([pdu.get_pdu_json(time_now) for pdu in pdus]) diff --git a/synapse/handlers/account.py b/synapse/handlers/account.py index 37cc3d3ff5..855027f08e 100644 --- a/synapse/handlers/account.py +++ b/synapse/handlers/account.py @@ -19,7 +19,7 @@ # # -from typing import TYPE_CHECKING, Dict, List, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import Codes, SynapseError from synapse.types import JsonDict, UserID @@ -40,9 +40,9 @@ class AccountHandler: async def get_account_statuses( self, - user_ids: List[str], + user_ids: list[str], allow_remote: bool, - ) -> Tuple[JsonDict, List[str]]: + ) -> tuple[JsonDict, list[str]]: """Get account statuses for a list of user IDs. If one or more account(s) belong to remote homeservers, retrieve their status(es) @@ -63,7 +63,7 @@ class AccountHandler: """ statuses = {} failures = [] - remote_users: List[UserID] = [] + remote_users: list[UserID] = [] for raw_user_id in user_ids: try: @@ -127,8 +127,8 @@ class AccountHandler: return status async def _get_remote_account_statuses( - self, remote_users: List[UserID] - ) -> Tuple[JsonDict, List[str]]: + self, remote_users: list[UserID] + ) -> tuple[JsonDict, list[str]]: """Send out federation requests to retrieve the statuses of remote accounts. Args: @@ -140,7 +140,7 @@ class AccountHandler: """ # Group remote users by destination, so we only send one request per remote # homeserver. - by_destination: Dict[str, List[str]] = {} + by_destination: dict[str, list[str]] = {} for user in remote_users: if user.domain not in by_destination: by_destination[user.domain] = [] @@ -149,7 +149,7 @@ class AccountHandler: # Retrieve the statuses and failures for remote accounts. 
final_statuses: JsonDict = {} - final_failures: List[str] = [] + final_failures: list[str] = [] for destination, users in by_destination.items(): statuses, failures = await self._federation_client.get_account_status( destination, diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 228132db48..4492612859 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -21,7 +21,7 @@ # import logging import random -from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Optional from synapse.api.constants import AccountDataTypes from synapse.replication.http.account_data import ( @@ -67,7 +67,7 @@ class AccountDataHandler: self._remove_tag_client = ReplicationRemoveTagRestServlet.make_client(hs) self._account_data_writers = hs.config.worker.writers.account_data - self._on_account_data_updated_callbacks: List[ + self._on_account_data_updated_callbacks: list[ ON_ACCOUNT_DATA_UPDATED_CALLBACK ] = [] @@ -325,7 +325,7 @@ class AccountDataEventSource(EventSource[int, JsonDict]): room_ids: StrCollection, is_guest: bool, explicit_room_id: Optional[str] = None, - ) -> Tuple[List[JsonDict], int]: + ) -> tuple[list[JsonDict], int]: user_id = user.to_string() last_stream_id = from_key diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index eed50ef69a..a805de1f35 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -21,7 +21,7 @@ import email.mime.multipart import email.utils import logging -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional from synapse.api.errors import AuthError, StoreError, SynapseError from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -222,7 +222,7 @@ class AccountValidityHandler: await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True) - async def _get_email_addresses_for_user(self, user_id: str) -> List[str]: + async def _get_email_addresses_for_user(self, user_id: str) -> list[str]: """Retrieve the list of email addresses attached to a user's account. Args: @@ -263,7 +263,7 @@ class AccountValidityHandler: attempts += 1 raise StoreError(500, "Couldn't generate a unique string as refresh string.") - async def renew_account(self, renewal_token: str) -> Tuple[bool, bool, int]: + async def renew_account(self, renewal_token: str) -> tuple[bool, bool, int]: """Renews the account attached to a given renewal token by pushing back the expiration date by the current validity period in the server's configuration. diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index e90d675b59..3faaa4d2b3 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -24,13 +24,9 @@ import logging from typing import ( TYPE_CHECKING, Any, - Dict, - List, Mapping, Optional, Sequence, - Set, - Tuple, ) import attr @@ -218,7 +214,7 @@ class AdminHandler: to_key = RoomStreamToken(stream=stream_ordering) # Events that we've processed in this room - written_events: Set[str] = set() + written_events: set[str] = set() # We need to track gaps in the events stream so that we can then # write out the state at those events. We do this by keeping track @@ -231,7 +227,7 @@ class AdminHandler: # The reverse mapping to above, i.e. map from unseen event to events # that have the unseen event in their prev_events, i.e. the unseen # events "children". 
- unseen_to_child_events: Dict[str, Set[str]] = {} + unseen_to_child_events: dict[str, set[str]] = {} # We fetch events in the room the user could see by fetching *all* # events that we have and then filtering, this isn't the most @@ -412,7 +408,7 @@ class AdminHandler: async def _redact_all_events( self, task: ScheduledTask - ) -> Tuple[TaskStatus, Optional[Mapping[str, Any]], Optional[str]]: + ) -> tuple[TaskStatus, Optional[Mapping[str, Any]], Optional[str]]: """ Task to redact all of a users events in the given rooms, tracking which, if any, events whose redaction failed @@ -518,7 +514,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta): """Interface used to specify how to write exported data.""" @abc.abstractmethod - def write_events(self, room_id: str, events: List[EventBase]) -> None: + def write_events(self, room_id: str, events: list[EventBase]) -> None: """Write a batch of events for a room.""" raise NotImplementedError() diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 6536d9fe51..5240178d80 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -22,12 +22,9 @@ import logging from typing import ( TYPE_CHECKING, Collection, - Dict, Iterable, - List, Mapping, Optional, - Tuple, Union, ) @@ -143,7 +140,7 @@ class ApplicationServicesHandler: event_to_received_ts.keys(), get_prev_content=True ) - events_by_room: Dict[str, List[EventBase]] = {} + events_by_room: dict[str, list[EventBase]] = {} for event in events: events_by_room.setdefault(event.room_id, []).append(event) @@ -341,7 +338,7 @@ class ApplicationServicesHandler: @wrap_as_background_process("notify_interested_services_ephemeral") async def _notify_interested_services_ephemeral( self, - services: List[ApplicationService], + services: list[ApplicationService], stream_key: StreamKeyType, new_token: Union[int, MultiWriterStreamToken], users: Collection[Union[str, UserID]], @@ -429,7 +426,7 @@ class ApplicationServicesHandler: async def _handle_typing( self, service: ApplicationService, new_token: int - ) -> List[JsonMapping]: + ) -> list[JsonMapping]: """ Return the typing events since the given stream token that the given application service should receive. @@ -464,7 +461,7 @@ class ApplicationServicesHandler: async def _handle_receipts( self, service: ApplicationService, new_token: MultiWriterStreamToken - ) -> List[JsonMapping]: + ) -> list[JsonMapping]: """ Return the latest read receipts that the given application service should receive. @@ -503,7 +500,7 @@ class ApplicationServicesHandler: service: ApplicationService, users: Collection[Union[str, UserID]], new_token: Optional[int], - ) -> List[JsonMapping]: + ) -> list[JsonMapping]: """ Return the latest presence updates that the given application service should receive. @@ -523,7 +520,7 @@ class ApplicationServicesHandler: A list of json dictionaries containing data derived from the presence events that should be sent to the given application service. 
""" - events: List[JsonMapping] = [] + events: list[JsonMapping] = [] presence_source = self.event_sources.sources.presence from_key = await self.store.get_type_stream_id_for_appservice( service, "presence" @@ -563,7 +560,7 @@ class ApplicationServicesHandler: service: ApplicationService, new_token: int, users: Collection[Union[str, UserID]], - ) -> List[JsonDict]: + ) -> list[JsonDict]: """ Given an application service, determine which events it should receive from those between the last-recorded to-device message stream token for this @@ -585,7 +582,7 @@ class ApplicationServicesHandler: ) # Filter out users that this appservice is not interested in - users_appservice_is_interested_in: List[str] = [] + users_appservice_is_interested_in: list[str] = [] for user in users: # FIXME: We should do this farther up the call stack. We currently repeat # this operation in _handle_presence. @@ -612,7 +609,7 @@ class ApplicationServicesHandler: # # So we mangle this dict into a flat list of to-device messages with the relevant # user ID and device ID embedded inside each message dict. - message_payload: List[JsonDict] = [] + message_payload: list[JsonDict] = [] for ( user_id, device_id, @@ -761,8 +758,8 @@ class ApplicationServicesHandler: return None async def query_3pe( - self, kind: str, protocol: str, fields: Dict[bytes, List[bytes]] - ) -> List[JsonDict]: + self, kind: str, protocol: str, fields: dict[bytes, list[bytes]] + ) -> list[JsonDict]: services = self._get_services_for_3pn(protocol) results = await make_deferred_yieldable( @@ -786,9 +783,9 @@ class ApplicationServicesHandler: async def get_3pe_protocols( self, only_protocol: Optional[str] = None - ) -> Dict[str, JsonDict]: + ) -> dict[str, JsonDict]: services = self.store.get_app_services() - protocols: Dict[str, List[JsonDict]] = {} + protocols: dict[str, list[JsonDict]] = {} # Collect up all the individual protocol responses out of the ASes for s in services: @@ -804,7 +801,7 @@ class ApplicationServicesHandler: if info is not None: protocols[p].append(info) - def _merge_instances(infos: List[JsonDict]) -> JsonDict: + def _merge_instances(infos: list[JsonDict]) -> JsonDict: # Merge the 'instances' lists of multiple results, but just take # the other fields from the first as they ought to be identical # copy the result so as not to corrupt the cached one @@ -822,7 +819,7 @@ class ApplicationServicesHandler: async def _get_services_for_event( self, event: EventBase - ) -> List[ApplicationService]: + ) -> list[ApplicationService]: """Retrieve a list of application services interested in this event. 
Args: @@ -842,11 +839,11 @@ class ApplicationServicesHandler: return interested_list - def _get_services_for_user(self, user_id: str) -> List[ApplicationService]: + def _get_services_for_user(self, user_id: str) -> list[ApplicationService]: services = self.store.get_app_services() return [s for s in services if (s.is_interested_in_user(user_id))] - def _get_services_for_3pn(self, protocol: str) -> List[ApplicationService]: + def _get_services_for_3pn(self, protocol: str) -> list[ApplicationService]: services = self.store.get_app_services() return [s for s in services if s.is_interested_in_protocol(protocol)] @@ -872,9 +869,9 @@ class ApplicationServicesHandler: return True async def claim_e2e_one_time_keys( - self, query: Iterable[Tuple[str, str, str, int]] - ) -> Tuple[ - Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]] + self, query: Iterable[tuple[str, str, str, int]] + ) -> tuple[ + dict[str, dict[str, dict[str, JsonDict]]], list[tuple[str, str, str, int]] ]: """Claim one time keys from application services. @@ -896,7 +893,7 @@ class ApplicationServicesHandler: services = self.store.get_app_services() # Partition the users by appservice. - query_by_appservice: Dict[str, List[Tuple[str, str, str, int]]] = {} + query_by_appservice: dict[str, list[tuple[str, str, str, int]]] = {} missing = [] for user_id, device, algorithm, count in query: if not self.store.get_if_app_services_interested_in_user(user_id): @@ -929,7 +926,7 @@ class ApplicationServicesHandler: # Patch together the results -- they are all independent (since they # require exclusive control over the users, which is the outermost key). - claimed_keys: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + claimed_keys: dict[str, dict[str, dict[str, JsonDict]]] = {} for success, result in results: if success: claimed_keys.update(result[0]) @@ -938,8 +935,8 @@ class ApplicationServicesHandler: return claimed_keys, missing async def query_keys( - self, query: Mapping[str, Optional[List[str]]] - ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + self, query: Mapping[str, Optional[list[str]]] + ) -> dict[str, dict[str, dict[str, JsonDict]]]: """Query application services for device keys. Users which are exclusively owned by an application service are queried @@ -954,7 +951,7 @@ class ApplicationServicesHandler: services = self.store.get_app_services() # Partition the users by appservice. - query_by_appservice: Dict[str, Dict[str, List[str]]] = {} + query_by_appservice: dict[str, dict[str, list[str]]] = {} for user_id, device_ids in query.items(): if not self.store.get_if_app_services_interested_in_user(user_id): continue @@ -986,7 +983,7 @@ class ApplicationServicesHandler: # Patch together the results -- they are all independent (since they # require exclusive control over the users). They get returned as a single # dictionary. 
- key_queries: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + key_queries: dict[str, dict[str, dict[str, JsonDict]]] = {} for success, result in results: if success: key_queries.update(result) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index f4583e33c3..e282f38b9e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -31,13 +31,9 @@ from typing import ( Any, Awaitable, Callable, - Dict, Iterable, - List, Mapping, Optional, - Tuple, - Type, Union, cast, ) @@ -102,7 +98,7 @@ invalid_login_token_counter = Counter( def convert_client_dict_legacy_fields_to_identifier( submission: JsonDict, -) -> Dict[str, str]: +) -> dict[str, str]: """ Convert a legacy-formatted login submission to an identifier dict. @@ -154,7 +150,7 @@ def convert_client_dict_legacy_fields_to_identifier( return identifier -def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]: +def login_id_phone_to_thirdparty(identifier: JsonDict) -> dict[str, str]: """ Convert a phone login identifier type to a generic threepid identifier. @@ -205,7 +201,7 @@ class AuthHandler: self.auth = hs.get_auth() self.auth_blocking = hs.get_auth_blocking() self.clock = hs.get_clock() - self.checkers: Dict[str, UserInteractiveAuthChecker] = {} + self.checkers: dict[str, UserInteractiveAuthChecker] = {} for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: inst = auth_checker_class(hs) if inst.is_enabled(): @@ -280,7 +276,7 @@ class AuthHandler: # A mapping of user ID to extra attributes to include in the login # response. - self._extra_attributes: Dict[str, SsoLoginExtraAttributes] = {} + self._extra_attributes: dict[str, SsoLoginExtraAttributes] = {} self._auth_delegation_enabled = ( hs.config.mas.enabled or hs.config.experimental.msc3861.enabled @@ -290,10 +286,10 @@ class AuthHandler: self, requester: Requester, request: SynapseRequest, - request_body: Dict[str, Any], + request_body: dict[str, Any], description: str, can_skip_ui_auth: bool = False, - ) -> Tuple[dict, Optional[str]]: + ) -> tuple[dict, Optional[str]]: """ Checks that the user is who they claim to be, via a UI auth. @@ -440,12 +436,12 @@ class AuthHandler: async def check_ui_auth( self, - flows: List[List[str]], + flows: list[list[str]], request: SynapseRequest, - clientdict: Dict[str, Any], + clientdict: dict[str, Any], description: str, get_new_session_data: Optional[Callable[[], JsonDict]] = None, - ) -> Tuple[dict, dict, str]: + ) -> tuple[dict, dict, str]: """ Takes a dictionary sent by the client in the login / registration protocol and handles the User-Interactive Auth flow. 
@@ -579,7 +575,7 @@ class AuthHandler: ) # check auth type currently being presented - errordict: Dict[str, Any] = {} + errordict: dict[str, Any] = {} if "type" in authdict: login_type: str = authdict["type"] try: @@ -617,7 +613,7 @@ class AuthHandler: raise InteractiveAuthIncompleteError(session.session_id, ret) async def add_oob_auth( - self, stagetype: str, authdict: Dict[str, Any], clientip: str + self, stagetype: str, authdict: dict[str, Any], clientip: str ) -> None: """ Adds the result of out-of-band authentication into an existing auth @@ -641,7 +637,7 @@ class AuthHandler: authdict["session"], stagetype, result ) - def get_session_id(self, clientdict: Dict[str, Any]) -> Optional[str]: + def get_session_id(self, clientdict: dict[str, Any]) -> Optional[str]: """ Gets the session ID for a client given the client dictionary @@ -702,8 +698,8 @@ class AuthHandler: await self.store.delete_old_ui_auth_sessions(expiration_time) async def _check_auth_dict( - self, authdict: Dict[str, Any], clientip: str - ) -> Union[Dict[str, Any], str]: + self, authdict: dict[str, Any], clientip: str + ) -> Union[dict[str, Any], str]: """Attempt to validate the auth dict provided by a client Args: @@ -750,9 +746,9 @@ class AuthHandler: def _auth_dict_for_flows( self, - flows: List[List[str]], + flows: list[list[str]], session_id: str, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: public_flows = [] for f in flows: public_flows.append(f) @@ -762,7 +758,7 @@ class AuthHandler: LoginType.TERMS: self._get_params_terms, } - params: Dict[str, Any] = {} + params: dict[str, Any] = {} for f in public_flows: for stage in f: @@ -780,7 +776,7 @@ class AuthHandler: refresh_token: str, access_token_valid_until_ms: Optional[int], refresh_token_valid_until_ms: Optional[int], - ) -> Tuple[str, str, Optional[int]]: + ) -> tuple[str, str, Optional[int]]: """ Consumes a refresh token and generate both a new access token and a new refresh token from it. @@ -934,7 +930,7 @@ class AuthHandler: device_id: str, expiry_ts: Optional[int], ultimate_session_expiry_ts: Optional[int], - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """ Creates a new refresh token for the user with the given user ID. @@ -1067,7 +1063,7 @@ class AuthHandler: async def _find_user_id_and_pwd_hash( self, user_id: str - ) -> Optional[Tuple[str, str]]: + ) -> Optional[tuple[str, str]]: """Checks to see if a user with the given id exists. Will check case insensitively, but will return None if there are multiple inexact matches. 
@@ -1142,10 +1138,10 @@ class AuthHandler: async def validate_login( self, - login_submission: Dict[str, Any], + login_submission: dict[str, Any], ratelimit: bool = False, is_reauth: bool = False, - ) -> Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]: + ) -> tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]: """Authenticates the user for the /login API Also used by the user-interactive auth flow to validate auth types which don't @@ -1300,8 +1296,8 @@ class AuthHandler: async def _validate_userid_login( self, username: str, - login_submission: Dict[str, Any], - ) -> Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]: + login_submission: dict[str, Any], + ) -> tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]: """Helper for validate_login Handles login, once we've mapped 3pids onto userids @@ -1390,7 +1386,7 @@ class AuthHandler: async def check_password_provider_3pid( self, medium: str, address: str, password: str - ) -> Tuple[Optional[str], Optional[Callable[["LoginResponse"], Awaitable[None]]]]: + ) -> tuple[Optional[str], Optional[Callable[["LoginResponse"], Awaitable[None]]]]: """Check if a password provider is able to validate a thirdparty login Args: @@ -1905,7 +1901,7 @@ class AuthHandler: extra_attributes = self._extra_attributes.get(login_result["user_id"]) if extra_attributes: - login_result_dict = cast(Dict[str, Any], login_result) + login_result_dict = cast(dict[str, Any], login_result) login_result_dict.update(extra_attributes.extra_attributes) def _expire_sso_extra_attributes(self) -> None: @@ -1941,7 +1937,7 @@ def load_legacy_password_auth_providers(hs: "HomeServer") -> None: def load_single_legacy_password_auth_provider( - module: Type, + module: type, config: JsonDict, api: "ModuleApi", ) -> None: @@ -1966,7 +1962,7 @@ def load_single_legacy_password_auth_provider( async def wrapped_check_password( username: str, login_type: str, login_dict: JsonDict - ) -> Optional[Tuple[str, Optional[Callable]]]: + ) -> Optional[tuple[str, Optional[Callable]]]: # We've already made sure f is not None above, but mypy doesn't do well # across function boundaries so we need to tell it f is definitely not # None. @@ -1985,12 +1981,12 @@ def load_single_legacy_password_auth_provider( return wrapped_check_password # We need to wrap check_auth as in the old form it could return - # just a str, but now it must return Optional[Tuple[str, Optional[Callable]] + # just a str, but now it must return Optional[tuple[str, Optional[Callable]] if f.__name__ == "check_auth": async def wrapped_check_auth( username: str, login_type: str, login_dict: JsonDict - ) -> Optional[Tuple[str, Optional[Callable]]]: + ) -> Optional[tuple[str, Optional[Callable]]]: # We've already made sure f is not None above, but mypy doesn't do well # across function boundaries so we need to tell it f is definitely not # None. 
@@ -2006,12 +2002,12 @@ def load_single_legacy_password_auth_provider( return wrapped_check_auth # We need to wrap check_3pid_auth as in the old form it could return - # just a str, but now it must return Optional[Tuple[str, Optional[Callable]] + # just a str, but now it must return Optional[tuple[str, Optional[Callable]] if f.__name__ == "check_3pid_auth": async def wrapped_check_3pid_auth( medium: str, address: str, password: str - ) -> Optional[Tuple[str, Optional[Callable]]]: + ) -> Optional[tuple[str, Optional[Callable]]]: # We've already made sure f is not None above, but mypy doesn't do well # across function boundaries so we need to tell it f is definitely not # None. @@ -2026,7 +2022,7 @@ def load_single_legacy_password_auth_provider( return wrapped_check_3pid_auth - def run(*args: Tuple, **kwargs: Dict) -> Awaitable: + def run(*args: tuple, **kwargs: dict) -> Awaitable: # mypy doesn't do well across function boundaries so we need to tell it # f is definitely not None. assert f is not None @@ -2079,14 +2075,14 @@ def load_single_legacy_password_auth_provider( CHECK_3PID_AUTH_CALLBACK = Callable[ [str, str, str], Awaitable[ - Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]] + Optional[tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]] ], ] ON_LOGGED_OUT_CALLBACK = Callable[[str, Optional[str], str], Awaitable] CHECK_AUTH_CALLBACK = Callable[ [str, str, JsonDict], Awaitable[ - Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]] + Optional[tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]] ], ] GET_USERNAME_FOR_REGISTRATION_CALLBACK = Callable[ @@ -2108,21 +2104,21 @@ class PasswordAuthProvider: def __init__(self) -> None: # lists of callbacks - self.check_3pid_auth_callbacks: List[CHECK_3PID_AUTH_CALLBACK] = [] - self.on_logged_out_callbacks: List[ON_LOGGED_OUT_CALLBACK] = [] - self.get_username_for_registration_callbacks: List[ + self.check_3pid_auth_callbacks: list[CHECK_3PID_AUTH_CALLBACK] = [] + self.on_logged_out_callbacks: list[ON_LOGGED_OUT_CALLBACK] = [] + self.get_username_for_registration_callbacks: list[ GET_USERNAME_FOR_REGISTRATION_CALLBACK ] = [] - self.get_displayname_for_registration_callbacks: List[ + self.get_displayname_for_registration_callbacks: list[ GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK ] = [] - self.is_3pid_allowed_callbacks: List[IS_3PID_ALLOWED_CALLBACK] = [] + self.is_3pid_allowed_callbacks: list[IS_3PID_ALLOWED_CALLBACK] = [] # Mapping from login type to login parameters - self._supported_login_types: Dict[str, Tuple[str, ...]] = {} + self._supported_login_types: dict[str, tuple[str, ...]] = {} # Mapping from login type to auth checker callbacks - self.auth_checker_callbacks: Dict[str, List[CHECK_AUTH_CALLBACK]] = {} + self.auth_checker_callbacks: dict[str, list[CHECK_AUTH_CALLBACK]] = {} def register_password_auth_provider_callbacks( self, @@ -2130,7 +2126,7 @@ class PasswordAuthProvider: on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None, is_3pid_allowed: Optional[IS_3PID_ALLOWED_CALLBACK] = None, auth_checkers: Optional[ - Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK] + dict[tuple[str, tuple[str, ...]], CHECK_AUTH_CALLBACK] ] = None, get_username_for_registration: Optional[ GET_USERNAME_FOR_REGISTRATION_CALLBACK @@ -2207,7 +2203,7 @@ class PasswordAuthProvider: async def check_auth( self, username: str, login_type: str, login_dict: JsonDict - ) -> Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]: + ) -> Optional[tuple[str, 
Optional[Callable[["LoginResponse"], Awaitable[None]]]]]: """Check if the user has presented valid login credentials Args: @@ -2245,7 +2241,7 @@ class PasswordAuthProvider: if not isinstance(result, tuple) or len(result) != 2: logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[Tuple[str, Optional[Callable]]]", + " Optional[tuple[str, Optional[Callable]]]", callback, result, ) @@ -2258,7 +2254,7 @@ class PasswordAuthProvider: if not isinstance(str_result, str): logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[Tuple[str, Optional[Callable]]]", + " Optional[tuple[str, Optional[Callable]]]", callback, result, ) @@ -2269,7 +2265,7 @@ class PasswordAuthProvider: if not callable(callback_result): logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[Tuple[str, Optional[Callable]]]", + " Optional[tuple[str, Optional[Callable]]]", callback, result, ) @@ -2284,7 +2280,7 @@ class PasswordAuthProvider: async def check_3pid_auth( self, medium: str, address: str, password: str - ) -> Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]: + ) -> Optional[tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]: # This function is able to return a deferred that either # resolves None, meaning authentication failure, or upon # success, to a str (which is the user_id) or a tuple of @@ -2308,7 +2304,7 @@ class PasswordAuthProvider: if not isinstance(result, tuple) or len(result) != 2: logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[Tuple[str, Optional[Callable]]]", + " Optional[tuple[str, Optional[Callable]]]", callback, result, ) @@ -2321,7 +2317,7 @@ class PasswordAuthProvider: if not isinstance(str_result, str): logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[Tuple[str, Optional[Callable]]]", + " Optional[tuple[str, Optional[Callable]]]", callback, result, ) @@ -2332,7 +2328,7 @@ class PasswordAuthProvider: if not callable(callback_result): logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[Tuple[str, Optional[Callable]]]", + " Optional[tuple[str, Optional[Callable]]]", callback, result, ) diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py index fbe79c2e4c..438dcf9f2c 100644 --- a/synapse/handlers/cas.py +++ b/synapse/handlers/cas.py @@ -20,7 +20,7 @@ # import logging import urllib.parse -from typing import TYPE_CHECKING, Dict, List, Optional +from typing import TYPE_CHECKING, Optional from xml.etree import ElementTree as ET import attr @@ -54,7 +54,7 @@ class CasError(Exception): @attr.s(slots=True, frozen=True, auto_attribs=True) class CasResponse: username: str - attributes: Dict[str, List[Optional[str]]] + attributes: dict[str, list[Optional[str]]] class CasHandler: @@ -99,7 +99,7 @@ class CasHandler: self._sso_handler.register_identity_provider(self) - def _build_service_param(self, args: Dict[str, str]) -> str: + def _build_service_param(self, args: dict[str, str]) -> str: """ Generates a value to use as the "service" parameter when redirecting or querying the CAS service. 
@@ -116,7 +116,7 @@ class CasHandler: ) async def _validate_ticket( - self, ticket: str, service_args: Dict[str, str] + self, ticket: str, service_args: dict[str, str] ) -> CasResponse: """ Validate a CAS ticket with the server, and return the parsed the response. @@ -186,7 +186,7 @@ class CasHandler: # Iterate through the nodes and pull out the user and any extra attributes. user = None - attributes: Dict[str, List[Optional[str]]] = {} + attributes: dict[str, list[Optional[str]]] = {} for child in root[0]: if child.tag.endswith("user"): user = child.text diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py index 79dd3e8416..b89b7416e6 100644 --- a/synapse/handlers/delayed_events.py +++ b/synapse/handlers/delayed_events.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Optional from twisted.internet.interfaces import IDelayedCall @@ -226,7 +226,7 @@ class DelayedEventsHandler: await self._store.update_delayed_events_stream_pos(max_pos) - async def _handle_state_deltas(self, deltas: List[StateDelta]) -> None: + async def _handle_state_deltas(self, deltas: list[StateDelta]) -> None: """ Process current state deltas to cancel other users' pending delayed events that target the same state. @@ -502,8 +502,8 @@ class DelayedEventsHandler: await self._send_events(events) - async def _send_events(self, events: List[DelayedEventDetails]) -> None: - sent_state: Set[Tuple[RoomID, EventType, StateKey]] = set() + async def _send_events(self, events: list[DelayedEventDetails]) -> None: + sent_state: set[tuple[RoomID, EventType, StateKey]] = set() for event in events: if event.state_key is not None: state_info = (event.room_id, event.type, event.state_key) @@ -547,7 +547,7 @@ class DelayedEventsHandler: else: self._next_delayed_event_call.reset(delay_sec) - async def get_all_for_user(self, requester: Requester) -> List[JsonDict]: + async def get_all_for_user(self, requester: Requester) -> list[JsonDict]: """Return all pending delayed events requested by the given user.""" await self._delayed_event_mgmt_ratelimiter.ratelimit( requester, diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index c6024597b7..f0558fc737 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -25,13 +25,9 @@ from threading import Lock from typing import ( TYPE_CHECKING, AbstractSet, - Dict, Iterable, - List, Mapping, Optional, - Set, - Tuple, cast, ) @@ -407,7 +403,7 @@ class DeviceHandler: raise @trace - async def get_devices_by_user(self, user_id: str) -> List[JsonDict]: + async def get_devices_by_user(self, user_id: str) -> list[JsonDict]: """ Retrieve the given user's devices @@ -431,7 +427,7 @@ class DeviceHandler: async def get_dehydrated_device( self, user_id: str - ) -> Optional[Tuple[str, JsonDict]]: + ) -> Optional[tuple[str, JsonDict]]: """Retrieve the information for a dehydrated device. Args: @@ -568,7 +564,7 @@ class DeviceHandler: room_ids: StrCollection, from_token: StreamToken, now_token: Optional[StreamToken] = None, - ) -> Set[str]: + ) -> set[str]: """Get the set of users whose devices have changed who share a room with the given user. """ @@ -644,8 +640,8 @@ class DeviceHandler: # Check for newly joined or left rooms. We need to make sure that we add # to newly joined in the case membership goes from join -> leave -> join # again. 
- newly_joined_rooms: Set[str] = set() - newly_left_rooms: Set[str] = set() + newly_joined_rooms: set[str] = set() + newly_left_rooms: set[str] = set() for change in membership_changes: # We check for changes in "joinedness", i.e. if the membership has # changed to or from JOIN. @@ -661,10 +657,10 @@ class DeviceHandler: # the user is currently in. # List of membership changes per room - room_to_deltas: Dict[str, List[StateDelta]] = {} + room_to_deltas: dict[str, list[StateDelta]] = {} # The set of event IDs of membership events (so we can fetch their # associated membership). - memberships_to_fetch: Set[str] = set() + memberships_to_fetch: set[str] = set() # TODO: Only pull out membership events? state_changes = await self.store.get_current_state_deltas_for_rooms( @@ -695,8 +691,8 @@ class DeviceHandler: # We now want to find any user that have newly joined/invited/knocked, # or newly left, similarly to above. - newly_joined_or_invited_or_knocked_users: Set[str] = set() - newly_left_users: Set[str] = set() + newly_joined_or_invited_or_knocked_users: set[str] = set() + newly_left_users: set[str] = set() for _, deltas in room_to_deltas.items(): for delta in deltas: # Get the prev/new memberships for the delta @@ -838,7 +834,7 @@ class DeviceHandler: # Check if the application services have any results. if self._query_appservices_for_keys: # Query the appservice for all devices for this user. - query: Dict[str, Optional[List[str]]] = {user_id: None} + query: dict[str, Optional[list[str]]] = {user_id: None} # Query the appservices for any keys. appservice_results = await self._appservice_handler.query_keys(query) @@ -898,7 +894,7 @@ class DeviceHandler: async def notify_user_signature_update( self, from_user_id: str, - user_ids: List[str], + user_ids: list[str], ) -> None: """Notify a device writer that a user have made new signatures of other users. @@ -927,7 +923,7 @@ class DeviceHandler: async def _delete_device_messages( self, task: ScheduledTask, - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: """Scheduler task to delete device messages in batch of `DEVICE_MSGS_DELETE_BATCH_LIMIT`.""" assert task.params is not None user_id = task.params["user_id"] @@ -1051,7 +1047,7 @@ class DeviceWriterHandler(DeviceHandler): await self.handle_new_device_update() async def notify_user_signature_update( - self, from_user_id: str, user_ids: List[str] + self, from_user_id: str, user_ids: list[str] ) -> None: """Notify a user that they have made new signatures of other users. @@ -1112,7 +1108,7 @@ class DeviceWriterHandler(DeviceHandler): # hosts we've already poked about for this update. This is so that we # don't poke the same remote server about the same update repeatedly. 
current_stream_id = None - hosts_already_sent_to: Set[str] = set() + hosts_already_sent_to: set[str] = set() try: stream_id, room_id = await self.store.get_device_change_last_converted_pos() @@ -1311,7 +1307,7 @@ class DeviceWriterHandler(DeviceHandler): def _update_device_from_client_ips( - device: JsonDict, client_ips: Mapping[Tuple[str, str], DeviceLastConnectionInfo] + device: JsonDict, client_ips: Mapping[tuple[str, str], DeviceLastConnectionInfo] ) -> None: ip = client_ips.get((device["user_id"], device["device_id"])) device.update( @@ -1338,8 +1334,8 @@ class DeviceListWorkerUpdater: async def multi_user_device_resync( self, - user_ids: List[str], - ) -> Dict[str, Optional[JsonMapping]]: + user_ids: list[str], + ) -> dict[str, Optional[JsonMapping]]: """ Like `user_device_resync` but operates on multiple users **from the same origin** at once. @@ -1365,7 +1361,7 @@ class DeviceListWorkerUpdater: user_id: str, master_key: Optional[JsonDict], self_signing_key: Optional[JsonDict], - ) -> List[str]: + ) -> list[str]: """Process the given new master and self-signing key for the given remote user. Args: @@ -1455,14 +1451,14 @@ class DeviceListUpdater(DeviceListWorkerUpdater): ) # user_id -> list of updates waiting to be handled. - self._pending_updates: Dict[ - str, List[Tuple[str, str, Iterable[str], JsonDict]] + self._pending_updates: dict[ + str, list[tuple[str, str, Iterable[str], JsonDict]] ] = {} # Recently seen stream ids. We don't bother keeping these in the DB, # but they're useful to have them about to reduce the number of spurious # resyncs. - self._seen_updates: ExpiringCache[str, Set[str]] = ExpiringCache( + self._seen_updates: ExpiringCache[str, set[str]] = ExpiringCache( cache_name="device_update_edu", server_name=self.server_name, hs=self.hs, @@ -1619,12 +1615,12 @@ class DeviceListUpdater(DeviceListWorkerUpdater): ) async def _need_to_do_resync( - self, user_id: str, updates: Iterable[Tuple[str, str, Iterable[str], JsonDict]] + self, user_id: str, updates: Iterable[tuple[str, str, Iterable[str], JsonDict]] ) -> bool: """Given a list of updates for a user figure out if we need to do a full resync, or whether we have enough data that we can just apply the delta. """ - seen_updates: Set[str] = self._seen_updates.get(user_id, set()) + seen_updates: set[str] = self._seen_updates.get(user_id, set()) extremity = await self.store.get_device_list_last_stream_id_for_remote(user_id) @@ -1702,8 +1698,8 @@ class DeviceListUpdater(DeviceListWorkerUpdater): self._resync_retry_lock.release() async def multi_user_device_resync( - self, user_ids: List[str], mark_failed_as_stale: bool = True - ) -> Dict[str, Optional[JsonMapping]]: + self, user_ids: list[str], mark_failed_as_stale: bool = True + ) -> dict[str, Optional[JsonMapping]]: """ Like `user_device_resync` but operates on multiple users **from the same origin** at once. @@ -1739,7 +1735,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): async def _user_device_resync_returning_failed( self, user_id: str - ) -> Tuple[Optional[JsonMapping], bool]: + ) -> tuple[Optional[JsonMapping], bool]: """Fetches all devices for a user and updates the device cache with them. 
Args: diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 860e24d79d..4dcdcc42fe 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -21,7 +21,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Dict, Optional +from typing import TYPE_CHECKING, Any, Optional from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes from synapse.api.errors import Codes, SynapseError @@ -158,7 +158,7 @@ class DeviceMessageHandler: self, message_type: str, sender_user_id: str, - by_device: Dict[str, Dict[str, Any]], + by_device: dict[str, dict[str, Any]], ) -> None: """Checks inbound device messages for unknown remote devices, and if found marks the remote cache for the user as stale. @@ -207,7 +207,7 @@ class DeviceMessageHandler: self, requester: Requester, message_type: str, - messages: Dict[str, Dict[str, JsonDict]], + messages: dict[str, dict[str, JsonDict]], ) -> None: """ Handle a request from a user to send to-device message(s). @@ -222,7 +222,7 @@ class DeviceMessageHandler: set_tag(SynapseTags.TO_DEVICE_TYPE, message_type) set_tag(SynapseTags.TO_DEVICE_SENDER, sender_user_id) local_messages = {} - remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + remote_messages: dict[str, dict[str, dict[str, JsonDict]]] = {} for user_id, by_device in messages.items(): if not UserID.is_valid(user_id): logger.warning( diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 11284ccd0b..865c32d19e 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -21,7 +21,7 @@ import logging import string -from typing import TYPE_CHECKING, Iterable, List, Literal, Optional, Sequence +from typing import TYPE_CHECKING, Iterable, Literal, Optional, Sequence from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes from synapse.api.errors import ( @@ -108,7 +108,7 @@ class DirectoryHandler: requester: Requester, room_alias: RoomAlias, room_id: str, - servers: Optional[List[str]] = None, + servers: Optional[list[str]] = None, check_membership: bool = True, ) -> None: """Attempt to create a new alias diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index b10472f1d2..85a150b71a 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Tuple +from typing import TYPE_CHECKING, Iterable, Mapping, Optional import attr from canonicaljson import encode_canonical_json @@ -162,8 +162,8 @@ class E2eKeysHandler: async with self._query_devices_linearizer.queue((from_user_id, from_device_id)): async def filter_device_key_query( - query: Dict[str, List[str]], - ) -> Dict[str, List[str]]: + query: dict[str, list[str]], + ) -> dict[str, list[str]]: if not self.config.experimental.msc4263_limit_key_queries_to_users_who_share_rooms: # Only ignore invalid user IDs, which is the same behaviour as if # the user existed but had no keys. @@ -188,7 +188,7 @@ class E2eKeysHandler: if user_id in allowed_user_ids } - device_keys_query: Dict[str, List[str]] = await filter_device_key_query( + device_keys_query: dict[str, list[str]] = await filter_device_key_query( query_body.get("device_keys", {}) ) @@ -209,7 +209,7 @@ class E2eKeysHandler: # First get local devices. # A map of destination -> failure response. 
- failures: Dict[str, JsonDict] = {} + failures: dict[str, JsonDict] = {} results = {} if local_query: local_result = await self.query_local_devices(local_query) @@ -224,10 +224,10 @@ class E2eKeysHandler: # Now attempt to get any remote devices from our local cache. # A map of destination -> user ID -> device IDs. - remote_queries_not_in_cache: Dict[str, Dict[str, Iterable[str]]] = {} + remote_queries_not_in_cache: dict[str, dict[str, Iterable[str]]] = {} if remote_queries: user_ids = set() - user_and_device_ids: List[Tuple[str, str]] = [] + user_and_device_ids: list[tuple[str, str]] = [] for user_id, device_ids in remote_queries.items(): if device_ids: user_and_device_ids.extend( @@ -355,9 +355,9 @@ class E2eKeysHandler: self, results: JsonDict, cross_signing_keys: JsonDict, - failures: Dict[str, JsonDict], + failures: dict[str, JsonDict], destination: str, - destination_query: Dict[str, Iterable[str]], + destination_query: dict[str, Iterable[str]], timeout: int, ) -> None: """This is called when we are querying the device list of a user on @@ -480,7 +480,7 @@ class E2eKeysHandler: @cancellable async def get_cross_signing_keys_from_cache( self, query: Iterable[str], from_user_id: Optional[str] - ) -> Dict[str, Dict[str, JsonMapping]]: + ) -> dict[str, dict[str, JsonMapping]]: """Get cross-signing keys for users from the database Args: @@ -527,9 +527,9 @@ class E2eKeysHandler: @cancellable async def query_local_devices( self, - query: Mapping[str, Optional[List[str]]], + query: Mapping[str, Optional[list[str]]], include_displaynames: bool = True, - ) -> Dict[str, Dict[str, dict]]: + ) -> dict[str, dict[str, dict]]: """Get E2E device keys for local users Args: @@ -542,9 +542,9 @@ class E2eKeysHandler: A map from user_id -> device_id -> device details """ set_tag("local_query", str(query)) - local_query: List[Tuple[str, Optional[str]]] = [] + local_query: list[tuple[str, Optional[str]]] = [] - result_dict: Dict[str, Dict[str, dict]] = {} + result_dict: dict[str, dict[str, dict]] = {} for user_id, device_ids in query.items(): # we use UserID.from_string to catch invalid user ids if not self.is_mine(UserID.from_string(user_id)): @@ -594,7 +594,7 @@ class E2eKeysHandler: return result_dict async def on_federation_query_client_keys( - self, query_body: Dict[str, Dict[str, Optional[List[str]]]] + self, query_body: dict[str, dict[str, Optional[list[str]]]] ) -> JsonDict: """Handle a device key query from a federated server: @@ -614,7 +614,7 @@ class E2eKeysHandler: - self_signing_key: An optional dictionary of user ID -> self-signing key info. """ - device_keys_query: Dict[str, Optional[List[str]]] = query_body.get( + device_keys_query: dict[str, Optional[list[str]]] = query_body.get( "device_keys", {} ) if any( @@ -639,9 +639,9 @@ class E2eKeysHandler: async def claim_local_one_time_keys( self, - local_query: List[Tuple[str, str, str, int]], + local_query: list[tuple[str, str, str, int]], always_include_fallback_keys: bool, - ) -> Iterable[Dict[str, Dict[str, Dict[str, JsonDict]]]]: + ) -> Iterable[dict[str, dict[str, dict[str, JsonDict]]]]: """Claim one time keys for local users. 1. Attempt to claim OTKs from the database. @@ -735,7 +735,7 @@ class E2eKeysHandler: @trace async def claim_one_time_keys( self, - query: Dict[str, Dict[str, Dict[str, int]]], + query: dict[str, dict[str, dict[str, int]]], user: UserID, timeout: Optional[int], always_include_fallback_keys: bool, @@ -754,8 +754,8 @@ class E2eKeysHandler: one_time_keys: chain of maps user ID -> device ID -> key ID -> key. 
failures: map from remote destination to a JsonDict describing the error. """ - local_query: List[Tuple[str, str, str, int]] = [] - remote_queries: Dict[str, Dict[str, Dict[str, Dict[str, int]]]] = {} + local_query: list[tuple[str, str, str, int]] = [] + remote_queries: dict[str, dict[str, dict[str, dict[str, int]]]] = {} for user_id, one_time_keys in query.items(): # we use UserID.from_string to catch invalid user ids @@ -775,7 +775,7 @@ class E2eKeysHandler: ) # A map of user ID -> device ID -> key ID -> key. - json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + json_result: dict[str, dict[str, dict[str, JsonDict]]] = {} for result in results: for user_id, device_keys in result.items(): for device_id, keys in device_keys.items(): @@ -785,7 +785,7 @@ class E2eKeysHandler: ).update({key_id: key}) # Remote failures. - failures: Dict[str, JsonDict] = {} + failures: dict[str, JsonDict] = {} @trace async def claim_client_keys(destination: str) -> None: @@ -1131,7 +1131,7 @@ class E2eKeysHandler: async def _process_self_signatures( self, user_id: str, signatures: JsonDict - ) -> Tuple[List["SignatureListItem"], Dict[str, Dict[str, dict]]]: + ) -> tuple[list["SignatureListItem"], dict[str, dict[str, dict]]]: """Process uploaded signatures of the user's own keys. Signatures of the user's own keys from this API come in two forms: @@ -1149,8 +1149,8 @@ class E2eKeysHandler: Raises: SynapseError: if the input is malformed """ - signature_list: List["SignatureListItem"] = [] - failures: Dict[str, Dict[str, JsonDict]] = {} + signature_list: list["SignatureListItem"] = [] + failures: dict[str, dict[str, JsonDict]] = {} if not signatures: return signature_list, failures @@ -1250,8 +1250,8 @@ class E2eKeysHandler: master_key_id: str, signed_master_key: JsonDict, stored_master_key: JsonMapping, - devices: Dict[str, Dict[str, JsonDict]], - ) -> List["SignatureListItem"]: + devices: dict[str, dict[str, JsonDict]], + ) -> list["SignatureListItem"]: """Check signatures of a user's master key made by their devices. Args: @@ -1294,8 +1294,8 @@ class E2eKeysHandler: return master_key_signature_list async def _process_other_signatures( - self, user_id: str, signatures: Dict[str, dict] - ) -> Tuple[List["SignatureListItem"], Dict[str, Dict[str, dict]]]: + self, user_id: str, signatures: dict[str, dict] + ) -> tuple[list["SignatureListItem"], dict[str, dict[str, dict]]]: """Process uploaded signatures of other users' keys. These will be the target user's master keys, signed by the uploading user's user-signing key. @@ -1311,8 +1311,8 @@ class E2eKeysHandler: Raises: SynapseError: if the input is malformed """ - signature_list: List["SignatureListItem"] = [] - failures: Dict[str, Dict[str, JsonDict]] = {} + signature_list: list["SignatureListItem"] = [] + failures: dict[str, dict[str, JsonDict]] = {} if not signatures: return signature_list, failures @@ -1396,7 +1396,7 @@ class E2eKeysHandler: async def _get_e2e_cross_signing_verify_key( self, user_id: str, key_type: str, from_user_id: Optional[str] = None - ) -> Tuple[JsonMapping, str, VerifyKey]: + ) -> tuple[JsonMapping, str, VerifyKey]: """Fetch locally or remotely query for a cross-signing public key. First, attempt to fetch the cross-signing public key from storage. 
@@ -1451,7 +1451,7 @@ class E2eKeysHandler: self, user: UserID, desired_key_type: str, - ) -> Optional[Tuple[JsonMapping, str, VerifyKey]]: + ) -> Optional[tuple[JsonMapping, str, VerifyKey]]: """Queries cross-signing keys for a remote user and saves them to the database Only the key specified by `key_type` will be returned, while all retrieved keys @@ -1541,7 +1541,7 @@ class E2eKeysHandler: return desired_key_data - async def check_cross_signing_setup(self, user_id: str) -> Tuple[bool, bool]: + async def check_cross_signing_setup(self, user_id: str) -> tuple[bool, bool]: """Checks if the user has cross-signing set up Args: @@ -1599,7 +1599,7 @@ class E2eKeysHandler: async def _delete_old_one_time_keys_task( self, task: ScheduledTask - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: """Scheduler task to delete old one time keys. Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility @@ -1773,7 +1773,7 @@ class SigningKeyEduUpdater: ) # user_id -> list of updates waiting to be handled. - self._pending_updates: Dict[str, List[Tuple[JsonDict, JsonDict]]] = {} + self._pending_updates: dict[str, list[tuple[JsonDict, JsonDict]]] = {} async def incoming_signing_key_update( self, origin: str, edu_content: JsonDict @@ -1819,7 +1819,7 @@ class SigningKeyEduUpdater: # This can happen since we batch updates return - device_ids: List[str] = [] + device_ids: list[str] = [] logger.info("pending updates: %r", pending_updates) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 623fd33f13..094b4bc27c 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Dict, Literal, Optional, cast +from typing import TYPE_CHECKING, Literal, Optional, cast from synapse.api.errors import ( Codes, @@ -65,8 +65,8 @@ class E2eRoomKeysHandler: version: str, room_id: Optional[str] = None, session_id: Optional[str] = None, - ) -> Dict[ - Literal["rooms"], Dict[str, Dict[Literal["sessions"], Dict[str, RoomKey]]] + ) -> dict[ + Literal["rooms"], dict[str, dict[Literal["sessions"], dict[str, RoomKey]]] ]: """Bulk get the E2E room keys for a given backup, optionally filtered to a given room, or a given session. diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 1f1f67dc0d..b2caca8ce7 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, List, Mapping, Optional, Union +from typing import TYPE_CHECKING, Mapping, Optional, Union from synapse import event_auth from synapse.api.constants import ( @@ -92,7 +92,7 @@ class EventAuthHandler: event: Union[EventBase, EventBuilder], current_state_ids: StateMap[str], for_verification: bool = False, - ) -> List[str]: + ) -> list[str]: """Given an event and current state return the list of event IDs used to auth an event. 
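These hunks, like the rest of this patch, swap `typing.Dict`/`List`/`Tuple`/`Set` for the built-in generics that have been available since Python 3.9 (PEP 585). A self-contained sketch of the annotation style being converged on, using made-up names rather than Synapse's own helpers:

from typing import Optional

# Hypothetical helper (not from Synapse) written in the style the patch adopts:
# dict/list/tuple as generics instead of typing.Dict, typing.List, typing.Tuple.
def summarise_keys(
    keys_by_user: dict[str, list[str]],
) -> dict[str, tuple[int, Optional[str]]]:
    """Map each user ID to (how many key IDs they uploaded, the first key ID or None)."""
    return {
        user_id: (len(key_ids), key_ids[0] if key_ids else None)
        for user_id, key_ids in keys_by_user.items()
    }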
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 3f46032a43..9522d5a696 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -21,7 +21,7 @@ import logging import random -from typing import TYPE_CHECKING, Iterable, List, Optional +from typing import TYPE_CHECKING, Iterable, Optional from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState from synapse.api.errors import AuthError, SynapseError @@ -100,7 +100,7 @@ class EventStreamHandler: # When the user joins a new room, or another user joins a currently # joined room, we need to send down presence for those users. - to_add: List[JsonDict] = [] + to_add: list[JsonDict] = [] for event in events: if not isinstance(event, EventBase): continue diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index adc20f4ad0..3eb1d166f8 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -30,12 +30,8 @@ from http import HTTPStatus from typing import ( TYPE_CHECKING, AbstractSet, - Dict, Iterable, - List, Optional, - Set, - Tuple, Union, ) @@ -168,12 +164,12 @@ class FederationHandler: # Tracks running partial state syncs by room ID. # Partial state syncs currently only run on the main process, so it's okay to # track them in-memory for now. - self._active_partial_state_syncs: Set[str] = set() + self._active_partial_state_syncs: set[str] = set() # Tracks partial state syncs we may want to restart. # A dictionary mapping room IDs to (initial destination, other destinations) # tuples. - self._partial_state_syncs_maybe_needing_restart: Dict[ - str, Tuple[Optional[str], AbstractSet[str]] + self._partial_state_syncs_maybe_needing_restart: dict[ + str, tuple[Optional[str], AbstractSet[str]] ] = {} # A lock guarding the partial state flag for rooms. # When the lock is held for a given room, no other concurrent code may @@ -272,7 +268,7 @@ class FederationHandler: # we now have a list of potential places to backpaginate from. We prefer to # start with the most recent (ie, max depth), so let's sort the list. - sorted_backfill_points: List[_BackfillPoint] = sorted( + sorted_backfill_points: list[_BackfillPoint] = sorted( backwards_extremities, key=lambda e: -int(e.depth), ) @@ -380,7 +376,7 @@ class FederationHandler: # there is it's often sufficiently long ago that clients would stop # attempting to paginate before backfill reached the visible history. - extremities_to_request: List[str] = [] + extremities_to_request: list[str] = [] for bp in sorted_backfill_points: if len(extremities_to_request) >= 5: break @@ -562,7 +558,7 @@ class FederationHandler: return pdu - async def on_event_auth(self, event_id: str) -> List[EventBase]: + async def on_event_auth(self, event_id: str) -> list[EventBase]: event = await self.store.get_event(event_id) auth = await self.store.get_auth_chain( event.room_id, list(event.auth_event_ids()), include_given=True @@ -571,7 +567,7 @@ class FederationHandler: async def do_invite_join( self, target_hosts: Iterable[str], room_id: str, joinee: str, content: JsonDict - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Attempts to join the `joinee` to the room `room_id` via the servers contained in `target_hosts`. @@ -807,11 +803,11 @@ class FederationHandler: async def do_knock( self, - target_hosts: List[str], + target_hosts: list[str], room_id: str, knockee: str, content: JsonDict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Sends the knock to the remote server. 
This first triggers a make_knock request that returns a partial @@ -840,7 +836,7 @@ class FederationHandler: # Ask the remote server to create a valid knock event for us. Once received, # we sign the event - params: Dict[str, Iterable[str]] = {"ver": supported_room_versions} + params: dict[str, Iterable[str]] = {"ver": supported_room_versions} origin, event, event_format_version = await self._make_and_verify_event( target_hosts, room_id, knockee, Membership.KNOCK, content, params=params ) @@ -889,7 +885,7 @@ class FederationHandler: return event.event_id, stream_id async def _handle_queued_pdus( - self, room_queue: List[Tuple[EventBase, str]] + self, room_queue: list[tuple[EventBase, str]] ) -> None: """Process PDUs which got queued up while we were busy send_joining. @@ -1144,7 +1140,7 @@ class FederationHandler: async def do_remotely_reject_invite( self, target_hosts: Iterable[str], room_id: str, user_id: str, content: JsonDict - ) -> Tuple[EventBase, int]: + ) -> tuple[EventBase, int]: origin, event, room_version = await self._make_and_verify_event( target_hosts, room_id, user_id, "leave", content=content ) @@ -1178,8 +1174,8 @@ class FederationHandler: user_id: str, membership: str, content: JsonDict, - params: Optional[Dict[str, Union[str, Iterable[str]]]] = None, - ) -> Tuple[str, EventBase, RoomVersion]: + params: Optional[dict[str, Union[str, Iterable[str]]]] = None, + ) -> tuple[str, EventBase, RoomVersion]: ( origin, event, @@ -1306,7 +1302,7 @@ class FederationHandler: @trace @tag_args - async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]: + async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> list[str]: """Returns the state at the event. i.e. not including said event.""" event = await self.store.get_event(event_id, check_room_id=room_id) if event.internal_metadata.outlier: @@ -1339,8 +1335,8 @@ class FederationHandler: return list(state_map.values()) async def on_backfill_request( - self, origin: str, room_id: str, pdu_list: List[str], limit: int - ) -> List[EventBase]: + self, origin: str, room_id: str, pdu_list: list[str], limit: int + ) -> list[EventBase]: # We allow partially joined rooms since in this case we are filtering out # non-local events in `filter_events_for_server`. await self._event_auth_handler.assert_host_in_room(room_id, origin, True) @@ -1416,10 +1412,10 @@ class FederationHandler: self, origin: str, room_id: str, - earliest_events: List[str], - latest_events: List[str], + earliest_events: list[str], + latest_events: list[str], limit: int, - ) -> List[EventBase]: + ) -> list[EventBase]: # We allow partially joined rooms since in this case we are filtering out # non-local events in `filter_events_for_server`. await self._event_auth_handler.assert_host_in_room(room_id, origin, True) @@ -1602,7 +1598,7 @@ class FederationHandler: event_dict: JsonDict, event: EventBase, context: UnpersistedEventContextBase, - ) -> Tuple[EventBase, UnpersistedEventContextBase]: + ) -> tuple[EventBase, UnpersistedEventContextBase]: key = ( EventTypes.ThirdPartyInvite, event.content["third_party_invite"]["signed"]["token"], @@ -1758,7 +1754,7 @@ class FederationHandler: raise AuthError(403, "Third party certificate was invalid") async def get_room_complexity( - self, remote_room_hosts: List[str], room_id: str + self, remote_room_hosts: list[str], room_id: str ) -> Optional[dict]: """ Fetch the complexity of a remote room over federation. 
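One of the federation.py hunks above re-annotates `sorted_backfill_points: list[_BackfillPoint]`, which the surrounding comment says is ordered so the deepest (most recent) extremities come first. A toy illustration of that ordering, using a plain dataclass as a stand-in for Synapse's attrs-based `_BackfillPoint`:

from dataclasses import dataclass

@dataclass(frozen=True)
class BackfillPoint:  # stand-in for synapse's _BackfillPoint
    event_id: str
    depth: int

def order_backfill_points(points: list[BackfillPoint]) -> list[BackfillPoint]:
    # Deepest first: backfill prefers to start from the most recent gap.
    return sorted(points, key=lambda p: -p.depth)

assert [p.event_id for p in order_backfill_points(
    [BackfillPoint("$a", 3), BackfillPoint("$b", 10)]
)] == ["$b", "$a"]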
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index d6390b79c7..32b603e947 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -27,13 +27,9 @@ from typing import ( TYPE_CHECKING, Collection, Container, - Dict, Iterable, - List, Optional, Sequence, - Set, - Tuple, ) from prometheus_client import Counter, Histogram @@ -190,7 +186,7 @@ class FederationEventHandler: # For each room, a list of (pdu, origin) tuples. # TODO: replace this with something more elegant, probably based around the # federation event staging area. - self.room_queues: Dict[str, List[Tuple[EventBase, str]]] = {} + self.room_queues: dict[str, list[tuple[EventBase, str]]] = {} self._room_pdu_linearizer = Linearizer(name="fed_room_pdu", clock=self._clock) @@ -511,8 +507,8 @@ class FederationEventHandler: self, origin: str, room_id: str, - auth_events: List[EventBase], - state: List[EventBase], + auth_events: list[EventBase], + state: list[EventBase], event: EventBase, room_version: RoomVersion, partial_state: bool, @@ -595,7 +591,7 @@ class FederationEventHandler: ) missing_event_ids = prev_event_ids - seen_event_ids - state_maps_to_resolve: List[StateMap[str]] = [] + state_maps_to_resolve: list[StateMap[str]] = [] # Fetch the state after the prev events that we know about. state_maps_to_resolve.extend( @@ -755,7 +751,7 @@ class FederationEventHandler: @trace async def _get_missing_events_for_pdu( - self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int + self, origin: str, pdu: EventBase, prevs: set[str], min_depth: int ) -> None: """ Args: @@ -902,7 +898,7 @@ class FederationEventHandler: [event.event_id for event in events] ) - new_events: List[EventBase] = [] + new_events: list[EventBase] = [] for event in events: event_id = event.event_id @@ -1186,7 +1182,7 @@ class FederationEventHandler: partial_state = any(partial_state_flags.values()) # state_maps is a list of mappings from (type, state_key) to event_id - state_maps: List[StateMap[str]] = [] + state_maps: list[StateMap[str]] = [] # Ask the remote server for the states we don't # know about @@ -1647,7 +1643,7 @@ class FederationEventHandler: room_version = await self._store.get_room_version(room_id) - events: List[EventBase] = [] + events: list[EventBase] = [] async def get_event(event_id: str) -> None: with nested_logging_context(event_id): @@ -1753,7 +1749,7 @@ class FederationEventHandler: ) auth_map.update(persisted_events) - events_and_contexts_to_persist: List[EventPersistencePair] = [] + events_and_contexts_to_persist: list[EventPersistencePair] = [] async def prep(event: EventBase) -> None: with nested_logging_context(suffix=event.event_id): @@ -2050,7 +2046,7 @@ class FederationEventHandler: state_sets_d = await self._state_storage_controller.get_state_groups_ids( event.room_id, extrem_ids ) - state_sets: List[StateMap[str]] = list(state_sets_d.values()) + state_sets: list[StateMap[str]] = list(state_sets_d.values()) state_ids = await context.get_prev_state_ids() state_sets.append(state_ids) current_state_ids = ( diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index be757201fc..0f507b3317 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -24,7 +24,7 @@ import logging import urllib.parse -from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Optional import attr @@ -105,7 +105,7 @@ class IdentityHandler: ) async def 
threepid_from_creds( - self, id_server: str, creds: Dict[str, str] + self, id_server: str, creds: dict[str, str] ) -> Optional[JsonDict]: """ Retrieve and validate a threepid identifier from a "credentials" dictionary against a @@ -693,7 +693,7 @@ class IdentityHandler: inviter_display_name: str, inviter_avatar_url: str, id_access_token: str, - ) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]: + ) -> tuple[str, list[dict[str, str]], dict[str, str], str]: """ Asks an identity server for a third party invite. @@ -779,7 +779,7 @@ class IdentityHandler: return token, public_keys, fallback_public_key, display_name -def create_id_access_token_header(id_access_token: str) -> List[str]: +def create_id_access_token_header(id_access_token: str) -> list[str]: """Create an Authorization header for passing to SimpleHttpClient as the header value of an HTTP request. diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 75d64d2d50..1c6f8bf53b 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional from synapse.api.constants import ( AccountDataTypes, @@ -69,7 +69,7 @@ class InitialSyncHandler: self.clock = hs.get_clock() self.validator = EventValidator() self.snapshot_cache: ResponseCache[ - Tuple[ + tuple[ str, Optional[StreamToken], Optional[StreamToken], @@ -451,7 +451,7 @@ class InitialSyncHandler: presence_handler = self.hs.get_presence_handler() - async def get_presence() -> List[JsonDict]: + async def get_presence() -> list[JsonDict]: # If presence is disabled, return an empty list if not self.hs.config.server.presence_enabled: return [] @@ -468,7 +468,7 @@ class InitialSyncHandler: for s in states ] - async def get_receipts() -> List[JsonMapping]: + async def get_receipts() -> list[JsonMapping]: receipts = await self.store.get_linearized_receipts_for_room( room_id, to_key=now_token.receipt_key ) diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py index 400f3a59aa..f1715f6495 100644 --- a/synapse/handlers/jwt.py +++ b/synapse/handlers/jwt.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from authlib.jose import JsonWebToken, JWTClaims from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError @@ -41,7 +41,7 @@ class JwtHandler: self.jwt_issuer = hs.config.jwt.jwt_issuer self.jwt_audiences = hs.config.jwt.jwt_audiences - def validate_login(self, login_submission: JsonDict) -> Tuple[str, Optional[str]]: + def validate_login(self, login_submission: JsonDict) -> tuple[str, Optional[str]]: """ Authenticates the user for the /login API diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e874b60000..2ad1dbe73f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -22,7 +22,7 @@ import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Tuple +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence from canonicaljson import encode_canonical_json @@ -180,7 +180,7 @@ class MessageHandler: room_id: str, state_filter: Optional[StateFilter] = None, at_token: Optional[StreamToken] = None, - ) -> List[dict]: + ) -> list[dict]: """Retrieve all state events for a given room. 
If the user is joined to the room then return the current state. If the user has left the room return the state events from when they left. If an explicit @@ -538,7 +538,7 @@ class EventCreationHandler: # # map from room id to time-of-last-attempt. # - self._rooms_to_exclude_from_dummy_event_insertion: Dict[str, int] = {} + self._rooms_to_exclude_from_dummy_event_insertion: dict[str, int] = {} # The number of forward extremeities before a dummy event is sent. self._dummy_events_threshold = hs.config.server.dummy_events_threshold @@ -578,16 +578,16 @@ class EventCreationHandler: requester: Requester, event_dict: dict, txn_id: Optional[str] = None, - prev_event_ids: Optional[List[str]] = None, - auth_event_ids: Optional[List[str]] = None, - state_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, + auth_event_ids: Optional[list[str]] = None, + state_event_ids: Optional[list[str]] = None, require_consent: bool = True, outlier: bool = False, depth: Optional[int] = None, state_map: Optional[StateMap[str]] = None, for_batch: bool = False, current_state_group: Optional[int] = None, - ) -> Tuple[EventBase, UnpersistedEventContextBase]: + ) -> tuple[EventBase, UnpersistedEventContextBase]: """ Given a dict from a client, create a new event. If bool for_batch is true, will create an event using the prev_event_ids, and will create an event context for @@ -961,14 +961,14 @@ class EventCreationHandler: self, requester: Requester, event_dict: dict, - prev_event_ids: Optional[List[str]] = None, - state_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, + state_event_ids: Optional[list[str]] = None, ratelimit: bool = True, txn_id: Optional[str] = None, ignore_shadow_ban: bool = False, outlier: bool = False, depth: Optional[int] = None, - ) -> Tuple[EventBase, int]: + ) -> tuple[EventBase, int]: """ Creates an event, then sends it. @@ -1098,14 +1098,14 @@ class EventCreationHandler: self, requester: Requester, event_dict: dict, - prev_event_ids: Optional[List[str]] = None, - state_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, + state_event_ids: Optional[list[str]] = None, ratelimit: bool = True, txn_id: Optional[str] = None, ignore_shadow_ban: bool = False, outlier: bool = False, depth: Optional[int] = None, - ) -> Tuple[EventBase, int]: + ) -> tuple[EventBase, int]: room_id = event_dict["room_id"] # If we don't have any prev event IDs specified then we need to @@ -1220,14 +1220,14 @@ class EventCreationHandler: self, builder: EventBuilder, requester: Optional[Requester] = None, - prev_event_ids: Optional[List[str]] = None, - auth_event_ids: Optional[List[str]] = None, - state_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, + auth_event_ids: Optional[list[str]] = None, + state_event_ids: Optional[list[str]] = None, depth: Optional[int] = None, state_map: Optional[StateMap[str]] = None, for_batch: bool = False, current_state_group: Optional[int] = None, - ) -> Tuple[EventBase, UnpersistedEventContextBase]: + ) -> tuple[EventBase, UnpersistedEventContextBase]: """Create a new event for a local client. 
If bool for_batch is true, will create an event using the prev_event_ids, and will create an event context for the event using the parameters state_map and current_state_group, thus these parameters @@ -1471,9 +1471,9 @@ class EventCreationHandler: async def handle_new_client_event( self, requester: Requester, - events_and_context: List[EventPersistencePair], + events_and_context: list[EventPersistencePair], ratelimit: bool = True, - extra_users: Optional[List[UserID]] = None, + extra_users: Optional[list[UserID]] = None, ignore_shadow_ban: bool = False, ) -> EventBase: """Processes new events. Please note that if batch persisting events, an error in @@ -1683,9 +1683,9 @@ class EventCreationHandler: async def _persist_events( self, requester: Requester, - events_and_context: List[EventPersistencePair], + events_and_context: list[EventPersistencePair], ratelimit: bool = True, - extra_users: Optional[List[UserID]] = None, + extra_users: Optional[list[UserID]] = None, ) -> EventBase: """Actually persists new events. Should only be called by `handle_new_client_event`, and see its docstring for documentation of @@ -1769,7 +1769,7 @@ class EventCreationHandler: raise async def cache_joined_hosts_for_events( - self, events_and_context: List[EventPersistencePair] + self, events_and_context: list[EventPersistencePair] ) -> None: """Precalculate the joined hosts at each of the given events, when using Redis, so that external federation senders don't have to recalculate it themselves. @@ -1875,9 +1875,9 @@ class EventCreationHandler: async def persist_and_notify_client_events( self, requester: Requester, - events_and_context: List[EventPersistencePair], + events_and_context: list[EventPersistencePair], ratelimit: bool = True, - extra_users: Optional[List[UserID]] = None, + extra_users: Optional[list[UserID]] = None, ) -> EventBase: """Called when we have fully built the events, have already calculated the push actions for the events, and checked auth. @@ -2285,7 +2285,7 @@ class EventCreationHandler: async def _rebuild_event_after_third_party_rules( self, third_party_result: dict, original_event: EventBase - ) -> Tuple[EventBase, UnpersistedEventContextBase]: + ) -> tuple[EventBase, UnpersistedEventContextBase]: # the third_party_event_rules want to replace the event. # we do some basic checks, and then return the replacement event. diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 39505463bb..f140912b2a 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -26,11 +26,8 @@ import logging from typing import ( TYPE_CHECKING, Any, - Dict, Generic, - List, Optional, - Type, TypedDict, TypeVar, Union, @@ -113,14 +110,14 @@ class Token(TypedDict): #: A JWK, as per RFC7517 sec 4. The type could be more precise than that, but #: there is no real point of doing this in our case. -JWK = Dict[str, str] +JWK = dict[str, str] C = TypeVar("C") #: A JWK Set, as per RFC7517 sec 5. 
class JWKS(TypedDict): - keys: List[JWK] + keys: list[JWK] class OidcHandler: @@ -134,7 +131,7 @@ class OidcHandler: assert provider_confs self._macaroon_generator = hs.get_macaroon_generator() - self._providers: Dict[str, "OidcProvider"] = { + self._providers: dict[str, "OidcProvider"] = { p.idp_id: OidcProvider(hs, self._macaroon_generator, p) for p in provider_confs } @@ -332,7 +329,7 @@ class OidcHandler: # At this point we properly checked both claims types issuer: str = iss - audience: List[str] = aud + audience: list[str] = aud except (TypeError, KeyError): raise SynapseError(400, "Invalid issuer/audience in logout_token") @@ -428,8 +425,10 @@ class OidcProvider: # from the IdP's jwks_uri, if required. self._jwks = RetryOnExceptionCachedCall(self._load_jwks) + # type-ignore: we will not be instantiating a subclass of the provider class, + # so the warning about directly accessing __init__ being unsound does not apply here user_mapping_provider_init_method = ( - provider.user_mapping_provider_class.__init__ + provider.user_mapping_provider_class.__init__ # type: ignore[misc] ) if len(inspect.signature(user_mapping_provider_init_method).parameters) == 3: self._user_mapping_provider = provider.user_mapping_provider_class( @@ -758,7 +757,7 @@ class OidcProvider: """ metadata = await self.load_metadata() token_endpoint = metadata.get("token_endpoint") - raw_headers: Dict[str, str] = { + raw_headers: dict[str, str] = { "Content-Type": "application/x-www-form-urlencoded", "User-Agent": self._http_client.user_agent.decode("ascii"), "Accept": "application/json", @@ -902,9 +901,9 @@ class OidcProvider: async def _verify_jwt( self, - alg_values: List[str], + alg_values: list[str], token: str, - claims_cls: Type[C], + claims_cls: type[C], claims_options: Optional[dict] = None, claims_params: Optional[dict] = None, ) -> C: @@ -1589,7 +1588,7 @@ class UserAttributeDict(TypedDict): confirm_localpart: bool display_name: Optional[str] picture: Optional[str] # may be omitted by older `OidcMappingProviders` - emails: List[str] + emails: list[str] class OidcMappingProvider(Generic[C]): @@ -1678,7 +1677,7 @@ class JinjaOidcMappingConfig: localpart_template: Optional[Template] display_name_template: Optional[Template] email_template: Optional[Template] - extra_attributes: Dict[str, Template] + extra_attributes: dict[str, Template] confirm_localpart: bool = False @@ -1778,7 +1777,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): if display_name == "": display_name = None - emails: List[str] = [] + emails: list[str] = [] email = render_template_field(self._config.email_template) if email: emails.append(email) @@ -1794,7 +1793,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): ) async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict: - extras: Dict[str, str] = {} + extras: dict[str, str] = {} for key, template in self._config.extra_attributes.items(): try: extras[key] = template.render(user=userinfo).strip() diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 02a67581e7..7274a512b0 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, List, Optional, Set, Tuple, cast +from typing import TYPE_CHECKING, Optional, cast from twisted.python.failure import Failure @@ -91,7 +91,7 @@ class PaginationHandler: self.pagination_lock = ReadWriteLock() # IDs of rooms in which there currently an active 
purge *or delete* operation. - self._purges_in_progress_by_room: Set[str] = set() + self._purges_in_progress_by_room: set[str] = set() self._event_serializer = hs.get_event_client_serializer() self._retention_default_max_lifetime = ( @@ -279,7 +279,7 @@ class PaginationHandler: async def _purge_history( self, task: ScheduledTask, - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: """ Scheduler action to purge some history of a room. """ @@ -343,7 +343,7 @@ class PaginationHandler: async def get_delete_tasks_by_room( self, room_id: str, only_active: Optional[bool] = False - ) -> List[ScheduledTask]: + ) -> list[ScheduledTask]: """Get complete, failed or active delete tasks by room Args: @@ -363,7 +363,7 @@ class PaginationHandler: async def _purge_room( self, task: ScheduledTask, - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: """ Scheduler action to purge a room. """ @@ -523,7 +523,7 @@ class PaginationHandler: # We use a `Set` because there can be multiple events at a given depth # and we only care about looking at the unique continum of depths to # find gaps. - event_depths: Set[int] = {event.depth for event in events} + event_depths: set[int] = {event.depth for event in events} sorted_event_depths = sorted(event_depths) # Inspect the depths of the returned events to see if there are any gaps @@ -691,7 +691,7 @@ class PaginationHandler: async def _shutdown_and_purge_room( self, task: ScheduledTask, - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: """ Scheduler action to shutdown and purge a room. """ diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 1610683066..d8150a5857 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -86,14 +86,9 @@ from typing import ( Callable, Collection, ContextManager, - Dict, Generator, Iterable, - List, Optional, - Set, - Tuple, - Type, ) from prometheus_client import Counter @@ -236,7 +231,7 @@ class BasePresenceHandler(abc.ABC): self._federation_queue = PresenceFederationQueue(hs, self) - self.VALID_PRESENCE: Tuple[str, ...] = ( + self.VALID_PRESENCE: tuple[str, ...] = ( PresenceState.ONLINE, PresenceState.UNAVAILABLE, PresenceState.OFFLINE, @@ -276,7 +271,7 @@ class BasePresenceHandler(abc.ABC): @abc.abstractmethod def get_currently_syncing_users_for_replication( self, - ) -> Iterable[Tuple[str, Optional[str]]]: + ) -> Iterable[tuple[str, Optional[str]]]: """Get an iterable of syncing users and devices on this worker, to send to the presence handler This is called when a replication connection is established. It should return @@ -293,7 +288,7 @@ class BasePresenceHandler(abc.ABC): async def get_states( self, target_user_ids: Iterable[str] - ) -> List[UserPresenceState]: + ) -> list[UserPresenceState]: """Get the presence state for users.""" updates_d = await self.current_state_for_users(target_user_ids) @@ -306,7 +301,7 @@ class BasePresenceHandler(abc.ABC): async def current_state_for_users( self, user_ids: Iterable[str] - ) -> Dict[str, UserPresenceState]: + ) -> dict[str, UserPresenceState]: """Get the current presence state for multiple users. 
Returns: @@ -417,7 +412,7 @@ class BasePresenceHandler(abc.ABC): return self._federation_queue async def maybe_send_presence_to_interested_destinations( - self, states: List[UserPresenceState] + self, states: list[UserPresenceState] ) -> None: """If this instance is a federation sender, send the states to all destinations that are interested. Filters out any states for remote @@ -501,7 +496,7 @@ class _NullContextManager(ContextManager[None]): def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: @@ -522,8 +517,8 @@ class WorkerPresenceHandler(BasePresenceHandler): # The number of ongoing syncs on this process, by (user ID, device ID). # Empty if _presence_enabled is false. - self._user_device_to_num_current_syncs: Dict[ - Tuple[str, Optional[str]], int + self._user_device_to_num_current_syncs: dict[ + tuple[str, Optional[str]], int ] = {} self.notifier = hs.get_notifier() @@ -531,7 +526,7 @@ class WorkerPresenceHandler(BasePresenceHandler): # (user_id, device_id) -> last_sync_ms. Lists the devices that have stopped # syncing but we haven't notified the presence writer of that yet - self._user_devices_going_offline: Dict[Tuple[str, Optional[str]], int] = {} + self._user_devices_going_offline: dict[tuple[str, Optional[str]], int] = {} self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs) @@ -645,7 +640,7 @@ class WorkerPresenceHandler(BasePresenceHandler): return _user_syncing() async def notify_from_replication( - self, states: List[UserPresenceState], stream_id: int + self, states: list[UserPresenceState], stream_id: int ) -> None: parties = await get_interested_parties(self.store, self.presence_router, states) room_ids_to_states, users_to_states = parties @@ -704,7 +699,7 @@ class WorkerPresenceHandler(BasePresenceHandler): def get_currently_syncing_users_for_replication( self, - ) -> Iterable[Tuple[str, Optional[str]]]: + ) -> Iterable[tuple[str, Optional[str]]]: return [ user_id_device_id for user_id_device_id, count in self._user_device_to_num_current_syncs.items() @@ -790,8 +785,8 @@ class PresenceHandler(BasePresenceHandler): ) # The per-device presence state, maps user to devices to per-device presence state. - self._user_to_device_to_current_state: Dict[ - str, Dict[Optional[str], UserDevicePresenceState] + self._user_to_device_to_current_state: dict[ + str, dict[Optional[str], UserDevicePresenceState] ] = {} now = self.clock.time_msec() @@ -833,7 +828,7 @@ class PresenceHandler(BasePresenceHandler): # Set of users who have presence in the `user_to_current_state` that # have not yet been persisted - self.unpersisted_users_changes: Set[str] = set() + self.unpersisted_users_changes: set[str] = set() hs.register_async_shutdown_handler( phase="before", @@ -843,8 +838,8 @@ class PresenceHandler(BasePresenceHandler): # Keeps track of the number of *ongoing* syncs on this process. While # this is non zero a user will never go offline. - self._user_device_to_num_current_syncs: Dict[ - Tuple[str, Optional[str]], int + self._user_device_to_num_current_syncs: dict[ + tuple[str, Optional[str]], int ] = {} # Keeps track of the number of *ongoing* syncs on other processes. @@ -857,10 +852,10 @@ class PresenceHandler(BasePresenceHandler): # we assume that all the sync requests on that process have stopped. 
# Stored as a dict from process_id to set of (user_id, device_id), and # a dict of process_id to millisecond timestamp last updated. - self.external_process_to_current_syncs: Dict[ - str, Set[Tuple[str, Optional[str]]] + self.external_process_to_current_syncs: dict[ + str, set[tuple[str, Optional[str]]] ] = {} - self.external_process_last_updated_ms: Dict[str, int] = {} + self.external_process_last_updated_ms: dict[str, int] = {} self.external_sync_linearizer = Linearizer( name="external_sync_linearizer", clock=self.clock @@ -1151,7 +1146,7 @@ class PresenceHandler(BasePresenceHandler): # Update the user state, this will always update last_active_ts and # might update the presence state. prev_state = await self.current_state_for_user(user_id) - new_fields: Dict[str, Any] = { + new_fields: dict[str, Any] = { "last_active_ts": now, "state": _combine_device_states(devices.values()), } @@ -1221,7 +1216,7 @@ class PresenceHandler(BasePresenceHandler): def get_currently_syncing_users_for_replication( self, - ) -> Iterable[Tuple[str, Optional[str]]]: + ) -> Iterable[tuple[str, Optional[str]]]: # since we are the process handling presence, there is nothing to do here. return [] @@ -1317,7 +1312,7 @@ class PresenceHandler(BasePresenceHandler): ) self.external_process_last_updated_ms.pop(process_id, None) - async def _persist_and_notify(self, states: List[UserPresenceState]) -> None: + async def _persist_and_notify(self, states: list[UserPresenceState]) -> None: """Persist states in the database, poke the notifier and send to interested remote servers """ @@ -1477,7 +1472,7 @@ class PresenceHandler(BasePresenceHandler): async def get_all_presence_updates( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, list]], int, bool]: + ) -> tuple[list[tuple[int, list]], int, bool]: """ Gets a list of presence update rows from between the given stream ids. Each row has: @@ -1562,7 +1557,7 @@ class PresenceHandler(BasePresenceHandler): # We may get multiple deltas for different rooms, but we want to # handle them on a room by room basis, so we batch them up by # room. - deltas_by_room: Dict[str, List[StateDelta]] = {} + deltas_by_room: dict[str, list[StateDelta]] = {} for delta in deltas: deltas_by_room.setdefault(delta.room_id, []).append(delta) @@ -1576,7 +1571,7 @@ class PresenceHandler(BasePresenceHandler): name="presence", **{SERVER_NAME_LABEL: self.server_name} ).set(max_pos) - async def _handle_state_delta(self, room_id: str, deltas: List[StateDelta]) -> None: + async def _handle_state_delta(self, room_id: str, deltas: list[StateDelta]) -> None: """Process current state deltas for the room to find new joins that need to be handled. """ @@ -1849,7 +1844,7 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): explicit_room_id: Optional[str] = None, include_offline: bool = True, service: Optional[ApplicationService] = None, - ) -> Tuple[List[UserPresenceState], int]: + ) -> tuple[list[UserPresenceState], int]: # The process for getting presence events are: # 1. Get the rooms the user is in. # 2. Get the list of user in the rooms. @@ -2001,7 +1996,7 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): user_id: str, include_offline: bool, from_key: Optional[int] = None, - ) -> List[UserPresenceState]: + ) -> list[UserPresenceState]: """ Computes the presence updates a user should receive. 
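Several presence hunks above re-annotate the per-connection bookkeeping as `dict[tuple[str, Optional[str]], int]`, i.e. an ongoing-sync counter keyed by `(user_id, device_id)`. A minimal, hypothetical sketch of that counter pattern (not the handler's actual methods):

from typing import Optional

SyncCounts = dict[tuple[str, Optional[str]], int]

def start_sync(counts: SyncCounts, user_id: str, device_id: Optional[str]) -> None:
    key = (user_id, device_id)
    counts[key] = counts.get(key, 0) + 1

def finish_sync(counts: SyncCounts, user_id: str, device_id: Optional[str]) -> None:
    key = (user_id, device_id)
    remaining = counts.get(key, 0) - 1
    if remaining > 0:
        counts[key] = remaining
    else:
        # No ongoing syncs left for this user/device pair.
        counts.pop(key, None)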
@@ -2058,7 +2053,7 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): def _filter_offline_presence_state( self, presence_updates: Iterable[UserPresenceState] - ) -> List[UserPresenceState]: + ) -> list[UserPresenceState]: """Given an iterable containing user presence updates, return a list with any offline presence states removed. @@ -2079,12 +2074,12 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): def handle_timeouts( - user_states: List[UserPresenceState], + user_states: list[UserPresenceState], is_mine_fn: Callable[[str], bool], - syncing_user_devices: AbstractSet[Tuple[str, Optional[str]]], - user_to_devices: Dict[str, Dict[Optional[str], UserDevicePresenceState]], + syncing_user_devices: AbstractSet[tuple[str, Optional[str]]], + user_to_devices: dict[str, dict[Optional[str], UserDevicePresenceState]], now: int, -) -> List[UserPresenceState]: +) -> list[UserPresenceState]: """Checks the presence of users that have timed out and updates as appropriate. @@ -2120,8 +2115,8 @@ def handle_timeouts( def handle_timeout( state: UserPresenceState, is_mine: bool, - syncing_device_ids: AbstractSet[Tuple[str, Optional[str]]], - user_devices: Dict[Optional[str], UserDevicePresenceState], + syncing_device_ids: AbstractSet[tuple[str, Optional[str]]], + user_devices: dict[Optional[str], UserDevicePresenceState], now: int, ) -> Optional[UserPresenceState]: """Checks the presence of the user to see if any of the timers have elapsed @@ -2218,7 +2213,7 @@ def handle_update( wheel_timer: WheelTimer, now: int, persist: bool, -) -> Tuple[UserPresenceState, bool, bool]: +) -> tuple[UserPresenceState, bool, bool]: """Given a presence update: 1. Add any appropriate timers. 2. Check if we should notify anyone. @@ -2344,8 +2339,8 @@ def _combine_device_states( async def get_interested_parties( - store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState] -) -> Tuple[Dict[str, List[UserPresenceState]], Dict[str, List[UserPresenceState]]]: + store: DataStore, presence_router: PresenceRouter, states: list[UserPresenceState] +) -> tuple[dict[str, list[UserPresenceState]], dict[str, list[UserPresenceState]]]: """Given a list of states return which entities (rooms, users) are interested in the given states. @@ -2358,8 +2353,8 @@ async def get_interested_parties( A 2-tuple of `(room_ids_to_states, users_to_states)`, with each item being a dict of `entity_name` -> `[UserPresenceState]` """ - room_ids_to_states: Dict[str, List[UserPresenceState]] = {} - users_to_states: Dict[str, List[UserPresenceState]] = {} + room_ids_to_states: dict[str, list[UserPresenceState]] = {} + users_to_states: dict[str, list[UserPresenceState]] = {} for state in states: room_ids = await store.get_rooms_for_user(state.user_id) for room_id in room_ids: @@ -2382,8 +2377,8 @@ async def get_interested_parties( async def get_interested_remotes( store: DataStore, presence_router: PresenceRouter, - states: List[UserPresenceState], -) -> List[Tuple[StrCollection, Collection[UserPresenceState]]]: + states: list[UserPresenceState], +) -> list[tuple[StrCollection, Collection[UserPresenceState]]]: """Given a list of presence states figure out which remote servers should be sent which. @@ -2397,14 +2392,14 @@ async def get_interested_remotes( Returns: A map from destinations to presence states to send to that destination. 
""" - hosts_and_states: List[Tuple[StrCollection, Collection[UserPresenceState]]] = [] + hosts_and_states: list[tuple[StrCollection, Collection[UserPresenceState]]] = [] # First we look up the rooms each user is in (as well as any explicit # subscriptions), then for each distinct room we look up the remote # hosts in those rooms. for state in states: room_ids = await store.get_rooms_for_user(state.user_id) - hosts: Set[str] = set() + hosts: set[str] = set() for room_id in room_ids: room_hosts = await store.get_current_hosts_in_room(room_id) hosts.update(room_hosts) @@ -2473,12 +2468,12 @@ class PresenceFederationQueue: # stream_id, destinations, user_ids)`. We don't store the full states # for efficiency, and remote workers will already have the full states # cached. - self._queue: List[Tuple[int, int, StrCollection, Set[str]]] = [] + self._queue: list[tuple[int, int, StrCollection, set[str]]] = [] self._next_id = 1 # Map from instance name to current token - self._current_tokens: Dict[str, int] = {} + self._current_tokens: dict[str, int] = {} if self._queue_presence_updates: self._clock.looping_call(self._clear_queue, self._CLEAR_ITEMS_EVERY_MS) @@ -2547,7 +2542,7 @@ class PresenceFederationQueue: from_token: int, upto_token: int, target_row_count: int, - ) -> Tuple[List[Tuple[int, Tuple[str, str]]], int, bool]: + ) -> tuple[list[tuple[int, tuple[str, str]]], int, bool]: """Get all the updates between the two tokens. We return rows in the form of `(destination, user_id)` to keep the size @@ -2583,7 +2578,7 @@ class PresenceFederationQueue: # handle the case where `from_token` stream ID has already been dropped. start_idx = max(from_token + 1 - self._next_id, -len(self._queue)) - to_send: List[Tuple[int, Tuple[str, str]]] = [] + to_send: list[tuple[int, tuple[str, str]]] = [] limited = False new_id = upto_token for _, stream_id, destinations, user_ids in self._queue[start_idx:]: @@ -2631,7 +2626,7 @@ class PresenceFederationQueue: if not self._federation: return - hosts_to_users: Dict[str, Set[str]] = {} + hosts_to_users: dict[str, set[str]] = {} for row in rows: hosts_to_users.setdefault(row.destination, set()).add(row.user_id) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 9dda89d85b..240a235a0e 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -20,7 +20,7 @@ # import logging import random -from typing import TYPE_CHECKING, List, Optional, Union +from typing import TYPE_CHECKING, Optional, Union from synapse.api.constants import ProfileFields from synapse.api.errors import ( @@ -69,7 +69,7 @@ class ProfileHandler: self.request_ratelimiter = hs.get_request_ratelimiter() self.max_avatar_size: Optional[int] = hs.config.server.max_avatar_size - self.allowed_avatar_mimetypes: Optional[List[str]] = ( + self.allowed_avatar_mimetypes: Optional[list[str]] = ( hs.config.server.allowed_avatar_mimetypes ) diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py index 4ef6a04c51..643fa72f3f 100644 --- a/synapse/handlers/push_rules.py +++ b/synapse/handlers/push_rules.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Optional, Union import attr @@ -127,7 +127,7 @@ class PushRulesHandler: async def push_rules_for_user( self, user: UserID - ) -> Dict[str, Dict[str, List[Dict[str, Any]]]]: + ) -> dict[str, dict[str, list[dict[str, Any]]]]: """ Push rules aren't really account data, 
but get formatted as such for /sync. """ @@ -137,7 +137,7 @@ class PushRulesHandler: return rules -def check_actions(actions: List[Union[str, JsonDict]]) -> None: +def check_actions(actions: list[Union[str, JsonDict]]) -> None: """Check if the given actions are spec compliant. Args: diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index c776654d12..ad41113b5b 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Tuple +from typing import TYPE_CHECKING, Iterable, Optional, Sequence from synapse.api.constants import EduTypes, ReceiptTypes from synapse.appservice import ApplicationService @@ -136,10 +136,10 @@ class ReceiptsHandler: await self._handle_new_receipts(receipts) - async def _handle_new_receipts(self, receipts: List[ReadReceipt]) -> bool: + async def _handle_new_receipts(self, receipts: list[ReadReceipt]) -> bool: """Takes a list of receipts, stores them and informs the notifier.""" - receipts_persisted: List[ReadReceipt] = [] + receipts_persisted: list[ReadReceipt] = [] for receipt in receipts: stream_id = await self.store.insert_receipt( receipt.room_id, @@ -216,7 +216,7 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]): @staticmethod def filter_out_private_receipts( rooms: Sequence[JsonMapping], user_id: str - ) -> List[JsonMapping]: + ) -> list[JsonMapping]: """ Filters a list of serialized receipts (as returned by /sync and /initialSync) and removes private read receipts of other users. @@ -233,7 +233,7 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]): The same as rooms, but filtered. """ - result: List[JsonMapping] = [] + result: list[JsonMapping] = [] # Iterate through each room's receipt content. for room in rooms: @@ -287,7 +287,7 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]): is_guest: bool, explicit_room_id: Optional[str] = None, to_key: Optional[MultiWriterStreamToken] = None, - ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]: + ) -> tuple[list[JsonMapping], MultiWriterStreamToken]: """ Find read receipts for given rooms (> `from_token` and <= `to_token`) """ @@ -313,7 +313,7 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]): from_key: MultiWriterStreamToken, to_key: MultiWriterStreamToken, service: ApplicationService, - ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]: + ) -> tuple[list[JsonMapping], MultiWriterStreamToken]: """Returns a set of new read receipt events that an appservice may be interested in. 
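Every hunk in this patch series applies the same mechanical rewrite: subscripted collection annotations move from the deprecated `typing` aliases (`List`, `Dict`, `Tuple`, `Set`, `FrozenSet`) to the built-in generics standardised by PEP 585, which are subscriptable at runtime on Python 3.9 and later, while `Optional`, `Union` and the abstract container types keep their `typing` imports. A minimal sketch of the pattern, using invented names rather than anything from Synapse:

# Illustrative sketch only; the names below are made up and not part of this patch.
from typing import Optional

# Before: from typing import Dict, List, Tuple
# def count_events(event_ids: List[str]) -> Tuple[Dict[str, int], Optional[str]]: ...

# After: the same signature spelled with built-in generics.
def count_events(event_ids: list[str]) -> tuple[dict[str, int], Optional[str]]:
    counts: dict[str, int] = {}
    for event_id in event_ids:
        counts[event_id] = counts.get(event_id, 0) + 1
    first: Optional[str] = event_ids[0] if event_ids else None
    return counts, first
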
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c3ff0cfaf8..8b620a91bc 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -26,9 +26,7 @@ import logging from typing import ( TYPE_CHECKING, Iterable, - List, Optional, - Tuple, TypedDict, ) @@ -241,7 +239,7 @@ class RegistrationHandler: address: Optional[str] = None, bind_emails: Optional[Iterable[str]] = None, by_admin: bool = False, - user_agent_ips: Optional[List[Tuple[str, str]]] = None, + user_agent_ips: Optional[list[tuple[str, str]]] = None, auth_provider_id: Optional[str] = None, approved: bool = False, ) -> str: @@ -655,7 +653,7 @@ class RegistrationHandler: async def appservice_register( self, user_localpart: str, as_token: str - ) -> Tuple[str, ApplicationService]: + ) -> tuple[str, ApplicationService]: user = UserID(user_localpart, self.hs.hostname) user_id = user.to_string() service = self.store.get_app_service_by_token(as_token) @@ -780,7 +778,7 @@ class RegistrationHandler: auth_provider_id: Optional[str] = None, should_issue_refresh_token: bool = False, auth_provider_session_id: Optional[str] = None, - ) -> Tuple[str, str, Optional[int], Optional[str]]: + ) -> tuple[str, str, Optional[int], Optional[str]]: """Register a device for a user and generate an access token. The access token will be limited by the homeserver's session_lifetime config. diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index b1158ee77d..217681f7c0 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -23,10 +23,7 @@ import logging from typing import ( TYPE_CHECKING, Collection, - Dict, - FrozenSet, Iterable, - List, Mapping, Optional, Sequence, @@ -212,7 +209,7 @@ class RelationsHandler: requester: Requester, event_id: str, initial_redaction_event: EventBase, - relation_types: List[str], + relation_types: list[str], ) -> None: """Redacts all events related to the given event ID with one of the given relation types. @@ -267,7 +264,7 @@ class RelationsHandler: ) async def get_references_for_events( - self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset() + self, event_ids: Collection[str], ignored_users: frozenset[str] = frozenset() ) -> Mapping[str, Sequence[_RelatedEvent]]: """Get a list of references to the given events. @@ -308,11 +305,11 @@ class RelationsHandler: async def _get_threads_for_events( self, - events_by_id: Dict[str, EventBase], - relations_by_id: Dict[str, str], + events_by_id: dict[str, EventBase], + relations_by_id: dict[str, str], user_id: str, - ignored_users: FrozenSet[str], - ) -> Dict[str, _ThreadAggregation]: + ignored_users: frozenset[str], + ) -> dict[str, _ThreadAggregation]: """Get the bundled aggregations for threads for the requested events. Args: @@ -437,7 +434,7 @@ class RelationsHandler: @trace async def get_bundled_aggregations( self, events: Iterable[EventBase], user_id: str - ) -> Dict[str, BundledAggregations]: + ) -> dict[str, BundledAggregations]: """Generate bundled aggregations for events. Args: @@ -456,7 +453,7 @@ class RelationsHandler: # De-duplicated events by ID to handle the same event requested multiple times. events_by_id = {} # A map of event ID to the relation in that event, if there is one. - relations_by_id: Dict[str, str] = {} + relations_by_id: dict[str, str] = {} for event in events: # State events do not get bundled aggregations. 
if event.is_state(): @@ -479,7 +476,7 @@ class RelationsHandler: events_by_id[event.event_id] = event # event ID -> bundled aggregation in non-serialized form. - results: Dict[str, BundledAggregations] = {} + results: dict[str, BundledAggregations] = {} # Fetch any ignored users of the requesting user. ignored_users = await self._main_store.ignored_users(user_id) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index db6dc5efd0..f242accef1 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -33,10 +33,7 @@ from typing import ( Any, Awaitable, Callable, - Dict, - List, Optional, - Tuple, cast, ) @@ -112,11 +109,11 @@ FIVE_MINUTES_IN_MS = 5 * 60 * 1000 @attr.s(slots=True, frozen=True, auto_attribs=True) class EventContext: - events_before: List[EventBase] + events_before: list[EventBase] event: EventBase - events_after: List[EventBase] - state: List[EventBase] - aggregations: Dict[str, BundledAggregations] + events_after: list[EventBase] + state: list[EventBase] + aggregations: dict[str, BundledAggregations] start: str end: str @@ -143,7 +140,7 @@ class RoomCreationHandler: ) # Room state based off defined presets - self._presets_dict: Dict[str, Dict[str, Any]] = { + self._presets_dict: dict[str, dict[str, Any]] = { RoomCreationPreset.PRIVATE_CHAT: { "join_rules": JoinRules.INVITE, "history_visibility": HistoryVisibility.SHARED, @@ -184,7 +181,7 @@ class RoomCreationHandler: # If a user tries to update the same room multiple times in quick # succession, only process the first attempt and return its result to # subsequent requests - self._upgrade_response_cache: ResponseCache[Tuple[str, str]] = ResponseCache( + self._upgrade_response_cache: ResponseCache[tuple[str, str]] = ResponseCache( clock=hs.get_clock(), name="room_upgrade", server_name=self.server_name, @@ -201,7 +198,7 @@ class RoomCreationHandler: requester: Requester, old_room_id: str, new_version: RoomVersion, - additional_creators: Optional[List[str]], + additional_creators: Optional[list[str]], auto_member: bool = False, ratelimit: bool = True, ) -> str: @@ -339,14 +336,14 @@ class RoomCreationHandler: self, requester: Requester, old_room_id: str, - old_room: Tuple[bool, str, bool], + old_room: tuple[bool, str, bool], new_room_id: str, new_version: RoomVersion, tombstone_event: EventBase, tombstone_context: synapse.events.snapshot.EventContext, - additional_creators: Optional[List[str]], + additional_creators: Optional[list[str]], creation_event_with_context: Optional[ - Tuple[EventBase, synapse.events.snapshot.EventContext] + tuple[EventBase, synapse.events.snapshot.EventContext] ] = None, auto_member: bool = False, ) -> str: @@ -437,7 +434,7 @@ class RoomCreationHandler: old_room_id: str, new_room_id: str, old_room_state: StateMap[str], - additional_creators: Optional[List[str]], + additional_creators: Optional[list[str]], ) -> None: """Send updated power levels in both rooms after an upgrade @@ -529,7 +526,7 @@ class RoomCreationHandler: old_room_create_event: EventBase, tombstone_event_id: Optional[str], new_room_version: RoomVersion, - additional_creators: Optional[List[str]], + additional_creators: Optional[list[str]], ) -> JsonDict: creation_content: JsonDict = { "room_version": new_room_version.identifier, @@ -561,9 +558,9 @@ class RoomCreationHandler: new_room_id: str, new_room_version: RoomVersion, tombstone_event_id: str, - additional_creators: Optional[List[str]], + additional_creators: Optional[list[str]], creation_event_with_context: Optional[ - Tuple[EventBase, 
synapse.events.snapshot.EventContext] + tuple[EventBase, synapse.events.snapshot.EventContext] ] = None, auto_member: bool = False, ) -> None: @@ -600,7 +597,7 @@ class RoomCreationHandler: initial_state: MutableStateMap = {} # Replicate relevant room events - types_to_copy: List[Tuple[str, Optional[str]]] = [ + types_to_copy: list[tuple[str, Optional[str]]] = [ (EventTypes.JoinRules, ""), (EventTypes.Name, ""), (EventTypes.Topic, ""), @@ -1044,7 +1041,7 @@ class RoomCreationHandler: ratelimit: bool = True, creator_join_profile: Optional[JsonDict] = None, ignore_forced_encryption: bool = False, - ) -> Tuple[str, Optional[RoomAlias], int]: + ) -> tuple[str, Optional[RoomAlias], int]: """Creates a new room. Args: @@ -1394,7 +1391,7 @@ class RoomCreationHandler: creation_content: JsonDict, is_public: bool, room_version: RoomVersion, - ) -> Tuple[EventBase, synapse.events.snapshot.EventContext]: + ) -> tuple[EventBase, synapse.events.snapshot.EventContext]: ( creation_event, new_unpersisted_context, @@ -1426,7 +1423,7 @@ class RoomCreationHandler: room_id: str, room_version: RoomVersion, room_config: JsonDict, - invite_list: List[str], + invite_list: list[str], initial_state: MutableStateMap, creation_content: JsonDict, room_alias: Optional[RoomAlias] = None, @@ -1434,9 +1431,9 @@ class RoomCreationHandler: creator_join_profile: Optional[JsonDict] = None, ignore_forced_encryption: bool = False, creation_event_with_context: Optional[ - Tuple[EventBase, synapse.events.snapshot.EventContext] + tuple[EventBase, synapse.events.snapshot.EventContext] ] = None, - ) -> Tuple[int, str, int]: + ) -> tuple[int, str, int]: """Sends the initial events into a new room. Sends the room creation, membership, and power level events into the room sequentially, then creates and batches up the rest of the events to persist as a batch to the DB. @@ -1485,7 +1482,7 @@ class RoomCreationHandler: depth = 1 # the most recently created event - prev_event: List[str] = [] + prev_event: list[str] = [] # a map of event types, state keys -> event_ids. We collect these mappings this as events are # created (but not persisted to the db) to determine state for future created events # (as this info can't be pulled from the db) @@ -1496,7 +1493,7 @@ class RoomCreationHandler: content: JsonDict, for_batch: bool, **kwargs: Any, - ) -> Tuple[EventBase, synapse.events.snapshot.UnpersistedEventContextBase]: + ) -> tuple[EventBase, synapse.events.snapshot.UnpersistedEventContextBase]: """ Creates an event and associated event context. Args: @@ -1792,7 +1789,7 @@ class RoomCreationHandler: f"You cannot create an encrypted room. user_level ({room_admin_level}) < send_level ({encryption_level})", ) - def _room_preset_config(self, room_config: JsonDict) -> Tuple[str, dict]: + def _room_preset_config(self, room_config: JsonDict) -> tuple[str, dict]: # The spec says rooms should default to private visibility if # `visibility` is not specified. 
visibility = room_config.get("visibility", "private") @@ -1814,9 +1811,9 @@ class RoomCreationHandler: def _remove_creators_from_pl_users_map( self, - users_map: Dict[str, int], + users_map: dict[str, int], creator: str, - additional_creators: Optional[List[str]], + additional_creators: Optional[list[str]], ) -> None: creators = [creator] if additional_creators: @@ -1916,7 +1913,7 @@ class RoomContextHandler: # The user is peeking if they aren't in the room already is_peeking = not is_user_in_room - async def filter_evts(events: List[EventBase]) -> List[EventBase]: + async def filter_evts(events: list[EventBase]) -> list[EventBase]: if use_admin_priviledge: return events return await filter_events_for_client( @@ -2021,7 +2018,7 @@ class TimestampLookupHandler: room_id: str, timestamp: int, direction: Direction, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Find the closest event to the given timestamp in the given direction. If we can't find an event locally or the event we have locally is next to a gap, it will ask other federated homeservers for an event. @@ -2172,7 +2169,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]): room_ids: StrCollection, is_guest: bool, explicit_room_id: Optional[str] = None, - ) -> Tuple[List[EventBase], RoomStreamToken]: + ) -> tuple[list[EventBase], RoomStreamToken]: # We just ignore the key for now. to_key = self.get_current_key() diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 9d4307fb07..97a5d07c7c 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Any, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Optional import attr import msgpack @@ -67,14 +67,14 @@ class RoomListHandler: self.hs = hs self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search self.response_cache: ResponseCache[ - Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]] + tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]] ] = ResponseCache( clock=hs.get_clock(), name="room_list", server_name=self.server_name, ) self.remote_response_cache: ResponseCache[ - Tuple[str, Optional[int], Optional[str], bool, Optional[str]] + tuple[str, Optional[int], Optional[str], bool, Optional[str]] ] = ResponseCache( clock=hs.get_clock(), name="remote_room_list", @@ -175,7 +175,7 @@ class RoomListHandler: if since_token: batch_token = RoomListNextBatch.from_token(since_token) - bounds: Optional[Tuple[int, str]] = ( + bounds: Optional[tuple[int, str]] = ( batch_token.last_joined_members, batch_token.last_room_id, ) @@ -226,7 +226,7 @@ class RoomListHandler: return {k: v for k, v in entry.items() if v is not None} # Build a list of up to `limit` entries. 
- room_entries: List[JsonDict] = [] + room_entries: list[JsonDict] = [] rooms_iterator = results if forwards else reversed(results) # Track the first and last 'considered' rooms so that we can provide correct diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 2ab9b70f8c..03cfc99260 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -23,7 +23,7 @@ import abc import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Iterable, Optional from synapse import types from synapse.api.constants import ( @@ -217,11 +217,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def _remote_join( self, requester: Requester, - remote_room_hosts: List[str], + remote_room_hosts: list[str], room_id: str, user: UserID, content: dict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Try and join a room that this server is not in Args: @@ -241,11 +241,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def remote_knock( self, requester: Requester, - remote_room_hosts: List[str], + remote_room_hosts: list[str], room_id: str, user: UserID, content: dict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Try and knock on a room that this server is not in Args: @@ -263,7 +263,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): txn_id: Optional[str], requester: Requester, content: JsonDict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """ Rejects an out-of-band invite we have received from a remote server @@ -286,7 +286,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): txn_id: Optional[str], requester: Requester, content: JsonDict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Rescind a local knock made on a remote room. Args: @@ -396,8 +396,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): target: UserID, room_id: str, membership: str, - prev_event_ids: Optional[List[str]] = None, - state_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, + state_event_ids: Optional[list[str]] = None, depth: Optional[int] = None, txn_id: Optional[str] = None, ratelimit: bool = True, @@ -405,7 +405,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): require_consent: bool = True, outlier: bool = False, origin_server_ts: Optional[int] = None, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """ Internal membership update function to get an existing event or create and persist a new event for the new membership change. @@ -573,18 +573,18 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): room_id: str, action: str, txn_id: Optional[str] = None, - remote_room_hosts: Optional[List[str]] = None, + remote_room_hosts: Optional[list[str]] = None, third_party_signed: Optional[dict] = None, ratelimit: bool = True, content: Optional[dict] = None, new_room: bool = False, require_consent: bool = True, outlier: bool = False, - prev_event_ids: Optional[List[str]] = None, - state_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, + state_event_ids: Optional[list[str]] = None, depth: Optional[int] = None, origin_server_ts: Optional[int] = None, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Update a user's membership in a room. 
Params: @@ -687,18 +687,18 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): room_id: str, action: str, txn_id: Optional[str] = None, - remote_room_hosts: Optional[List[str]] = None, + remote_room_hosts: Optional[list[str]] = None, third_party_signed: Optional[dict] = None, ratelimit: bool = True, content: Optional[dict] = None, new_room: bool = False, require_consent: bool = True, outlier: bool = False, - prev_event_ids: Optional[List[str]] = None, - state_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, + state_event_ids: Optional[list[str]] = None, depth: Optional[int] = None, origin_server_ts: Optional[int] = None, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Helper for update_membership. Assumes that the membership linearizer is already held for the room. @@ -1224,12 +1224,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self, user_id: str, room_id: str, - remote_room_hosts: List[str], + remote_room_hosts: list[str], content: JsonDict, is_partial_state_room: bool, is_host_in_room: bool, partial_state_before_join: StateMap[str], - ) -> Tuple[bool, List[str]]: + ) -> tuple[bool, list[str]]: """ Check whether the server should do a remote join (as opposed to a local join) for a user. @@ -1565,7 +1565,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def lookup_room_alias( self, room_alias: RoomAlias - ) -> Tuple[RoomID, List[str]]: + ) -> tuple[RoomID, list[str]]: """ Get the room ID associated with a room alias. @@ -1612,9 +1612,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): requester: Requester, txn_id: Optional[str], id_access_token: str, - prev_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, depth: Optional[int] = None, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Invite a 3PID to a room. Args: @@ -1726,9 +1726,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): user: UserID, txn_id: Optional[str], id_access_token: str, - prev_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, depth: Optional[int] = None, - ) -> Tuple[EventBase, int]: + ) -> tuple[EventBase, int]: room_state = await self._storage_controllers.state.get_current_state( room_id, StateFilter.from_types( @@ -1863,7 +1863,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): self.distributor.declare("user_left_room") async def _is_remote_room_too_complex( - self, room_id: str, remote_room_hosts: List[str] + self, room_id: str, remote_room_hosts: list[str] ) -> Optional[bool]: """ Check if complexity of a remote room is too great. 
@@ -1899,11 +1899,11 @@ class RoomMemberMasterHandler(RoomMemberHandler): async def _remote_join( self, requester: Requester, - remote_room_hosts: List[str], + remote_room_hosts: list[str], room_id: str, user: UserID, content: dict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Implements RoomMemberHandler._remote_join""" # filter ourselves out of remote_room_hosts: do_invite_join ignores it # and if it is the only entry we'd like to return a 404 rather than a @@ -1980,7 +1980,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): txn_id: Optional[str], requester: Requester, content: JsonDict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """ Rejects an out-of-band invite received from a remote user @@ -2017,7 +2017,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): txn_id: Optional[str], requester: Requester, content: JsonDict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """ Rescinds a local knock made on a remote room @@ -2046,7 +2046,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): txn_id: Optional[str], requester: Requester, content: JsonDict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Generate a local leave event for a room This can be called after we e.g fail to reject an invite via a remote server. @@ -2126,11 +2126,11 @@ class RoomMemberMasterHandler(RoomMemberHandler): async def remote_knock( self, requester: Requester, - remote_room_hosts: List[str], + remote_room_hosts: list[str], room_id: str, user: UserID, content: dict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Sends a knock to a room. Attempts to do so via one remote out of a given list. Args: @@ -2270,7 +2270,7 @@ class RoomForgetterHandler(StateDeltasHandler): await self._store.update_room_forgetter_stream_pos(max_pos) - async def _handle_deltas(self, deltas: List[StateDelta]) -> None: + async def _handle_deltas(self, deltas: list[StateDelta]) -> None: """Called with the state deltas to process""" for delta in deltas: if delta.event_type != EventTypes.Member: @@ -2300,7 +2300,7 @@ class RoomForgetterHandler(StateDeltasHandler): raise -def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]: +def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> list[str]: """ Return the list of users which can issue invites. @@ -2346,7 +2346,7 @@ def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[s return result -def get_servers_from_users(users: List[str]) -> Set[str]: +def get_servers_from_users(users: list[str]) -> set[str]: """ Resolve a list of users into their servers. 
diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 0616a9864d..0927c031f7 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional from synapse.handlers.room_member import NoKnownServersError, RoomMemberHandler from synapse.replication.http.membership import ( @@ -51,11 +51,11 @@ class RoomMemberWorkerHandler(RoomMemberHandler): async def _remote_join( self, requester: Requester, - remote_room_hosts: List[str], + remote_room_hosts: list[str], room_id: str, user: UserID, content: dict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Implements RoomMemberHandler._remote_join""" if len(remote_room_hosts) == 0: raise NoKnownServersError() @@ -76,7 +76,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler): txn_id: Optional[str], requester: Requester, content: dict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """ Rejects an out-of-band invite received from a remote user @@ -96,7 +96,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler): txn_id: Optional[str], requester: Requester, content: JsonDict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """ Rescinds a local knock made on a remote room @@ -121,11 +121,11 @@ class RoomMemberWorkerHandler(RoomMemberHandler): async def remote_knock( self, requester: Requester, - remote_room_hosts: List[str], + remote_room_hosts: list[str], room_id: str, user: UserID, content: dict, - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Sends a knock to a room. Implements RoomMemberHandler.remote_knock diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 838fee6a30..a948202056 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -22,7 +22,7 @@ import itertools import logging import re -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Set, Tuple +from typing import TYPE_CHECKING, Iterable, Optional, Sequence import attr @@ -83,9 +83,9 @@ class _PaginationSession: # The time the pagination session was created, in milliseconds. creation_time_ms: int # The queue of rooms which are still to process. - room_queue: List["_RoomQueueEntry"] + room_queue: list["_RoomQueueEntry"] # A set of rooms which have been processed. - processed_rooms: Set[str] + processed_rooms: set[str] class RoomSummaryHandler: @@ -112,14 +112,14 @@ class RoomSummaryHandler: # If a user tries to fetch the same page multiple times in quick succession, # only process the first attempt and return its result to subsequent requests. self._pagination_response_cache: ResponseCache[ - Tuple[ + tuple[ str, str, bool, Optional[int], Optional[int], Optional[str], - Optional[Tuple[str, ...]], + Optional[tuple[str, ...]], ] ] = ResponseCache( clock=hs.get_clock(), @@ -136,7 +136,7 @@ class RoomSummaryHandler: max_depth: Optional[int] = None, limit: Optional[int] = None, from_token: Optional[str] = None, - remote_room_hosts: Optional[Tuple[str, ...]] = None, + remote_room_hosts: Optional[tuple[str, ...]] = None, ) -> JsonDict: """ Implementation of the room hierarchy C-S API. 
@@ -196,7 +196,7 @@ class RoomSummaryHandler: max_depth: Optional[int] = None, limit: Optional[int] = None, from_token: Optional[str] = None, - remote_room_hosts: Optional[Tuple[str, ...]] = None, + remote_room_hosts: Optional[tuple[str, ...]] = None, ) -> JsonDict: """See docstring for SpaceSummaryHandler.get_room_hierarchy.""" @@ -262,7 +262,7 @@ class RoomSummaryHandler: # Rooms we have already processed. processed_rooms = set() - rooms_result: List[JsonDict] = [] + rooms_result: list[JsonDict] = [] # Cap the limit to a server-side maximum. if limit is None: @@ -286,12 +286,12 @@ class RoomSummaryHandler: # federation. The rationale for caching these and *maybe* using them # is to prefer any information local to the homeserver before trusting # data received over federation. - children_room_entries: Dict[str, JsonDict] = {} + children_room_entries: dict[str, JsonDict] = {} # A set of room IDs which are children that did not have information # returned over federation and are known to be inaccessible to the # current server. We should not reach out over federation to try to # summarise these rooms. - inaccessible_children: Set[str] = set() + inaccessible_children: set[str] = set() # If the room is known locally, summarise it! is_in_room = await self._store.is_host_joined(room_id, self._server_name) @@ -418,8 +418,8 @@ class RoomSummaryHandler: # Room is inaccessible to the requesting server. raise SynapseError(404, "Unknown room: %s" % (requested_room_id,)) - children_rooms_result: List[JsonDict] = [] - inaccessible_children: List[str] = [] + children_rooms_result: list[JsonDict] = [] + inaccessible_children: list[str] = [] # Iterate through each child and potentially add it, but not its children, # to the response. @@ -496,7 +496,7 @@ class RoomSummaryHandler: # we only care about suggested children child_events = filter(_is_suggested_child_event, child_events) - stripped_events: List[JsonDict] = [ + stripped_events: list[JsonDict] = [ { "type": e.type, "state_key": e.state_key, @@ -510,7 +510,7 @@ class RoomSummaryHandler: async def _summarize_remote_room_hierarchy( self, room: "_RoomQueueEntry", suggested_only: bool - ) -> Tuple[Optional["_RoomEntry"], Dict[str, JsonDict], Set[str]]: + ) -> tuple[Optional["_RoomEntry"], dict[str, JsonDict], set[str]]: """ Request room entries and a list of event entries for a given room by querying a remote server. @@ -835,7 +835,7 @@ class RoomSummaryHandler: self, requester: Optional[str], room_id: str, - remote_room_hosts: Optional[List[str]] = None, + remote_room_hosts: Optional[list[str]] = None, ) -> JsonDict: """ Implementation of the room summary C-S API from MSC3266 @@ -995,7 +995,7 @@ _INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]") def _child_events_comparison_key( child: EventBase, -) -> Tuple[bool, Optional[str], int, str]: +) -> tuple[bool, Optional[str], int, str]: """ Generate a value for comparing two child events for ordering. 
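Note that these hunks only touch the concrete collection types; abstract annotations such as `Iterable`, `Sequence`, `Mapping` and `AbstractSet` keep their `typing` imports here, even though `collections.abc` offers subscriptable equivalents. The short sketch below (illustrative, not Synapse code) shows that the rewritten annotations are ordinary objects at runtime, so the rename is purely cosmetic for the annotated call sites:

# Sketch: runtime behaviour of PEP 585 built-in generics on Python 3.9+.
alias = dict[str, list[int]]
print(alias.__origin__)      # <class 'dict'>
print(alias.__args__)        # (<class 'str'>, list[int])
# isinstance() still takes the bare class; passing a parameterised generic raises TypeError.
print(isinstance({}, dict))  # True
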
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py index 81bec7499c..218fbcaaa7 100644 --- a/synapse/handlers/saml.py +++ b/synapse/handlers/saml.py @@ -20,7 +20,7 @@ # import logging import re -from typing import TYPE_CHECKING, Callable, Dict, Optional, Set, Tuple +from typing import TYPE_CHECKING, Callable, Optional import attr import saml2 @@ -90,7 +90,7 @@ class SamlHandler: self.idp_brand = hs.config.saml2.idp_brand # a map from saml session id to Saml2SessionData object - self._outstanding_requests_dict: Dict[str, Saml2SessionData] = {} + self._outstanding_requests_dict: dict[str, Saml2SessionData] = {} self._sso_handler = hs.get_sso_handler() self._sso_handler.register_identity_provider(self) @@ -393,7 +393,7 @@ def dot_replace_for_mxid(username: str) -> str: return username -MXID_MAPPER_MAP: Dict[str, Callable[[str], str]] = { +MXID_MAPPER_MAP: dict[str, Callable[[str], str]] = { "hexencode": map_username_to_mxid_localpart, "dotreplace": dot_replace_for_mxid, } @@ -509,7 +509,7 @@ class DefaultSamlMappingProvider: return SamlConfig(mxid_source_attribute, mxid_mapper) @staticmethod - def get_saml_attributes(config: SamlConfig) -> Tuple[Set[str], Set[str]]: + def get_saml_attributes(config: SamlConfig) -> tuple[set[str], set[str]]: """Returns the required attributes of a SAML Args: diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 1a71135d5f..8f39c6ec6b 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -21,7 +21,7 @@ import itertools import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Iterable, Optional import attr from unpaddedbase64 import decode_base64, encode_base64 @@ -46,13 +46,13 @@ class _SearchResult: # The count of results. count: int # A mapping of event ID to the rank of that event. - rank_map: Dict[str, int] + rank_map: dict[str, int] # A list of the resulting events. - allowed_events: List[EventBase] + allowed_events: list[EventBase] # A map of room ID to results. - room_groups: Dict[str, JsonDict] + room_groups: dict[str, JsonDict] # A set of event IDs to highlight. - highlights: Set[str] + highlights: set[str] class SearchHandler: @@ -230,11 +230,11 @@ class SearchHandler: batch_group_key: Optional[str], batch_token: Optional[str], search_term: str, - keys: List[str], + keys: list[str], filter_dict: JsonDict, order_by: str, include_state: bool, - group_keys: List[str], + group_keys: list[str], event_context: Optional[bool], before_limit: Optional[int], after_limit: Optional[int], @@ -286,7 +286,7 @@ class SearchHandler: # If doing a subset of all rooms search, check if any of the rooms # are from an upgraded room, and search their contents as well if search_filter.rooms: - historical_room_ids: List[str] = [] + historical_room_ids: list[str] = [] for room_id in search_filter.rooms: # Add any previous rooms to the search if they exist ids = await self.get_old_rooms_from_upgraded_room(room_id) @@ -307,7 +307,7 @@ class SearchHandler: } } - sender_group: Optional[Dict[str, JsonDict]] + sender_group: Optional[dict[str, JsonDict]] if order_by == "rank": search_result, sender_group = await self._search_by_rank( @@ -442,7 +442,7 @@ class SearchHandler: search_term: str, keys: Iterable[str], search_filter: Filter, - ) -> Tuple[_SearchResult, Dict[str, JsonDict]]: + ) -> tuple[_SearchResult, dict[str, JsonDict]]: """ Performs a full text search for a user ordering by rank. 
@@ -461,9 +461,9 @@ class SearchHandler: """ rank_map = {} # event_id -> rank of event # Holds result of grouping by room, if applicable - room_groups: Dict[str, JsonDict] = {} + room_groups: dict[str, JsonDict] = {} # Holds result of grouping by sender, if applicable - sender_group: Dict[str, JsonDict] = {} + sender_group: dict[str, JsonDict] = {} search_result = await self.store.search_msgs(room_ids, search_term, keys) @@ -520,7 +520,7 @@ class SearchHandler: batch_group: Optional[str], batch_group_key: Optional[str], batch_token: Optional[str], - ) -> Tuple[_SearchResult, Optional[str]]: + ) -> tuple[_SearchResult, Optional[str]]: """ Performs a full text search for a user ordering by recent. @@ -542,14 +542,14 @@ class SearchHandler: """ rank_map = {} # event_id -> rank of event # Holds result of grouping by room, if applicable - room_groups: Dict[str, JsonDict] = {} + room_groups: dict[str, JsonDict] = {} # Holds the next_batch for the entire result set if one of those exists global_next_batch = None highlights = set() - room_events: List[EventBase] = [] + room_events: list[EventBase] = [] i = 0 pagination_token = batch_token @@ -632,11 +632,11 @@ class SearchHandler: async def _calculate_event_contexts( self, user: UserID, - allowed_events: List[EventBase], + allowed_events: list[EventBase], before_limit: int, after_limit: int, include_profile: bool, - ) -> Dict[str, JsonDict]: + ) -> dict[str, JsonDict]: """ Calculates the contextual events for any search results. diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 6469b182c8..02fd48dbad 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -24,7 +24,7 @@ import logging from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from io import BytesIO -from typing import TYPE_CHECKING, Dict, Optional +from typing import TYPE_CHECKING, Optional from twisted.internet.defer import Deferred from twisted.internet.endpoints import HostnameEndpoint @@ -136,7 +136,7 @@ class SendEmailHandler: app_name: str, html: str, text: str, - additional_headers: Optional[Dict[str, str]] = None, + additional_headers: Optional[dict[str, str]] = None, ) -> None: """Send a multipart email with the given information. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 255a041d0e..cea4b857ee 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -15,7 +15,7 @@ import itertools import logging from itertools import chain -from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple +from typing import TYPE_CHECKING, AbstractSet, Mapping, Optional from prometheus_client import Histogram from typing_extensions import assert_never @@ -116,7 +116,7 @@ class SlidingSyncHandler: sync_config: SlidingSyncConfig, from_token: Optional[SlidingSyncStreamToken] = None, timeout_ms: int = 0, - ) -> Tuple[SlidingSyncResult, bool]: + ) -> tuple[SlidingSyncResult, bool]: """ Get the sync for a client if we have new data for it now. Otherwise wait for new data to arrive on the server. 
If the timeout expires, then @@ -262,7 +262,7 @@ class SlidingSyncHandler: relevant_rooms_to_send_map = interested_rooms.relevant_rooms_to_send_map # Fetch room data - rooms: Dict[str, SlidingSyncResult.RoomResult] = {} + rooms: dict[str, SlidingSyncResult.RoomResult] = {} new_connection_state = previous_connection_state.get_mutable() @@ -490,7 +490,7 @@ class SlidingSyncHandler: room_membership_for_user_at_to_token: RoomsForUserType, from_token: RoomStreamToken, to_token: RoomStreamToken, - ) -> List[StateDelta]: + ) -> list[StateDelta]: """ Get the state deltas between two tokens taking into account the user's membership. If the user is LEAVE/BAN, we will only get the state deltas up to @@ -677,8 +677,8 @@ class SlidingSyncHandler: # membership. Currently, we have to make all of these optional because # `invite`/`knock` rooms only have `stripped_state`. See # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 - timeline_events: List[EventBase] = [] - bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None + timeline_events: list[EventBase] = [] + bundled_aggregations: Optional[dict[str, BundledAggregations]] = None limited: Optional[bool] = None prev_batch_token: Optional[StreamToken] = None num_live: Optional[int] = None @@ -813,7 +813,7 @@ class SlidingSyncHandler: # Figure out any stripped state events for invite/knocks. This allows the # potential joiner to identify the room. - stripped_state: List[JsonDict] = [] + stripped_state: list[JsonDict] = [] if room_membership_for_user_at_to_token.membership in ( Membership.INVITE, Membership.KNOCK, @@ -924,7 +924,7 @@ class SlidingSyncHandler: # see https://github.com/matrix-org/matrix-spec/issues/380. This means that # clients won't be able to calculate the room name when necessary and just a # pitfall we have to deal with until that spec issue is resolved. - hero_user_ids: List[str] = [] + hero_user_ids: list[str] = [] # TODO: Should we also check for `EventTypes.CanonicalAlias` # (`m.room.canonical_alias`) as a fallback for the room name? 
see # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 @@ -1036,7 +1036,7 @@ class SlidingSyncHandler: ) required_state_filter = StateFilter.all() else: - required_state_types: List[Tuple[str, Optional[str]]] = [] + required_state_types: list[tuple[str, Optional[str]]] = [] num_wild_state_keys = 0 lazy_load_room_members = False num_others = 0 @@ -1057,7 +1057,7 @@ class SlidingSyncHandler: lazy_load_room_members = True # Everyone in the timeline is relevant - timeline_membership: Set[str] = set() + timeline_membership: set[str] = set() if timeline_events is not None: for timeline_event in timeline_events: # Anyone who sent a message is relevant @@ -1219,7 +1219,7 @@ class SlidingSyncHandler: room_avatar = avatar_event.content.get("url") # Assemble heroes: extract the info from the state we just fetched - heroes: List[SlidingSyncResult.RoomResult.StrippedHero] = [] + heroes: list[SlidingSyncResult.RoomResult.StrippedHero] = [] for hero_user_id in hero_user_ids: member_event = room_state.get((EventTypes.Member, hero_user_id)) if member_event is not None: @@ -1374,7 +1374,7 @@ class SlidingSyncHandler: self, room_id: str, to_token: StreamToken, - timeline: List[EventBase], + timeline: list[EventBase], check_outside_timeline: bool, ) -> Optional[int]: """Get a bump stamp for the room, if we have a bump event and it has @@ -1479,7 +1479,7 @@ def _required_state_changes( prev_required_state_map: Mapping[str, AbstractSet[str]], request_required_state_map: Mapping[str, AbstractSet[str]], state_deltas: StateMap[str], -) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]: +) -> tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]: """Calculates the changes between the required state room config from the previous requests compared with the current request. @@ -1524,15 +1524,15 @@ def _required_state_changes( # Contains updates to the required state map compared with the previous room # config. This has the same format as `RoomSyncConfig.required_state` - changes: Dict[str, AbstractSet[str]] = {} + changes: dict[str, AbstractSet[str]] = {} # The set of types/state keys that we need to fetch and return to the # client. Passed to `StateFilter.from_types(...)` - added: List[Tuple[str, Optional[str]]] = [] + added: list[tuple[str, Optional[str]]] = [] # Convert the list of state deltas to map from type to state_keys that have # changed. 
- changed_types_to_state_keys: Dict[str, Set[str]] = {} + changed_types_to_state_keys: dict[str, set[str]] = {} for event_type, state_key in state_deltas: changed_types_to_state_keys.setdefault(event_type, set()).add(state_key) diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 25ee954b7f..221af86f7d 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -18,12 +18,10 @@ from typing import ( TYPE_CHECKING, AbstractSet, ChainMap, - Dict, Mapping, MutableMapping, Optional, Sequence, - Set, cast, ) @@ -85,7 +83,7 @@ class SlidingSyncExtensionHandler: previous_connection_state: "PerConnectionState", new_connection_state: "MutablePerConnectionState", actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], + actual_room_ids: set[str], actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], to_token: StreamToken, from_token: Optional[SlidingSyncStreamToken], @@ -208,7 +206,7 @@ class SlidingSyncExtensionHandler: requested_room_ids: Optional[StrCollection], actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], actual_room_ids: AbstractSet[str], - ) -> Set[str]: + ) -> set[str]: """ Handle the reserved `lists`/`rooms` keys for extensions. Extensions should only return results for rooms in the Sliding Sync response. This matches up the @@ -231,7 +229,7 @@ class SlidingSyncExtensionHandler: # We only want to include account data for rooms that are already in the sliding # sync response AND that were requested in the account data request. - relevant_room_ids: Set[str] = set() + relevant_room_ids: set[str] = set() # See what rooms from the room subscriptions we should get account data for if requested_room_ids is not None: @@ -406,7 +404,7 @@ class SlidingSyncExtensionHandler: previous_connection_state: "PerConnectionState", new_connection_state: "MutablePerConnectionState", actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], + actual_room_ids: set[str], account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension, to_token: StreamToken, from_token: Optional[SlidingSyncStreamToken], @@ -481,7 +479,7 @@ class SlidingSyncExtensionHandler: # down account data previously or not, so we split the relevant # rooms up into different collections based on status. live_rooms = set() - previously_rooms: Dict[str, int] = {} + previously_rooms: dict[str, int] = {} initial_rooms = set() for room_id in relevant_room_ids: @@ -638,7 +636,7 @@ class SlidingSyncExtensionHandler: previous_connection_state: "PerConnectionState", new_connection_state: "MutablePerConnectionState", actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], + actual_room_ids: set[str], actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension, to_token: StreamToken, @@ -671,13 +669,13 @@ class SlidingSyncExtensionHandler: actual_room_ids=actual_room_ids, ) - room_id_to_receipt_map: Dict[str, JsonMapping] = {} + room_id_to_receipt_map: dict[str, JsonMapping] = {} if len(relevant_room_ids) > 0: # We need to handle the different cases depending on if we have sent # down receipts previously or not, so we split the relevant rooms # up into different collections based on status. 
live_rooms = set() - previously_rooms: Dict[str, MultiWriterStreamToken] = {} + previously_rooms: dict[str, MultiWriterStreamToken] = {} initial_rooms = set() for room_id in relevant_room_ids: @@ -842,7 +840,7 @@ class SlidingSyncExtensionHandler: self, sync_config: SlidingSyncConfig, actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], + actual_room_ids: set[str], actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], typing_request: SlidingSyncConfig.Extensions.TypingExtension, to_token: StreamToken, @@ -872,7 +870,7 @@ class SlidingSyncExtensionHandler: actual_room_ids=actual_room_ids, ) - room_id_to_typing_map: Dict[str, JsonMapping] = {} + room_id_to_typing_map: dict[str, JsonMapping] = {} if len(relevant_room_ids) > 0: # Note: We don't need to take connection tracking into account for typing # notifications because they'll get anything still relevant and hasn't timed @@ -942,8 +940,8 @@ class SlidingSyncExtensionHandler: if len(updates) == 0: return None - subscribed_threads: Dict[str, Dict[str, _ThreadSubscription]] = {} - unsubscribed_threads: Dict[str, Dict[str, _ThreadUnsubscription]] = {} + subscribed_threads: dict[str, dict[str, _ThreadSubscription]] = {} + unsubscribed_threads: dict[str, dict[str, _ThreadUnsubscription]] = {} for stream_id, room_id, thread_root_id, subscribed, automatic in updates: if subscribed: subscribed_threads.setdefault(room_id, {})[thread_root_id] = ( diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 19116590f7..fc77fd3c65 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -18,14 +18,10 @@ from itertools import chain from typing import ( TYPE_CHECKING, AbstractSet, - Dict, - List, Literal, Mapping, MutableMapping, Optional, - Set, - Tuple, Union, cast, ) @@ -113,7 +109,7 @@ class SlidingSyncInterestedRooms: lists: Mapping[str, SlidingSyncResult.SlidingWindowList] relevant_room_map: Mapping[str, RoomSyncConfig] relevant_rooms_to_send_map: Mapping[str, RoomSyncConfig] - all_rooms: Set[str] + all_rooms: set[str] room_membership_for_user_map: Mapping[str, RoomsForUserType] newly_joined_rooms: AbstractSet[str] @@ -231,12 +227,12 @@ class SlidingSyncRoomLists: user_id = sync_config.user.to_string() # Assemble sliding window lists - lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} + lists: dict[str, SlidingSyncResult.SlidingWindowList] = {} # Keep track of the rooms that we can display and need to fetch more info about - relevant_room_map: Dict[str, RoomSyncConfig] = {} + relevant_room_map: dict[str, RoomSyncConfig] = {} # The set of room IDs of all rooms that could appear in any list. These # include rooms that are outside the list ranges. - all_rooms: Set[str] = set() + all_rooms: set[str] = set() # Note: this won't include rooms the user has left themselves. We add back # `newly_left` rooms below. 
This is more efficient than fetching all rooms and @@ -472,7 +468,7 @@ class SlidingSyncRoomLists: all_rooms.update(filtered_sync_room_map) - ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] + ops: list[SlidingSyncResult.SlidingWindowList.Operation] = [] if list_config.ranges: # Optimization: If we are asking for the full range, we don't @@ -487,7 +483,7 @@ class SlidingSyncRoomLists: and list_config.ranges[0][1] >= len(filtered_sync_room_map) - 1 ): - sorted_room_info: List[RoomsForUserType] = list( + sorted_room_info: list[RoomsForUserType] = list( filtered_sync_room_map.values() ) else: @@ -496,7 +492,7 @@ class SlidingSyncRoomLists: # Cast is safe because RoomsForUserSlidingSync is part # of the `RoomsForUserType` union. Why can't it detect this? cast( - Dict[str, RoomsForUserType], filtered_sync_room_map + dict[str, RoomsForUserType], filtered_sync_room_map ), to_token, # We only need to sort the rooms up to the end @@ -506,7 +502,7 @@ class SlidingSyncRoomLists: ) for range in list_config.ranges: - room_ids_in_list: List[str] = [] + room_ids_in_list: list[str] = [] # We're going to loop through the sorted list of rooms starting # at the range start index and keep adding rooms until we fill @@ -639,12 +635,12 @@ class SlidingSyncRoomLists: dm_room_ids = await self._get_dm_rooms_for_user(sync_config.user.to_string()) # Assemble sliding window lists - lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} + lists: dict[str, SlidingSyncResult.SlidingWindowList] = {} # Keep track of the rooms that we can display and need to fetch more info about - relevant_room_map: Dict[str, RoomSyncConfig] = {} + relevant_room_map: dict[str, RoomSyncConfig] = {} # The set of room IDs of all rooms that could appear in any list. These # include rooms that are outside the list ranges. - all_rooms: Set[str] = set() + all_rooms: set[str] = set() if sync_config.lists: with start_active_span("assemble_sliding_window_lists"): @@ -691,10 +687,10 @@ class SlidingSyncRoomLists: filtered_sync_room_map, to_token ) - ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] + ops: list[SlidingSyncResult.SlidingWindowList.Operation] = [] if list_config.ranges: for range in list_config.ranges: - room_ids_in_list: List[str] = [] + room_ids_in_list: list[str] = [] # We're going to loop through the sorted list of rooms starting # at the range start index and keep adding rooms until we fill @@ -811,14 +807,14 @@ class SlidingSyncRoomLists: self, previous_connection_state: PerConnectionState, from_token: Optional[StreamToken], - relevant_room_map: Dict[str, RoomSyncConfig], - ) -> Dict[str, RoomSyncConfig]: + relevant_room_map: dict[str, RoomSyncConfig], + ) -> dict[str, RoomSyncConfig]: """Filters the `relevant_room_map` down to those rooms that may have updates we need to fetch and return.""" # Filtered subset of `relevant_room_map` for rooms that may have updates # (in the event stream) - relevant_rooms_to_send_map: Dict[str, RoomSyncConfig] = relevant_room_map + relevant_rooms_to_send_map: dict[str, RoomSyncConfig] = relevant_room_map if relevant_room_map: with start_active_span("filter_relevant_rooms_to_send"): if from_token: @@ -908,7 +904,7 @@ class SlidingSyncRoomLists: # # First, we need to get the max stream_ordering of each event persister instance # that we queried events from. 
- instance_to_max_stream_ordering_map: Dict[str, int] = {} + instance_to_max_stream_ordering_map: dict[str, int] = {} for room_for_user in rooms_for_user.values(): instance_name = room_for_user.event_pos.instance_name stream_ordering = room_for_user.event_pos.stream @@ -966,12 +962,12 @@ class SlidingSyncRoomLists: # Otherwise we're about to make changes to `rooms_for_user`, so we turn # it into a mutable dict. - changes: Dict[str, Optional[RoomsForUser]] = {} + changes: dict[str, Optional[RoomsForUser]] = {} # Assemble a list of the first membership event after the `to_token` so we can # step backward to the previous membership that would apply to the from/to # range. - first_membership_change_by_room_id_after_to_token: Dict[ + first_membership_change_by_room_id_after_to_token: dict[ str, CurrentStateDeltaMembership ] = {} for membership_change in current_state_delta_membership_changes_after_to_token: @@ -1033,7 +1029,7 @@ class SlidingSyncRoomLists: user: UserID, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: + ) -> tuple[dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Fetch room IDs that the user has had membership in (the full room list including long-lost left rooms that will be filtered, sorted, and sliced). @@ -1108,7 +1104,7 @@ class SlidingSyncRoomLists: # Since we fetched the users room list at some point in time after the # tokens, we need to revert/rewind some membership changes to match the point in # time of the `to_token`. - rooms_for_user: Dict[str, RoomsForUserType] = { + rooms_for_user: dict[str, RoomsForUserType] = { room.room_id: room for room in room_for_user_list } changes = await self._get_rewind_changes_to_current_membership_to_token( @@ -1143,7 +1139,7 @@ class SlidingSyncRoomLists: user_id: str, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: + ) -> tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: """Fetch the sets of rooms that the user newly joined or left in the given token range. @@ -1163,8 +1159,8 @@ class SlidingSyncRoomLists: need to check if a membership still exists in the room. """ - newly_joined_room_ids: Set[str] = set() - newly_left_room_map: Dict[str, RoomsForUserStateReset] = {} + newly_joined_room_ids: set[str] = set() + newly_left_room_map: dict[str, RoomsForUserStateReset] = {} if not from_token: return newly_joined_room_ids, newly_left_room_map @@ -1190,7 +1186,7 @@ class SlidingSyncRoomLists: user_id: str, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: + ) -> tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: """Fetch the sets of rooms that the user newly joined or left in the given token range. @@ -1209,8 +1205,8 @@ class SlidingSyncRoomLists: was state reset out of the room. To actually check for a state reset, you need to check if a membership still exists in the room. """ - newly_joined_room_ids: Set[str] = set() - newly_left_room_map: Dict[str, RoomsForUserStateReset] = {} + newly_joined_room_ids: set[str] = set() + newly_left_room_map: dict[str, RoomsForUserStateReset] = {} # We need to figure out the # @@ -1232,20 +1228,20 @@ class SlidingSyncRoomLists: # 1) Assemble a list of the last membership events in some given ranges. 
Someone # could have left and joined multiple times during the given range but we only # care about end-result so we grab the last one. - last_membership_change_by_room_id_in_from_to_range: Dict[ + last_membership_change_by_room_id_in_from_to_range: dict[ str, CurrentStateDeltaMembership ] = {} # We also want to assemble a list of the first membership events during the token # range so we can step backward to the previous membership that would apply to # before the token range to see if we have `newly_joined` the room. - first_membership_change_by_room_id_in_from_to_range: Dict[ + first_membership_change_by_room_id_in_from_to_range: dict[ str, CurrentStateDeltaMembership ] = {} # Keep track if the room has a non-join event in the token range so we can later # tell if it was a `newly_joined` room. If the last membership event in the # token range is a join and there is also some non-join in the range, we know # they `newly_joined`. - has_non_join_event_by_room_id_in_from_to_range: Dict[str, bool] = {} + has_non_join_event_by_room_id_in_from_to_range: dict[str, bool] = {} for ( membership_change ) in current_state_delta_membership_changes_in_from_to_range: @@ -1355,9 +1351,9 @@ class SlidingSyncRoomLists: async def filter_rooms_relevant_for_sync( self, user: UserID, - room_membership_for_user_map: Dict[str, RoomsForUserType], + room_membership_for_user_map: dict[str, RoomsForUserType], newly_left_room_ids: AbstractSet[str], - ) -> Dict[str, RoomsForUserType]: + ) -> dict[str, RoomsForUserType]: """ Filter room IDs that should/can be listed for this user in the sync response (the full room list that will be further filtered, sorted, and sliced). @@ -1402,7 +1398,7 @@ class SlidingSyncRoomLists: async def check_room_subscription_allowed_for_user( self, room_id: str, - room_membership_for_user_map: Dict[str, RoomsForUserType], + room_membership_for_user_map: dict[str, RoomsForUserType], to_token: StreamToken, ) -> Optional[RoomsForUserType]: """ @@ -1469,8 +1465,8 @@ class SlidingSyncRoomLists: async def _bulk_get_stripped_state_for_rooms_from_sync_room_map( self, room_ids: StrCollection, - sync_room_map: Dict[str, RoomsForUserType], - ) -> Dict[str, Optional[StateMap[StrippedStateEvent]]]: + sync_room_map: dict[str, RoomsForUserType], + ) -> dict[str, Optional[StateMap[StrippedStateEvent]]]: """ Fetch stripped state for a list of room IDs. Stripped state is only applicable to invite/knock rooms. Other rooms will have `None` as their @@ -1488,7 +1484,7 @@ class SlidingSyncRoomLists: Mapping from room_id to mapping of (type, state_key) to stripped state event. 
""" - room_id_to_stripped_state_map: Dict[ + room_id_to_stripped_state_map: dict[ str, Optional[StateMap[StrippedStateEvent]] ] = {} @@ -1500,7 +1496,7 @@ class SlidingSyncRoomLists: ] # Gather a list of event IDs we can grab stripped state from - invite_or_knock_event_ids: List[str] = [] + invite_or_knock_event_ids: list[str] = [] for room_id in room_ids_to_fetch: if sync_room_map[room_id].membership in ( Membership.INVITE, @@ -1565,10 +1561,10 @@ class SlidingSyncRoomLists: # `content.algorithm` from `EventTypes.RoomEncryption` "room_encryption", ], - room_ids: Set[str], - sync_room_map: Dict[str, RoomsForUserType], + room_ids: set[str], + sync_room_map: dict[str, RoomsForUserType], to_token: StreamToken, - room_id_to_stripped_state_map: Dict[ + room_id_to_stripped_state_map: dict[ str, Optional[StateMap[StrippedStateEvent]] ], ) -> Mapping[str, Union[Optional[str], StateSentinel]]: @@ -1593,7 +1589,7 @@ class SlidingSyncRoomLists: the given state event (event_type, ""), otherwise `None`. Rooms unknown to this server will return `ROOM_UNKNOWN_SENTINEL`. """ - room_id_to_content: Dict[str, Union[Optional[str], StateSentinel]] = {} + room_id_to_content: dict[str, Union[Optional[str], StateSentinel]] = {} # As a bulk shortcut, use the current state if the server is particpating in the # room (meaning we have current state). Ideally, for leave/ban rooms, we would @@ -1650,7 +1646,7 @@ class SlidingSyncRoomLists: # Update our `room_id_to_content` map based on the stripped state # (applies to invite/knock rooms) - rooms_ids_without_stripped_state: Set[str] = set() + rooms_ids_without_stripped_state: set[str] = set() for room_id in room_ids_without_results: stripped_state_map = room_id_to_stripped_state_map.get( room_id, Sentinel.UNSET_SENTINEL @@ -1730,12 +1726,12 @@ class SlidingSyncRoomLists: async def filter_rooms( self, user: UserID, - sync_room_map: Dict[str, RoomsForUserType], + sync_room_map: dict[str, RoomsForUserType], previous_connection_state: PerConnectionState, filters: SlidingSyncConfig.SlidingSyncList.Filters, to_token: StreamToken, dm_room_ids: AbstractSet[str], - ) -> Dict[str, RoomsForUserType]: + ) -> dict[str, RoomsForUserType]: """ Filter rooms based on the sync request. @@ -1753,7 +1749,7 @@ class SlidingSyncRoomLists: """ user_id = user.to_string() - room_id_to_stripped_state_map: Dict[ + room_id_to_stripped_state_map: dict[ str, Optional[StateMap[StrippedStateEvent]] ] = {} @@ -1891,7 +1887,7 @@ class SlidingSyncRoomLists: with start_active_span("filters.tags"): # Fetch the user tags for their rooms room_tags = await self.store.get_tags_for_user(user_id) - room_id_to_tag_name_set: Dict[str, Set[str]] = { + room_id_to_tag_name_set: dict[str, set[str]] = { room_id: set(tags.keys()) for room_id, tags in room_tags.items() } @@ -1947,7 +1943,7 @@ class SlidingSyncRoomLists: filters: SlidingSyncConfig.SlidingSyncList.Filters, to_token: StreamToken, dm_room_ids: AbstractSet[str], - ) -> Dict[str, RoomsForUserSlidingSync]: + ) -> dict[str, RoomsForUserSlidingSync]: """ Filter rooms based on the sync request. 
@@ -2059,7 +2055,7 @@ class SlidingSyncRoomLists: with start_active_span("filters.tags"): # Fetch the user tags for their rooms room_tags = await self.store.get_tags_for_user(user_id) - room_id_to_tag_name_set: Dict[str, Set[str]] = { + room_id_to_tag_name_set: dict[str, set[str]] = { room_id: set(tags.keys()) for room_id, tags in room_tags.items() } @@ -2109,10 +2105,10 @@ class SlidingSyncRoomLists: @trace async def sort_rooms( self, - sync_room_map: Dict[str, RoomsForUserType], + sync_room_map: dict[str, RoomsForUserType], to_token: StreamToken, limit: Optional[int] = None, - ) -> List[RoomsForUserType]: + ) -> list[RoomsForUserType]: """ Sort by `stream_ordering` of the last event that the user should see in the room. `stream_ordering` is unique so we get a stable sort. @@ -2133,11 +2129,11 @@ class SlidingSyncRoomLists: # Assemble a map of room ID to the `stream_ordering` of the last activity that the # user should see in the room (<= `to_token`) - last_activity_in_room_map: Dict[str, int] = {} + last_activity_in_room_map: dict[str, int] = {} # Same as above, except for positions that we know are in the event # stream cache. - cached_positions: Dict[str, int] = {} + cached_positions: dict[str, int] = {} earliest_cache_position = ( self.store._events_stream_cache.get_earliest_known_position() diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 735cfa0a0f..641241287e 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -27,14 +27,11 @@ from typing import ( Any, Awaitable, Callable, - Dict, Iterable, - List, Mapping, NoReturn, Optional, Protocol, - Set, ) from urllib.parse import urlencode @@ -227,10 +224,10 @@ class SsoHandler: self._mapping_lock = Linearizer(clock=hs.get_clock(), name="sso_user_mapping") # a map from session id to session data - self._username_mapping_sessions: Dict[str, UsernameMappingSession] = {} + self._username_mapping_sessions: dict[str, UsernameMappingSession] = {} # map from idp_id to SsoIdentityProvider - self._identity_providers: Dict[str, SsoIdentityProvider] = {} + self._identity_providers: dict[str, SsoIdentityProvider] = {} self._consent_at_registration = hs.config.consent.user_consent_at_registration @@ -999,7 +996,7 @@ class SsoHandler: session.use_avatar = use_avatar emails_from_idp = set(session.emails) - filtered_emails: Set[str] = set() + filtered_emails: set[str] = set() # we iterate through the list rather than just building a set conjunction, so # that we can log attempts to use unknown addresses @@ -1142,7 +1139,7 @@ class SsoHandler: def check_required_attributes( self, request: SynapseRequest, - attributes: Mapping[str, List[Any]], + attributes: Mapping[str, list[Any]], attribute_requirements: Iterable[SsoAttributeRequirement], ) -> bool: """ @@ -1259,7 +1256,7 @@ def get_username_mapping_session_cookie_from_request(request: IRequest) -> str: def _check_attribute_requirement( - attributes: Mapping[str, List[Any]], req: SsoAttributeRequirement + attributes: Mapping[str, list[Any]], req: SsoAttributeRequirement ) -> bool: """Check if SSO attributes meet the proper requirements. 
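
The surrounding hunks mechanically migrate annotations from the `typing.Dict`/`List`/`Tuple`/`Set` aliases to the builtin generics of PEP 585. A minimal sketch of the target style, assuming Python 3.9+ where the builtin containers are subscriptable (the function and names below are illustrative only, not part of Synapse):

    def count_memberships(room_ids: list[str]) -> dict[str, int]:
        # The builtins are generic at runtime on 3.9+, so no `from typing import Dict, List` is needed.
        counts: dict[str, int] = {}
        for room_id in room_ids:
            counts[room_id] = counts.get(room_id, 0) + 1
        return counts
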
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 5b4a2cc62d..0804f72c47 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -25,10 +25,8 @@ from typing import ( TYPE_CHECKING, Any, Counter as CounterType, - Dict, Iterable, Optional, - Tuple, ) from synapse.api.constants import EventContentFields, EventTypes, Membership @@ -157,7 +155,7 @@ class StatsHandler: async def _handle_deltas( self, deltas: Iterable[StateDelta] - ) -> Tuple[Dict[str, CounterType[str]], Dict[str, CounterType[str]]]: + ) -> tuple[dict[str, CounterType[str]], dict[str, CounterType[str]]]: """Called with the state deltas to process Returns: @@ -165,10 +163,10 @@ class StatsHandler: mapping from room/user ID to changes in the various fields. """ - room_to_stats_deltas: Dict[str, CounterType[str]] = {} - user_to_stats_deltas: Dict[str, CounterType[str]] = {} + room_to_stats_deltas: dict[str, CounterType[str]] = {} + user_to_stats_deltas: dict[str, CounterType[str]] = {} - room_to_state_updates: Dict[str, Dict[str, Any]] = {} + room_to_state_updates: dict[str, dict[str, Any]] = {} for delta in deltas: logger.debug( diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 2a6652b585..a19b75203b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -24,14 +24,9 @@ from typing import ( TYPE_CHECKING, AbstractSet, Any, - Dict, - FrozenSet, - List, Mapping, Optional, Sequence, - Set, - Tuple, ) import attr @@ -113,7 +108,7 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000 LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100 -SyncRequestKey = Tuple[Any, ...] +SyncRequestKey = tuple[Any, ...] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -132,7 +127,7 @@ class TimelineBatch: limited: bool # A mapping of event ID to the bundled aggregations for the above events. # This is only calculated if limited is true. - bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None + bundled_aggregations: Optional[dict[str, BundledAggregations]] = None def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used @@ -151,8 +146,8 @@ class JoinedSyncResult: room_id: str timeline: TimelineBatch state: StateMap[EventBase] - ephemeral: List[JsonDict] - account_data: List[JsonDict] + ephemeral: list[JsonDict] + account_data: list[JsonDict] unread_notifications: JsonDict unread_thread_notifications: JsonDict summary: Optional[JsonDict] @@ -174,7 +169,7 @@ class ArchivedSyncResult: room_id: str timeline: TimelineBatch state: StateMap[EventBase] - account_data: List[JsonDict] + account_data: list[JsonDict] def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used @@ -209,11 +204,11 @@ class _RoomChanges: and left room IDs since last sync. 
""" - room_entries: List["RoomSyncResultBuilder"] - invited: List[InvitedSyncResult] - knocked: List[KnockedSyncResult] - newly_joined_rooms: List[str] - newly_left_rooms: List[str] + room_entries: list["RoomSyncResultBuilder"] + invited: list[InvitedSyncResult] + knocked: list[KnockedSyncResult] + newly_joined_rooms: list[str] + newly_left_rooms: list[str] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -236,16 +231,16 @@ class SyncResult: """ next_batch: StreamToken - presence: List[UserPresenceState] - account_data: List[JsonDict] - joined: List[JoinedSyncResult] - invited: List[InvitedSyncResult] - knocked: List[KnockedSyncResult] - archived: List[ArchivedSyncResult] - to_device: List[JsonDict] + presence: list[UserPresenceState] + account_data: list[JsonDict] + joined: list[JoinedSyncResult] + invited: list[InvitedSyncResult] + knocked: list[KnockedSyncResult] + archived: list[ArchivedSyncResult] + to_device: list[JsonDict] device_lists: DeviceListUpdates device_one_time_keys_count: JsonMapping - device_unused_fallback_key_types: List[str] + device_unused_fallback_key_types: list[str] def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used @@ -267,7 +262,7 @@ class SyncResult: def empty( next_batch: StreamToken, device_one_time_keys_count: JsonMapping, - device_unused_fallback_key_types: List[str], + device_unused_fallback_key_types: list[str], ) -> "SyncResult": "Return a new empty result" return SyncResult( @@ -319,7 +314,7 @@ class SyncHandler: # ExpiringCache((User, Device)) -> LruCache(user_id => event_id) self.lazy_loaded_members_cache: ExpiringCache[ - Tuple[str, Optional[str]], LruCache[str, str] + tuple[str, Optional[str]], LruCache[str, str] ] = ExpiringCache( cache_name="lazy_loaded_members_cache", server_name=self.server_name, @@ -419,7 +414,7 @@ class SyncHandler: ) device_id = sync_config.device_id one_time_keys_count: JsonMapping = {} - unused_fallback_key_types: List[str] = [] + unused_fallback_key_types: list[str] = [] if device_id: user_id = sync_config.user.to_string() # TODO: We should have a way to let clients differentiate between the states of: @@ -543,7 +538,7 @@ class SyncHandler: sync_result_builder: "SyncResultBuilder", now_token: StreamToken, since_token: Optional[StreamToken] = None, - ) -> Tuple[StreamToken, Dict[str, List[JsonDict]]]: + ) -> tuple[StreamToken, dict[str, list[JsonDict]]]: """Get the ephemeral events for each room the user is in Args: sync_result_builder @@ -610,7 +605,7 @@ class SyncHandler: sync_config: SyncConfig, upto_token: StreamToken, since_token: Optional[StreamToken] = None, - potential_recents: Optional[List[EventBase]] = None, + potential_recents: Optional[list[EventBase]] = None, newly_joined_room: bool = False, ) -> TimelineBatch: """Create a timeline batch for the room @@ -669,7 +664,7 @@ class SyncHandler: # We check if there are any state events, if there are then we pass # all current state events to the filter_events function. This is to # ensure that we always include current state in the timeline - current_state_ids: FrozenSet[str] = frozenset() + current_state_ids: frozenset[str] = frozenset() if any(e.is_state() for e in recents): # FIXME(faster_joins): We use the partial state here as # we don't want to block `/sync` on finishing a lazy join. 
@@ -968,7 +963,7 @@ class SyncHandler: return summary def get_lazy_loaded_members_cache( - self, cache_key: Tuple[str, Optional[str]] + self, cache_key: tuple[str, Optional[str]] ) -> LruCache[str, str]: cache: Optional[LruCache[str, str]] = self.lazy_loaded_members_cache.get( cache_key @@ -1029,11 +1024,11 @@ class SyncHandler: ): # The memberships needed for events in the timeline. # Only calculated when `lazy_load_members` is on. - members_to_fetch: Optional[Set[str]] = None + members_to_fetch: Optional[set[str]] = None # A dictionary mapping user IDs to the first event in the timeline sent by # them. Only calculated when `lazy_load_members` is on. - first_event_by_sender_map: Optional[Dict[str, EventBase]] = None + first_event_by_sender_map: Optional[dict[str, EventBase]] = None # The contribution to the room state from state events in the timeline. # Only contains the last event for any given state key. @@ -1159,7 +1154,7 @@ class SyncHandler: if t[0] == EventTypes.Member: cache.set(t[1], event_id) - state: Dict[str, EventBase] = {} + state: dict[str, EventBase] = {} if state_ids: state = await self.store.get_events(list(state_ids.values())) @@ -1177,7 +1172,7 @@ class SyncHandler: sync_config: SyncConfig, batch: TimelineBatch, end_token: StreamToken, - members_to_fetch: Optional[Set[str]], + members_to_fetch: Optional[set[str]], timeline_state: StateMap[str], joined: bool, ) -> StateMap[str]: @@ -1327,7 +1322,7 @@ class SyncHandler: batch: TimelineBatch, since_token: StreamToken, end_token: StreamToken, - members_to_fetch: Optional[Set[str]], + members_to_fetch: Optional[set[str]], timeline_state: StateMap[str], ) -> StateMap[str]: """Calculate the state events to be included in an incremental sync response. @@ -1562,7 +1557,7 @@ class SyncHandler: # Identify memberships missing from `found_state_ids` and pick out the auth # events in which to look for them. - auth_event_ids: Set[str] = set() + auth_event_ids: set[str] = set() for member in members_to_fetch: if (EventTypes.Member, member) in found_state_ids: continue @@ -1765,7 +1760,7 @@ class SyncHandler: logger.debug("Fetching OTK data") device_id = sync_config.device_id one_time_keys_count: JsonMapping = {} - unused_fallback_key_types: List[str] = [] + unused_fallback_key_types: list[str] = [] if device_id: # TODO: We should have a way to let clients differentiate between the states of: # * no change in OTK count since the provided since token @@ -1855,7 +1850,7 @@ class SyncHandler: self.rooms_to_exclude_globally, ) - last_membership_change_by_room_id: Dict[str, EventBase] = {} + last_membership_change_by_room_id: dict[str, EventBase] = {} for event in membership_change_events: last_membership_change_by_room_id[event.room_id] = event @@ -1914,7 +1909,7 @@ class SyncHandler: # - are full-stated # - became fully-stated at some point during the sync period # (These rooms will have been omitted during a previous eager sync.) - forced_newly_joined_room_ids: Set[str] = set() + forced_newly_joined_room_ids: set[str] = set() if since_token and not sync_config.filter_collection.lazy_load_members(): un_partial_stated_rooms = ( await self.store.get_un_partial_stated_rooms_between( @@ -2123,7 +2118,7 @@ class SyncHandler: async def _generate_sync_entry_for_rooms( self, sync_result_builder: "SyncResultBuilder" - ) -> Tuple[AbstractSet[str], AbstractSet[str]]: + ) -> tuple[AbstractSet[str], AbstractSet[str]]: """Generates the rooms portion of the sync response. Populates the `sync_result_builder` with the result. 
@@ -2172,7 +2167,7 @@ class SyncHandler: or sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral() ) if block_all_room_ephemeral: - ephemeral_by_room: Dict[str, List[JsonDict]] = {} + ephemeral_by_room: dict[str, list[JsonDict]] = {} else: now_token, ephemeral_by_room = await self.ephemeral_by_room( sync_result_builder, @@ -2266,7 +2261,7 @@ class SyncHandler: async def _get_room_changes_for_incremental_sync( self, sync_result_builder: "SyncResultBuilder", - ignored_users: FrozenSet[str], + ignored_users: frozenset[str], ) -> _RoomChanges: """Determine the changes in rooms to report to the user. @@ -2297,17 +2292,17 @@ class SyncHandler: assert since_token - mem_change_events_by_room_id: Dict[str, List[EventBase]] = {} + mem_change_events_by_room_id: dict[str, list[EventBase]] = {} for event in membership_change_events: mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) - newly_joined_rooms: List[str] = list( + newly_joined_rooms: list[str] = list( sync_result_builder.forced_newly_joined_room_ids ) - newly_left_rooms: List[str] = [] - room_entries: List[RoomSyncResultBuilder] = [] - invited: List[InvitedSyncResult] = [] - knocked: List[KnockedSyncResult] = [] + newly_left_rooms: list[str] = [] + room_entries: list[RoomSyncResultBuilder] = [] + invited: list[InvitedSyncResult] = [] + knocked: list[KnockedSyncResult] = [] invite_config = await self.store.get_invite_config_for_user(user_id) for room_id, events in mem_change_events_by_room_id.items(): # The body of this loop will add this room to at least one of the five lists @@ -2444,7 +2439,7 @@ class SyncHandler: # This is all screaming out for a refactor, as the logic here is # subtle and the moving parts numerous. if leave_event.internal_metadata.is_out_of_band_membership(): - batch_events: Optional[List[EventBase]] = [leave_event] + batch_events: Optional[list[EventBase]] = [leave_event] else: batch_events = None @@ -2526,7 +2521,7 @@ class SyncHandler: async def _get_room_changes_for_initial_sync( self, sync_result_builder: "SyncResultBuilder", - ignored_users: FrozenSet[str], + ignored_users: frozenset[str], ) -> _RoomChanges: """Returns entries for all rooms for the user. 
@@ -2612,7 +2607,7 @@ class SyncHandler: self, sync_result_builder: "SyncResultBuilder", room_builder: "RoomSyncResultBuilder", - ephemeral: List[JsonDict], + ephemeral: list[JsonDict], tags: Optional[Mapping[str, JsonMapping]], account_data: Mapping[str, JsonMapping], always_include: bool = False, @@ -2791,7 +2786,7 @@ class SyncHandler: ) if room_builder.rtype == "joined": - unread_notifications: Dict[str, int] = {} + unread_notifications: dict[str, int] = {} room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, @@ -2858,7 +2853,7 @@ class SyncHandler: raise Exception("Unrecognized rtype: %r", room_builder.rtype) -def _action_has_highlight(actions: List[JsonDict]) -> bool: +def _action_has_highlight(actions: list[JsonDict]) -> bool: for action in actions: try: if action.get("set_tweak", None) == "highlight": @@ -3014,20 +3009,20 @@ class SyncResultBuilder: full_state: bool since_token: Optional[StreamToken] now_token: StreamToken - joined_room_ids: FrozenSet[str] - excluded_room_ids: FrozenSet[str] - forced_newly_joined_room_ids: FrozenSet[str] - membership_change_events: List[EventBase] + joined_room_ids: frozenset[str] + excluded_room_ids: frozenset[str] + forced_newly_joined_room_ids: frozenset[str] + membership_change_events: list[EventBase] - presence: List[UserPresenceState] = attr.Factory(list) - account_data: List[JsonDict] = attr.Factory(list) - joined: List[JoinedSyncResult] = attr.Factory(list) - invited: List[InvitedSyncResult] = attr.Factory(list) - knocked: List[KnockedSyncResult] = attr.Factory(list) - archived: List[ArchivedSyncResult] = attr.Factory(list) - to_device: List[JsonDict] = attr.Factory(list) + presence: list[UserPresenceState] = attr.Factory(list) + account_data: list[JsonDict] = attr.Factory(list) + joined: list[JoinedSyncResult] = attr.Factory(list) + invited: list[InvitedSyncResult] = attr.Factory(list) + knocked: list[KnockedSyncResult] = attr.Factory(list) + archived: list[ArchivedSyncResult] = attr.Factory(list) + to_device: list[JsonDict] = attr.Factory(list) - def calculate_user_changes(self) -> Tuple[AbstractSet[str], AbstractSet[str]]: + def calculate_user_changes(self) -> tuple[AbstractSet[str], AbstractSet[str]]: """Work out which other users have joined or left rooms we are joined to. This data only is only useful for an incremental sync. 
@@ -3105,7 +3100,7 @@ class RoomSyncResultBuilder: room_id: str rtype: str - events: Optional[List[EventBase]] + events: Optional[list[EventBase]] newly_joined: bool full_state: bool since_token: Optional[StreamToken] diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 77c5b747c3..17e43858c9 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -20,7 +20,7 @@ # import logging import random -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Iterable, Optional import attr @@ -96,15 +96,15 @@ class FollowerTypingHandler: ) # map room IDs to serial numbers - self._room_serials: Dict[str, int] = {} + self._room_serials: dict[str, int] = {} # map room IDs to sets of users currently typing - self._room_typing: Dict[str, Set[str]] = {} + self._room_typing: dict[str, set[str]] = {} - self._member_last_federation_poke: Dict[RoomMember, int] = {} + self._member_last_federation_poke: dict[RoomMember, int] = {} self.wheel_timer: WheelTimer[RoomMember] = WheelTimer(bucket_size=5000) self._latest_room_serial = 0 - self._rooms_updated: Set[str] = set() + self._rooms_updated: set[str] = set() self.clock.looping_call(self._handle_timeouts, 5000) self.clock.looping_call(self._prune_old_typing, FORGET_TIMEOUT) @@ -195,7 +195,7 @@ class FollowerTypingHandler: logger.exception("Error pushing typing notif to remotes") def process_replication_rows( - self, token: int, rows: List[TypingStream.TypingStreamRow] + self, token: int, rows: list[TypingStream.TypingStreamRow] ) -> None: """Should be called whenever we receive updates for typing stream.""" @@ -226,7 +226,7 @@ class FollowerTypingHandler: ) async def _send_changes_in_typing_to_remotes( - self, room_id: str, prev_typing: Set[str], now_typing: Set[str] + self, room_id: str, prev_typing: set[str], now_typing: set[str] ) -> None: """Process a change in typing of a room from replication, sending EDUs for any local users. @@ -280,7 +280,7 @@ class TypingWriterHandler(FollowerTypingHandler): hs.get_distributor().observe("user_left_room", self.user_left_room) # clock time we expect to stop - self._member_typing_until: Dict[RoomMember, int] = {} + self._member_typing_until: dict[RoomMember, int] = {} # caches which room_ids changed at which serials self._typing_stream_change_cache = StreamChangeCache( @@ -452,7 +452,7 @@ class TypingWriterHandler(FollowerTypingHandler): async def get_all_typing_updates( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, list]], int, bool]: + ) -> tuple[list[tuple[int, list]], int, bool]: """Get updates for typing replication stream. Args: @@ -504,7 +504,7 @@ class TypingWriterHandler(FollowerTypingHandler): return rows, current_id, limited def process_replication_rows( - self, token: int, rows: List[TypingStream.TypingStreamRow] + self, token: int, rows: list[TypingStream.TypingStreamRow] ) -> None: # The writing process should never get updates from replication. raise Exception("Typing writer instance got typing info over replication") @@ -531,7 +531,7 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]): async def get_new_events_as( self, from_key: int, service: ApplicationService - ) -> Tuple[List[JsonMapping], int]: + ) -> tuple[list[JsonMapping], int]: """Returns a set of new typing events that an appservice may be interested in. 
@@ -578,7 +578,7 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]): is_guest: bool, explicit_room_id: Optional[str] = None, to_key: Optional[int] = None, - ) -> Tuple[List[JsonMapping], int]: + ) -> tuple[list[JsonMapping], int]: """ Find typing notifications for given rooms (> `from_token` and <= `to_token`) """ diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index f3c295d9f2..cbae33eaec 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -21,7 +21,7 @@ import logging from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any, ClassVar, Sequence, Type +from typing import TYPE_CHECKING, Any, ClassVar, Sequence from twisted.web.client import PartialDownloadError @@ -321,7 +321,7 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker): ) -INTERACTIVE_AUTH_CHECKERS: Sequence[Type[UserInteractiveAuthChecker]] = [ +INTERACTIVE_AUTH_CHECKERS: Sequence[type[UserInteractiveAuthChecker]] = [ DummyAuthChecker, TermsAuthChecker, RecaptchaAuthChecker, diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 28961f5925..fd05aff4c8 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -21,7 +21,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Optional from twisted.internet.interfaces import IDelayedCall @@ -129,7 +129,7 @@ class UserDirectoryHandler(StateDeltasHandler): # Guard to ensure we only have one process for refreshing remote profiles # for the given servers. # Set of server names. - self._is_refreshing_remote_profiles_for_servers: Set[str] = set() + self._is_refreshing_remote_profiles_for_servers: set[str] = set() if self.update_user_directory: self.notifier.add_replication_callback(self.notify_new_event) @@ -270,7 +270,7 @@ class UserDirectoryHandler(StateDeltasHandler): await self.store.update_user_directory_stream_pos(max_pos) - async def _handle_deltas(self, deltas: List[StateDelta]) -> None: + async def _handle_deltas(self, deltas: list[StateDelta]) -> None: """Called with the state deltas to process""" for delta in deltas: logger.debug( @@ -466,7 +466,7 @@ class UserDirectoryHandler(StateDeltasHandler): or await self.store.should_include_local_user_in_dir(other) ) ] - updates_to_users_who_share_rooms: Set[Tuple[str, str]] = set() + updates_to_users_who_share_rooms: set[tuple[str, str]] = set() # First, if the joining user is our local user then we need an # update for every other user in the room. diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py index ca1e2b166c..af5498c560 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py @@ -26,10 +26,7 @@ from typing import ( TYPE_CHECKING, AsyncContextManager, Collection, - Dict, Optional, - Tuple, - Type, Union, ) from weakref import WeakSet @@ -75,8 +72,8 @@ class WorkerLocksHandler: # Map from lock name/key to set of `WaitingLock` that are active for # that lock. 
- self._locks: Dict[ - Tuple[str, str], WeakSet[Union[WaitingLock, WaitingMultiLock]] + self._locks: dict[ + tuple[str, str], WeakSet[Union[WaitingLock, WaitingMultiLock]] ] = {} self._clock.looping_call(self._cleanup_locks, 30_000) @@ -141,7 +138,7 @@ class WorkerLocksHandler: def acquire_multi_read_write_lock( self, - lock_names: Collection[Tuple[str, str]], + lock_names: Collection[tuple[str, str]], *, write: bool, ) -> "WaitingMultiLock": @@ -261,7 +258,7 @@ class WaitingLock: async def __aexit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> Optional[bool]: @@ -289,7 +286,7 @@ class WaitingLock: @attr.s(auto_attribs=True, eq=False) class WaitingMultiLock: - lock_names: Collection[Tuple[str, str]] + lock_names: Collection[tuple[str, str]] write: bool @@ -341,7 +338,7 @@ class WaitingMultiLock: async def __aexit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> Optional[bool]: diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py index 59eae841d5..1a17b8461f 100644 --- a/synapse/http/additional_resource.py +++ b/synapse/http/additional_resource.py @@ -18,7 +18,7 @@ # # -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional from twisted.web.server import Request @@ -41,7 +41,7 @@ class AdditionalResource(DirectServeJsonResource): def __init__( self, hs: "HomeServer", - handler: Callable[[Request], Awaitable[Optional[Tuple[int, Any]]]], + handler: Callable[[Request], Awaitable[Optional[tuple[int, Any]]]], ): """Initialise AdditionalResource @@ -56,7 +56,7 @@ class AdditionalResource(DirectServeJsonResource): super().__init__(clock=hs.get_clock()) self._handler = handler - async def _async_render(self, request: Request) -> Optional[Tuple[int, Any]]: + async def _async_render(self, request: Request) -> Optional[tuple[int, Any]]: # Cheekily pass the result straight through, so we don't need to worry # if its an awaitable or not. return await self._handler(request) diff --git a/synapse/http/client.py b/synapse/http/client.py index 370cdc3568..ff1f7c7128 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -27,12 +27,9 @@ from typing import ( Any, BinaryIO, Callable, - Dict, - List, Mapping, Optional, Protocol, - Tuple, Union, ) @@ -135,10 +132,10 @@ RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValu # the entries can either be Lists or bytes. 
RawHeaderValue = Union[ StrSequence, - List[bytes], - List[Union[str, bytes]], - Tuple[bytes, ...], - Tuple[Union[str, bytes], ...], + list[bytes], + list[Union[str, bytes]], + tuple[bytes, ...], + tuple[Union[str, bytes], ...], ] @@ -205,7 +202,7 @@ class _IPBlockingResolver: def resolveHostName( self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0 ) -> IResolutionReceiver: - addresses: List[IAddress] = [] + addresses: list[IAddress] = [] def _callback() -> None: has_bad_ip = False @@ -349,7 +346,7 @@ class BaseHttpClient: def __init__( self, hs: "HomeServer", - treq_args: Optional[Dict[str, Any]] = None, + treq_args: Optional[dict[str, Any]] = None, ): self.hs = hs self.server_name = hs.hostname @@ -479,7 +476,7 @@ class BaseHttpClient: async def post_urlencoded_get_json( self, uri: str, - args: Optional[Mapping[str, Union[str, List[str]]]] = None, + args: Optional[Mapping[str, Union[str, list[str]]]] = None, headers: Optional[RawHeaders] = None, ) -> Any: """ @@ -707,7 +704,7 @@ class BaseHttpClient: max_size: Optional[int] = None, headers: Optional[RawHeaders] = None, is_allowed_content_type: Optional[Callable[[str], bool]] = None, - ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: + ) -> tuple[int, dict[bytes, list[bytes]], str, int]: """GETs a file from a given URL Args: url: The URL to GET @@ -815,7 +812,7 @@ class SimpleHttpClient(BaseHttpClient): def __init__( self, hs: "HomeServer", - treq_args: Optional[Dict[str, Any]] = None, + treq_args: Optional[dict[str, Any]] = None, ip_allowlist: Optional[IPSet] = None, ip_blocklist: Optional[IPSet] = None, use_proxy: bool = False, diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 9d87514be0..f8482d9c48 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -19,7 +19,7 @@ # import logging import urllib.parse -from typing import Any, Generator, List, Optional +from typing import Any, Generator, Optional from urllib.request import ( # type: ignore[attr-defined] proxy_bypass_environment, ) @@ -413,7 +413,7 @@ class MatrixHostnameEndpoint: # to try and if that doesn't work then we'll have an exception. raise Exception("Failed to resolve server %r" % (self._parsed_uri.netloc,)) - async def _resolve_server(self) -> List[Server]: + async def _resolve_server(self) -> list[Server]: """Resolves the server name to a list of hosts and ports to attempt to connect to. """ diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index 639bf309d6..76a51e4873 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -22,7 +22,7 @@ import logging import random import time -from typing import Any, Callable, Dict, List +from typing import Any, Callable import attr @@ -34,7 +34,7 @@ from synapse.logging.context import make_deferred_yieldable logger = logging.getLogger(__name__) -SERVER_CACHE: Dict[bytes, List["Server"]] = {} +SERVER_CACHE: dict[bytes, list["Server"]] = {} @attr.s(auto_attribs=True, slots=True, frozen=True) @@ -58,11 +58,11 @@ class Server: expires: int = 0 -def _sort_server_list(server_list: List[Server]) -> List[Server]: +def _sort_server_list(server_list: list[Server]) -> list[Server]: """Given a list of SRV records sort them into priority order and shuffle each priority with the given weight. 
""" - priority_map: Dict[int, List[Server]] = {} + priority_map: dict[int, list[Server]] = {} for server in server_list: priority_map.setdefault(server.priority, []).append(server) @@ -116,14 +116,14 @@ class SrvResolver: def __init__( self, dns_client: Any = client, - cache: Dict[bytes, List[Server]] = SERVER_CACHE, + cache: dict[bytes, list[Server]] = SERVER_CACHE, get_time: Callable[[], float] = time.time, ): self._dns_client = dns_client self._cache = cache self._get_time = get_time - async def resolve_service(self, service_name: bytes) -> List[Server]: + async def resolve_service(self, service_name: bytes) -> list[Server]: """Look up a SRV record Args: diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index 2f52abcc03..ac4d954c2c 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -22,7 +22,7 @@ import logging import random import time from io import BytesIO -from typing import Callable, Dict, Optional, Tuple +from typing import Callable, Optional import attr @@ -188,7 +188,7 @@ class WellKnownResolver: return WellKnownLookupResult(delegated_server=result) - async def _fetch_well_known(self, server_name: bytes) -> Tuple[bytes, float]: + async def _fetch_well_known(self, server_name: bytes) -> tuple[bytes, float]: """Actually fetch and parse a .well-known, without checking the cache Args: @@ -251,7 +251,7 @@ class WellKnownResolver: async def _make_well_known_request( self, server_name: bytes, retry: bool - ) -> Tuple[IResponse, bytes]: + ) -> tuple[IResponse, bytes]: """Make the well known request. This will retry the request if requested and it fails (with unable @@ -348,7 +348,7 @@ def _cache_period_from_headers( return None -def _parse_cache_control(headers: Headers) -> Dict[bytes, Optional[bytes]]: +def _parse_cache_control(headers: Headers) -> dict[bytes, Optional[bytes]]: cache_controls = {} cache_control_headers = headers.getRawHeaders(b"cache-control") or [] for hdr in cache_control_headers: diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 4d72c72d01..d0e47cf8dc 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -31,13 +31,10 @@ from typing import ( Any, BinaryIO, Callable, - Dict, Generic, - List, Literal, Optional, TextIO, - Tuple, TypeVar, Union, cast, @@ -253,7 +250,7 @@ class JsonParser(_BaseJsonParser[JsonDict]): return isinstance(v, dict) -class LegacyJsonSendParser(_BaseJsonParser[Tuple[int, JsonDict]]): +class LegacyJsonSendParser(_BaseJsonParser[tuple[int, JsonDict]]): """Ensure the legacy responses of /send_join & /send_leave are correct.""" def __init__(self) -> None: @@ -667,7 +664,7 @@ class MatrixFederationHttpClient: ) # Inject the span into the headers - headers_dict: Dict[bytes, List[bytes]] = {} + headers_dict: dict[bytes, list[bytes]] = {} opentracing.inject_header_dict(headers_dict, request.destination) headers_dict[b"User-Agent"] = [self.version_string_bytes] @@ -913,7 +910,7 @@ class MatrixFederationHttpClient: url_bytes: bytes, content: Optional[JsonDict] = None, destination_is: Optional[bytes] = None, - ) -> List[bytes]: + ) -> list[bytes]: """ Builds the Authorization headers for a federation request Args: @@ -1291,7 +1288,7 @@ class MatrixFederationHttpClient: ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, parser: Literal[None] = None, - ) -> Tuple[JsonDict, Dict[bytes, List[bytes]]]: ... 
+ ) -> tuple[JsonDict, dict[bytes, list[bytes]]]: ... @overload async def get_json_with_headers( @@ -1304,7 +1301,7 @@ class MatrixFederationHttpClient: ignore_backoff: bool = ..., try_trailing_slash_on_400: bool = ..., parser: ByteParser[T] = ..., - ) -> Tuple[T, Dict[bytes, List[bytes]]]: ... + ) -> tuple[T, dict[bytes, list[bytes]]]: ... async def get_json_with_headers( self, @@ -1316,7 +1313,7 @@ class MatrixFederationHttpClient: ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, parser: Optional[ByteParser[T]] = None, - ) -> Tuple[Union[JsonDict, T], Dict[bytes, List[bytes]]]: + ) -> tuple[Union[JsonDict, T], dict[bytes, list[bytes]]]: """GETs some json from the given host homeserver and path Args: @@ -1484,7 +1481,7 @@ class MatrixFederationHttpClient: retry_on_dns_fail: bool = True, ignore_backoff: bool = False, follow_redirects: bool = False, - ) -> Tuple[int, Dict[bytes, List[bytes]]]: + ) -> tuple[int, dict[bytes, list[bytes]]]: """GETs a file from a given homeserver Args: destination: The remote server to send the HTTP request to. @@ -1645,7 +1642,7 @@ class MatrixFederationHttpClient: args: Optional[QueryParams] = None, retry_on_dns_fail: bool = True, ignore_backoff: bool = False, - ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]: + ) -> tuple[int, dict[bytes, list[bytes]], bytes]: """GETs a file from a given homeserver over the federation /download endpoint Args: destination: The remote server to send the HTTP request to. diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py index fa17432984..583dd092bd 100644 --- a/synapse/http/proxy.py +++ b/synapse/http/proxy.py @@ -22,7 +22,7 @@ import json import logging import urllib.parse -from typing import TYPE_CHECKING, Any, Optional, Set, Tuple, cast +from typing import TYPE_CHECKING, Any, Optional, cast from twisted.internet import protocol from twisted.internet.interfaces import ITCPTransport @@ -66,7 +66,7 @@ assert all(header.lower() == header for header in HOP_BY_HOP_HEADERS_LOWERCASE) def parse_connection_header_value( connection_header_value: Optional[bytes], -) -> Set[str]: +) -> set[str]: """ Parse the `Connection` header to determine which headers we should not be copied over from the remote response. @@ -86,7 +86,7 @@ def parse_connection_header_value( The set of header names that should not be copied over from the remote response. The keys are lowercased. 
""" - extra_headers_to_remove: Set[str] = set() + extra_headers_to_remove: set[str] = set() if connection_header_value: extra_headers_to_remove = { connection_option.decode("ascii").strip().lower() @@ -140,7 +140,7 @@ class ProxyResource(_AsyncResource): "Invalid Proxy-Authorization header.", Codes.UNAUTHORIZED ) - async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]: + async def _async_render(self, request: "SynapseRequest") -> tuple[int, Any]: uri = urllib.parse.urlparse(request.uri) assert uri.scheme == b"matrix-federation" diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index ab413990c5..67e04b18d9 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -21,7 +21,7 @@ import logging import random import re -from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple, Union, cast +from typing import Any, Collection, Optional, Sequence, Union, cast from urllib.parse import urlparse from urllib.request import ( # type: ignore[attr-defined] proxy_bypass_environment, @@ -139,7 +139,7 @@ class ProxyAgent(_AgentBase): else: self.proxy_reactor = proxy_reactor - self._endpoint_kwargs: Dict[str, Any] = {} + self._endpoint_kwargs: dict[str, Any] = {} if connectTimeout is not None: self._endpoint_kwargs["timeout"] = connectTimeout if bindAddress is not None: @@ -182,7 +182,7 @@ class ProxyAgent(_AgentBase): "`federation_proxy_credentials` are required when using `federation_proxy_locations`" ) - endpoints: List[IStreamClientEndpoint] = [] + endpoints: list[IStreamClientEndpoint] = [] for federation_proxy_location in federation_proxy_locations: endpoint: IStreamClientEndpoint if isinstance(federation_proxy_location, InstanceTcpLocationConfig): @@ -369,7 +369,7 @@ def http_proxy_endpoint( timeout: float = 30, bindAddress: Optional[Union[bytes, str, tuple[Union[bytes, str], int]]] = None, attemptDelay: Optional[float] = None, -) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]: +) -> tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]: """Parses an http proxy setting and returns an endpoint for the proxy Args: @@ -418,7 +418,7 @@ def http_proxy_endpoint( def parse_proxy( proxy: bytes, default_scheme: bytes = b"http", default_port: int = 1080 -) -> Tuple[bytes, bytes, int, Optional[ProxyCredentials]]: +) -> tuple[bytes, bytes, int, Optional[ProxyCredentials]]: """ Parse a proxy connection string. 
@@ -487,7 +487,7 @@ class _RandomSampleEndpoints: return run_in_background(self._do_connect, protocol_factory) async def _do_connect(self, protocol_factory: IProtocolFactory) -> IProtocol: - failures: List[Failure] = [] + failures: list[Failure] = [] for endpoint in random.sample(self._endpoints, k=len(self._endpoints)): try: return await endpoint.connect(protocol_factory) diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py index d70575dbd5..f4799bd1b2 100644 --- a/synapse/http/replicationagent.py +++ b/synapse/http/replicationagent.py @@ -20,7 +20,7 @@ # import logging -from typing import Dict, Optional +from typing import Optional from zope.interface import implementer @@ -60,7 +60,7 @@ class ReplicationEndpointFactory: def __init__( self, reactor: ISynapseReactor, - instance_map: Dict[str, InstanceLocationConfig], + instance_map: dict[str, InstanceLocationConfig], context_factory: IPolicyForHTTPS, ) -> None: self.reactor = reactor @@ -117,7 +117,7 @@ class ReplicationAgent(_AgentBase): def __init__( self, reactor: ISynapseReactor, - instance_map: Dict[str, InstanceLocationConfig], + instance_map: dict[str, InstanceLocationConfig], contextFactory: IPolicyForHTTPS, connectTimeout: Optional[float] = None, bindAddress: Optional[bytes] = None, diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 83f52edb7c..5cc8a2ebd8 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -22,7 +22,7 @@ import logging import threading import traceback -from typing import Dict, Mapping, Set, Tuple +from typing import Mapping from prometheus_client.core import Counter, Histogram @@ -133,13 +133,13 @@ in_flight_requests_db_sched_duration = Counter( labelnames=["method", "servlet", SERVER_NAME_LABEL], ) -_in_flight_requests: Set["RequestMetrics"] = set() +_in_flight_requests: set["RequestMetrics"] = set() # Protects the _in_flight_requests set from concurrent access _in_flight_requests_lock = threading.Lock() -def _get_in_flight_counts() -> Mapping[Tuple[str, ...], int]: +def _get_in_flight_counts() -> Mapping[tuple[str, ...], int]: """Returns a count of all in flight requests by (method, server_name)""" # Cast to a list to prevent it changing while the Prometheus # thread is collecting metrics @@ -152,7 +152,7 @@ def _get_in_flight_counts() -> Mapping[Tuple[str, ...], int]: # Map from (method, name) -> int, the number of in flight requests of that # type. The key type is Tuple[str, str], but we leave the length unspecified # for compatability with LaterGauge's annotations. - counts: Dict[Tuple[str, ...], int] = {} + counts: dict[tuple[str, ...], int] = {} for request_metric in request_metrics: key = ( request_metric.method, diff --git a/synapse/http/server.py b/synapse/http/server.py index d5af8758ac..1f4728fba2 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -33,14 +33,11 @@ from typing import ( Any, Awaitable, Callable, - Dict, Iterable, Iterator, - List, Optional, Pattern, Protocol, - Tuple, Union, cast, ) @@ -267,7 +264,7 @@ def wrap_async_request_handler( # it is actually called with a SynapseRequest and a kwargs dict for the params, # but I can't figure out how to represent that. 
ServletCallback = Callable[ - ..., Union[None, Awaitable[None], Tuple[int, Any], Awaitable[Tuple[int, Any]]] + ..., Union[None, Awaitable[None], tuple[int, Any], Awaitable[tuple[int, Any]]] ] @@ -354,7 +351,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): async def _async_render( self, request: "SynapseRequest" - ) -> Optional[Tuple[int, Any]]: + ) -> Optional[tuple[int, Any]]: """Delegates to `_async_render_` methods, or returns a 400 if no appropriate method exists. Can be overridden in sub classes for different routing. @@ -491,7 +488,7 @@ class JsonResource(DirectServeJsonResource): self.clock = hs.get_clock() super().__init__(canonical_json, extract_context, clock=self.clock) # Map of path regex -> method -> callback. - self._routes: Dict[Pattern[str], Dict[bytes, _PathEntry]] = {} + self._routes: dict[Pattern[str], dict[bytes, _PathEntry]] = {} self.hs = hs def register_paths( @@ -527,7 +524,7 @@ class JsonResource(DirectServeJsonResource): def _get_handler_for_request( self, request: "SynapseRequest" - ) -> Tuple[ServletCallback, str, Dict[str, str]]: + ) -> tuple[ServletCallback, str, dict[str, str]]: """Finds a callback method to handle the given request. Returns: @@ -556,7 +553,7 @@ class JsonResource(DirectServeJsonResource): # Huh. No one wanted to handle that? Fiiiiiine. raise UnrecognizedRequestError(code=404) - async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]: + async def _async_render(self, request: "SynapseRequest") -> tuple[int, Any]: callback, servlet_classname, group_dict = self._get_handler_for_request(request) request.is_render_cancellable = is_function_cancellable(callback) @@ -758,7 +755,7 @@ class _ByteProducer: # Start producing if `registerProducer` was successful self.resumeProducing() - def _send_data(self, data: List[bytes]) -> None: + def _send_data(self, data: list[bytes]) -> None: """ Send a list of bytes as a chunk of a response. """ diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 71e809b3f1..66694e0607 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -27,13 +27,10 @@ import urllib.parse as urlparse from http import HTTPStatus from typing import ( TYPE_CHECKING, - List, Literal, Mapping, Optional, Sequence, - Tuple, - Type, TypeVar, overload, ) @@ -548,7 +545,7 @@ EnumT = TypeVar("EnumT", bound=enum.Enum) def parse_enum( request: Request, name: str, - E: Type[EnumT], + E: type[EnumT], default: EnumT, ) -> EnumT: ... @@ -557,7 +554,7 @@ def parse_enum( def parse_enum( request: Request, name: str, - E: Type[EnumT], + E: type[EnumT], *, required: Literal[True], ) -> EnumT: ... @@ -566,7 +563,7 @@ def parse_enum( def parse_enum( request: Request, name: str, - E: Type[EnumT], + E: type[EnumT], default: Optional[EnumT] = None, required: bool = False, ) -> Optional[EnumT]: @@ -637,18 +634,18 @@ def parse_strings_from_args( *, allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", -) -> Optional[List[str]]: ... +) -> Optional[list[str]]: ... @overload def parse_strings_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: List[str], + default: list[str], *, allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", -) -> List[str]: ... +) -> list[str]: ... @overload @@ -659,29 +656,29 @@ def parse_strings_from_args( required: Literal[True], allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", -) -> List[str]: ... +) -> list[str]: ... 
@overload def parse_strings_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[List[str]] = None, + default: Optional[list[str]] = None, *, required: bool = False, allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", -) -> Optional[List[str]]: ... +) -> Optional[list[str]]: ... def parse_strings_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[List[str]] = None, + default: Optional[list[str]] = None, required: bool = False, allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", -) -> Optional[List[str]]: +) -> Optional[list[str]]: """ Parse a string parameter from the request query string list. @@ -892,7 +889,7 @@ def parse_json_object_from_request( Model = TypeVar("Model", bound=BaseModel) -def validate_json_object(content: JsonDict, model_type: Type[Model]) -> Model: +def validate_json_object(content: JsonDict, model_type: type[Model]) -> Model: """Validate a deserialized JSON object using the given pydantic model. Raises: @@ -922,7 +919,7 @@ def validate_json_object(content: JsonDict, model_type: Type[Model]) -> Model: def parse_and_validate_json_object_from_request( - request: Request, model_type: Type[Model] + request: Request, model_type: type[Model] ) -> Model: """Parse a JSON object from the body of a twisted HTTP request, then deserialise and validate using the given pydantic model. @@ -988,8 +985,8 @@ class ResolveRoomIdMixin: self.room_member_handler = hs.get_room_member_handler() async def resolve_room_id( - self, room_identifier: str, remote_room_hosts: Optional[List[str]] = None - ) -> Tuple[str, Optional[List[str]]]: + self, room_identifier: str, remote_room_hosts: Optional[list[str]] = None + ) -> tuple[str, Optional[list[str]]]: """ Resolve a room identifier to a room ID, if necessary. diff --git a/synapse/http/site.py b/synapse/http/site.py index cf31b64d80..ccf6ff27f0 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -22,7 +22,7 @@ import contextlib import logging import time from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Generator, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Generator, Optional, Union import attr from zope.interface import implementer @@ -266,7 +266,7 @@ class SynapseRequest(Request): return self.method.decode("ascii") return method - def get_authenticated_entity(self) -> Tuple[Optional[str], Optional[str]]: + def get_authenticated_entity(self) -> tuple[Optional[str], Optional[str]]: """ Get the "authenticated" entity of the request, which might be the user performing the action, or a user being puppeted by a server admin. 
@@ -783,7 +783,7 @@ class SynapseSite(ProxySite): self.access_logger = logging.getLogger(logger_name) self.server_version_string = server_version_string.encode("ascii") - self.connections: List[Protocol] = [] + self.connections: list[Protocol] = [] def buildProtocol(self, addr: IAddress) -> SynapseProtocol: protocol = SynapseProtocol( diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index ac34fa6525..a3444221a0 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -25,7 +25,7 @@ import traceback from collections import deque from ipaddress import IPv4Address, IPv6Address, ip_address from math import floor -from typing import Callable, Deque, Optional +from typing import Callable, Optional import attr from zope.interface import implementer @@ -66,7 +66,7 @@ class LogProducer: # (connected and registerProducer) which are part of the implementation. transport: Connection _format: Callable[[logging.LogRecord], str] - _buffer: Deque[logging.LogRecord] + _buffer: deque[logging.LogRecord] _paused: bool = attr.ib(default=False, init=False) def pauseProducing(self) -> None: @@ -120,7 +120,7 @@ class RemoteHandler(logging.Handler): self.port = port self.maximum_buffer = maximum_buffer - self._buffer: Deque[logging.LogRecord] = deque() + self._buffer: deque[logging.LogRecord] = deque() self._connection_waiter: Optional[Deferred] = None self._producer: Optional[LogProducer] = None diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 1b9c770311..6a4425ff1d 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -41,8 +41,6 @@ from typing import ( Callable, Literal, Optional, - Tuple, - Type, TypeVar, Union, overload, @@ -393,7 +391,7 @@ class LoggingContext: def __exit__( self, - type: Optional[Type[BaseException]], + type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: @@ -498,13 +496,13 @@ class LoggingContext: return res - def _get_cputime(self, current: "resource.struct_rusage") -> Tuple[float, float]: + def _get_cputime(self, current: "resource.struct_rusage") -> tuple[float, float]: """Get the cpu usage time between start() and the given rusage Args: rusage: the current resource usage - Returns: Tuple[float, float]: seconds in user mode, seconds in system mode + Returns: tuple[float, float]: seconds in user mode, seconds in system mode """ assert self.usage_start is not None @@ -672,7 +670,7 @@ class PreserveLoggingContext: def __exit__( self, - type: Optional[Type[BaseException]], + type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: diff --git a/synapse/logging/formatter.py b/synapse/logging/formatter.py index 228e5ed278..e5d73a47a8 100644 --- a/synapse/logging/formatter.py +++ b/synapse/logging/formatter.py @@ -23,7 +23,7 @@ import logging import traceback from io import StringIO from types import TracebackType -from typing import Optional, Tuple, Type +from typing import Optional class LogFormatter(logging.Formatter): @@ -38,8 +38,8 @@ class LogFormatter(logging.Formatter): def formatException( self, - ei: Tuple[ - Optional[Type[BaseException]], + ei: tuple[ + Optional[type[BaseException]], Optional[BaseException], Optional[TracebackType], ], diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 1c89a358df..fbb9971b32 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -183,13 +183,10 @@ from typing import ( Callable, 
Collection, ContextManager, - Dict, Generator, Iterable, - List, Optional, Pattern, - Type, TypeVar, Union, cast, @@ -292,7 +289,7 @@ try: except Exception: logger.exception("Failed to report span") - RustReporter: Optional[Type[_WrappedRustReporter]] = _WrappedRustReporter + RustReporter: Optional[type[_WrappedRustReporter]] = _WrappedRustReporter except ImportError: RustReporter = None @@ -536,8 +533,8 @@ def whitelisted_homeserver(destination: str) -> bool: def start_active_span( operation_name: str, child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None, - references: Optional[List["opentracing.Reference"]] = None, - tags: Optional[Dict[str, str]] = None, + references: Optional[list["opentracing.Reference"]] = None, + tags: Optional[dict[str, str]] = None, start_time: Optional[float] = None, ignore_active_span: bool = False, finish_on_close: bool = True, @@ -577,7 +574,7 @@ def start_active_span_follows_from( operation_name: str, contexts: Collection, child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None, - tags: Optional[Dict[str, str]] = None, + tags: Optional[dict[str, str]] = None, start_time: Optional[float] = None, ignore_active_span: bool = False, *, @@ -631,10 +628,10 @@ def start_active_span_follows_from( def start_active_span_from_edu( - edu_content: Dict[str, Any], + edu_content: dict[str, Any], operation_name: str, - references: Optional[List["opentracing.Reference"]] = None, - tags: Optional[Dict[str, str]] = None, + references: Optional[list["opentracing.Reference"]] = None, + tags: Optional[dict[str, str]] = None, start_time: Optional[float] = None, ignore_active_span: bool = False, finish_on_close: bool = True, @@ -709,7 +706,7 @@ def set_tag(key: str, value: Union[str, bool, int, float]) -> None: @ensure_active_span("log") -def log_kv(key_values: Dict[str, Any], timestamp: Optional[float] = None) -> None: +def log_kv(key_values: dict[str, Any], timestamp: Optional[float] = None) -> None: """Log to the active span""" assert opentracing.tracer.active_span is not None opentracing.tracer.active_span.log_kv(key_values, timestamp) @@ -760,7 +757,7 @@ def is_context_forced_tracing( @ensure_active_span("inject the span into a header dict") def inject_header_dict( - headers: Dict[bytes, List[bytes]], + headers: dict[bytes, list[bytes]], destination: Optional[str] = None, check_destination: bool = True, ) -> None: @@ -792,7 +789,7 @@ def inject_header_dict( span = opentracing.tracer.active_span - carrier: Dict[str, str] = {} + carrier: dict[str, str] = {} assert span is not None opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier) @@ -820,16 +817,16 @@ def inject_response_headers(response_headers: Headers) -> None: @ensure_active_span("inject the span into a header dict") -def inject_request_headers(headers: Dict[str, str]) -> None: +def inject_request_headers(headers: dict[str, str]) -> None: span = opentracing.tracer.active_span assert span is not None opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, headers) @ensure_active_span( - "get the active span context as a dict", ret=cast(Dict[str, str], {}) + "get the active span context as a dict", ret=cast(dict[str, str], {}) ) -def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str]: +def get_active_span_text_map(destination: Optional[str] = None) -> dict[str, str]: """ Gets a span context as a dict. This can be used instead of manually injecting a span into an empty carrier. 
@@ -844,7 +841,7 @@ def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str if destination and not whitelisted_homeserver(destination): return {} - carrier: Dict[str, str] = {} + carrier: dict[str, str] = {} assert opentracing.tracer.active_span is not None opentracing.tracer.inject( opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier @@ -859,7 +856,7 @@ def active_span_context_as_string() -> str: Returns: The active span context encoded as a string. """ - carrier: Dict[str, str] = {} + carrier: dict[str, str] = {} if opentracing: assert opentracing.tracer.active_span is not None opentracing.tracer.inject( @@ -888,12 +885,12 @@ def span_context_from_string(carrier: str) -> Optional["opentracing.SpanContext" Returns: The active span context decoded from a string. """ - payload: Dict[str, str] = json_decoder.decode(carrier) + payload: dict[str, str] = json_decoder.decode(carrier) return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, payload) @only_if_tracing -def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanContext"]: +def extract_text_map(carrier: dict[str, str]) -> Optional["opentracing.SpanContext"]: """ Wrapper method for opentracing's tracer.extract for TEXT_MAP. Args: diff --git a/synapse/media/_base.py b/synapse/media/_base.py index d3a9a66f5a..319ca662e2 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -29,12 +29,8 @@ from typing import ( TYPE_CHECKING, Awaitable, BinaryIO, - Dict, Generator, - List, Optional, - Tuple, - Type, ) import attr @@ -505,7 +501,7 @@ class Responder(ABC): def __exit__( # noqa: B027 self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: @@ -570,7 +566,7 @@ class FileInfo: return self.thumbnail.length -def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]: +def get_filename_from_headers(headers: dict[bytes, list[bytes]]) -> Optional[str]: """ Get the filename of the downloaded file by inspecting the Content-Disposition HTTP header. @@ -618,7 +614,7 @@ def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str return upload_name -def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]: +def _parse_header(line: bytes) -> tuple[bytes, dict[bytes, bytes]]: """Parse a Content-type like header. Cargo-culted from `cgi`, but works on bytes rather than strings. 
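The hunks above in the logging and media modules all make the same substitution: `typing.Deque`, `Dict`, `List`, `Tuple` and `Type` are replaced by the equivalent built-in generics, which have been subscriptable since Python 3.9 (PEP 585). For reference, a minimal self-contained sketch of the target annotation style; it is not taken from the patch, and `RecordBuffer`/`MAX_RECORDS` are invented names used purely for illustration.

    # Requires Python 3.9+: built-in generics (deque[...], list[...], type[...])
    # instead of typing.Deque/List/Type.
    import logging
    from collections import deque
    from types import TracebackType
    from typing import Optional

    MAX_RECORDS = 10


    class RecordBuffer:
        """Tiny bounded buffer annotated with built-in generics."""

        def __init__(self) -> None:
            # deque[...] replaces typing.Deque[...]
            self._buffer: deque[logging.LogRecord] = deque()

        def add(self, record: logging.LogRecord) -> None:
            self._buffer.append(record)
            while len(self._buffer) > MAX_RECORDS:
                self._buffer.popleft()

        def snapshot(self) -> list[logging.LogRecord]:
            # list[...] replaces typing.List[...]
            return list(self._buffer)

        def __enter__(self) -> "RecordBuffer":
            return self

        def __exit__(
            self,
            exc_type: Optional[type[BaseException]],  # type[...] replaces typing.Type[...]
            exc_val: Optional[BaseException],
            exc_tb: Optional[TracebackType],
        ) -> None:
            self._buffer.clear()


    buf = RecordBuffer()
    buf.add(logging.LogRecord("demo", logging.INFO, __file__, 1, "hello", None, None))
    print(len(buf.snapshot()))  # 1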
diff --git a/synapse/media/filepath.py b/synapse/media/filepath.py index 3d7863e2fb..7659971661 100644 --- a/synapse/media/filepath.py +++ b/synapse/media/filepath.py @@ -24,7 +24,7 @@ import functools import os import re import string -from typing import Any, Callable, List, TypeVar, Union, cast +from typing import Any, Callable, TypeVar, Union, cast NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d") @@ -46,7 +46,7 @@ def _wrap_in_base_path(func: F) -> F: GetPathMethod = TypeVar( - "GetPathMethod", bound=Union[Callable[..., str], Callable[..., List[str]]] + "GetPathMethod", bound=Union[Callable[..., str], Callable[..., list[str]]] ) @@ -73,7 +73,7 @@ def _wrap_with_jail_check(relative: bool) -> Callable[[GetPathMethod], GetPathMe @functools.wraps(func) def _wrapped( self: "MediaFilePaths", *args: Any, **kwargs: Any - ) -> Union[str, List[str]]: + ) -> Union[str, list[str]]: path_or_paths = func(self, *args, **kwargs) if isinstance(path_or_paths, list): @@ -303,7 +303,7 @@ class MediaFilePaths: url_cache_filepath = _wrap_in_base_path(url_cache_filepath_rel) @_wrap_with_jail_check(relative=False) - def url_cache_filepath_dirs_to_delete(self, media_id: str) -> List[str]: + def url_cache_filepath_dirs_to_delete(self, media_id: str) -> list[str]: "The dirs to try and remove if we delete the media_id file" if NEW_FORMAT_ID_RE.match(media_id): return [ @@ -376,7 +376,7 @@ class MediaFilePaths: ) @_wrap_with_jail_check(relative=False) - def url_cache_thumbnail_dirs_to_delete(self, media_id: str) -> List[str]: + def url_cache_thumbnail_dirs_to_delete(self, media_id: str) -> list[str]: "The dirs to try and remove if we delete the media_id thumbnails" # Media id is of the form # E.g.: 2017-09-28-fsdRDt24DS234dsf diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 238dc6cb2f..eda1410767 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -24,7 +24,7 @@ import logging import os import shutil from io import BytesIO -from typing import IO, TYPE_CHECKING, Dict, List, Optional, Set, Tuple +from typing import IO, TYPE_CHECKING, Optional import attr from matrix_common.types.mxc_uri import MXCUri @@ -109,8 +109,8 @@ class MediaRepository: self.remote_media_linearizer = Linearizer(name="media_remote", clock=self.clock) - self.recently_accessed_remotes: Set[Tuple[str, str]] = set() - self.recently_accessed_locals: Set[str] = set() + self.recently_accessed_remotes: set[tuple[str, str]] = set() + self.recently_accessed_locals: set[str] = set() self.federation_domain_whitelist = ( hs.config.federation.federation_domain_whitelist @@ -221,7 +221,7 @@ class MediaRepository: self.recently_accessed_locals.add(media_id) @trace - async def create_media_id(self, auth_user: UserID) -> Tuple[str, int]: + async def create_media_id(self, auth_user: UserID) -> tuple[str, int]: """Create and store a media ID for a local user and return the MXC URI and its expiration. @@ -242,7 +242,7 @@ class MediaRepository: return f"mxc://{self.server_name}/{media_id}", now + self.unused_expiration_time @trace - async def reached_pending_media_limit(self, auth_user: UserID) -> Tuple[bool, int]: + async def reached_pending_media_limit(self, auth_user: UserID) -> tuple[bool, int]: """Check if the user is over the limit for pending media uploads. 
Args: @@ -696,7 +696,7 @@ class MediaRepository: ip_address: str, use_federation_endpoint: bool, allow_authenticated: bool, - ) -> Tuple[Optional[Responder], RemoteMedia]: + ) -> tuple[Optional[Responder], RemoteMedia]: """Looks for media in local cache, if not there then attempt to download from remote server. @@ -1052,7 +1052,7 @@ class MediaRepository: def _get_thumbnail_requirements( self, media_type: str - ) -> Tuple[ThumbnailRequirement, ...]: + ) -> tuple[ThumbnailRequirement, ...]: scpos = media_type.find(";") if scpos > 0: media_type = media_type[:scpos] @@ -1099,7 +1099,7 @@ class MediaRepository: t_method: str, t_type: str, url_cache: bool, - ) -> Optional[Tuple[str, FileInfo]]: + ) -> Optional[tuple[str, FileInfo]]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(None, media_id, url_cache=url_cache) ) @@ -1308,7 +1308,7 @@ class MediaRepository: # We deduplicate the thumbnail sizes by ignoring the cropped versions if # they have the same dimensions of a scaled one. - thumbnails: Dict[Tuple[int, int, str], str] = {} + thumbnails: dict[tuple[int, int, str], str] = {} for requirement in requirements: if requirement.method == "crop": thumbnails.setdefault( @@ -1461,7 +1461,7 @@ class MediaRepository: delete_protected_media=False, ) - async def delete_old_remote_media(self, before_ts: int) -> Dict[str, int]: + async def delete_old_remote_media(self, before_ts: int) -> dict[str, int]: old_media = await self.store.get_remote_media_ids( before_ts, include_quarantined_media=False ) @@ -1497,8 +1497,8 @@ class MediaRepository: return {"deleted": deleted} async def delete_local_media_ids( - self, media_ids: List[str] - ) -> Tuple[List[str], int]: + self, media_ids: list[str] + ) -> tuple[list[str], int]: """ Delete the given local or remote media ID from this server @@ -1516,7 +1516,7 @@ class MediaRepository: keep_profiles: bool = True, delete_quarantined_media: bool = False, delete_protected_media: bool = False, - ) -> Tuple[List[str], int]: + ) -> tuple[list[str], int]: """ Delete local or remote media from this server by size and timestamp. Removes media files, any thumbnails and cached URLs. @@ -1543,8 +1543,8 @@ class MediaRepository: return await self._remove_local_media_from_disk(old_media) async def _remove_local_media_from_disk( - self, media_ids: List[str] - ) -> Tuple[List[str], int]: + self, media_ids: list[str] + ) -> tuple[list[str], int]: """ Delete local or remote media from this server. Removes media files, any thumbnails and cached URLs. diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index 99d002a8df..f6be9edf50 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -34,11 +34,8 @@ from typing import ( AsyncIterator, BinaryIO, Callable, - List, Optional, Sequence, - Tuple, - Type, Union, cast, ) @@ -205,7 +202,7 @@ class MediaStorage: @contextlib.asynccontextmanager async def store_into_file( self, file_info: FileInfo - ) -> AsyncIterator[Tuple[BinaryIO, str]]: + ) -> AsyncIterator[tuple[BinaryIO, str]]: """Async Context manager used to get a file like object to write into, as described by file_info. 
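`store_into_file` above is an async context manager whose annotation changes from `AsyncIterator[Tuple[BinaryIO, str]]` to `AsyncIterator[tuple[BinaryIO, str]]`. Below is a minimal sketch of that yield-a-tuple pattern; `open_scratch_file` is an invented helper used only to illustrate the annotation, not the repository's implementation.

    # Sketch: an asynccontextmanager that yields a (file object, path) pair,
    # annotated with the built-in tuple generic.
    import asyncio
    import contextlib
    import tempfile
    from typing import AsyncIterator, BinaryIO


    @contextlib.asynccontextmanager
    async def open_scratch_file(prefix: str) -> AsyncIterator[tuple[BinaryIO, str]]:
        """Yield a writable temporary file together with its path."""
        with tempfile.NamedTemporaryFile(prefix=prefix) as f:
            yield f, f.name  # caller writes; the file is removed on exit


    async def main() -> None:
        async with open_scratch_file("media-") as (f, path):
            f.write(b"hello")
            print("wrote to", path)


    asyncio.run(main())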
@@ -423,7 +420,7 @@ class FileResponder(Responder): def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: @@ -674,7 +671,7 @@ class Header: self, name: bytes, value: Any, - params: Optional[List[Tuple[Any, Any]]] = None, + params: Optional[list[tuple[Any, Any]]] = None, ): self.name = name self.value = value diff --git a/synapse/media/oembed.py b/synapse/media/oembed.py index 45b481f229..059d8ad1cf 100644 --- a/synapse/media/oembed.py +++ b/synapse/media/oembed.py @@ -21,7 +21,7 @@ import html import logging import urllib.parse -from typing import TYPE_CHECKING, List, Optional, cast +from typing import TYPE_CHECKING, Optional, cast import attr @@ -118,7 +118,7 @@ class OEmbedProvider: # Search for link elements with the proper rel and type attributes. # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. for tag in cast( - List["etree._Element"], + list["etree._Element"], tree.xpath("//link[@rel='alternate'][@type='application/json+oembed']"), ): if "href" in tag.attrib: @@ -127,7 +127,7 @@ class OEmbedProvider: # Some providers (e.g. Flickr) use alternative instead of alternate. # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. for tag in cast( - List["etree._Element"], + list["etree._Element"], tree.xpath("//link[@rel='alternative'][@type='application/json+oembed']"), ): if "href" in tag.attrib: @@ -223,10 +223,10 @@ class OEmbedProvider: return OEmbedResult(open_graph_response, author_name, cache_age) -def _fetch_urls(tree: "etree._Element", tag_name: str) -> List[str]: +def _fetch_urls(tree: "etree._Element", tag_name: str) -> list[str]: results = [] # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. - for tag in cast(List["etree._Element"], tree.xpath("//*/" + tag_name)): + for tag in cast(list["etree._Element"], tree.xpath("//*/" + tag_name)): if "src" in tag.attrib: results.append(cast(str, tag.attrib["src"])) return results diff --git a/synapse/media/preview_html.py b/synapse/media/preview_html.py index 38ae126a23..6a8e479152 100644 --- a/synapse/media/preview_html.py +++ b/synapse/media/preview_html.py @@ -24,12 +24,9 @@ import re from typing import ( TYPE_CHECKING, Callable, - Dict, Generator, Iterable, - List, Optional, - Set, Union, cast, ) @@ -83,7 +80,7 @@ def _get_html_media_encodings( The character encoding of the body, as a string. """ # There's no point in returning an encoding more than once. - attempted_encodings: Set[str] = set() + attempted_encodings: set[str] = set() # Limit searches to the first 1kb, since it ought to be at the top. body_start = body[:1024] @@ -190,7 +187,7 @@ def _get_meta_tags( property: str, prefix: str, property_mapper: Optional[Callable[[str], Optional[str]]] = None, -) -> Dict[str, Optional[str]]: +) -> dict[str, Optional[str]]: """ Search for meta tags prefixed with a particular string. @@ -207,10 +204,10 @@ def _get_meta_tags( """ # This actually returns Dict[str, str], but the caller sets this as a variable # which is Dict[str, Optional[str]]. - results: Dict[str, Optional[str]] = {} + results: dict[str, Optional[str]] = {} # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. 
for tag in cast( - List["etree._Element"], + list["etree._Element"], tree.xpath( f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]" ), @@ -256,7 +253,7 @@ def _map_twitter_to_open_graph(key: str) -> Optional[str]: return "og" + key[7:] -def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]]: +def parse_html_to_open_graph(tree: "etree._Element") -> dict[str, Optional[str]]: """ Parse the HTML document into an Open Graph response. @@ -315,7 +312,7 @@ def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]] # Attempt to find a title from the title tag, or the biggest header on the page. # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. title = cast( - List["etree._ElementUnicodeResult"], + list["etree._ElementUnicodeResult"], tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()"), ) if title: @@ -326,7 +323,7 @@ def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]] if "og:image" not in og: # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. meta_image = cast( - List["etree._ElementUnicodeResult"], + list["etree._ElementUnicodeResult"], tree.xpath( "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image'][not(@content='')]/@content[1]" ), @@ -340,7 +337,7 @@ def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]] # # TODO: consider inlined CSS styles as well as width & height attribs images = cast( - List["etree._Element"], + list["etree._Element"], tree.xpath("//img[@src][number(@width)>10][number(@height)>10]"), ) images = sorted( @@ -352,7 +349,7 @@ def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]] # If no images were found, try to find *any* images. if not images: # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. - images = cast(List["etree._Element"], tree.xpath("//img[@src][1]")) + images = cast(list["etree._Element"], tree.xpath("//img[@src][1]")) if images: og["og:image"] = cast(str, images[0].attrib["src"]) @@ -360,7 +357,7 @@ def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]] else: # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. favicons = cast( - List["etree._ElementUnicodeResult"], + list["etree._ElementUnicodeResult"], tree.xpath("//link[@href][contains(@rel, 'icon')]/@href[1]"), ) if favicons: @@ -370,7 +367,7 @@ def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]] # Check the first meta description tag for content. # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. meta_description = cast( - List["etree._ElementUnicodeResult"], + list["etree._ElementUnicodeResult"], tree.xpath( "//*/meta[translate(@name, 'DESCRIPTION', 'description')='description'][not(@content='')]/@content[1]" ), @@ -443,7 +440,7 @@ def parse_html_description(tree: "etree._Element") -> Optional[str]: def _iterate_over_text( tree: Optional["etree._Element"], - tags_to_ignore: Set[object], + tags_to_ignore: set[object], stack_limit: int = 1024, ) -> Generator[str, None, None]: """Iterate over the tree returning text nodes in a depth first fashion, @@ -463,7 +460,7 @@ def _iterate_over_text( # This is a stack whose items are elements to iterate over *or* strings # to be returned. 
- elements: List[Union[str, "etree._Element"]] = [tree] + elements: list[Union[str, "etree._Element"]] = [tree] while elements: el = elements.pop() diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index 5d9afda322..cc2fe7318b 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -22,7 +22,7 @@ import logging from io import BytesIO from types import TracebackType -from typing import TYPE_CHECKING, List, Optional, Tuple, Type +from typing import TYPE_CHECKING, Optional from PIL import Image @@ -116,7 +116,7 @@ class Thumbnailer: logger.info("Error parsing image EXIF information: %s", e) @trace - def transpose(self) -> Tuple[int, int]: + def transpose(self) -> tuple[int, int]: """Transpose the image using its EXIF Orientation tag Returns: @@ -134,7 +134,7 @@ class Thumbnailer: self.image.info["exif"] = None return self.image.size - def aspect(self, max_width: int, max_height: int) -> Tuple[int, int]: + def aspect(self, max_width: int, max_height: int) -> tuple[int, int]: """Calculate the largest size that preserves aspect ratio which fits within the given rectangle:: @@ -246,7 +246,7 @@ class Thumbnailer: def __exit__( self, - type: Optional[Type[BaseException]], + type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: @@ -553,7 +553,7 @@ class ThumbnailProvider: desired_height: int, desired_method: str, desired_type: str, - thumbnail_infos: List[ThumbnailInfo], + thumbnail_infos: list[ThumbnailInfo], media_id: str, file_id: str, url_cache: bool, @@ -719,7 +719,7 @@ class ThumbnailProvider: desired_height: int, desired_method: str, desired_type: str, - thumbnail_infos: List[ThumbnailInfo], + thumbnail_infos: list[ThumbnailInfo], file_id: str, url_cache: bool, server_name: Optional[str], @@ -750,12 +750,12 @@ class ThumbnailProvider: if desired_method == "crop": # Thumbnails that match equal or larger sizes of desired width/height. - crop_info_list: List[ - Tuple[int, int, int, bool, Optional[int], ThumbnailInfo] + crop_info_list: list[ + tuple[int, int, int, bool, Optional[int], ThumbnailInfo] ] = [] # Other thumbnails. - crop_info_list2: List[ - Tuple[int, int, int, bool, Optional[int], ThumbnailInfo] + crop_info_list2: list[ + tuple[int, int, int, bool, Optional[int], ThumbnailInfo] ] = [] for info in thumbnail_infos: # Skip thumbnails generated with different methods. @@ -801,9 +801,9 @@ class ThumbnailProvider: thumbnail_info = min(crop_info_list2, key=lambda t: t[:-1])[-1] elif desired_method == "scale": # Thumbnails that match equal or larger sizes of desired width/height. - info_list: List[Tuple[int, bool, int, ThumbnailInfo]] = [] + info_list: list[tuple[int, bool, int, ThumbnailInfo]] = [] # Other thumbnails. - info_list2: List[Tuple[int, bool, int, ThumbnailInfo]] = [] + info_list2: list[tuple[int, bool, int, ThumbnailInfo]] = [] for info in thumbnail_infos: # Skip thumbnails generated with different methods. 
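The preview hunks above repeatedly wrap `tree.xpath(...)` in `cast(list[...], ...)` because, as the inline comments note, the type returned by an XPath expression depends on the expression itself and mypy cannot deduce it. A short sketch of that pattern, assuming lxml is installed; the HTML snippet and variable names are invented for illustration.

    # The cast tells mypy which concrete result type this particular XPath
    # expression produces; at runtime cast() is a no-op.
    from typing import cast

    from lxml import etree

    html = b"<html><body><img src='/a.png' width='100' height='80'/></body></html>"
    tree = etree.fromstring(html, parser=etree.HTMLParser())

    # Cast: the type returned by xpath depends on the xpath expression; mypy can't deduce it.
    images = cast(list["etree._Element"], tree.xpath("//img[@src]"))
    for img in images:
        print(img.attrib["src"])  # /a.png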
diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index 1a82cc46e3..2a63842fb7 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -28,7 +28,7 @@ import re import shutil import sys import traceback -from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple +from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional from urllib.parse import urljoin, urlparse, urlsplit from urllib.request import urlopen @@ -705,7 +705,7 @@ class UrlPreviewer: async def _handle_oembed_response( self, url: str, media_info: MediaInfo, expiration_ms: int - ) -> Tuple[JsonDict, Optional[str], int]: + ) -> tuple[JsonDict, Optional[str], int]: """ Parse the downloaded oEmbed info. diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 2ffb14070b..def21ac942 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -28,15 +28,11 @@ import threading from importlib import metadata from typing import ( Callable, - Dict, Generic, Iterable, Mapping, Optional, Sequence, - Set, - Tuple, - Type, TypeVar, Union, cast, @@ -161,10 +157,10 @@ class LaterGauge(Collector): name: str desc: str labelnames: Optional[StrSequence] = attr.ib(hash=False) - _instance_id_to_hook_map: Dict[ + _instance_id_to_hook_map: dict[ Optional[str], # instance_id Callable[ - [], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]] + [], Union[Mapping[tuple[str, ...], Union[int, float]], Union[int, float]] ], ] = attr.ib(factory=dict, hash=False) """ @@ -206,7 +202,7 @@ class LaterGauge(Collector): *, homeserver_instance_id: Optional[str], hook: Callable[ - [], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]] + [], Union[Mapping[tuple[str, ...], Union[int, float]], Union[int, float]] ], ) -> None: """ @@ -260,7 +256,7 @@ class LaterGauge(Collector): all_later_gauges_to_clean_up_on_shutdown[self.name] = self -all_later_gauges_to_clean_up_on_shutdown: Dict[str, LaterGauge] = {} +all_later_gauges_to_clean_up_on_shutdown: dict[str, LaterGauge] = {} """ Track all `LaterGauge` instances so we can remove any associated hooks during homeserver shutdown. @@ -302,15 +298,15 @@ class InFlightGauge(Generic[MetricsEntry], Collector): # Create a class which have the sub_metrics values as attributes, which # default to 0 on initialization. Used to pass to registered callbacks. - self._metrics_class: Type[MetricsEntry] = attr.make_class( + self._metrics_class: type[MetricsEntry] = attr.make_class( "_MetricsEntry", attrs={x: attr.ib(default=0) for x in sub_metrics}, slots=True, ) # Counts number of in flight blocks for a given set of label values - self._registrations: Dict[ - Tuple[str, ...], Set[Callable[[MetricsEntry], None]] + self._registrations: dict[ + tuple[str, ...], set[Callable[[MetricsEntry], None]] ] = {} # Protects access to _registrations @@ -320,7 +316,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector): def register( self, - key: Tuple[str, ...], + key: tuple[str, ...], callback: Callable[[MetricsEntry], None], ) -> None: """Registers that we've entered a new block with labels `key`. 
@@ -349,7 +345,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector): def unregister( self, - key: Tuple[str, ...], + key: tuple[str, ...], callback: Callable[[MetricsEntry], None], ) -> None: """ @@ -424,7 +420,7 @@ class GaugeHistogramMetricFamilyWithLabels(GaugeHistogramMetricFamily): name: str, documentation: str, gsum_value: float, - buckets: Optional[Sequence[Tuple[str, float]]] = None, + buckets: Optional[Sequence[tuple[str, float]]] = None, labelnames: StrSequence = (), labelvalues: StrSequence = (), unit: str = "", diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 05e84038ac..c871598680 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -29,13 +29,10 @@ from typing import ( Awaitable, Callable, ContextManager, - Dict, Generator, Iterable, Optional, Protocol, - Set, - Type, TypeVar, Union, ) @@ -134,7 +131,7 @@ _background_process_db_sched_duration = Counter( # map from description to a counter, so that we can name our logcontexts # incrementally. (It actually duplicates _background_process_start_count, but # it's much simpler to do so than to try to combine them.) -_background_process_counts: Dict[str, int] = {} +_background_process_counts: dict[str, int] = {} # Set of all running background processes that became active active since the # last time metrics were scraped (i.e. background processes that performed some @@ -144,7 +141,7 @@ _background_process_counts: Dict[str, int] = {} # background processes stacking up behind a lock or linearizer, where we then # only need to iterate over and update metrics for the process that have # actually been active and can ignore the idle ones. -_background_processes_active_since_last_scrape: "Set[_BackgroundProcess]" = set() +_background_processes_active_since_last_scrape: "set[_BackgroundProcess]" = set() # A lock that covers the above set and dict _bg_metrics_lock = threading.Lock() @@ -531,7 +528,7 @@ class BackgroundProcessLoggingContext(LoggingContext): def __exit__( self, - type: Optional[Type[BaseException]], + type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index ea0887966a..9287747cea 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -26,13 +26,10 @@ from typing import ( Awaitable, Callable, Collection, - Dict, Generator, Iterable, - List, Mapping, Optional, - Tuple, TypeVar, Union, ) @@ -559,7 +556,7 @@ class ModuleApi: check_3pid_auth: Optional[CHECK_3PID_AUTH_CALLBACK] = None, on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None, auth_checkers: Optional[ - Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK] + dict[tuple[str, tuple[str, ...]], CHECK_AUTH_CALLBACK] ] = None, is_3pid_allowed: Optional[IS_3PID_ALLOWED_CALLBACK] = None, get_username_for_registration: Optional[ @@ -829,7 +826,7 @@ class ModuleApi: user_id = UserID.from_string(f"@{localpart}:{server_name}") return await self._store.get_profileinfo(user_id) - async def get_threepids_for_user(self, user_id: str) -> List[Dict[str, str]]: + async def get_threepids_for_user(self, user_id: str) -> list[dict[str, str]]: """Look up the threepids (email addresses and phone numbers) associated with the given Matrix user ID. 
@@ -865,8 +862,8 @@ class ModuleApi: self, localpart: str, displayname: Optional[str] = None, - emails: Optional[List[str]] = None, - ) -> Generator["defer.Deferred[Any]", Any, Tuple[str, str]]: + emails: Optional[list[str]] = None, + ) -> Generator["defer.Deferred[Any]", Any, tuple[str, str]]: """Registers a new user with given localpart and optional displayname, emails. Also returns an access token for the new user. @@ -896,7 +893,7 @@ class ModuleApi: self, localpart: str, displayname: Optional[str] = None, - emails: Optional[List[str]] = None, + emails: Optional[list[str]] = None, admin: bool = False, ) -> "defer.Deferred[str]": """Registers a new user with given localpart and optional displayname, emails. @@ -931,7 +928,7 @@ class ModuleApi: user_id: str, device_id: Optional[str] = None, initial_display_name: Optional[str] = None, - ) -> "defer.Deferred[Tuple[str, str, Optional[int], Optional[str]]]": + ) -> "defer.Deferred[tuple[str, str, Optional[int], Optional[str]]]": """Register a device for a user and generate an access token. Added in Synapse v1.2.0. @@ -1085,7 +1082,7 @@ class ModuleApi: ) async def invalidate_cache( - self, cached_func: CachedFunction, keys: Tuple[Any, ...] + self, cached_func: CachedFunction, keys: tuple[Any, ...] ) -> None: """Invalidate a cache entry of a cached function across workers. The cached function needs to be registered on all workers first with `register_cached_function`. @@ -1138,7 +1135,7 @@ class ModuleApi: @defer.inlineCallbacks def get_state_events_in_room( - self, room_id: str, types: Iterable[Tuple[str, Optional[str]]] + self, room_id: str, types: Iterable[tuple[str, Optional[str]]] ) -> Generator[defer.Deferred, Any, Iterable[EventBase]]: """Gets current state events for the given room. @@ -1170,7 +1167,7 @@ class ModuleApi: room_id: str, new_membership: str, content: Optional[JsonDict] = None, - remote_room_hosts: Optional[List[str]] = None, + remote_room_hosts: Optional[list[str]] = None, ) -> EventBase: """Updates the membership of a user to the given value. @@ -1346,7 +1343,7 @@ class ModuleApi: ) async def set_presence_for_users( - self, users: Mapping[str, Tuple[str, Optional[str]]] + self, users: Mapping[str, tuple[str, Optional[str]]] ) -> None: """ Update the internal presence state of users. @@ -1490,7 +1487,7 @@ class ModuleApi: content: JsonDict, tweaks: Optional[JsonMapping] = None, default_payload: Optional[JsonMapping] = None, - ) -> Dict[str, bool]: + ) -> dict[str, bool]: """Send an HTTP push notification that is forwarded to the registered push gateway for the specified user/device. @@ -1554,9 +1551,9 @@ class ModuleApi: def read_templates( self, - filenames: List[str], + filenames: list[str], custom_template_directory: Optional[str] = None, - ) -> List[jinja2.Template]: + ) -> list[jinja2.Template]: """Read and load the content of the template files at the given location. By default, Synapse will look for these templates in its configured template directory, but another directory to search in can be provided. @@ -1595,7 +1592,7 @@ class ModuleApi: async def get_user_ip_and_agents( self, user_id: str, since_ts: int = 0 - ) -> List[UserIpAndAgent]: + ) -> list[UserIpAndAgent]: """ Return the list of user IPs and agents for a user. 
@@ -1638,7 +1635,7 @@ class ModuleApi: async def get_room_state( self, room_id: str, - event_filter: Optional[Iterable[Tuple[str, Optional[str]]]] = None, + event_filter: Optional[Iterable[tuple[str, Optional[str]]]] = None, ) -> StateMap[EventBase]: """Returns the current state of the given room. @@ -1803,7 +1800,7 @@ class ModuleApi: await self._store.add_user_bound_threepid(user_id, medium, address, id_server) def check_push_rule_actions( - self, actions: List[Union[str, Dict[str, str]]] + self, actions: list[Union[str, dict[str, str]]] ) -> None: """Checks if the given push rule actions are valid according to the Matrix specification. @@ -1827,7 +1824,7 @@ class ModuleApi: scope: str, kind: str, rule_id: str, - actions: List[Union[str, Dict[str, str]]], + actions: list[Union[str, dict[str, str]]], ) -> None: """Changes the actions of an existing push rule for the given user. @@ -1866,7 +1863,7 @@ class ModuleApi: async def get_monthly_active_users_by_service( self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None - ) -> List[Tuple[str, str]]: + ) -> list[tuple[str, str]]: """Generates list of monthly active users and their services. Please see corresponding storage docstring for more details. @@ -1912,7 +1909,7 @@ class ModuleApi: return RoomAlias.from_string(room_alias_str) return None - async def lookup_room_alias(self, room_alias: str) -> Tuple[str, List[str]]: + async def lookup_room_alias(self, room_alias: str) -> tuple[str, list[str]]: """ Get the room ID associated with a room alias. @@ -1942,7 +1939,7 @@ class ModuleApi: config: JsonDict, ratelimit: bool = True, creator_join_profile: Optional[JsonDict] = None, - ) -> Tuple[str, Optional[str]]: + ) -> tuple[str, Optional[str]]: """Creates a new room. Added in Synapse v1.65.0. diff --git a/synapse/module_api/callbacks/account_validity_callbacks.py b/synapse/module_api/callbacks/account_validity_callbacks.py index a989249280..da01414d9a 100644 --- a/synapse/module_api/callbacks/account_validity_callbacks.py +++ b/synapse/module_api/callbacks/account_validity_callbacks.py @@ -20,7 +20,7 @@ # import logging -from typing import Awaitable, Callable, List, Optional, Tuple +from typing import Awaitable, Callable, Optional from twisted.web.http import Request @@ -33,15 +33,15 @@ ON_USER_LOGIN_CALLBACK = Callable[[str, Optional[str], Optional[str]], Awaitable # Temporary hooks to allow for a transition from `/_matrix/client` endpoints # to `/_synapse/client/account_validity`. See `register_callbacks` below. 
ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable] -ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]] +ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[tuple[bool, bool, int]]] ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable] class AccountValidityModuleApiCallbacks: def __init__(self) -> None: - self.is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = [] - self.on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = [] - self.on_user_login_callbacks: List[ON_USER_LOGIN_CALLBACK] = [] + self.is_user_expired_callbacks: list[IS_USER_EXPIRED_CALLBACK] = [] + self.on_user_registration_callbacks: list[ON_USER_REGISTRATION_CALLBACK] = [] + self.on_user_login_callbacks: list[ON_USER_LOGIN_CALLBACK] = [] self.on_legacy_send_mail_callback: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None self.on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None diff --git a/synapse/module_api/callbacks/media_repository_callbacks.py b/synapse/module_api/callbacks/media_repository_callbacks.py index 7d3aed9d66..7cb56e558b 100644 --- a/synapse/module_api/callbacks/media_repository_callbacks.py +++ b/synapse/module_api/callbacks/media_repository_callbacks.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional +from typing import TYPE_CHECKING, Awaitable, Callable, Optional from synapse.config.repository import MediaUploadLimit from synapse.types import JsonDict @@ -30,7 +30,7 @@ GET_MEDIA_CONFIG_FOR_USER_CALLBACK = Callable[[str], Awaitable[Optional[JsonDict IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK = Callable[[str, int], Awaitable[bool]] GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK = Callable[ - [str], Awaitable[Optional[List[MediaUploadLimit]]] + [str], Awaitable[Optional[list[MediaUploadLimit]]] ] ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK = Callable[ @@ -42,16 +42,16 @@ class MediaRepositoryModuleApiCallbacks: def __init__(self, hs: "HomeServer") -> None: self.server_name = hs.hostname self.clock = hs.get_clock() - self._get_media_config_for_user_callbacks: List[ + self._get_media_config_for_user_callbacks: list[ GET_MEDIA_CONFIG_FOR_USER_CALLBACK ] = [] - self._is_user_allowed_to_upload_media_of_size_callbacks: List[ + self._is_user_allowed_to_upload_media_of_size_callbacks: list[ IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK ] = [] - self._get_media_upload_limits_for_user_callbacks: List[ + self._get_media_upload_limits_for_user_callbacks: list[ GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK ] = [] - self._on_media_upload_limit_exceeded_callbacks: List[ + self._on_media_upload_limit_exceeded_callbacks: list[ ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK ] = [] @@ -117,7 +117,7 @@ class MediaRepositoryModuleApiCallbacks: async def get_media_upload_limits_for_user( self, user_id: str - ) -> Optional[List[MediaUploadLimit]]: + ) -> Optional[list[MediaUploadLimit]]: """ Get the first non-None list of MediaUploadLimits for the user from the registered callbacks. If a list is returned it will be sorted in descending order of duration. 
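The callbacks modules above share one shape: a `Callable[..., Awaitable[...]]` type alias (now written with built-in generics), a list of registered callbacks, and a loop that returns the first non-None result. A hedged sketch of that shape follows; `GET_LIMITS_CALLBACK` and `LimitCallbacks` are invented names, not Synapse's actual module API.

    import asyncio
    from typing import Awaitable, Callable, Optional

    # Alias in the same style as the *_CALLBACK aliases above, returning plain
    # ints rather than MediaUploadLimit objects.
    GET_LIMITS_CALLBACK = Callable[[str], Awaitable[Optional[list[int]]]]


    class LimitCallbacks:
        def __init__(self) -> None:
            self._callbacks: list[GET_LIMITS_CALLBACK] = []

        def register(self, callback: GET_LIMITS_CALLBACK) -> None:
            self._callbacks.append(callback)

        async def get_limits(self, user_id: str) -> Optional[list[int]]:
            # First non-None result wins; an empty list is a valid "no limit" answer.
            for callback in self._callbacks:
                res = await callback(user_id)
                if res is not None:
                    return res
            return None


    async def main() -> None:
        callbacks = LimitCallbacks()

        async def module_callback(user_id: str) -> Optional[list[int]]:
            return [1024] if user_id.startswith("@alice") else None

        callbacks.register(module_callback)
        print(await callbacks.get_limits("@alice:example.com"))  # [1024]


    asyncio.run(main())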
@@ -128,7 +128,7 @@ class MediaRepositoryModuleApiCallbacks: name=f"{callback.__module__}.{callback.__qualname__}", server_name=self.server_name, ): - res: Optional[List[MediaUploadLimit]] = await delay_cancellation( + res: Optional[list[MediaUploadLimit]] = await delay_cancellation( callback(user_id) ) if res is not None: # to allow [] to be returned meaning no limit diff --git a/synapse/module_api/callbacks/ratelimit_callbacks.py b/synapse/module_api/callbacks/ratelimit_callbacks.py index a580ea7d7c..6afcda1216 100644 --- a/synapse/module_api/callbacks/ratelimit_callbacks.py +++ b/synapse/module_api/callbacks/ratelimit_callbacks.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional +from typing import TYPE_CHECKING, Awaitable, Callable, Optional import attr @@ -45,7 +45,7 @@ class RatelimitModuleApiCallbacks: def __init__(self, hs: "HomeServer") -> None: self.server_name = hs.hostname self.clock = hs.get_clock() - self._get_ratelimit_override_for_user_callbacks: List[ + self._get_ratelimit_override_for_user_callbacks: list[ GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK ] = [] diff --git a/synapse/module_api/callbacks/spamchecker_callbacks.py b/synapse/module_api/callbacks/spamchecker_callbacks.py index 428e733979..4c331c4210 100644 --- a/synapse/module_api/callbacks/spamchecker_callbacks.py +++ b/synapse/module_api/callbacks/spamchecker_callbacks.py @@ -29,10 +29,8 @@ from typing import ( Awaitable, Callable, Collection, - List, Literal, Optional, - Tuple, Union, cast, ) @@ -63,7 +61,7 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], # Deprecated bool, ] @@ -83,7 +81,7 @@ USER_MAY_JOIN_ROOM_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], # Deprecated bool, ] @@ -99,7 +97,7 @@ USER_MAY_INVITE_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], # Deprecated bool, ] @@ -115,7 +113,7 @@ FEDERATED_USER_MAY_INVITE_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], # Deprecated bool, ] @@ -131,7 +129,7 @@ USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], # Deprecated bool, ] @@ -144,7 +142,7 @@ USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE = Union[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], # Deprecated bool, ] @@ -167,7 +165,7 @@ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. 
- Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], # Deprecated bool, ] @@ -183,7 +181,7 @@ USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], # Deprecated bool, ] @@ -199,7 +197,7 @@ USER_MAY_SEND_STATE_EVENT_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], ] ], ] @@ -211,7 +209,7 @@ LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ Optional[dict], Optional[str], - Collection[Tuple[str, str]], + Collection[tuple[str, str]], ], Awaitable[RegistrationBehaviour], ] @@ -219,7 +217,7 @@ CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ Optional[dict], Optional[str], - Collection[Tuple[str, str]], + Collection[tuple[str, str]], Optional[str], ], Awaitable[RegistrationBehaviour], @@ -234,7 +232,7 @@ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], # Deprecated bool, ] @@ -245,7 +243,7 @@ CHECK_LOGIN_FOR_SPAM_CALLBACK = Callable[ str, Optional[str], Optional[str], - Collection[Tuple[Optional[str], str]], + Collection[tuple[Optional[str], str]], Optional[str], ], Awaitable[ @@ -256,7 +254,7 @@ CHECK_LOGIN_FOR_SPAM_CALLBACK = Callable[ # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], + tuple[Codes, JsonDict], ] ], ] @@ -266,7 +264,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: """Wrapper that loads spam checkers configured using the old configuration, and registers the spam checker hooks they implement. 
""" - spam_checkers: List[Any] = [] + spam_checkers: list[Any] = [] api = hs.get_module_api() for module, config in hs.config.spamchecker.spam_checkers: # Older spam checkers don't accept the `api` argument, so we @@ -312,7 +310,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: def wrapper( email_threepid: Optional[dict], username: Optional[str], - request_info: Collection[Tuple[str, str]], + request_info: Collection[tuple[str, str]], auth_provider_id: Optional[str], ) -> Union[Awaitable[RegistrationBehaviour], RegistrationBehaviour]: # Assertion required because mypy can't prove we won't @@ -359,36 +357,36 @@ class SpamCheckerModuleApiCallbacks: self.server_name = hs.hostname self.clock = hs.get_clock() - self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = [] - self._should_drop_federated_event_callbacks: List[ + self._check_event_for_spam_callbacks: list[CHECK_EVENT_FOR_SPAM_CALLBACK] = [] + self._should_drop_federated_event_callbacks: list[ SHOULD_DROP_FEDERATED_EVENT_CALLBACK ] = [] - self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = [] - self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = [] - self._federated_user_may_invite_callbacks: List[ + self._user_may_join_room_callbacks: list[USER_MAY_JOIN_ROOM_CALLBACK] = [] + self._user_may_invite_callbacks: list[USER_MAY_INVITE_CALLBACK] = [] + self._federated_user_may_invite_callbacks: list[ FEDERATED_USER_MAY_INVITE_CALLBACK ] = [] - self._user_may_send_3pid_invite_callbacks: List[ + self._user_may_send_3pid_invite_callbacks: list[ USER_MAY_SEND_3PID_INVITE_CALLBACK ] = [] - self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = [] - self._user_may_send_state_event_callbacks: List[ + self._user_may_create_room_callbacks: list[USER_MAY_CREATE_ROOM_CALLBACK] = [] + self._user_may_send_state_event_callbacks: list[ USER_MAY_SEND_STATE_EVENT_CALLBACK ] = [] - self._user_may_create_room_alias_callbacks: List[ + self._user_may_create_room_alias_callbacks: list[ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK ] = [] - self._user_may_publish_room_callbacks: List[USER_MAY_PUBLISH_ROOM_CALLBACK] = [] - self._check_username_for_spam_callbacks: List[ + self._user_may_publish_room_callbacks: list[USER_MAY_PUBLISH_ROOM_CALLBACK] = [] + self._check_username_for_spam_callbacks: list[ CHECK_USERNAME_FOR_SPAM_CALLBACK ] = [] - self._check_registration_for_spam_callbacks: List[ + self._check_registration_for_spam_callbacks: list[ CHECK_REGISTRATION_FOR_SPAM_CALLBACK ] = [] - self._check_media_file_for_spam_callbacks: List[ + self._check_media_file_for_spam_callbacks: list[ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK ] = [] - self._check_login_for_spam_callbacks: List[CHECK_LOGIN_FOR_SPAM_CALLBACK] = [] + self._check_login_for_spam_callbacks: list[CHECK_LOGIN_FOR_SPAM_CALLBACK] = [] def register_callbacks( self, @@ -471,7 +469,7 @@ class SpamCheckerModuleApiCallbacks: @trace async def check_event_for_spam( self, event: "synapse.events.EventBase" - ) -> Union[Tuple[Codes, JsonDict], str]: + ) -> Union[tuple[Codes, JsonDict], str]: """Checks if a given event is considered "spammy" by this server. If the server considers an event spammy, then it will be rejected if @@ -561,7 +559,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_join_room( self, user_id: str, room_id: str, is_invited: bool - ) -> Union[Tuple[Codes, JsonDict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, JsonDict], Literal["NOT_SPAM"]]: """Checks if a given users is allowed to join a room. 
Not called when a user creates a room. @@ -605,7 +603,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_invite( self, inviter_userid: str, invitee_userid: str, room_id: str - ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: """Checks if a given user may send an invite Args: @@ -650,7 +648,7 @@ class SpamCheckerModuleApiCallbacks: async def federated_user_may_invite( self, event: "synapse.events.EventBase" - ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: """Checks if a given user may send an invite Args: @@ -691,7 +689,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_send_3pid_invite( self, inviter_userid: str, medium: str, address: str, room_id: str - ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: """Checks if a given user may invite a given threepid into the room Note that if the threepid is already associated with a Matrix user ID, Synapse @@ -739,7 +737,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_create_room( self, userid: str, room_config: JsonDict - ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: """Checks if a given user may create a room Args: @@ -805,7 +803,7 @@ class SpamCheckerModuleApiCallbacks: event_type: str, state_key: str, content: JsonDict, - ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: """Checks if a given user may create a room with a given visibility Args: user_id: The ID of the user attempting to create a room @@ -838,7 +836,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_create_room_alias( self, userid: str, room_alias: RoomAlias - ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: """Checks if a given user may create a room alias Args: @@ -876,7 +874,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_publish_room( self, userid: str, room_id: str - ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: """Checks if a given user may publish a room to the directory Args: @@ -964,7 +962,7 @@ class SpamCheckerModuleApiCallbacks: self, email_threepid: Optional[dict], username: Optional[str], - request_info: Collection[Tuple[str, str]], + request_info: Collection[tuple[str, str]], auth_provider_id: Optional[str] = None, ) -> RegistrationBehaviour: """Checks if we should allow the given registration request. @@ -1000,7 +998,7 @@ class SpamCheckerModuleApiCallbacks: @trace async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo - ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: """Checks if a piece of newly uploaded media should be blocked. This will be called for local uploads, downloads of remote media, each @@ -1062,9 +1060,9 @@ class SpamCheckerModuleApiCallbacks: user_id: str, device_id: Optional[str], initial_display_name: Optional[str], - request_info: Collection[Tuple[Optional[str], str]], + request_info: Collection[tuple[Optional[str], str]], auth_provider_id: Optional[str] = None, - ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: """Checks if we should allow the given registration request. 
Args: diff --git a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py index 9f7a04372d..2b886cbabb 100644 --- a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py +++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional from twisted.internet.defer import CancelledError @@ -37,7 +37,7 @@ logger = logging.getLogger(__name__) CHECK_EVENT_ALLOWED_CALLBACK = Callable[ - [EventBase, StateMap[EventBase]], Awaitable[Tuple[bool, Optional[dict]]] + [EventBase, StateMap[EventBase]], Awaitable[tuple[bool, Optional[dict]]] ] ON_CREATE_ROOM_CALLBACK = Callable[[Requester, dict, bool], Awaitable] CHECK_THREEPID_CAN_BE_INVITED_CALLBACK = Callable[ @@ -93,7 +93,7 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: async def wrap_check_event_allowed( event: EventBase, state_events: StateMap[EventBase], - ) -> Tuple[bool, Optional[dict]]: + ) -> tuple[bool, Optional[dict]]: # Assertion required because mypy can't prove we won't change # `f` back to `None`. See # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions @@ -159,30 +159,30 @@ class ThirdPartyEventRulesModuleApiCallbacks: self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() - self._check_event_allowed_callbacks: List[CHECK_EVENT_ALLOWED_CALLBACK] = [] - self._on_create_room_callbacks: List[ON_CREATE_ROOM_CALLBACK] = [] - self._check_threepid_can_be_invited_callbacks: List[ + self._check_event_allowed_callbacks: list[CHECK_EVENT_ALLOWED_CALLBACK] = [] + self._on_create_room_callbacks: list[ON_CREATE_ROOM_CALLBACK] = [] + self._check_threepid_can_be_invited_callbacks: list[ CHECK_THREEPID_CAN_BE_INVITED_CALLBACK ] = [] - self._check_visibility_can_be_modified_callbacks: List[ + self._check_visibility_can_be_modified_callbacks: list[ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = [] - self._on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = [] - self._check_can_shutdown_room_callbacks: List[ + self._on_new_event_callbacks: list[ON_NEW_EVENT_CALLBACK] = [] + self._check_can_shutdown_room_callbacks: list[ CHECK_CAN_SHUTDOWN_ROOM_CALLBACK ] = [] - self._check_can_deactivate_user_callbacks: List[ + self._check_can_deactivate_user_callbacks: list[ CHECK_CAN_DEACTIVATE_USER_CALLBACK ] = [] - self._on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = [] - self._on_user_deactivation_status_changed_callbacks: List[ + self._on_profile_update_callbacks: list[ON_PROFILE_UPDATE_CALLBACK] = [] + self._on_user_deactivation_status_changed_callbacks: list[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK ] = [] - self._on_threepid_bind_callbacks: List[ON_THREEPID_BIND_CALLBACK] = [] - self._on_add_user_third_party_identifier_callbacks: List[ + self._on_threepid_bind_callbacks: list[ON_THREEPID_BIND_CALLBACK] = [] + self._on_add_user_third_party_identifier_callbacks: list[ ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK ] = [] - self._on_remove_user_third_party_identifier_callbacks: List[ + self._on_remove_user_third_party_identifier_callbacks: list[ ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK ] = [] @@ -261,7 +261,7 @@ class ThirdPartyEventRulesModuleApiCallbacks: self, event: EventBase, context: UnpersistedEventContextBase, - ) -> Tuple[bool, Optional[dict]]: + ) -> 
tuple[bool, Optional[dict]]: """Check if a provided event should be allowed in the given context. The module can return: diff --git a/synapse/notifier.py b/synapse/notifier.py index 9169f50c4d..4a75d07e37 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -25,14 +25,10 @@ from typing import ( Awaitable, Callable, Collection, - Dict, Iterable, - List, Literal, Mapping, Optional, - Set, - Tuple, TypeVar, Union, overload, @@ -148,7 +144,7 @@ class _NotifierUserStream: self.last_notified_ms = time_now_ms # Set of listeners that we need to wake up when there has been a change. - self.listeners: Set[Deferred[StreamToken]] = set() + self.listeners: set[Deferred[StreamToken]] = set() def update_and_fetch_deferreds( self, @@ -215,7 +211,7 @@ class _NotifierUserStream: @attr.s(slots=True, frozen=True, auto_attribs=True) class EventStreamResult: - events: List[Union[JsonDict, EventBase]] + events: list[Union[JsonDict, EventBase]] start_token: StreamToken end_token: StreamToken @@ -244,25 +240,25 @@ class Notifier: UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 def __init__(self, hs: "HomeServer"): - self.user_to_user_stream: Dict[str, _NotifierUserStream] = {} - self.room_to_user_streams: Dict[str, Set[_NotifierUserStream]] = {} + self.user_to_user_stream: dict[str, _NotifierUserStream] = {} + self.room_to_user_streams: dict[str, set[_NotifierUserStream]] = {} self.hs = hs self.server_name = hs.hostname self._storage_controllers = hs.get_storage_controllers() self.event_sources = hs.get_event_sources() self.store = hs.get_datastores().main - self.pending_new_room_events: List[_PendingRoomEventEntry] = [] + self.pending_new_room_events: list[_PendingRoomEventEntry] = [] self._replication_notifier = hs.get_replication_notifier() - self._new_join_in_room_callbacks: List[Callable[[str, str], None]] = [] + self._new_join_in_room_callbacks: list[Callable[[str, str], None]] = [] self._federation_client = hs.get_federation_http_client() self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules # List of callbacks to be notified when a lock is released - self._lock_released_callback: List[Callable[[str, str, str], None]] = [] + self._lock_released_callback: list[Callable[[str, str, str], None]] = [] self.reactor = hs.get_reactor() self.clock = hs.get_clock() @@ -283,10 +279,10 @@ class Notifier: # when rendering the metrics page, which is likely once per minute at # most when scraping it. # - # Ideally, we'd use `Mapping[Tuple[str], int]` here but mypy doesn't like it. + # Ideally, we'd use `Mapping[tuple[str], int]` here but mypy doesn't like it. # This is close enough and better than a type ignore. 
- def count_listeners() -> Mapping[Tuple[str, ...], int]: - all_user_streams: Set[_NotifierUserStream] = set() + def count_listeners() -> Mapping[tuple[str, ...], int]: + all_user_streams: set[_NotifierUserStream] = set() for streams in list(self.room_to_user_streams.values()): all_user_streams |= streams @@ -338,7 +334,7 @@ class Notifier: async def on_new_room_events( self, - events_and_pos: List[Tuple[EventBase, PersistedEventPosition]], + events_and_pos: list[tuple[EventBase, PersistedEventPosition]], max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ) -> None: @@ -373,7 +369,7 @@ class Notifier: time_now_ms = self.clock.time_msec() current_token = self.event_sources.get_current_token() - listeners: List["Deferred[StreamToken]"] = [] + listeners: list["Deferred[StreamToken]"] = [] for user_stream in user_streams: try: listeners.extend( @@ -397,7 +393,7 @@ class Notifier: async def notify_new_room_events( self, - event_entries: List[Tuple[_PendingRoomEventEntry, str]], + event_entries: list[tuple[_PendingRoomEventEntry, str]], max_room_stream_token: RoomStreamToken, ) -> None: """Used by handlers to inform the notifier something has happened @@ -453,8 +449,8 @@ class Notifier: pending = self.pending_new_room_events self.pending_new_room_events = [] - users: Set[UserID] = set() - rooms: Set[str] = set() + users: set[UserID] = set() + rooms: set[str] = set() for entry in pending: if entry.event_pos.persisted_after(max_room_stream_token): @@ -560,7 +556,7 @@ class Notifier: users = users or [] rooms = rooms or [] - user_streams: Set[_NotifierUserStream] = set() + user_streams: set[_NotifierUserStream] = set() log_kv( { @@ -593,7 +589,7 @@ class Notifier: time_now_ms = self.clock.time_msec() current_token = self.event_sources.get_current_token() - listeners: List["Deferred[StreamToken]"] = [] + listeners: list["Deferred[StreamToken]"] = [] for user_stream in user_streams: try: listeners.extend( @@ -771,7 +767,7 @@ class Notifier: # The events fetched from each source are a JsonDict, EventBase, or # UserPresenceState, but see below for UserPresenceState being # converted to JsonDict. - events: List[Union[JsonDict, EventBase]] = [] + events: list[Union[JsonDict, EventBase]] = [] end_token = from_token for keyname, source in self.event_sources.sources.get_sources(): @@ -871,7 +867,7 @@ class Notifier: async def _get_room_ids( self, user: UserID, explicit_room_id: Optional[str] - ) -> Tuple[StrCollection, bool]: + ) -> tuple[StrCollection, bool]: joined_room_ids = await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: @@ -960,7 +956,7 @@ class ReplicationNotifier: This is separate from the notifier to avoid circular dependencies. """ - _replication_callbacks: List[Callable[[], None]] = attr.Factory(list) + _replication_callbacks: list[Callable[[], None]] = attr.Factory(list) def add_replication_callback(self, cb: Callable[[], None]) -> None: """Add a callback that will be called when some new data is available. 
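The notifier hunks above annotate attrs classes such as `EventStreamResult` and `ReplicationNotifier` with the same built-in generics (`list[...]`, `set[...]`, `tuple[...]`). A small sketch of that attrs style, assuming the `attrs` package is available; `StreamWindow` and `CallbackRegistry` are invented stand-ins rather than Synapse classes.

    from typing import Callable

    import attr


    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class StreamWindow:
        """A frozen value object, in the style of EventStreamResult above."""

        events: list[str]
        start_token: str
        end_token: str


    @attr.s(auto_attribs=True)
    class CallbackRegistry:
        """Collects zero-argument callbacks, in the style of ReplicationNotifier above."""

        _callbacks: list[Callable[[], None]] = attr.Factory(list)

        def add(self, cb: Callable[[], None]) -> None:
            self._callbacks.append(cb)

        def notify(self) -> None:
            for cb in self._callbacks:
                cb()


    window = StreamWindow(events=["$event1"], start_token="s1", end_token="s2")
    registry = CallbackRegistry()
    registry.add(lambda: print("woken up to", window.end_token))
    registry.notify()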
diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 7bc99bd785..552af8e14a 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -94,7 +94,7 @@ The Pusher instance also calls out to various utilities for generating payloads """ import abc -from typing import TYPE_CHECKING, Any, Dict, Optional +from typing import TYPE_CHECKING, Any, Optional import attr @@ -131,7 +131,7 @@ class PusherConfig: # while the "set_device_id_for_pushers" background update is running. access_token: Optional[int] - def as_dict(self) -> Dict[str, Any]: + def as_dict(self) -> dict[str, Any]: """Information that can be retrieved about a pusher after creation.""" return { "app_display_name": self.app_display_name, diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index ea9169aef0..9fcd7fdc6e 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -24,13 +24,9 @@ from typing import ( TYPE_CHECKING, Any, Collection, - Dict, - FrozenSet, - List, Mapping, Optional, Sequence, - Tuple, Union, cast, ) @@ -237,7 +233,7 @@ class BulkPushRuleEvaluator: event: EventBase, context: EventContext, event_id_to_event: Mapping[str, EventBase], - ) -> Tuple[dict, Optional[int]]: + ) -> tuple[dict, Optional[int]]: """ Given an event and an event context, get the power level event relevant to the event and the power level of the sender of the event. @@ -309,13 +305,13 @@ class BulkPushRuleEvaluator: async def _related_events( self, event: EventBase - ) -> Dict[str, Dict[str, JsonValue]]: + ) -> dict[str, dict[str, JsonValue]]: """Fetches the related events for 'event'. Sets the im.vector.is_falling_back key if the event is from a fallback relation Returns: Mapping of relation type to flattened events. """ - related_events: Dict[str, Dict[str, JsonValue]] = {} + related_events: dict[str, dict[str, JsonValue]] = {} if self._related_event_match_enabled: related_event_id = event.content.get("m.relates_to", {}).get("event_id") relation_type = event.content.get("m.relates_to", {}).get("rel_type") @@ -352,7 +348,7 @@ class BulkPushRuleEvaluator: return related_events async def action_for_events_by_user( - self, events_and_context: List[EventPersistencePair] + self, events_and_context: list[EventPersistencePair] ) -> None: """Given a list of events and their associated contexts, evaluate the push rules for each event, check if the message should increment the unread count, and @@ -394,7 +390,7 @@ class BulkPushRuleEvaluator: count_as_unread = _should_count_as_unread(event, context) rules_by_user = await self._get_rules_for_event(event) - actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {} + actions_by_user: dict[str, Collection[Union[Mapping, str]]] = {} # Gather a bunch of info in parallel. 
# @@ -409,7 +405,7 @@ class BulkPushRuleEvaluator: profiles, ) = await make_deferred_yieldable( cast( - "Deferred[Tuple[int, Tuple[dict, Optional[int]], Dict[str, Dict[str, JsonValue]], Mapping[str, ProfileInfo]]]", + "Deferred[tuple[int, tuple[dict, Optional[int]], dict[str, dict[str, JsonValue]], Mapping[str, ProfileInfo]]]", gather_results( ( run_in_background( # type: ignore[call-overload] @@ -481,7 +477,7 @@ class BulkPushRuleEvaluator: self.hs.config.experimental.msc4306_enabled, ) - msc4306_thread_subscribers: Optional[FrozenSet[str]] = None + msc4306_thread_subscribers: Optional[frozenset[str]] = None if self.hs.config.experimental.msc4306_enabled and thread_id != MAIN_TIMELINE: # pull out, in batch, all local subscribers to this thread # (in the common case, they will all be getting processed for push @@ -556,9 +552,9 @@ class BulkPushRuleEvaluator: ) -MemberMap = Dict[str, Optional[EventIdMembership]] -Rule = Dict[str, dict] -RulesByUser = Dict[str, List[Rule]] +MemberMap = dict[str, Optional[EventIdMembership]] +Rule = dict[str, dict] +RulesByUser = dict[str, list[Rule]] StateGroup = Union[object, int] @@ -572,9 +568,9 @@ def _is_simple_value(value: Any) -> bool: def _flatten_dict( d: Union[EventBase, Mapping[str, Any]], - prefix: Optional[List[str]] = None, - result: Optional[Dict[str, JsonValue]] = None, -) -> Dict[str, JsonValue]: + prefix: Optional[list[str]] = None, + result: Optional[dict[str, JsonValue]] = None, +) -> dict[str, JsonValue]: """ Given a JSON dictionary (or event) which might contain sub dictionaries, flatten it into a single layer dictionary by combining the keys & sub-keys. diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 4f647491f1..fd1758db9d 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -20,7 +20,7 @@ # import copy -from typing import Any, Dict, List, Optional +from typing import Any, Optional from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP from synapse.synapse_rust.push import FilteredPushRules, PushRule @@ -29,11 +29,11 @@ from synapse.types import UserID def format_push_rules_for_user( user: UserID, ruleslist: FilteredPushRules -) -> Dict[str, Dict[str, List[Dict[str, Any]]]]: +) -> dict[str, dict[str, list[dict[str, Any]]]]: """Converts a list of rawrules and a enabled map into nested dictionaries to match the Matrix client-server format for push rules""" - rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = {"global": {}} + rules: dict[str, dict[str, list[dict[str, Any]]]] = {"global": {}} rules["global"] = _add_empty_priority_class_arrays(rules["global"]) @@ -70,7 +70,7 @@ def format_push_rules_for_user( return rules -def _convert_type_to_value(rule_or_cond: Dict[str, Any], user: UserID) -> None: +def _convert_type_to_value(rule_or_cond: dict[str, Any], user: UserID) -> None: for type_key in ("pattern", "value"): type_value = rule_or_cond.pop(f"{type_key}_type", None) if type_value == "user_id": @@ -79,14 +79,14 @@ def _convert_type_to_value(rule_or_cond: Dict[str, Any], user: UserID) -> None: rule_or_cond[type_key] = user.localpart -def _add_empty_priority_class_arrays(d: Dict[str, list]) -> Dict[str, list]: +def _add_empty_priority_class_arrays(d: dict[str, list]) -> dict[str, list]: for pc in PRIORITY_CLASS_MAP.keys(): d[pc] = [] return d -def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]: - templaterule: Dict[str, Any] +def _rule_to_template(rule: PushRule) -> Optional[dict[str, Any]]: + templaterule: dict[str, Any] 
unscoped_rule_id = _rule_id_from_namespaced(rule.rule_id) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 1484bc8fc0..83823c2284 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Dict, List, Optional +from typing import TYPE_CHECKING, Optional from twisted.internet.error import AlreadyCalled, AlreadyCancelled from twisted.internet.interfaces import IDelayedCall @@ -71,7 +71,7 @@ class EmailPusher(Pusher): self.store = self.hs.get_datastores().main self.email = pusher_config.pushkey self.timed_call: Optional[IDelayedCall] = None - self.throttle_params: Dict[str, ThrottleParams] = {} + self.throttle_params: dict[str, ThrottleParams] = {} self._inited = False self._is_processing = False @@ -324,7 +324,7 @@ class EmailPusher(Pusher): ) async def send_notification( - self, push_actions: List[EmailPushAction], reason: EmailReason + self, push_actions: list[EmailPushAction], reason: EmailReason ) -> None: logger.info("Sending notif email for user %r", self.user_id) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 5cac5de8cb..8df106b859 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -21,7 +21,7 @@ import logging import random import urllib.parse -from typing import TYPE_CHECKING, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Optional, Union from prometheus_client import Counter @@ -68,7 +68,7 @@ http_badges_failed_counter = Counter( ) -def tweaks_for_actions(actions: List[Union[str, Dict]]) -> JsonMapping: +def tweaks_for_actions(actions: list[Union[str, dict]]) -> JsonMapping: """ Converts a list of actions into a `tweaks` dict (which can then be passed to the push gateway). @@ -396,7 +396,7 @@ class HttpPusher(Pusher): content: JsonDict, tweaks: Optional[JsonMapping] = None, default_payload: Optional[JsonMapping] = None, - ) -> Union[bool, List[str]]: + ) -> Union[bool, list[str]]: """Send a notification to the registered push gateway, with `content` being the content of the `notification` top property specified in the spec. Note that the `devices` property will be added with device-specific @@ -453,7 +453,7 @@ class HttpPusher(Pusher): event: EventBase, tweaks: JsonMapping, badge: int, - ) -> Union[bool, List[str]]: + ) -> Union[bool, list[str]]: """Send a notification to the registered push gateway by building it from an event. 
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index d76cc8237b..3dac61aed5 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -21,7 +21,7 @@ import logging import urllib.parse -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, TypeVar +from typing import TYPE_CHECKING, Iterable, Optional, TypeVar import bleach import jinja2 @@ -287,7 +287,7 @@ class Mailer: notif_events = await self.store.get_events([pa.event_id for pa in push_actions]) - notifs_by_room: Dict[str, List[EmailPushAction]] = {} + notifs_by_room: dict[str, list[EmailPushAction]] = {} for pa in push_actions: notifs_by_room.setdefault(pa.room_id, []).append(pa) @@ -317,7 +317,7 @@ class Mailer: # actually sort our so-called rooms_in_order list, most recent room first rooms_in_order.sort(key=lambda r: -(notifs_by_room[r][-1].received_ts or 0)) - rooms: List[RoomVars] = [] + rooms: list[RoomVars] = [] for r in rooms_in_order: roomvars = await self._get_room_vars( @@ -417,7 +417,7 @@ class Mailer: room_id: str, user_id: str, notifs: Iterable[EmailPushAction], - notif_events: Dict[str, EventBase], + notif_events: dict[str, EventBase], room_state_ids: StateMap[str], ) -> RoomVars: """ @@ -665,9 +665,9 @@ class Mailer: async def _make_summary_text_single_room( self, room_id: str, - notifs: List[EmailPushAction], + notifs: list[EmailPushAction], room_state_ids: StateMap[str], - notif_events: Dict[str, EventBase], + notif_events: dict[str, EventBase], user_id: str, ) -> str: """ @@ -781,9 +781,9 @@ class Mailer: async def _make_summary_text( self, - notifs_by_room: Dict[str, List[EmailPushAction]], - room_state_ids: Dict[str, StateMap[str]], - notif_events: Dict[str, EventBase], + notifs_by_room: dict[str, list[EmailPushAction]], + room_state_ids: dict[str, StateMap[str]], + notif_events: dict[str, EventBase], reason: EmailReason, ) -> str: """ @@ -814,9 +814,9 @@ class Mailer: async def _make_summary_text_from_member_events( self, room_id: str, - notifs: List[EmailPushAction], + notifs: list[EmailPushAction], room_state_ids: StateMap[str], - notif_events: Dict[str, EventBase], + notif_events: dict[str, EventBase], ) -> str: """ Make a summary text for the email when only a single room has notifications. 
@@ -995,7 +995,7 @@ def safe_text(raw_text: str) -> Markup: ) -def deduped_ordered_list(it: Iterable[T]) -> List[T]: +def deduped_ordered_list(it: Iterable[T]) -> list[T]: seen = set() ret = [] for item in it: diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index 1faa57e9f5..2f32e18b9a 100644 --- a/synapse/push/presentable_names.py +++ b/synapse/push/presentable_names.py @@ -21,7 +21,7 @@ import logging import re -from typing import TYPE_CHECKING, Dict, Iterable, Optional +from typing import TYPE_CHECKING, Iterable, Optional from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase @@ -205,8 +205,8 @@ def name_from_member_event(member_event: EventBase) -> str: return member_event.state_key -def _state_as_two_level_dict(state: StateMap[str]) -> Dict[str, Dict[str, str]]: - ret: Dict[str, Dict[str, str]] = {} +def _state_as_two_level_dict(state: StateMap[str]) -> dict[str, dict[str, str]]: + ret: dict[str, dict[str, str]] = {} for k, v in state.items(): ret.setdefault(k[0], {})[k[1]] = v return ret diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index 3f3e4a9234..8e2ff2bcb4 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Dict from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase @@ -56,8 +55,8 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) - async def get_context_for_event( storage: StorageControllers, ev: EventBase, user_id: str -) -> Dict[str, str]: - ctx: Dict[str, str] = {} +) -> dict[str, str]: + ctx: dict[str, str] = {} if ev.internal_metadata.outlier: # We don't have state for outliers, so we can't compute the context diff --git a/synapse/push/push_types.py b/synapse/push/push_types.py index 57fa926a46..e1678cd717 100644 --- a/synapse/push/push_types.py +++ b/synapse/push/push_types.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Optional, TypedDict +from typing import Optional, TypedDict class EmailReason(TypedDict, total=False): @@ -91,7 +91,7 @@ class NotifVars(TypedDict): link: str ts: Optional[int] - messages: List[MessageVars] + messages: list[MessageVars] class RoomVars(TypedDict): @@ -110,7 +110,7 @@ class RoomVars(TypedDict): title: Optional[str] hash: int invite: bool - notifs: List[NotifVars] + notifs: list[NotifVars] link: str avatar_url: Optional[str] @@ -137,5 +137,5 @@ class TemplateVars(TypedDict, total=False): user_display_name: str unsubscribe_link: str summary_text: str - rooms: List[RoomVars] + rooms: list[RoomVars] reason: EmailReason diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index 9a5dd7a9d4..17238c95c0 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Callable, Dict, Optional +from typing import TYPE_CHECKING, Callable, Optional from synapse.push import Pusher, PusherConfig from synapse.push.emailpusher import EmailPusher @@ -38,13 +38,13 @@ class PusherFactory: self.hs = hs self.config = hs.config - self.pusher_types: Dict[str, Callable[[HomeServer, PusherConfig], Pusher]] = { + self.pusher_types: dict[str, Callable[[HomeServer, PusherConfig], Pusher]] = { "http": HttpPusher } logger.info("email enable notifs: %r", hs.config.email.email_enable_notifs) if hs.config.email.email_enable_notifs: - 
self.mailers: Dict[str, Mailer] = {} + self.mailers: dict[str, Mailer] = {} self._notif_template_html = hs.config.email.email_notif_template_html self._notif_template_text = hs.config.email.email_notif_template_text diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 977c55b683..6b70de976a 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Dict, Iterable, Optional +from typing import TYPE_CHECKING, Iterable, Optional from prometheus_client import Gauge @@ -100,7 +100,7 @@ class PusherPool: self._last_room_stream_id_seen = self.store.get_room_max_stream_ordering() # map from user id to app_id:pushkey to pusher - self.pushers: Dict[str, Dict[str, Pusher]] = {} + self.pushers: dict[str, dict[str, Pusher]] = {} self._account_validity_handler = hs.get_account_validity_handler() diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 0850a99e0c..d76b40cf39 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -23,7 +23,7 @@ import logging import re import urllib.parse from inspect import signature -from typing import TYPE_CHECKING, Any, Awaitable, Callable, ClassVar, Dict, List, Tuple +from typing import TYPE_CHECKING, Any, Awaitable, Callable, ClassVar from prometheus_client import Counter, Gauge @@ -112,7 +112,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): """ NAME: str = abc.abstractproperty() # type: ignore - PATH_ARGS: Tuple[str, ...] = abc.abstractproperty() # type: ignore + PATH_ARGS: tuple[str, ...] = abc.abstractproperty() # type: ignore METHOD = "POST" CACHE = True RETRY_ON_TIMEOUT = True @@ -187,7 +187,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): @abc.abstractmethod async def _handle_request( self, request: Request, content: JsonDict, **kwargs: Any - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """Handle incoming request. This is called with the request object and PATH_ARGS. @@ -292,7 +292,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): "/".join(url_args), ) - headers: Dict[bytes, List[bytes]] = {} + headers: dict[bytes, list[bytes]] = {} # Add an authorization header, if configured. if replication_secret: headers[b"Authorization"] = [b"Bearer " + replication_secret] @@ -403,7 +403,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): async def _check_auth_and_handle( self, request: SynapseRequest, **kwargs: Any - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """Called on new incoming requests when caching is enabled. Checks if there is a cached response for the request and returns that, otherwise calls `_handle_request` and caches its response. 
diff --git a/synapse/replication/http/account_data.py b/synapse/replication/http/account_data.py index b6eac153ba..560973b916 100644 --- a/synapse/replication/http/account_data.py +++ b/synapse/replication/http/account_data.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -68,7 +68,7 @@ class ReplicationAddUserAccountDataRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str, account_data_type: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: max_stream_id = await self.handler.add_account_data_for_user( user_id, account_data_type, content["content"] ) @@ -106,7 +106,7 @@ class ReplicationRemoveUserAccountDataRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str, account_data_type: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: max_stream_id = await self.handler.remove_account_data_for_user( user_id, account_data_type ) @@ -153,7 +153,7 @@ class ReplicationAddRoomAccountDataRestServlet(ReplicationEndpoint): user_id: str, room_id: str, account_data_type: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: max_stream_id = await self.handler.add_account_data_to_room( user_id, room_id, account_data_type, content["content"] ) @@ -196,7 +196,7 @@ class ReplicationRemoveRoomAccountDataRestServlet(ReplicationEndpoint): user_id: str, room_id: str, account_data_type: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: max_stream_id = await self.handler.remove_account_data_for_room( user_id, room_id, account_data_type ) @@ -238,7 +238,7 @@ class ReplicationAddTagRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str, room_id: str, tag: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: max_stream_id = await self.handler.add_tag_to_room( user_id, room_id, tag, content["content"] ) @@ -276,7 +276,7 @@ class ReplicationRemoveTagRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str, room_id: str, tag: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: max_stream_id = await self.handler.remove_tag_from_room( user_id, room_id, diff --git a/synapse/replication/http/deactivate_account.py b/synapse/replication/http/deactivate_account.py index 89658350a5..82df1e1322 100644 --- a/synapse/replication/http/deactivate_account.py +++ b/synapse/replication/http/deactivate_account.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -69,7 +69,7 @@ class ReplicationNotifyAccountDeactivatedServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: by_admin = content["by_admin"] await self.deactivate_account_handler.notify_account_deactivated( user_id, by_admin=by_admin diff --git a/synapse/replication/http/delayed_events.py b/synapse/replication/http/delayed_events.py index 229022070c..e448ac32bf 100644 --- a/synapse/replication/http/delayed_events.py +++ b/synapse/replication/http/delayed_events.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, 
Dict, Optional, Tuple +from typing import TYPE_CHECKING, Optional from twisted.web.server import Request @@ -52,7 +52,7 @@ class ReplicationAddedDelayedEventRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict - ) -> Tuple[int, Dict[str, Optional[JsonMapping]]]: + ) -> tuple[int, dict[str, Optional[JsonMapping]]]: self.handler.on_added(int(content["next_send_ts"])) return 200, {} diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 94981e22eb..2fadee8a06 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional from twisted.web.server import Request @@ -59,13 +59,13 @@ class ReplicationNotifyDeviceUpdateRestServlet(ReplicationEndpoint): @staticmethod async def _serialize_payload( # type: ignore[override] - user_id: str, device_ids: List[str] + user_id: str, device_ids: list[str] ) -> JsonDict: return {"device_ids": device_ids} async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: device_ids = content["device_ids"] span = active_span() @@ -102,12 +102,12 @@ class ReplicationNotifyUserSignatureUpdateRestServlet(ReplicationEndpoint): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload(from_user_id: str, user_ids: List[str]) -> JsonDict: # type: ignore[override] + async def _serialize_payload(from_user_id: str, user_ids: list[str]) -> JsonDict: # type: ignore[override] return {"user_ids": user_ids} async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, from_user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: user_ids = content["user_ids"] span = active_span() @@ -165,13 +165,13 @@ class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload(user_ids: List[str]) -> JsonDict: # type: ignore[override] + async def _serialize_payload(user_ids: list[str]) -> JsonDict: # type: ignore[override] return {"user_ids": user_ids} async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict - ) -> Tuple[int, Dict[str, Optional[JsonMapping]]]: - user_ids: List[str] = content["user_ids"] + ) -> tuple[int, dict[str, Optional[JsonMapping]]]: + user_ids: list[str] = content["user_ids"] logger.info("Resync for %r", user_ids) span = active_span() @@ -210,7 +210,7 @@ class ReplicationHandleNewDeviceUpdateRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self.device_handler.handle_new_device_update() return 200, {} @@ -241,7 +241,7 @@ class ReplicationDeviceHandleRoomUnPartialStated(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self.device_handler.handle_room_un_partial_stated(room_id) return 200, {} diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 1e302ef59f..448a1f8a71 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -19,7 +19,7 @@ # import 
logging -from typing import TYPE_CHECKING, List, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -86,7 +86,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): async def _serialize_payload( # type: ignore[override] store: "DataStore", room_id: str, - event_and_contexts: List[EventPersistencePair], + event_and_contexts: list[EventPersistencePair], backfilled: bool, ) -> JsonDict: """ @@ -122,7 +122,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: with Measure( self.clock, name="repl_fed_send_events_parse", server_name=self.server_name ): @@ -194,7 +194,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, edu_type: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: origin = content["origin"] edu_content = content["content"] @@ -243,7 +243,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, query_type: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: args = content["args"] args["origin"] = content["origin"] @@ -285,7 +285,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self.store.clean_room_for_join(room_id) return 200, {} @@ -320,7 +320,7 @@ class ReplicationStoreRoomOnOutlierMembershipRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: room_version = KNOWN_ROOM_VERSIONS[content["room_version"]] await self.store.maybe_store_room_on_outlier_membership(room_id, room_version) return 200, {} diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index 8b5b7f755a..0022e12eac 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Optional, Tuple, cast +from typing import TYPE_CHECKING, Optional, cast from twisted.web.server import Request @@ -79,7 +79,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: device_id = content["device_id"] initial_display_name = content["initial_display_name"] is_guest = content["is_guest"] diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index fc66039b2f..0e588037b6 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional from twisted.web.server import Request @@ -63,7 +63,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): requester: Requester, room_id: str, user_id: str, - remote_room_hosts: List[str], + remote_room_hosts: list[str], content: JsonDict, ) -> JsonDict: """ @@ -85,7 +85,7 @@ class 
ReplicationRemoteJoinRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: SynapseRequest, content: JsonDict, room_id: str, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: remote_room_hosts = content["remote_room_hosts"] event_content = content["content"] @@ -130,7 +130,7 @@ class ReplicationRemoteKnockRestServlet(ReplicationEndpoint): requester: Requester, room_id: str, user_id: str, - remote_room_hosts: List[str], + remote_room_hosts: list[str], content: JsonDict, ) -> JsonDict: """ @@ -149,7 +149,7 @@ class ReplicationRemoteKnockRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: SynapseRequest, content: JsonDict, room_id: str, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: remote_room_hosts = content["remote_room_hosts"] event_content = content["content"] @@ -215,7 +215,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: SynapseRequest, content: JsonDict, invite_event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: txn_id = content["txn_id"] event_content = content["content"] @@ -279,7 +279,7 @@ class ReplicationRemoteRescindKnockRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: SynapseRequest, content: JsonDict, knock_event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: txn_id = content["txn_id"] event_content = content["content"] @@ -343,7 +343,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): room_id: str, user_id: str, change: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: logger.info("user membership change: %s in %s", user_id, room_id) user = UserID.from_string(user_id) diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py index 8a3f3b0e67..4a894b0221 100644 --- a/synapse/replication/http/presence.py +++ b/synapse/replication/http/presence.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from twisted.web.server import Request @@ -63,7 +63,7 @@ class ReplicationBumpPresenceActiveTime(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self._presence_handler.bump_presence_active_time( UserID.from_string(user_id), content.get("device_id") ) @@ -116,7 +116,7 @@ class ReplicationPresenceSetState(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self._presence_handler.set_state( UserID.from_string(user_id), content.get("device_id"), diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py index 6e20a208b6..905414b5ee 100644 --- a/synapse/replication/http/push.py +++ b/synapse/replication/http/push.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -68,7 +68,7 @@ class ReplicationRemovePusherRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: app_id = 
content["app_id"] pushkey = content["pushkey"] @@ -110,7 +110,7 @@ class ReplicationCopyPusherRestServlet(ReplicationEndpoint): user_id: str, old_room_id: str, new_room_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self._store.copy_push_rules_from_room_to_room_for_user( old_room_id, new_room_id, user_id ) @@ -144,7 +144,7 @@ class ReplicationDeleteAllPushersForUserRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self._store.delete_all_pushers_for_user(user_id) return 200, {} diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 27d3504c3c..780fcc463a 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from twisted.web.server import Request @@ -104,7 +104,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self.registration_handler.check_registration_ratelimit(content["address"]) # Always default admin users to approved (since it means they were created by @@ -156,7 +156,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: auth_result = content["auth_result"] access_token = content["access_token"] diff --git a/synapse/replication/http/send_events.py b/synapse/replication/http/send_events.py index 6b1a5a9956..b020a0fe7c 100644 --- a/synapse/replication/http/send_events.py +++ b/synapse/replication/http/send_events.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, List, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -85,11 +85,11 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint): @staticmethod async def _serialize_payload( # type: ignore[override] - events_and_context: List[EventPersistencePair], + events_and_context: list[EventPersistencePair], store: "DataStore", requester: Requester, ratelimit: bool, - extra_users: List[UserID], + extra_users: list[UserID], ) -> JsonDict: """ Args: @@ -122,7 +122,7 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, payload: JsonDict - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: with Measure( self.clock, name="repl_send_events_parse", server_name=self.server_name ): diff --git a/synapse/replication/http/state.py b/synapse/replication/http/state.py index 3ec4ca5de3..823d330041 100644 --- a/synapse/replication/http/state.py +++ b/synapse/replication/http/state.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -65,7 +65,7 @@ class ReplicationUpdateCurrentStateRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: writer_instance = self._events_shard_config.get_instance(room_id) if writer_instance != 
self._instance_name: raise SynapseError( diff --git a/synapse/replication/http/streams.py b/synapse/replication/http/streams.py index 61f70d5790..42e78c976f 100644 --- a/synapse/replication/http/streams.py +++ b/synapse/replication/http/streams.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -79,7 +79,7 @@ class ReplicationGetStreamUpdates(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, stream_name: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: stream = self.streams.get(stream_name) if stream is None: raise SynapseError(400, "Unknown stream") diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index f2561bc0c5..f9605407af 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -21,7 +21,7 @@ """A replication client for use by synapse workers.""" import logging -from typing import TYPE_CHECKING, Dict, Iterable, Optional, Set, Tuple +from typing import TYPE_CHECKING, Iterable, Optional from sortedcontainers import SortedList @@ -95,8 +95,8 @@ class ReplicationDataHandler: # Map from stream and instance to list of deferreds waiting for the stream to # arrive at a particular position. The lists are sorted by stream position. - self._streams_to_waiters: Dict[ - Tuple[str, str], SortedList[Tuple[int, Deferred]] + self._streams_to_waiters: dict[ + tuple[str, str], SortedList[tuple[int, Deferred]] ] = {} async def on_rdata( @@ -113,7 +113,7 @@ class ReplicationDataHandler: token: stream token for this batch of rows rows: a list of Stream.ROW_TYPE objects as returned by Stream.parse_row. """ - all_room_ids: Set[str] = set() + all_room_ids: set[str] = set() if stream_name == DeviceListsStream.NAME: if any(not row.is_signature and not row.hosts_calculated for row in rows): # This only uses the minimum stream position on the device lists @@ -200,7 +200,7 @@ class ReplicationDataHandler: if row.data.rejected: continue - extra_users: Tuple[UserID, ...] = () + extra_users: tuple[UserID, ...] = () if row.data.type == EventTypes.Member and row.data.state_key: extra_users = (UserID.from_string(row.data.state_key),) diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 8eec68c3dd..f115cc4db9 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -26,7 +26,7 @@ allowed to be sent by which side. import abc import logging -from typing import List, Optional, Tuple, Type, TypeVar +from typing import Optional, TypeVar from synapse.replication.tcp.streams._base import StreamRow from synapse.util.json import json_decoder, json_encoder @@ -49,7 +49,7 @@ class Command(metaclass=abc.ABCMeta): @classmethod @abc.abstractmethod - def from_line(cls: Type[T], line: str) -> T: + def from_line(cls: type[T], line: str) -> T: """Deserialises a line from the wire into this command. `line` does not include the command. 
""" @@ -88,7 +88,7 @@ class _SimpleCommand(Command): self.data = data @classmethod - def from_line(cls: Type[SC], line: str) -> SC: + def from_line(cls: type[SC], line: str) -> SC: return cls(line) def to_line(self) -> str: @@ -145,7 +145,7 @@ class RdataCommand(Command): self.row = row @classmethod - def from_line(cls: Type["RdataCommand"], line: str) -> "RdataCommand": + def from_line(cls: type["RdataCommand"], line: str) -> "RdataCommand": stream_name, instance_name, token, row_json = line.split(" ", 3) return cls( stream_name, @@ -204,7 +204,7 @@ class PositionCommand(Command): self.new_token = new_token @classmethod - def from_line(cls: Type["PositionCommand"], line: str) -> "PositionCommand": + def from_line(cls: type["PositionCommand"], line: str) -> "PositionCommand": stream_name, instance_name, prev_token, new_token = line.split(" ", 3) return cls(stream_name, instance_name, int(prev_token), int(new_token)) @@ -249,7 +249,7 @@ class ReplicateCommand(Command): REPLICATE """ - __slots__: List[str] = [] + __slots__: list[str] = [] NAME = "REPLICATE" @@ -257,7 +257,7 @@ class ReplicateCommand(Command): pass @classmethod - def from_line(cls: Type[T], line: str) -> T: + def from_line(cls: type[T], line: str) -> T: return cls() def to_line(self) -> str: @@ -299,7 +299,7 @@ class UserSyncCommand(Command): self.last_sync_ms = last_sync_ms @classmethod - def from_line(cls: Type["UserSyncCommand"], line: str) -> "UserSyncCommand": + def from_line(cls: type["UserSyncCommand"], line: str) -> "UserSyncCommand": device_id: Optional[str] instance_id, user_id, device_id, state, last_sync_ms = line.split(" ", 4) @@ -343,7 +343,7 @@ class ClearUserSyncsCommand(Command): @classmethod def from_line( - cls: Type["ClearUserSyncsCommand"], line: str + cls: type["ClearUserSyncsCommand"], line: str ) -> "ClearUserSyncsCommand": return cls(line) @@ -373,7 +373,7 @@ class FederationAckCommand(Command): @classmethod def from_line( - cls: Type["FederationAckCommand"], line: str + cls: type["FederationAckCommand"], line: str ) -> "FederationAckCommand": instance_name, token = line.split(" ") return cls(instance_name, int(token)) @@ -418,7 +418,7 @@ class UserIpCommand(Command): self.last_seen = last_seen @classmethod - def from_line(cls: Type["UserIpCommand"], line: str) -> "UserIpCommand": + def from_line(cls: type["UserIpCommand"], line: str) -> "UserIpCommand": user_id, jsn = line.split(" ", 1) access_token, ip, user_agent, device_id, last_seen = json_decoder.decode(jsn) @@ -485,7 +485,7 @@ class LockReleasedCommand(Command): self.lock_key = lock_key @classmethod - def from_line(cls: Type["LockReleasedCommand"], line: str) -> "LockReleasedCommand": + def from_line(cls: type["LockReleasedCommand"], line: str) -> "LockReleasedCommand": instance_name, lock_name, lock_key = json_decoder.decode(line) return cls(instance_name, lock_name, lock_key) @@ -505,7 +505,7 @@ class NewActiveTaskCommand(_SimpleCommand): NAME = "NEW_ACTIVE_TASK" -_COMMANDS: Tuple[Type[Command], ...] = ( +_COMMANDS: tuple[type[Command], ...] 
= ( ServerCommand, RdataCommand, PositionCommand, diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 4d0d3d44ab..bd1ee5ff9d 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -20,18 +20,14 @@ # # import logging +from collections import deque from typing import ( TYPE_CHECKING, Any, Awaitable, - Deque, - Dict, Iterable, Iterator, - List, Optional, - Set, - Tuple, TypeVar, Union, ) @@ -119,8 +115,8 @@ tcp_command_queue_gauge = LaterGauge( # the type of the entries in _command_queues_by_stream -_StreamCommandQueue = Deque[ - Tuple[Union[RdataCommand, PositionCommand], IReplicationConnection] +_StreamCommandQueue = deque[ + tuple[Union[RdataCommand, PositionCommand], IReplicationConnection] ] @@ -141,18 +137,18 @@ class ReplicationCommandHandler: self._instance_name = hs.get_instance_name() # Additional Redis channel suffixes to subscribe to. - self._channels_to_subscribe_to: List[str] = [] + self._channels_to_subscribe_to: list[str] = [] self._is_presence_writer = ( hs.get_instance_name() in hs.config.worker.writers.presence ) - self._streams: Dict[str, Stream] = { + self._streams: dict[str, Stream] = { stream.NAME: stream(hs) for stream in STREAMS_MAP.values() } # List of streams that this instance is the source of - self._streams_to_replicate: List[Stream] = [] + self._streams_to_replicate: list[Stream] = [] for stream in self._streams.values(): if hs.config.redis.redis_enabled and stream.NAME == CachesStream.NAME: @@ -246,14 +242,14 @@ class ReplicationCommandHandler: # Map of stream name to batched updates. See RdataCommand for info on # how batching works. - self._pending_batches: Dict[str, List[Any]] = {} + self._pending_batches: dict[str, list[Any]] = {} # The factory used to create connections. self._factory: Optional[ReconnectingClientFactory] = None # The currently connected connections. (The list of places we need to send # outgoing replication commands to.) - self._connections: List[IReplicationConnection] = [] + self._connections: list[IReplicationConnection] = [] tcp_resource_total_connections_gauge.register_hook( homeserver_instance_id=hs.get_instance_id(), @@ -264,7 +260,7 @@ class ReplicationCommandHandler: # them in order in a separate background process. # the streams which are currently being processed by _unsafe_process_queue - self._processing_streams: Set[str] = set() + self._processing_streams: set[str] = set() # for each stream, a queue of commands that are awaiting processing, and the # connection that they arrived on. @@ -274,7 +270,7 @@ class ReplicationCommandHandler: # For each connection, the incoming stream names that have received a POSITION # from that connection. 
- self._streams_by_connection: Dict[IReplicationConnection, Set[str]] = {} + self._streams_by_connection: dict[IReplicationConnection, set[str]] = {} tcp_command_queue_gauge.register_hook( homeserver_instance_id=hs.get_instance_id(), @@ -450,11 +446,11 @@ class ReplicationCommandHandler: bindAddress=None, ) - def get_streams(self) -> Dict[str, Stream]: + def get_streams(self) -> dict[str, Stream]: """Get a map from stream name to all streams.""" return self._streams - def get_streams_to_replicate(self) -> List[Stream]: + def get_streams_to_replicate(self) -> list[Stream]: """Get a list of streams that this instances replicates.""" return self._streams_to_replicate @@ -902,8 +898,8 @@ UpdateRow = TypeVar("UpdateRow") def _batch_updates( - updates: Iterable[Tuple[UpdateToken, UpdateRow]], -) -> Iterator[Tuple[UpdateToken, List[UpdateRow]]]: + updates: Iterable[tuple[UpdateToken, UpdateRow]], +) -> Iterator[tuple[UpdateToken, list[UpdateRow]]]: """Collect stream updates with the same token together Given a series of updates returned by Stream.get_updates_since(), collects diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index bcfc65c2c0..733643cb64 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -28,7 +28,7 @@ import fcntl import logging import struct from inspect import isawaitable -from typing import TYPE_CHECKING, Any, Collection, List, Optional +from typing import TYPE_CHECKING, Any, Collection, Optional from prometheus_client import Counter from zope.interface import Interface, implementer @@ -82,7 +82,7 @@ tcp_outbound_commands_counter = Counter( # A list of all connected protocols. This allows us to send metrics about the # connections. -connected_connections: "List[BaseReplicationStreamProtocol]" = [] +connected_connections: "list[BaseReplicationStreamProtocol]" = [] logger = logging.getLogger(__name__) @@ -163,7 +163,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self.conn_id = random_string(5) # To dedupe in case of name clashes. # List of pending commands to send once we've established the connection - self.pending_commands: List[Command] = [] + self.pending_commands: list[Command] = [] # The LoopingCall for sending pings. 
self._send_ping_loop: Optional[task.LoopingCall] = None diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index caffb2913e..4448117d62 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -21,7 +21,7 @@ import logging from inspect import isawaitable -from typing import TYPE_CHECKING, Any, Generic, List, Optional, Type, TypeVar, cast +from typing import TYPE_CHECKING, Any, Generic, Optional, TypeVar, cast import attr from txredisapi import ( @@ -72,7 +72,7 @@ class ConstantProperty(Generic[T, V]): constant: V = attr.ib() - def __get__(self, obj: Optional[T], objtype: Optional[Type[T]] = None) -> V: + def __get__(self, obj: Optional[T], objtype: Optional[type[T]] = None) -> V: return self.constant def __set__(self, obj: Optional[T], value: V) -> None: @@ -111,7 +111,7 @@ class RedisSubscriber(SubscriberProtocol): hs: "HomeServer" synapse_handler: "ReplicationCommandHandler" synapse_stream_prefix: str - synapse_channel_names: List[str] + synapse_channel_names: list[str] synapse_outbound_redis_connection: ConnectionHandler def __init__(self, *args: Any, **kwargs: Any): @@ -296,7 +296,7 @@ class SynapseRedisFactory(RedisFactory): dbid: Optional[int], poolsize: int, isLazy: bool = False, - handler: Type = ConnectionHandler, + handler: type = ConnectionHandler, charset: str = "utf-8", password: Optional[str] = None, replyTimeout: int = 30, @@ -381,7 +381,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory): self, hs: "HomeServer", outbound_redis_connection: ConnectionHandler, - channel_names: List[str], + channel_names: list[str], ): super().__init__( hs, diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index ef72a0a532..8df0a3853f 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -22,7 +22,7 @@ import logging import random -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional from prometheus_client import Counter @@ -320,8 +320,8 @@ class ReplicationStreamer: def _batch_updates( - updates: List[Tuple[Token, StreamRow]], -) -> List[Tuple[Optional[Token], StreamRow]]: + updates: list[tuple[Token, StreamRow]], +) -> list[tuple[Optional[Token], StreamRow]]: """Takes a list of updates of form [(token, row)] and sets the token to None for all rows where the next row has the same token. This is used to implement batching. @@ -337,7 +337,7 @@ def _batch_updates( if not updates: return [] - new_updates: List[Tuple[Optional[Token], StreamRow]] = [] + new_updates: list[tuple[Optional[Token], StreamRow]] = [] for i, update in enumerate(updates[:-1]): if update[0] == updates[i + 1][0]: new_updates.append((None, update[1])) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index ec7e935d6a..d80bdb9b35 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -26,9 +26,7 @@ from typing import ( Any, Awaitable, Callable, - List, Optional, - Tuple, TypeVar, ) @@ -56,7 +54,7 @@ Token = int # parsing with Stream.parse_row (which turns it into a `ROW_TYPE`). Normally it's # just a row from a database query, though this is dependent on the stream in question. # -StreamRow = TypeVar("StreamRow", bound=Tuple) +StreamRow = TypeVar("StreamRow", bound=tuple) # The type returned by the update_function of a stream, as well as get_updates(), # get_updates_since, etc. 
@@ -66,7 +64,7 @@ StreamRow = TypeVar("StreamRow", bound=Tuple) # * `new_last_token` is the new position in stream. # * `limited` is whether there are more updates to fetch. # -StreamUpdateResult = Tuple[List[Tuple[Token, StreamRow]], Token, bool] +StreamUpdateResult = tuple[list[tuple[Token, StreamRow]], Token, bool] # The type of an update_function for a stream # @@ -400,7 +398,7 @@ class TypingStream(Stream): room_id: str # All the users that are 'typing' right now in the specified room. - user_ids: List[str] + user_ids: list[str] NAME = "typing" ROW_TYPE = TypingStreamRow @@ -410,7 +408,7 @@ class TypingStream(Stream): # On the writer, query the typing handler typing_writer_handler = hs.get_typing_writer_handler() update_function: Callable[ - [str, int, int, int], Awaitable[Tuple[List[Tuple[int, Any]], int, bool]] + [str, int, int, int], Awaitable[tuple[list[tuple[int, Any]], int, bool]] ] = typing_writer_handler.get_all_typing_updates self.current_token_function = typing_writer_handler.get_current_token else: @@ -512,7 +510,7 @@ class CachesStream(Stream): """ cache_func: str - keys: Optional[List[Any]] + keys: Optional[list[Any]] invalidation_ts: int NAME = "caches" diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 05b55fb033..a6314b0c7d 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -20,7 +20,7 @@ # import heapq from collections import defaultdict -from typing import TYPE_CHECKING, Iterable, Optional, Tuple, Type, TypeVar, cast +from typing import TYPE_CHECKING, Iterable, Optional, TypeVar, cast import attr @@ -93,7 +93,7 @@ class BaseEventsStreamRow: TypeId: str @classmethod - def from_data(cls: Type[T], data: Iterable[Optional[str]]) -> T: + def from_data(cls: type[T], data: Iterable[Optional[str]]) -> T: """Parse the data from the replication stream into a row. By default we just call the constructor with the data list as arguments @@ -136,7 +136,7 @@ class EventsStreamAllStateRow(BaseEventsStreamRow): room_id: str -_EventRows: Tuple[Type[BaseEventsStreamRow], ...] = ( +_EventRows: tuple[type[BaseEventsStreamRow], ...] = ( EventsStreamEventRow, EventsStreamCurrentStateRow, EventsStreamAllStateRow, @@ -237,7 +237,7 @@ class EventsStream(_StreamFromIdGen): # distinguish the row type). At the same time, we can limit the event_rows # to the max stream_id from state_rows. - event_updates: Iterable[Tuple[int, Tuple]] = ( + event_updates: Iterable[tuple[int, tuple]] = ( (stream_id, (EventsStreamEventRow.TypeId, rest)) for (stream_id, *rest) in event_rows if stream_id <= upper_limit @@ -254,20 +254,20 @@ class EventsStream(_StreamFromIdGen): for room_id, stream_ids in state_updates_by_room.items() if len(stream_ids) >= _MAX_STATE_UPDATES_PER_ROOM ] - state_all_updates: Iterable[Tuple[int, Tuple]] = ( + state_all_updates: Iterable[tuple[int, tuple]] = ( (max_stream_id, (EventsStreamAllStateRow.TypeId, (room_id,))) for (max_stream_id, room_id) in state_all_rows ) # Any remaining state updates are sent individually. 
state_all_rooms = {room_id for _, room_id in state_all_rows} - state_updates: Iterable[Tuple[int, Tuple]] = ( + state_updates: Iterable[tuple[int, tuple]] = ( (stream_id, (EventsStreamCurrentStateRow.TypeId, rest)) for (stream_id, *rest) in state_rows if rest[0] not in state_all_rooms ) - ex_outliers_updates: Iterable[Tuple[int, Tuple]] = ( + ex_outliers_updates: Iterable[tuple[int, tuple]] = ( (stream_id, (EventsStreamEventRow.TypeId, rest)) for (stream_id, *rest) in ex_outliers_rows ) @@ -282,6 +282,6 @@ class EventsStream(_StreamFromIdGen): @classmethod def parse_row(cls, row: StreamRow) -> "EventsStreamRow": - (typ, data) = cast(Tuple[str, Iterable[Optional[str]]], row) + (typ, data) = cast(tuple[str, Iterable[Optional[str]]], row) event_stream_row_data = TypeToRow[typ].from_data(data) return EventsStreamRow(typ, event_stream_row_data) diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py index 1c2ffe86b7..c99e720381 100644 --- a/synapse/replication/tcp/streams/federation.py +++ b/synapse/replication/tcp/streams/federation.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Tuple +from typing import TYPE_CHECKING, Any, Awaitable, Callable import attr @@ -58,7 +58,7 @@ class FederationStream(Stream): federation_sender.get_current_token ) update_function: Callable[ - [str, int, int, int], Awaitable[Tuple[List[Tuple[int, Any]], int, bool]] + [str, int, int, int], Awaitable[tuple[list[tuple[int, Any]], int, bool]] ] = federation_sender.get_replication_rows elif hs.should_send_federation(): @@ -88,5 +88,5 @@ class FederationStream(Stream): @staticmethod async def _stub_update_function( instance_name: str, from_token: int, upto_token: int, limit: int - ) -> Tuple[list, int, bool]: + ) -> tuple[list, int, bool]: return [], upto_token, False diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index db3bd46542..ea0e47ded4 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Callable, Iterable, Optional from synapse.http.server import HttpServer, JsonResource from synapse.rest import admin @@ -78,7 +78,7 @@ if TYPE_CHECKING: RegisterServletsFunc = Callable[["HomeServer", HttpServer], None] -CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] = ( +CLIENT_SERVLET_FUNCTIONS: tuple[RegisterServletsFunc, ...] = ( versions.register_servlets, initial_sync.register_servlets, room.register_deprecated_servlets, @@ -128,7 +128,7 @@ CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] 
= ( thread_subscriptions.register_servlets, ) -SERVLET_GROUPS: Dict[str, Iterable[RegisterServletsFunc]] = { +SERVLET_GROUPS: dict[str, Iterable[RegisterServletsFunc]] = { "client": CLIENT_SERVLET_FUNCTIONS, } @@ -143,7 +143,7 @@ class ClientRestResource(JsonResource): * etc """ - def __init__(self, hs: "HomeServer", servlet_groups: Optional[List[str]] = None): + def __init__(self, hs: "HomeServer", servlet_groups: Optional[list[str]] = None): JsonResource.__init__(self, hs, canonical_json=False) if hs.config.media.can_load_media_repo: # This import is here to prevent a circular import failure diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 0386f8a34b..5e75dc4c00 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -35,7 +35,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.handlers.pagination import PURGE_HISTORY_ACTION_NAME @@ -137,7 +137,7 @@ class VersionServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.res = {"server_version": SYNAPSE_VERSION} - def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: return HTTPStatus.OK, self.res @@ -153,7 +153,7 @@ class PurgeHistoryRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, room_id: str, event_id: Optional[str] - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) body = parse_json_object_from_request(request, allow_empty_body=True) @@ -237,7 +237,7 @@ class PurgeHistoryStatusRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, purge_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) purge_task = await self.pagination_handler.get_delete_task(purge_id) diff --git a/synapse/rest/admin/background_updates.py b/synapse/rest/admin/background_updates.py index 6fba616d3a..96190c416d 100644 --- a/synapse/rest/admin/background_updates.py +++ b/synapse/rest/admin/background_updates.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import SynapseError from synapse.http.servlet import ( @@ -47,7 +47,7 @@ class BackgroundUpdateEnabledRestServlet(RestServlet): self._auth = hs.get_auth() self._data_stores = hs.get_datastores() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) # We need to check that all configured databases have updates enabled. 
@@ -56,7 +56,7 @@ class BackgroundUpdateEnabledRestServlet(RestServlet): return HTTPStatus.OK, {"enabled": enabled} - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) body = parse_json_object_from_request(request) @@ -88,7 +88,7 @@ class BackgroundUpdateRestServlet(RestServlet): self._auth = hs.get_auth() self._data_stores = hs.get_datastores() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) # We need to check that all configured databases have updates enabled. @@ -121,7 +121,7 @@ class BackgroundUpdateStartJobRestServlet(RestServlet): self._auth = hs.get_auth() self._store = hs.get_datastores().main - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) body = parse_json_object_from_request(request) diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index c488bce58e..c8e9242ce8 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import NotFoundError, SynapseError from synapse.http.servlet import ( @@ -56,7 +56,7 @@ class DeviceRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str, device_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) target_user = UserID.from_string(user_id) @@ -76,7 +76,7 @@ class DeviceRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, user_id: str, device_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) target_user = UserID.from_string(user_id) @@ -92,7 +92,7 @@ class DeviceRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str, device_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) target_user = UserID.from_string(user_id) @@ -128,7 +128,7 @@ class DevicesRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) target_user = UserID.from_string(user_id) @@ -157,7 +157,7 @@ class DevicesRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """Creates a new device for the user.""" await assert_requester_is_admin(self.auth, request) @@ -201,7 +201,7 @@ class DeleteDevicesRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) target_user = UserID.from_string(user_id) diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py index ff1abc0697..5e8f85de7e 100644 --- a/synapse/rest/admin/event_reports.py +++ b/synapse/rest/admin/event_reports.py @@ -21,7 +21,7 @@ import logging from http import HTTPStatus -from typing 
import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError @@ -65,7 +65,7 @@ class EventReportsRestServlet(RestServlet): self._auth = hs.get_auth() self._store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) start = parse_integer(request, "from", default=0) @@ -123,7 +123,7 @@ class EventReportDetailRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, report_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) message = ( @@ -149,7 +149,7 @@ class EventReportDetailRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, report_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) message = ( diff --git a/synapse/rest/admin/events.py b/synapse/rest/admin/events.py index 61b347f8f4..1c39d5caf3 100644 --- a/synapse/rest/admin/events.py +++ b/synapse/rest/admin/events.py @@ -1,5 +1,5 @@ from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import NotFoundError from synapse.events.utils import ( @@ -43,7 +43,7 @@ class EventRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) await assert_user_is_admin(self._auth, requester) diff --git a/synapse/rest/admin/experimental_features.py b/synapse/rest/admin/experimental_features.py index 3d3015cef7..abdb937793 100644 --- a/synapse/rest/admin/experimental_features.py +++ b/synapse/rest/admin/experimental_features.py @@ -22,7 +22,7 @@ from enum import Enum from http import HTTPStatus -from typing import TYPE_CHECKING, Dict, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -74,7 +74,7 @@ class ExperimentalFeaturesRestServlet(RestServlet): self, request: SynapseRequest, user_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """ List which features are enabled for a given user """ @@ -99,7 +99,7 @@ class ExperimentalFeaturesRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str - ) -> Tuple[HTTPStatus, Dict]: + ) -> tuple[HTTPStatus, dict]: """ Enable or disable the provided features for the requester """ diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py index d85a04b825..e958ef9747 100644 --- a/synapse/rest/admin/federation.py +++ b/synapse/rest/admin/federation.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import Direction from synapse.api.errors import NotFoundError, SynapseError @@ -58,7 +58,7 @@ class ListDestinationsRestServlet(RestServlet): self._auth = hs.get_auth() self._store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) start = parse_integer(request, "from", default=0) @@ 
-115,7 +115,7 @@ class DestinationRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, destination: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) if not await self._store.is_destination_known(destination): @@ -175,7 +175,7 @@ class DestinationMembershipRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, destination: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) if not await self._store.is_destination_known(destination): @@ -224,7 +224,7 @@ class DestinationResetConnectionRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, destination: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) if not await self._store.is_destination_known(destination): diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 8732c0bf9d..cfdb314b1a 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional import attr @@ -67,7 +67,7 @@ class QueryMediaById(RestServlet): async def on_GET( self, request: SynapseRequest, server_name: str, media_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) @@ -134,7 +134,7 @@ class QuarantineMediaInRoom(RestServlet): async def on_POST( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) @@ -161,7 +161,7 @@ class QuarantineMediaByUser(RestServlet): async def on_POST( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) @@ -190,7 +190,7 @@ class QuarantineMediaByID(RestServlet): async def on_POST( self, request: SynapseRequest, server_name: str, media_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) @@ -219,7 +219,7 @@ class UnquarantineMediaByID(RestServlet): async def on_POST( self, request: SynapseRequest, server_name: str, media_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) logger.info("Remove from quarantine media by ID: %s/%s", server_name, media_id) @@ -241,7 +241,7 @@ class ProtectMediaByID(RestServlet): async def on_POST( self, request: SynapseRequest, media_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) logger.info("Protecting local media by ID: %s", media_id) @@ -263,7 +263,7 @@ class UnprotectMediaByID(RestServlet): async def on_POST( self, request: SynapseRequest, media_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) logger.info("Unprotecting local media by ID: %s", media_id) @@ -285,7 +285,7 @@ class ListMediaInRoom(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await 
assert_requester_is_admin(self.auth, request) local_mxcs, remote_mxcs = await self.store.get_media_mxcs_in_room(room_id) @@ -300,7 +300,7 @@ class PurgeMediaCacheRestServlet(RestServlet): self.media_repository = hs.get_media_repository() self.auth = hs.get_auth() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) before_ts = parse_integer(request, "before_ts", required=True) @@ -338,7 +338,7 @@ class DeleteMediaByID(RestServlet): async def on_DELETE( self, request: SynapseRequest, server_name: str, media_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) if not self._is_mine_server_name(server_name): @@ -375,7 +375,7 @@ class DeleteMediaByDateSize(RestServlet): async def on_POST( self, request: SynapseRequest, server_name: Optional[str] = None - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) before_ts = parse_integer(request, "before_ts", required=True) @@ -433,7 +433,7 @@ class UserMediaRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: # This will always be set by the time Twisted calls us. assert request.args is not None @@ -477,7 +477,7 @@ class UserMediaRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: # This will always be set by the time Twisted calls us. assert request.args is not None diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py index bec2331590..ea266403a0 100644 --- a/synapse/rest/admin/registration_tokens.py +++ b/synapse/rest/admin/registration_tokens.py @@ -22,7 +22,7 @@ import logging import string from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( @@ -80,7 +80,7 @@ class ListRegistrationTokensRestServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) valid = parse_boolean(request, "valid") token_list = await self.store.get_registration_tokens(valid) @@ -133,7 +133,7 @@ class NewRegistrationTokenRestServlet(RestServlet): self.allowed_chars = string.ascii_letters + string.digits + "._~-" self.allowed_chars_set = set(self.allowed_chars) - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) body = parse_json_object_from_request(request) @@ -282,7 +282,7 @@ class RegistrationTokenRestServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest, token: str) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest, token: str) -> tuple[int, JsonDict]: """Retrieve a registration token.""" await assert_requester_is_admin(self.auth, request) token_info = await self.store.get_one_registration_token(token) @@ -293,7 +293,7 @@ class 
RegistrationTokenRestServlet(RestServlet): return HTTPStatus.OK, token_info - async def on_PUT(self, request: SynapseRequest, token: str) -> Tuple[int, JsonDict]: + async def on_PUT(self, request: SynapseRequest, token: str) -> tuple[int, JsonDict]: """Update a registration token.""" await assert_requester_is_admin(self.auth, request) body = parse_json_object_from_request(request) @@ -348,7 +348,7 @@ class RegistrationTokenRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, token: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """Delete a registration token.""" await assert_requester_is_admin(self.auth, request) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 5bed89c2c4..216af29f9b 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, Optional, cast import attr from immutabledict import immutabledict @@ -88,7 +88,7 @@ class RoomRestV2Servlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) await assert_user_is_admin(self._auth, requester) @@ -167,7 +167,7 @@ class DeleteRoomStatusByRoomIdRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) if not RoomID.is_valid(room_id): @@ -198,7 +198,7 @@ class DeleteRoomStatusByDeleteIdRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, delete_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) delete_task = await self._pagination_handler.get_delete_task(delete_id) @@ -224,7 +224,7 @@ class ListRoomRestServlet(RestServlet): self.auth = hs.get_auth() self.admin_handler = hs.get_admin_handler() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) # Extract query parameters @@ -319,7 +319,7 @@ class RoomRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) ret = await self.store.get_room_with_stats(room_id) @@ -337,7 +337,7 @@ class RoomRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: return await self._delete_room( request, room_id, @@ -353,7 +353,7 @@ class RoomRestServlet(RestServlet): auth: "Auth", room_shutdown_handler: "RoomShutdownHandler", pagination_handler: "PaginationHandler", - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await auth.get_user_by_req(request) await assert_user_is_admin(auth, requester) @@ -429,7 +429,7 @@ class RoomMembersRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) room = await self.store.get_room(room_id) @@ -458,7 +458,7 @@ class RoomStateRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, 
JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) room = await self.store.get_room(room_id) @@ -498,7 +498,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, RestServlet): async def on_POST( self, request: SynapseRequest, room_identifier: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: # This will always be set by the time Twisted calls us. assert request.args is not None @@ -521,7 +521,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, RestServlet): # Get the room ID from the identifier. try: - remote_room_hosts: Optional[List[str]] = [ + remote_room_hosts: Optional[list[str]] = [ x.decode("ascii") for x in request.args[b"server_name"] ] except Exception: @@ -591,7 +591,7 @@ class MakeRoomAdminRestServlet(ResolveRoomIdMixin, RestServlet): async def on_POST( self, request: SynapseRequest, room_identifier: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) content = parse_json_object_from_request(request, allow_empty_body=True) @@ -756,7 +756,7 @@ class ForwardExtremitiesRestServlet(ResolveRoomIdMixin, RestServlet): async def on_DELETE( self, request: SynapseRequest, room_identifier: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) room_id, _ = await self.resolve_room_id(room_identifier) @@ -766,7 +766,7 @@ class ForwardExtremitiesRestServlet(ResolveRoomIdMixin, RestServlet): async def on_GET( self, request: SynapseRequest, room_identifier: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) room_id, _ = await self.resolve_room_id(room_identifier) @@ -805,7 +805,7 @@ class RoomEventContextServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str, event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=False) await assert_user_is_admin(self.auth, requester) @@ -871,7 +871,7 @@ class BlockRoomRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) if not RoomID.is_valid(room_id): @@ -891,7 +891,7 @@ class BlockRoomRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) await assert_user_is_admin(self._auth, requester) @@ -935,7 +935,7 @@ class RoomMessagesRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) await assert_user_is_admin(self._auth, requester) @@ -997,7 +997,7 @@ class RoomTimestampToEventRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) await assert_user_is_admin(self._auth, requester) diff --git a/synapse/rest/admin/scheduled_tasks.py b/synapse/rest/admin/scheduled_tasks.py index 2ae13021b9..41c402b424 100644 --- a/synapse/rest/admin/scheduled_tasks.py +++ b/synapse/rest/admin/scheduled_tasks.py @@ -13,7 +13,7 @@ # # # -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from 
synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest @@ -35,7 +35,7 @@ class ScheduledTasksRestServlet(RestServlet): self._auth = hs.get_auth() self._store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) # extract query params diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index f3150e88d7..0be04c0f90 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -18,7 +18,7 @@ # # from http import HTTPStatus -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from synapse.api.constants import EventTypes from synapse.api.errors import NotFoundError, SynapseError @@ -81,7 +81,7 @@ class SendServerNoticeServlet(RestServlet): request: SynapseRequest, requester: Requester, txn_id: Optional[str], - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_user_is_admin(self.auth, requester) body = parse_json_object_from_request(request) assert_params_in_dict(body, ("user_id", "content")) @@ -118,13 +118,13 @@ class SendServerNoticeServlet(RestServlet): async def on_POST( self, request: SynapseRequest, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) return await self._do(request, requester, None) async def on_PUT( self, request: SynapseRequest, txn_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) set_tag("txn_id", txn_id) return await self.txns.fetch_or_execute_request( diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py index 0adc5b7005..3de1d4e9bd 100644 --- a/synapse/rest/admin/statistics.py +++ b/synapse/rest/admin/statistics.py @@ -21,7 +21,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import Direction from synapse.api.errors import Codes, SynapseError @@ -48,7 +48,7 @@ class UserMediaStatisticsRestServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) order_by = parse_string( @@ -119,7 +119,7 @@ class LargestRoomsStatistics(RestServlet): self.auth = hs.get_auth() self.stats_controller = hs.get_storage_controllers().stats - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) room_sizes = await self.stats_controller.get_room_db_size_estimate() diff --git a/synapse/rest/admin/username_available.py b/synapse/rest/admin/username_available.py index 2d642f7d6b..fb0cee42da 100644 --- a/synapse/rest/admin/username_available.py +++ b/synapse/rest/admin/username_available.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.http.servlet import RestServlet, parse_string from synapse.http.site import SynapseRequest @@ -50,7 +50,7 @@ class 
UsernameAvailableRestServlet(RestServlet): self.auth = hs.get_auth() self.registration_handler = hs.get_registration_handler() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) username = parse_string(request, "username", required=True) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 25a38dc4ac..e29b0d36e0 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -23,7 +23,7 @@ import hmac import logging import secrets from http import HTTPStatus -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Optional, Union import attr @@ -113,7 +113,7 @@ class UsersRestServletV2(RestServlet): hs.config.mas.enabled or hs.config.experimental.msc3861.enabled ) - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) start = parse_integer(request, "from", default=0) @@ -164,7 +164,7 @@ class UsersRestServletV2(RestServlet): direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) # twisted.web.server.Request.args is incorrectly defined as Optional[Any] - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore not_user_types = parse_strings_from_args(args, "not_user_type") users, total = await self.store.get_users_paginate( @@ -256,7 +256,7 @@ class UserRestServletV2(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonMapping]: + ) -> tuple[int, JsonMapping]: await assert_requester_is_admin(self.auth, request) target_user = UserID.from_string(user_id) @@ -271,7 +271,7 @@ class UserRestServletV2(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonMapping]: + ) -> tuple[int, JsonMapping]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) @@ -349,14 +349,14 @@ class UserRestServletV2(RestServlet): "'approved' parameter is not of type boolean", ) - # convert List[Dict[str, str]] into List[Tuple[str, str]] + # convert list[dict[str, str]] into list[tuple[str, str]] if external_ids is not None: new_external_ids = [ (external_id["auth_provider"], external_id["external_id"]) for external_id in external_ids ] - # convert List[Dict[str, str]] into Set[Tuple[str, str]] + # convert list[dict[str, str]] into set[tuple[str, str]] if threepids is not None: new_threepids = { (threepid["medium"], threepid["address"]) for threepid in threepids @@ -545,7 +545,7 @@ class UserRegisterServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth_handler = hs.get_auth_handler() self.reactor = hs.get_reactor() - self.nonces: Dict[str, int] = {} + self.nonces: dict[str, int] = {} self.hs = hs self._all_user_types = hs.config.user_types.all_user_types @@ -559,7 +559,7 @@ class UserRegisterServlet(RestServlet): if now - v > self.NONCE_TIMEOUT: del self.nonces[k] - def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: """ Generate a new nonce. 
""" @@ -569,7 +569,7 @@ class UserRegisterServlet(RestServlet): self.nonces[nonce] = int(self.reactor.seconds()) return HTTPStatus.OK, {"nonce": nonce} - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: self._clear_old_nonces() if not self.hs.config.registration.registration_shared_secret: @@ -730,7 +730,7 @@ class WhoisRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonMapping]: + ) -> tuple[int, JsonMapping]: target_user = UserID.from_string(user_id) requester = await self.auth.get_user_by_req(request) @@ -756,7 +756,7 @@ class DeactivateAccountRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, target_user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) @@ -801,7 +801,7 @@ class SuspendAccountRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, target_user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) @@ -828,7 +828,7 @@ class AccountValidityRenewServlet(RestServlet): ) self.auth = hs.get_auth() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) if self.account_validity_module_callbacks.on_legacy_admin_request_callback: @@ -878,7 +878,7 @@ class ResetPasswordRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, target_user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """Post request to allow an administrator reset password for a user. This needs user to have administrator access in Synapse. """ @@ -920,7 +920,7 @@ class SearchUsersRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, target_user_id: str - ) -> Tuple[int, Optional[List[JsonDict]]]: + ) -> tuple[int, Optional[list[JsonDict]]]: """Get request to search user table for specific users according to search term. This needs user to have a administrator access in Synapse. 
@@ -989,7 +989,7 @@ class UserAdminServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) target_user = UserID.from_string(user_id) @@ -1006,7 +1006,7 @@ class UserAdminServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) auth_user = requester.user @@ -1047,7 +1047,7 @@ class UserMembershipRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) room_ids = await self.store.get_rooms_for_user(user_id) @@ -1079,7 +1079,7 @@ class PushersRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) if not self.is_mine(UserID.from_string(user_id)): @@ -1122,7 +1122,7 @@ class UserTokenRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) auth_user = requester.user @@ -1190,7 +1190,7 @@ class ShadowBanRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) if not self.is_mine_id(user_id): @@ -1204,7 +1204,7 @@ class ShadowBanRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) if not self.is_mine_id(user_id): @@ -1242,7 +1242,7 @@ class RateLimitRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) if not self.is_mine_id(user_id): @@ -1273,7 +1273,7 @@ class RateLimitRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) if not self.is_mine_id(user_id): @@ -1321,7 +1321,7 @@ class RateLimitRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) if not self.is_mine_id(user_id): @@ -1349,7 +1349,7 @@ class AccountDataRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) if not self._is_mine_id(user_id): @@ -1390,7 +1390,7 @@ class UserReplaceMasterCrossSigningKeyRestServlet(RestServlet): self, request: SynapseRequest, user_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) if user_id is None: @@ -1424,7 +1424,7 @@ class UserByExternalId(RestServlet): request: SynapseRequest, provider: str, external_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await 
assert_requester_is_admin(self._auth, request) user_id = await self._store.get_user_by_external_id(provider, external_id) @@ -1449,7 +1449,7 @@ class UserByThreePid(RestServlet): request: SynapseRequest, medium: str, address: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) user_id = await self._store.get_user_id_by_threepid(medium, address) @@ -1475,14 +1475,14 @@ class RedactUser(RestServlet): self.admin_handler = hs.get_admin_handler() class PostBody(RequestBodyModel): - rooms: List[StrictStr] + rooms: list[StrictStr] reason: Optional[StrictStr] limit: Optional[StrictInt] use_admin: Optional[StrictBool] async def on_POST( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) await assert_user_is_admin(self._auth, requester) @@ -1531,7 +1531,7 @@ class RedactUserStatus(RestServlet): async def on_GET( self, request: SynapseRequest, redact_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) task = await self.admin_handler.get_redact_task(redact_id) @@ -1574,7 +1574,7 @@ class UserInvitesCount(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) from_ts = parse_integer(request, "from_ts", required=True) @@ -1599,7 +1599,7 @@ class UserJoinedRoomCount(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) from_ts = parse_integer(request, "from_ts", required=True) diff --git a/synapse/rest/client/_base.py b/synapse/rest/client/_base.py index 6cf37869d8..fad7234718 100644 --- a/synapse/rest/client/_base.py +++ b/synapse/rest/client/_base.py @@ -23,7 +23,7 @@ import logging import re -from typing import Any, Awaitable, Callable, Iterable, Pattern, Tuple, TypeVar, cast +from typing import Any, Awaitable, Callable, Iterable, Pattern, TypeVar, cast from synapse.api.errors import InteractiveAuthIncompleteError from synapse.api.urls import CLIENT_API_PREFIX @@ -86,7 +86,7 @@ def set_timeline_upper_limit(filter_json: JsonDict, filter_timeline_limit: int) ) -C = TypeVar("C", bound=Callable[..., Awaitable[Tuple[int, JsonDict]]]) +C = TypeVar("C", bound=Callable[..., Awaitable[tuple[int, JsonDict]]]) def interactive_auth_handler(orig: C) -> C: @@ -104,7 +104,7 @@ def interactive_auth_handler(orig: C) -> C: await self.auth_handler.check_auth """ - async def wrapped(*args: Any, **kwargs: Any) -> Tuple[int, JsonDict]: + async def wrapped(*args: Any, **kwargs: Any) -> tuple[int, JsonDict]: try: return await orig(*args, **kwargs) except InteractiveAuthIncompleteError as e: diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index d9f0c169e8..8f2f54f750 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -21,7 +21,7 @@ # import logging import random -from typing import TYPE_CHECKING, List, Literal, Optional, Tuple +from typing import TYPE_CHECKING, Literal, Optional from urllib.parse import urlparse import attr @@ -89,7 +89,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): template_text=self.config.email.email_password_reset_template_text, ) - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, 
request: SynapseRequest) -> tuple[int, JsonDict]: if not self.config.email.can_verify_email: logger.warning( "User password resets have been disabled due to lack of email config" @@ -169,7 +169,7 @@ class PasswordRestServlet(RestServlet): new_password: Optional[constr(max_length=512, strict=True)] = None @interactive_auth_handler - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: body = parse_and_validate_json_object_from_request(request, self.PostBody) # we do basic sanity checks here because the auth layer will store these @@ -296,7 +296,7 @@ class DeactivateAccountRestServlet(RestServlet): erase: StrictBool = False @interactive_auth_handler - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: body = parse_and_validate_json_object_from_request(request, self.PostBody) requester = await self.auth.get_user_by_req(request) @@ -341,7 +341,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): template_text=self.config.email.email_add_threepid_template_text, ) - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: if not self.hs.config.registration.enable_3pid_changes: raise SynapseError( 400, "3PID changes are disabled on this server", Codes.FORBIDDEN @@ -418,7 +418,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): self.store = self.hs.get_datastores().main self.identity_handler = hs.get_identity_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: body = parse_and_validate_json_object_from_request( request, MsisdnRequestTokenBody ) @@ -567,7 +567,7 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet): self.store = hs.get_datastores().main self.identity_handler = hs.get_identity_handler() - async def on_POST(self, request: Request) -> Tuple[int, JsonDict]: + async def on_POST(self, request: Request) -> tuple[int, JsonDict]: if not self.config.registration.account_threepid_delegate_msisdn: raise SynapseError( 400, @@ -601,7 +601,7 @@ class ThreepidRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() self.datastore = self.hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) threepids = await self.datastore.user_get_threepids(requester.user.to_string()) @@ -612,7 +612,7 @@ class ThreepidRestServlet(RestServlet): # the endpoint is deprecated. (If you really want to, you could do this by reusing # ThreePidBindRestServelet.PostBody with an `alias_generator` to handle # `threePidCreds` versus `three_pid_creds`. 
- async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: if self.hs.config.mas.enabled or self.hs.config.experimental.msc3861.enabled: raise NotFoundError(errcode=Codes.UNRECOGNIZED) @@ -669,7 +669,7 @@ class ThreepidAddRestServlet(RestServlet): sid: StrictStr @interactive_auth_handler - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: if not self.hs.config.registration.enable_3pid_changes: raise SynapseError( 400, "3PID changes are disabled on this server", Codes.FORBIDDEN @@ -718,7 +718,7 @@ class ThreepidBindRestServlet(RestServlet): id_server: StrictStr sid: StrictStr - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: body = parse_and_validate_json_object_from_request(request, self.PostBody) requester = await self.auth.get_user_by_req(request) @@ -746,7 +746,7 @@ class ThreepidUnbindRestServlet(RestServlet): id_server: Optional[StrictStr] = None medium: Literal["email", "msisdn"] - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: """Unbind the given 3pid from a specific identity server, or identity servers that are known to have this 3pid bound """ @@ -775,7 +775,7 @@ class ThreepidDeleteRestServlet(RestServlet): id_server: Optional[StrictStr] = None medium: Literal["email", "msisdn"] - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: if not self.hs.config.registration.enable_3pid_changes: raise SynapseError( 400, "3PID changes are disabled on this server", Codes.FORBIDDEN @@ -859,7 +859,7 @@ class WhoamiRestServlet(RestServlet): super().__init__() self.auth = hs.get_auth() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) response = { @@ -889,9 +889,9 @@ class AccountStatusRestServlet(RestServlet): class PostBody(RequestBodyModel): # TODO: we could validate that each user id is an mxid here, and/or parse it # as a UserID - user_ids: List[StrictStr] + user_ids: list[StrictStr] - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: await self._auth.get_user_by_req(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index 734c9e992f..0800c0f5b8 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from synapse.api.constants import AccountDataTypes, ReceiptTypes from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError @@ -75,7 +75,7 @@ class AccountDataServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str, account_data_type: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot 
add account data for other users.") @@ -101,7 +101,7 @@ class AccountDataServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str, account_data_type: str - ) -> Tuple[int, JsonMapping]: + ) -> tuple[int, JsonMapping]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot get account data for other users.") @@ -152,7 +152,7 @@ class UnstableAccountDataServlet(RestServlet): request: SynapseRequest, user_id: str, account_data_type: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot delete account data for other users.") @@ -191,7 +191,7 @@ class RoomAccountDataServlet(RestServlet): user_id: str, room_id: str, account_data_type: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot add account data for other users.") @@ -230,7 +230,7 @@ class RoomAccountDataServlet(RestServlet): user_id: str, room_id: str, account_data_type: str, - ) -> Tuple[int, JsonMapping]: + ) -> tuple[int, JsonMapping]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot get account data for other users.") @@ -288,7 +288,7 @@ class UnstableRoomAccountDataServlet(RestServlet): user_id: str, room_id: str, account_data_type: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot delete account data for other users.") diff --git a/synapse/rest/client/account_validity.py b/synapse/rest/client/account_validity.py index ec7836b647..1c60539054 100644 --- a/synapse/rest/client/account_validity.py +++ b/synapse/rest/client/account_validity.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -90,7 +90,7 @@ class AccountValiditySendMailServlet(RestServlet): hs.config.account_validity.account_validity_renew_by_email_enabled ) - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_expired=True) user_id = requester.user.to_string() await self.account_activity_handler.send_renewal_email_to_user(user_id) diff --git a/synapse/rest/client/appservice_ping.py b/synapse/rest/client/appservice_ping.py index 1f9662a95a..7e2ac15783 100644 --- a/synapse/rest/client/appservice_ping.py +++ b/synapse/rest/client/appservice_ping.py @@ -22,7 +22,7 @@ import logging import time from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Dict, Tuple +from typing import TYPE_CHECKING, Any from synapse.api.errors import ( CodeMessageException, @@ -58,7 +58,7 @@ class AppservicePingRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, appservice_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if not requester.app_service: @@ -97,7 +97,7 @@ class AppservicePingRestServlet(RestServlet): Codes.AS_PING_CONNECTION_TIMEOUT, ) except CodeMessageException as e: - additional_fields: Dict[str, Any] = {"status": e.code} + additional_fields: dict[str, Any] 
= {"status": e.code} if isinstance(e, HttpResponseException): try: additional_fields["body"] = e.response.decode("utf-8") diff --git a/synapse/rest/client/auth_metadata.py b/synapse/rest/client/auth_metadata.py index 4b5d997478..702f550906 100644 --- a/synapse/rest/client/auth_metadata.py +++ b/synapse/rest/client/auth_metadata.py @@ -13,7 +13,7 @@ # limitations under the License. import logging import typing -from typing import Tuple, cast +from typing import cast from synapse.api.auth.mas import MasDelegatedAuth from synapse.api.errors import Codes, SynapseError @@ -48,7 +48,7 @@ class AuthIssuerServlet(RestServlet): self._config = hs.config self._auth = hs.get_auth() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: if self._config.mas.enabled: assert isinstance(self._auth, MasDelegatedAuth) return 200, {"issuer": await self._auth.issuer()} @@ -93,7 +93,7 @@ class AuthMetadataServlet(RestServlet): self._config = hs.config self._auth = hs.get_auth() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: if self._config.mas.enabled: assert isinstance(self._auth, MasDelegatedAuth) return 200, await self._auth.auth_metadata() diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index a279db1cc5..baff999ab0 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -19,7 +19,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES from synapse.http.server import HttpServer @@ -48,7 +48,7 @@ class CapabilitiesRestServlet(RestServlet): self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await self.auth.get_user_by_req(request, allow_guest=True) change_password = self.auth_handler.can_change_password() diff --git a/synapse/rest/client/delayed_events.py b/synapse/rest/client/delayed_events.py index 2dd5a60b2b..80abacbc9d 100644 --- a/synapse/rest/client/delayed_events.py +++ b/synapse/rest/client/delayed_events.py @@ -17,7 +17,7 @@ import logging from enum import Enum from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import Codes, SynapseError from synapse.http.server import HttpServer @@ -52,7 +52,7 @@ class UpdateDelayedEventServlet(RestServlet): async def on_POST( self, request: SynapseRequest, delay_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) body = parse_json_object_from_request(request) @@ -95,7 +95,7 @@ class DelayedEventsServlet(RestServlet): self.auth = hs.get_auth() self.delayed_events_handler = hs.get_delayed_events_handler() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) # TODO: Support Pagination stream API ("from" query parameter) delayed_events = await self.delayed_events_handler.get_all_for_user(requester) diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 37bc9ae513..092406b994 
100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -22,7 +22,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional from synapse._pydantic_compat import Extra, StrictStr from synapse.api import errors @@ -56,7 +56,7 @@ class DevicesRestServlet(RestServlet): self.device_handler = hs.get_device_handler() self._msc3852_enabled = hs.config.experimental.msc3852_enabled - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) devices = await self.device_handler.get_devices_by_user( requester.user.to_string() @@ -95,10 +95,10 @@ class DeleteDevicesRestServlet(RestServlet): class PostBody(RequestBodyModel): auth: Optional[AuthenticationData] - devices: List[StrictStr] + devices: list[StrictStr] @interactive_auth_handler - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) try: @@ -150,7 +150,7 @@ class DeviceRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, device_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) device = await self.device_handler.get_device( requester.user.to_string(), device_id @@ -177,7 +177,7 @@ class DeviceRestServlet(RestServlet): @interactive_auth_handler async def on_DELETE( self, request: SynapseRequest, device_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) try: @@ -221,7 +221,7 @@ class DeviceRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, device_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) body = parse_and_validate_json_object_from_request(request, self.PutBody) @@ -302,7 +302,7 @@ class DehydratedDeviceServlet(RestServlet): handler = hs.get_device_handler() self.device_handler = handler - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) dehydrated_device = await self.device_handler.get_dehydrated_device( requester.user.to_string() @@ -318,7 +318,7 @@ class DehydratedDeviceServlet(RestServlet): device_data: DehydratedDeviceDataModel initial_device_display_name: Optional[StrictStr] - async def on_PUT(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_PUT(self, request: SynapseRequest) -> tuple[int, JsonDict]: submission = parse_and_validate_json_object_from_request(request, self.PutBody) requester = await self.auth.get_user_by_req(request) @@ -364,7 +364,7 @@ class ClaimDehydratedDeviceServlet(RestServlet): class PostBody(RequestBodyModel): device_id: StrictStr - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) submission = parse_and_validate_json_object_from_request(request, self.PostBody) @@ -395,7 +395,7 @@ class DehydratedDeviceEventsServlet(RestServlet): async def on_POST( self, request: SynapseRequest, device_id: 
str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) next_batch = parse_and_validate_json_object_from_request( @@ -501,7 +501,7 @@ class DehydratedDeviceV2Servlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() self.device_handler = handler - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) dehydrated_device = await self.device_handler.get_dehydrated_device( @@ -515,7 +515,7 @@ class DehydratedDeviceV2Servlet(RestServlet): else: raise errors.NotFoundError("No dehydrated device available") - async def on_DELETE(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_DELETE(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) dehydrated_device = await self.device_handler.get_dehydrated_device( @@ -543,7 +543,7 @@ class DehydratedDeviceV2Servlet(RestServlet): class Config: extra = Extra.allow - async def on_PUT(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_PUT(self, request: SynapseRequest) -> tuple[int, JsonDict]: submission = parse_and_validate_json_object_from_request(request, self.PutBody) requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index 479f489623..eccada67be 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, List, Literal, Optional, Tuple +from typing import TYPE_CHECKING, Literal, Optional from twisted.web.server import Request @@ -59,7 +59,7 @@ class ClientDirectoryServer(RestServlet): self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() - async def on_GET(self, request: Request, room_alias: str) -> Tuple[int, JsonDict]: + async def on_GET(self, request: Request, room_alias: str) -> tuple[int, JsonDict]: if not RoomAlias.is_valid(room_alias): raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM) room_alias_obj = RoomAlias.from_string(room_alias) @@ -72,11 +72,11 @@ class ClientDirectoryServer(RestServlet): # TODO: get Pydantic to validate that this is a valid room id? 
room_id: StrictStr # `servers` is unspecced - servers: Optional[List[StrictStr]] = None + servers: Optional[list[StrictStr]] = None async def on_PUT( self, request: SynapseRequest, room_alias: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if not RoomAlias.is_valid(room_alias): raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM) room_alias_obj = RoomAlias.from_string(room_alias) @@ -103,7 +103,7 @@ class ClientDirectoryServer(RestServlet): async def on_DELETE( self, request: SynapseRequest, room_alias: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if not RoomAlias.is_valid(room_alias): raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM) room_alias_obj = RoomAlias.from_string(room_alias) @@ -141,7 +141,7 @@ class ClientDirectoryListServer(RestServlet): self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() - async def on_GET(self, request: Request, room_id: str) -> Tuple[int, JsonDict]: + async def on_GET(self, request: Request, room_id: str) -> tuple[int, JsonDict]: room = await self.store.get_room(room_id) if room is None: raise NotFoundError("Unknown room") @@ -153,7 +153,7 @@ class ClientDirectoryListServer(RestServlet): async def on_PUT( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) content = parse_and_validate_json_object_from_request(request, self.PutBody) @@ -181,13 +181,13 @@ class ClientAppserviceDirectoryListServer(RestServlet): async def on_PUT( self, request: SynapseRequest, network_id: str, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: content = parse_and_validate_json_object_from_request(request, self.PutBody) return await self._edit(request, network_id, room_id, content.visibility) async def on_DELETE( self, request: SynapseRequest, network_id: str, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: return await self._edit(request, network_id, room_id, "private") async def _edit( @@ -196,7 +196,7 @@ class ClientAppserviceDirectoryListServer(RestServlet): network_id: str, room_id: str, visibility: Literal["public", "private"], - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if not requester.app_service: raise AuthError( diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py index ad23cc76ce..082bacade6 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py @@ -22,7 +22,7 @@ """This module contains REST servlets to do with event streaming, /events.""" import logging -from typing import TYPE_CHECKING, Dict, List, Tuple, Union +from typing import TYPE_CHECKING, Union from synapse.api.errors import SynapseError from synapse.events.utils import SerializeEventConfig @@ -51,9 +51,9 @@ class EventStreamRestServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore if requester.is_guest: if b"room_id" not in args: raise SynapseError(400, "Guest users must specify room_id param") @@ -96,7 +96,7 @@ class EventRestServlet(RestServlet): async def on_GET( self, 
request: SynapseRequest, event_id: str - ) -> Tuple[int, Union[str, JsonDict]]: + ) -> tuple[int, Union[str, JsonDict]]: requester = await self.auth.get_user_by_req(request) event = await self.event_handler.get_event(requester.user, None, event_id) diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py index f1e881975f..cfe82e1473 100644 --- a/synapse/rest/client/filter.py +++ b/synapse/rest/client/filter.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import AuthError, NotFoundError, StoreError, SynapseError from synapse.http.server import HttpServer @@ -48,7 +48,7 @@ class GetFilterRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str, filter_id: str - ) -> Tuple[int, JsonMapping]: + ) -> tuple[int, JsonMapping]: target_user = UserID.from_string(user_id) requester = await self.auth.get_user_by_req(request) @@ -87,7 +87,7 @@ class CreateFilterRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: target_user = UserID.from_string(user_id) requester = await self.auth.get_user_by_req(request) diff --git a/synapse/rest/client/initial_sync.py b/synapse/rest/client/initial_sync.py index a2c50f5d58..c20e007c5b 100644 --- a/synapse/rest/client/initial_sync.py +++ b/synapse/rest/client/initial_sync.py @@ -19,7 +19,7 @@ # # -from typing import TYPE_CHECKING, Dict, List, Tuple +from typing import TYPE_CHECKING from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_boolean @@ -43,9 +43,9 @@ class InitialSyncRestServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore as_client_event = b"raw" not in args pagination_config = await PaginationConfig.from_request( self.store, request, default_limit=10 diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index f8974e34a8..1f71359d55 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -24,7 +24,7 @@ import logging import re from collections import Counter from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from typing_extensions import Self @@ -129,7 +129,7 @@ class KeyUploadServlet(RestServlet): """ class DeviceKeys(RequestBodyModel): - algorithms: List[StrictStr] + algorithms: list[StrictStr] """The encryption algorithms supported by this device.""" device_id: StrictStr @@ -225,7 +225,7 @@ class KeyUploadServlet(RestServlet): async def on_POST( self, request: SynapseRequest, device_id: Optional[str] - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() @@ -343,7 +343,7 @@ class KeyQueryServlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() @cancellable - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await 
self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() device_id = requester.device_id @@ -388,7 +388,7 @@ class KeyChangesServlet(RestServlet): self.store = hs.get_datastores().main @cancellable - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) from_token_string = parse_string(request, "from", required=True) @@ -442,13 +442,13 @@ class OneTimeKeyServlet(RestServlet): self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) # Generate a count for each algorithm, which is hard-coded to 1. - query: Dict[str, Dict[str, Dict[str, int]]] = {} + query: dict[str, dict[str, dict[str, int]]] = {} for user_id, one_time_keys in body.get("one_time_keys", {}).items(): for device_id, algorithm in one_time_keys.items(): query.setdefault(user_id, {})[device_id] = {algorithm: 1} @@ -490,13 +490,13 @@ class UnstableOneTimeKeyServlet(RestServlet): self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) # Generate a count for each algorithm. 
- query: Dict[str, Dict[str, Dict[str, int]]] = {} + query: dict[str, dict[str, dict[str, int]]] = {} for user_id, one_time_keys in body.get("one_time_keys", {}).items(): for device_id, algorithms in one_time_keys.items(): query.setdefault(user_id, {})[device_id] = Counter(algorithms) @@ -526,7 +526,7 @@ class SigningKeyUploadServlet(RestServlet): self.auth_handler = hs.get_auth_handler() @interactive_auth_handler - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() body = parse_json_object_from_request(request) @@ -659,7 +659,7 @@ class SignaturesUploadServlet(RestServlet): self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() body = parse_json_object_from_request(request) diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py index d7a17e1b35..5e96079b66 100644 --- a/synapse/rest/client/knock.py +++ b/synapse/rest/client/knock.py @@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Dict, List, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import Membership from synapse.api.errors import SynapseError @@ -58,7 +58,7 @@ class KnockRoomAliasServlet(RestServlet): self, request: SynapseRequest, room_identifier: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) content = parse_json_object_from_request(request) @@ -70,7 +70,7 @@ class KnockRoomAliasServlet(RestServlet): room_id = room_identifier # twisted.web.server.Request.args is incorrectly defined as Optional[Any] - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore # Prefer via over server_name (deprecated with MSC4156) remote_room_hosts = parse_strings_from_args(args, "via", required=False) if remote_room_hosts is None: diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 921232a3ea..bba6944982 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -26,10 +26,7 @@ from typing import ( Any, Awaitable, Callable, - Dict, - List, Optional, - Tuple, TypedDict, Union, ) @@ -75,7 +72,7 @@ class LoginResponse(TypedDict, total=False): expires_in_ms: Optional[int] refresh_token: Optional[str] device_id: Optional[str] - well_known: Optional[Dict[str, Any]] + well_known: Optional[dict[str, Any]] class LoginRestServlet(RestServlet): @@ -142,8 +139,8 @@ class LoginRestServlet(RestServlet): # counters are initialised for the auth_provider_ids. _load_sso_handlers(hs) - def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - flows: List[JsonDict] = [] + def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: + flows: list[JsonDict] = [] if self.jwt_enabled: flows.append({"type": LoginRestServlet.JWT_TYPE}) @@ -178,7 +175,7 @@ class LoginRestServlet(RestServlet): # fall back to the fallback API if they don't understand one of the # login flow types returned. 
if support_login_token_flow: - tokenTypeFlow: Dict[str, Any] = {"type": LoginRestServlet.TOKEN_TYPE} + tokenTypeFlow: dict[str, Any] = {"type": LoginRestServlet.TOKEN_TYPE} # If the login token flow is enabled advertise the get_login_token flag. if self._get_login_token_enabled: tokenTypeFlow["get_login_token"] = True @@ -190,7 +187,7 @@ class LoginRestServlet(RestServlet): return 200, {"flows": flows} - async def on_POST(self, request: SynapseRequest) -> Tuple[int, LoginResponse]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, LoginResponse]: login_submission = parse_json_object_from_request(request) # Check to see if the client requested a refresh token. @@ -602,7 +599,7 @@ class RefreshTokenServlet(RestServlet): ) self.refresh_token_lifetime = hs.config.registration.refresh_token_lifetime - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: refresh_submission = parse_json_object_from_request(request) assert_params_in_dict(refresh_submission, ["refresh_token"]) @@ -626,7 +623,7 @@ class RefreshTokenServlet(RestServlet): token, access_valid_until_ms, refresh_valid_until_ms ) - response: Dict[str, Union[str, int]] = { + response: dict[str, Union[str, int]] = { "access_token": access_token, "refresh_token": refresh_token, } @@ -684,7 +681,7 @@ class SsoRedirectServlet(RestServlet): finish_request(request) return - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore client_redirect_url = parse_bytes_from_args(args, "redirectUrl", required=True) sso_url = await self._sso_handler.handle_redirect_request( request, diff --git a/synapse/rest/client/login_token_request.py b/synapse/rest/client/login_token_request.py index a053db8e55..f455e9c0b7 100644 --- a/synapse/rest/client/login_token_request.py +++ b/synapse/rest/client/login_token_request.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.ratelimiting import Ratelimiter from synapse.config.ratelimiting import RatelimitSettings @@ -89,7 +89,7 @@ class LoginTokenRequestServlet(RestServlet): ) @interactive_auth_handler - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) body = parse_json_object_from_request(request) diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py index 39c62b9e26..d804552a4a 100644 --- a/synapse/rest/client/logout.py +++ b/synapse/rest/client/logout.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet @@ -43,7 +43,7 @@ class LogoutRestServlet(RestServlet): self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req( request, allow_expired=True, allow_locked=True ) @@ -70,7 +70,7 @@ class LogoutAllRestServlet(RestServlet): self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: 
SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req( request, allow_expired=True, allow_locked=True ) diff --git a/synapse/rest/client/matrixrtc.py b/synapse/rest/client/matrixrtc.py index afe4d4fa83..22f8498f2f 100644 --- a/synapse/rest/client/matrixrtc.py +++ b/synapse/rest/client/matrixrtc.py @@ -15,7 +15,7 @@ # # -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet @@ -37,7 +37,7 @@ class MatrixRTCRestServlet(RestServlet): self._auth = hs.get_auth() self._transports = hs.config.matrix_rtc.transports - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: # Require authentication for this endpoint. await self._auth.get_user_by_req(request) diff --git a/synapse/rest/client/mutual_rooms.py b/synapse/rest/client/mutual_rooms.py index abb1fab0a3..7d0570d0cb 100644 --- a/synapse/rest/client/mutual_rooms.py +++ b/synapse/rest/client/mutual_rooms.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Dict, List, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import Codes, SynapseError from synapse.http.server import HttpServer @@ -51,9 +51,9 @@ class UserMutualRoomsServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: # twisted.web.server.Request.args is incorrectly defined as Optional[Any] - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore user_ids = parse_strings_from_args(args, "user_id", required=True) diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index 168ce50d3f..2420e9fffb 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import ReceiptTypes from synapse.events.utils import ( @@ -53,7 +53,7 @@ class NotificationsServlet(RestServlet): self.clock = hs.get_clock() self._event_serializer = hs.get_event_client_serializer() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() diff --git a/synapse/rest/client/openid.py b/synapse/rest/client/openid.py index a2c2faa199..e624a48ce7 100644 --- a/synapse/rest/client/openid.py +++ b/synapse/rest/client/openid.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import AuthError from synapse.http.server import HttpServer @@ -80,7 +80,7 @@ class IdTokenServlet(RestServlet): async def on_POST( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot request tokens for other users.") diff --git a/synapse/rest/client/password_policy.py b/synapse/rest/client/password_policy.py index 7ec6dd3443..314c409fc2 100644 --- a/synapse/rest/client/password_policy.py 
+++ b/synapse/rest/client/password_policy.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -46,7 +46,7 @@ class PasswordPolicyServlet(RestServlet): self.policy = hs.config.auth.password_policy self.enabled = hs.config.auth.password_policy_enabled - def on_GET(self, request: Request) -> Tuple[int, JsonDict]: + def on_GET(self, request: Request) -> tuple[int, JsonDict]: if not self.enabled or not self.policy: return 200, {} diff --git a/synapse/rest/client/presence.py b/synapse/rest/client/presence.py index 104d54cd89..de3ffdaa0b 100644 --- a/synapse/rest/client/presence.py +++ b/synapse/rest/client/presence.py @@ -22,7 +22,7 @@ """This module contains REST servlets to do with presence: /presence/""" import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError from synapse.api.ratelimiting import Ratelimiter @@ -60,7 +60,7 @@ class PresenceStatusRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user = UserID.from_string(user_id) @@ -84,7 +84,7 @@ class PresenceStatusRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user = UserID.from_string(user_id) diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py index 8bc532c811..7f3128cb61 100644 --- a/synapse/rest/client/profile.py +++ b/synapse/rest/client/profile.py @@ -23,7 +23,7 @@ import re from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import ProfileFields from synapse.api.errors import Codes, SynapseError @@ -69,7 +69,7 @@ class ProfileRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester_user = None if self.hs.config.server.require_auth_for_profile_requests: @@ -118,7 +118,7 @@ class ProfileFieldRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str, field_name: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester_user = None if self.hs.config.server.require_auth_for_profile_requests: @@ -156,7 +156,7 @@ class ProfileFieldRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str, field_name: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if not UserID.is_valid(user_id): raise SynapseError( HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM @@ -221,7 +221,7 @@ class ProfileFieldRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, user_id: str, field_name: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if not UserID.is_valid(user_id): raise SynapseError( HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index c1939a9b57..0a9b83af95 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -20,7 +20,7 @@ # from http import HTTPStatus -from typing import TYPE_CHECKING, List, Tuple, Union +from typing import TYPE_CHECKING, Union from synapse.api.errors import ( Codes, 
@@ -67,7 +67,7 @@ class PushRuleRestServlet(RestServlet): self._push_rules_handler = hs.get_push_rules_handler() self._push_rule_linearizer = Linearizer(name="push_rules", clock=hs.get_clock()) - async def on_PUT(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]: + async def on_PUT(self, request: SynapseRequest, path: str) -> tuple[int, JsonDict]: if not self._is_push_worker: raise Exception("Cannot handle PUT /push_rules on worker") @@ -79,7 +79,7 @@ class PushRuleRestServlet(RestServlet): async def handle_put( self, request: SynapseRequest, path: str, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: spec = _rule_spec_from_path(path.split("/")) try: priority_class = _priority_class_from_spec(spec) @@ -140,7 +140,7 @@ class PushRuleRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, path: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if not self._is_push_worker: raise Exception("Cannot handle DELETE /push_rules on worker") @@ -155,7 +155,7 @@ class PushRuleRestServlet(RestServlet): request: SynapseRequest, path: str, user_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: spec = _rule_spec_from_path(path.split("/")) namespaced_rule_id = f"global/{spec.template}/{spec.rule_id}" @@ -170,7 +170,7 @@ class PushRuleRestServlet(RestServlet): else: raise - async def on_GET(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest, path: str) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) requester.user.to_string() @@ -196,7 +196,7 @@ class PushRuleRestServlet(RestServlet): raise UnrecognizedRequestError() -def _rule_spec_from_path(path: List[str]) -> RuleSpec: +def _rule_spec_from_path(path: list[str]) -> RuleSpec: """Turn a sequence of path components into a rule spec Args: @@ -240,7 +240,7 @@ def _rule_spec_from_path(path: List[str]) -> RuleSpec: def _rule_tuple_from_request_object( rule_template: str, rule_id: str, req_obj: JsonDict -) -> Tuple[List[JsonDict], List[Union[str, JsonDict]]]: +) -> tuple[list[JsonDict], list[Union[str, JsonDict]]]: if rule_template == "postcontent": # postcontent is from MSC4306, which says that clients # cannot create their own postcontent rules right now. 
@@ -279,7 +279,7 @@ def _rule_tuple_from_request_object( return conditions, actions -def _filter_ruleset_with_path(ruleset: JsonDict, path: List[str]) -> JsonDict: +def _filter_ruleset_with_path(ruleset: JsonDict, path: list[str]) -> JsonDict: if path == []: raise UnrecognizedRequestError( PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py index a455f95a26..66d7fec07e 100644 --- a/synapse/rest/client/pusher.py +++ b/synapse/rest/client/pusher.py @@ -21,7 +21,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import Codes, SynapseError from synapse.http.server import HttpServer @@ -52,7 +52,7 @@ class PushersRestServlet(RestServlet): self.auth = hs.get_auth() self._store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() @@ -85,7 +85,7 @@ class PushersSetRestServlet(RestServlet): self.pusher_pool = self.hs.get_pusherpool() self._store = hs.get_datastores().main - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index d3d3c7c41d..874e7487bf 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import ReceiptTypes from synapse.http.server import HttpServer @@ -56,7 +56,7 @@ class ReadMarkerRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await self.presence_handler.bump_presence_active_time( diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py index 4bf93f485c..d3a43537bb 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import MAIN_TIMELINE, ReceiptTypes from synapse.api.errors import Codes, SynapseError @@ -59,7 +59,7 @@ class ReceiptRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if not RoomID.is_valid(room_id) or not event_id.startswith(EventID.SIGIL): diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index b42006e4ce..145dc6f569 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -21,7 +21,7 @@ # import logging import random -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional from twisted.web.server import Request @@ -100,7 +100,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): template_text=self.config.email.email_already_in_use_template_text, ) - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def 
on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: if not self.hs.config.email.can_verify_email: logger.warning( "Email registration has been disabled due to lack of email config" @@ -183,7 +183,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): self.server_name = hs.hostname self.identity_handler = hs.get_identity_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: body = parse_json_object_from_request(request) assert_params_in_dict( @@ -352,7 +352,7 @@ class UsernameAvailabilityRestServlet(RestServlet): hs.config.registration.inhibit_user_in_use_error ) - async def on_GET(self, request: Request) -> Tuple[int, JsonDict]: + async def on_GET(self, request: Request) -> tuple[int, JsonDict]: if not self.hs.config.registration.enable_registration: raise SynapseError( 403, "Registration has been disabled", errcode=Codes.FORBIDDEN @@ -402,7 +402,7 @@ class RegistrationTokenValidityRestServlet(RestServlet): cfg=hs.config.ratelimiting.rc_registration_token_validity, ) - async def on_GET(self, request: Request) -> Tuple[int, JsonDict]: + async def on_GET(self, request: Request) -> tuple[int, JsonDict]: await self.ratelimiter.ratelimit(None, (request.getClientAddress().host,)) if not self.hs.config.registration.enable_registration: @@ -453,7 +453,7 @@ class RegisterRestServlet(RestServlet): ) @interactive_auth_handler - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: body = parse_json_object_from_request(request) client_addr = request.getClientAddress().host @@ -853,7 +853,7 @@ class RegisterRestServlet(RestServlet): async def _do_guest_registration( self, params: JsonDict, address: Optional[str] = None - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if not self.hs.config.registration.allow_guest_access: raise SynapseError(403, "Guest access is disabled") user_id = await self.registration_handler.register_user( @@ -913,7 +913,7 @@ class RegisterAppServiceOnlyRestServlet(RestServlet): self.ratelimiter = hs.get_registration_ratelimiter() @interactive_auth_handler - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: body = parse_json_object_from_request(request) client_addr = request.getClientAddress().host @@ -970,7 +970,7 @@ class RegisterAppServiceOnlyRestServlet(RestServlet): def _calculate_registration_flows( config: HomeServerConfig, auth_handler: AuthHandler -) -> List[List[str]]: +) -> list[list[str]]: """Get a suitable flows list for registration Args: diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 49943cf0c3..d6c7411816 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -20,7 +20,7 @@ import logging import re -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from synapse.api.constants import Direction from synapse.handlers.relations import ThreadsListInclude @@ -63,7 +63,7 @@ class RelationPaginationServlet(RestServlet): parent_id: str, relation_type: Optional[str] = None, event_type: Optional[str] = None, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) pagination_config = await PaginationConfig.from_request( @@ -105,7 +105,7 @@ class 
ThreadsServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) limit = parse_integer(request, "limit", default=5) diff --git a/synapse/rest/client/reporting.py b/synapse/rest/client/reporting.py index 81faf38a7f..f11f6b7b77 100644 --- a/synapse/rest/client/reporting.py +++ b/synapse/rest/client/reporting.py @@ -21,7 +21,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse._pydantic_compat import StrictStr from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError @@ -57,7 +57,7 @@ class ReportEventRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, room_id: str, event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() @@ -138,7 +138,7 @@ class ReportRoomRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() @@ -191,7 +191,7 @@ class ReportUserRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, target_user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 1084139df0..38e315d0e7 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -25,7 +25,7 @@ import logging import re from enum import Enum from http import HTTPStatus -from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Awaitable, Optional from urllib import parse as urlparse from prometheus_client.core import Histogram @@ -166,20 +166,20 @@ class RoomCreateRestServlet(TransactionRestServlet): async def on_PUT( self, request: SynapseRequest, txn_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) set_tag("txn_id", txn_id) return await self.txns.fetch_or_execute_request( request, requester, self._do, request, requester ) - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) return await self._do(request, requester) async def _do( self, request: SynapseRequest, requester: Requester - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: room_id, _, _ = await self._room_creation_handler.create_room( requester, self.get_room_config(request) ) @@ -244,18 +244,18 @@ class RoomStateEventRestServlet(RestServlet): @cancellable def on_GET_no_state_key( self, request: SynapseRequest, room_id: str, event_type: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Awaitable[tuple[int, JsonDict]]: return self.on_GET(request, room_id, event_type, "") def on_PUT_no_state_key( self, request: SynapseRequest, room_id: str, event_type: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Awaitable[tuple[int, JsonDict]]: return self.on_PUT(request, room_id, event_type, "") @cancellable async def on_GET( self, request: SynapseRequest, room_id: str, event_type: str, state_key: str - ) -> 
Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) format = parse_string( request, "format", default="content", allowed_values=["content", "event"] @@ -295,7 +295,7 @@ class RoomStateEventRestServlet(RestServlet): event_type: str, state_key: str, txn_id: Optional[str] = None, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) if txn_id: @@ -408,7 +408,7 @@ class RoomSendEventRestServlet(TransactionRestServlet): room_id: str, event_type: str, txn_id: Optional[str], - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: content = parse_json_object_from_request(request) origin_server_ts = None @@ -460,13 +460,13 @@ class RoomSendEventRestServlet(TransactionRestServlet): request: SynapseRequest, room_id: str, event_type: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) return await self._do(request, requester, room_id, event_type, None) async def on_PUT( self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) @@ -545,11 +545,11 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet): requester: Requester, room_identifier: str, txn_id: Optional[str], - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: content = parse_json_object_from_request(request, allow_empty_body=True) # twisted.web.server.Request.args is incorrectly defined as Optional[Any] - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore # Prefer via over server_name (deprecated with MSC4156) remote_room_hosts = parse_strings_from_args(args, "via", required=False) if remote_room_hosts is None: @@ -578,13 +578,13 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet): self, request: SynapseRequest, room_identifier: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) return await self._do(request, requester, room_identifier, None) async def on_PUT( self, request: SynapseRequest, room_identifier: str, txn_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) @@ -603,7 +603,7 @@ class PublicRoomListRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: server = parse_string(request, "server") try: @@ -652,7 +652,7 @@ class PublicRoomListRestServlet(RestServlet): return 200, data - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: await self.auth.get_user_by_req(request, allow_guest=True) server = parse_string(request, "server") @@ -726,7 +726,7 @@ class RoomMemberListRestServlet(RestServlet): @cancellable async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: # TODO support Pagination stream API (limit/tokens) requester = await self.auth.get_user_by_req(request, allow_guest=True) handler = self.message_handler @@ 
-780,7 +780,7 @@ class JoinedRoomMemberListRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) users_with_profile = await self.message_handler.get_joined_members( @@ -809,7 +809,7 @@ class RoomMessageListRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: processing_start_time = self.clock.time_msec() # Fire off and hope that we get a result by the end. # @@ -870,7 +870,7 @@ class RoomStateRestServlet(RestServlet): @cancellable async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, List[JsonDict]]: + ) -> tuple[int, list[JsonDict]]: requester = await self.auth.get_user_by_req(request, allow_guest=True) # Get all the current state for this room events = await self.message_handler.get_state_events( @@ -893,7 +893,7 @@ class RoomInitialSyncRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) pagination_config = await PaginationConfig.from_request( self.store, request, default_limit=10 @@ -925,7 +925,7 @@ class RoomEventServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str, event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) include_unredacted_content = self.msc2815_enabled and ( @@ -1013,7 +1013,7 @@ class RoomEventContextServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str, event_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) limit = parse_integer(request, "limit", default=10) @@ -1072,20 +1072,20 @@ class RoomForgetRestServlet(TransactionRestServlet): PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget" register_txn_path(self, PATTERNS, http_server) - async def _do(self, requester: Requester, room_id: str) -> Tuple[int, JsonDict]: + async def _do(self, requester: Requester, room_id: str) -> tuple[int, JsonDict]: await self.room_member_handler.forget(user=requester.user, room_id=room_id) return 200, {} async def on_POST( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=False) return await self._do(requester, room_id) async def on_PUT( self, request: SynapseRequest, room_id: str, txn_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=False) set_tag("txn_id", txn_id) @@ -1119,7 +1119,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): room_id: str, membership_action: str, txn_id: Optional[str], - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if requester.is_guest and membership_action not in { Membership.JOIN, Membership.LEAVE, @@ -1196,13 +1196,13 @@ class RoomMembershipRestServlet(TransactionRestServlet): request: SynapseRequest, room_id: str, membership_action: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) return await self._do(request, requester, room_id, membership_action, None) async def on_PUT( self, request: SynapseRequest, room_id: str,
membership_action: str, txn_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) @@ -1242,7 +1242,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): room_id: str, event_id: str, txn_id: Optional[str], - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: content = parse_json_object_from_request(request) requester_suspended = await self._store.get_user_suspended_status( @@ -1328,13 +1328,13 @@ class RoomRedactEventRestServlet(TransactionRestServlet): request: SynapseRequest, room_id: str, event_id: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) return await self._do(request, requester, room_id, event_id, None) async def on_PUT( self, request: SynapseRequest, room_id: str, event_id: str, txn_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) set_tag("txn_id", txn_id) @@ -1363,7 +1363,7 @@ class RoomTypingRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, room_id: str, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if not self._is_typing_writer: @@ -1419,7 +1419,7 @@ class RoomAliasListServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) alias_list = await self.directory_handler.get_aliases_for_room( @@ -1438,7 +1438,7 @@ class SearchRestServlet(RestServlet): self.search_handler = hs.get_search_handler() self.auth = hs.get_auth() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) content = parse_json_object_from_request(request) @@ -1458,7 +1458,7 @@ class JoinedRoomsRestServlet(RestServlet): self.store = hs.get_datastores().main self.auth = hs.get_auth() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) room_ids = await self.store.get_rooms_for_user(requester.user.to_string()) @@ -1533,7 +1533,7 @@ class TimestampLookupRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) await self._auth.check_user_in_room_or_world_readable(room_id, requester) @@ -1566,7 +1566,7 @@ class RoomHierarchyRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request, allow_guest=True) max_depth = parse_integer(request, "max_depth") @@ -1575,7 +1575,7 @@ class RoomHierarchyRestServlet(RestServlet): # twisted.web.server.Request.args is incorrectly defined as Optional[Any] remote_room_hosts = None if self.msc4235_enabled: - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore via_param = parse_strings_from_args( args, "org.matrix.msc4235.via", required=False ) @@ -1614,7 +1614,7 @@ class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet): async def 
on_GET( self, request: SynapseRequest, room_identifier: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: try: requester = await self._auth.get_user_by_req(request, allow_guest=True) requester_user_id: Optional[str] = requester.user.to_string() @@ -1623,7 +1623,7 @@ class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet): requester_user_id = None # twisted.web.server.Request.args is incorrectly defined as Optional[Any] - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore remote_room_hosts = parse_strings_from_args(args, "via", required=False) room_id, remote_room_hosts = await self.resolve_room_id( room_identifier, diff --git a/synapse/rest/client/room_keys.py b/synapse/rest/client/room_keys.py index 7be08ecb60..b2de591dc5 100644 --- a/synapse/rest/client/room_keys.py +++ b/synapse/rest/client/room_keys.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Optional, Tuple, cast +from typing import TYPE_CHECKING, Optional, cast from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer @@ -52,7 +52,7 @@ class RoomKeysServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str] - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """ Uploads one or more encrypted E2E room keys for backup purposes. room_id: the ID of the room the keys are for (optional) @@ -147,7 +147,7 @@ class RoomKeysServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str] - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """ Retrieves one or more encrypted E2E room keys for backup purposes. Symmetric with the PUT version of the API. @@ -234,7 +234,7 @@ class RoomKeysServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str] - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """ Deletes one or more encrypted E2E room keys for a user for backup purposes. @@ -267,7 +267,7 @@ class RoomKeysNewVersionServlet(RestServlet): self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: """ Retrieve the version information about the most current backup version (if any) @@ -293,7 +293,7 @@ class RoomKeysNewVersionServlet(RestServlet): raise SynapseError(404, "No backup found", Codes.NOT_FOUND) return 200, info - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: """ Create a new backup version for this user's room_keys with the given info. The version is allocated by the server and returned to the user @@ -345,7 +345,7 @@ class RoomKeysVersionServlet(RestServlet): async def on_GET( self, request: SynapseRequest, version: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """ Retrieve the version information about a given version of the user's room_keys backup. @@ -374,7 +374,7 @@ class RoomKeysVersionServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, version: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """ Delete the information about a given version of the user's room_keys backup. Doesn't delete the actual room data. 
@@ -391,7 +391,7 @@ class RoomKeysVersionServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, version: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: """ Update the information about a given version of the user's room_keys backup. diff --git a/synapse/rest/client/room_upgrade_rest_servlet.py b/synapse/rest/client/room_upgrade_rest_servlet.py index a9717781b0..1c87b86ecb 100644 --- a/synapse/rest/client/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/room_upgrade_rest_servlet.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import Codes, ShadowBanError, SynapseError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -73,7 +73,7 @@ class RoomUpgradeRestServlet(RestServlet): async def on_POST( self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) content = parse_json_object_from_request(request) diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py index 2a67514560..597cb1fecc 100644 --- a/synapse/rest/client/sendtodevice.py +++ b/synapse/rest/client/sendtodevice.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.http import servlet from synapse.http.server import HttpServer @@ -53,7 +53,7 @@ class SendToDeviceRestServlet(servlet.RestServlet): async def on_PUT( self, request: SynapseRequest, message_type: str, txn_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) return await self.txns.fetch_or_execute_request( @@ -70,7 +70,7 @@ class SendToDeviceRestServlet(servlet.RestServlet): request: SynapseRequest, requester: Requester, message_type: str, - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: content = parse_json_object_from_request(request) assert_params_in_dict(content, ("messages",)) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 01868de60b..9c03eecea4 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -21,7 +21,7 @@ import itertools import logging from collections import defaultdict -from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Mapping, Optional, Union import attr @@ -138,7 +138,7 @@ class SyncRestServlet(RestServlet): cfg=hs.config.ratelimiting.rc_presence_per_user, ) - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: # This will always be set by the time Twisted calls us. 
assert request.args is not None @@ -380,7 +380,7 @@ class SyncRestServlet(RestServlet): return response @staticmethod - def encode_presence(events: List[UserPresenceState], time_now: int) -> JsonDict: + def encode_presence(events: list[UserPresenceState], time_now: int) -> JsonDict: return { "events": [ { @@ -398,7 +398,7 @@ class SyncRestServlet(RestServlet): async def encode_joined( self, sync_config: SyncConfig, - rooms: List[JoinedSyncResult], + rooms: list[JoinedSyncResult], time_now: int, serialize_options: SerializeEventConfig, ) -> JsonDict: @@ -428,7 +428,7 @@ class SyncRestServlet(RestServlet): @trace_with_opname("sync.encode_invited") async def encode_invited( self, - rooms: List[InvitedSyncResult], + rooms: list[InvitedSyncResult], time_now: int, serialize_options: SerializeEventConfig, ) -> JsonDict: @@ -464,10 +464,10 @@ class SyncRestServlet(RestServlet): @trace_with_opname("sync.encode_knocked") async def encode_knocked( self, - rooms: List[KnockedSyncResult], + rooms: list[KnockedSyncResult], time_now: int, serialize_options: SerializeEventConfig, - ) -> Dict[str, Dict[str, Any]]: + ) -> dict[str, dict[str, Any]]: """ Encode the rooms we've knocked on in a sync result. @@ -517,7 +517,7 @@ class SyncRestServlet(RestServlet): async def encode_archived( self, sync_config: SyncConfig, - rooms: List[ArchivedSyncResult], + rooms: list[ArchivedSyncResult], time_now: int, serialize_options: SerializeEventConfig, ) -> JsonDict: @@ -768,7 +768,7 @@ class SlidingSyncRestServlet(RestServlet): self.sliding_sync_handler = hs.get_sliding_sync_handler() self.event_serializer = hs.get_event_client_serializer() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req_experimental_feature( request, allow_guest=True, feature=ExperimentalFeature.MSC3575 ) @@ -900,7 +900,7 @@ class SlidingSyncRestServlet(RestServlet): async def encode_rooms( self, requester: Requester, - rooms: Dict[str, SlidingSyncResult.RoomResult], + rooms: dict[str, SlidingSyncResult.RoomResult], ) -> JsonDict: time_now = self.clock.time_msec() @@ -909,7 +909,7 @@ class SlidingSyncRestServlet(RestServlet): requester=requester, ) - serialized_rooms: Dict[str, JsonDict] = {} + serialized_rooms: dict[str, JsonDict] = {} for room_id, room_result in rooms.items(): serialized_rooms[room_id] = { "notification_count": room_result.notification_count, diff --git a/synapse/rest/client/tags.py b/synapse/rest/client/tags.py index fb59efb11f..5699ff35c7 100644 --- a/synapse/rest/client/tags.py +++ b/synapse/rest/client/tags.py @@ -21,7 +21,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import AuthError, Codes, SynapseError from synapse.http.server import HttpServer @@ -56,7 +56,7 @@ class TagListServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str, room_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot get tags for other users.") @@ -85,7 +85,7 @@ class TagServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str, room_id: str, tag: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, 
"Cannot add tags for other users.") @@ -114,7 +114,7 @@ class TagServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, user_id: str, room_id: str, tag: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot add tags for other users.") diff --git a/synapse/rest/client/thirdparty.py b/synapse/rest/client/thirdparty.py index f972591ebf..c17335eb48 100644 --- a/synapse/rest/client/thirdparty.py +++ b/synapse/rest/client/thirdparty.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Dict, List, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import ThirdPartyEntityKind from synapse.http.server import HttpServer @@ -45,7 +45,7 @@ class ThirdPartyProtocolsServlet(RestServlet): self.auth = hs.get_auth() self.appservice_handler = hs.get_application_service_handler() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await self.auth.get_user_by_req(request, allow_guest=True) protocols = await self.appservice_handler.get_3pe_protocols() @@ -63,7 +63,7 @@ class ThirdPartyProtocolServlet(RestServlet): async def on_GET( self, request: SynapseRequest, protocol: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self.auth.get_user_by_req(request, allow_guest=True) protocols = await self.appservice_handler.get_3pe_protocols( @@ -86,10 +86,10 @@ class ThirdPartyUserServlet(RestServlet): async def on_GET( self, request: SynapseRequest, protocol: str - ) -> Tuple[int, List[JsonDict]]: + ) -> tuple[int, list[JsonDict]]: await self.auth.get_user_by_req(request, allow_guest=True) - fields: Dict[bytes, List[bytes]] = request.args # type: ignore[assignment] + fields: dict[bytes, list[bytes]] = request.args # type: ignore[assignment] fields.pop(b"access_token", None) results = await self.appservice_handler.query_3pe( @@ -110,10 +110,10 @@ class ThirdPartyLocationServlet(RestServlet): async def on_GET( self, request: SynapseRequest, protocol: str - ) -> Tuple[int, List[JsonDict]]: + ) -> tuple[int, list[JsonDict]]: await self.auth.get_user_by_req(request, allow_guest=True) - fields: Dict[bytes, List[bytes]] = request.args # type: ignore[assignment] + fields: dict[bytes, list[bytes]] = request.args # type: ignore[assignment] fields.pop(b"access_token", None) results = await self.appservice_handler.query_3pe( diff --git a/synapse/rest/client/thread_subscriptions.py b/synapse/rest/client/thread_subscriptions.py index 039aba1721..f879c7589c 100644 --- a/synapse/rest/client/thread_subscriptions.py +++ b/synapse/rest/client/thread_subscriptions.py @@ -1,5 +1,5 @@ from http import HTTPStatus -from typing import TYPE_CHECKING, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Optional import attr from typing_extensions import TypeAlias @@ -59,7 +59,7 @@ class ThreadSubscriptionsRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, room_id: str, thread_root_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: RoomID.from_string(room_id) if not thread_root_id.startswith("$"): raise SynapseError( @@ -80,7 +80,7 @@ class ThreadSubscriptionsRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, room_id: str, thread_root_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: RoomID.from_string(room_id) if not thread_root_id.startswith("$"): raise 
SynapseError( @@ -101,7 +101,7 @@ class ThreadSubscriptionsRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, room_id: str, thread_root_id: str - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: RoomID.from_string(room_id) if not thread_root_id.startswith("$"): raise SynapseError( @@ -134,7 +134,7 @@ class ThreadSubscriptionsPaginationRestServlet(RestServlet): self.is_mine = hs.is_mine self.store = hs.get_datastores().main - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) limit = min( @@ -204,8 +204,8 @@ class ThreadSubscriptionsPaginationRestServlet(RestServlet): ) ) - subscribed_threads: Dict[str, Dict[str, JsonDict]] = {} - unsubscribed_threads: Dict[str, Dict[str, JsonDict]] = {} + subscribed_threads: dict[str, dict[str, JsonDict]] = {} + unsubscribed_threads: dict[str, dict[str, JsonDict]] = {} for stream_id, room_id, thread_root_id, subscribed, automatic in subscriptions: if subscribed: subscribed_threads.setdefault(room_id, {})[thread_root_id] = ( diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 571ba2fa62..4b3656a597 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -23,7 +23,7 @@ to ensure idempotency when performing PUTs using the REST API.""" import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Hashable, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Hashable from typing_extensions import ParamSpec @@ -51,8 +51,8 @@ class HttpTransactionCache: self.hs = hs self.clock = self.hs.get_clock() # $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp) - self.transactions: Dict[ - Hashable, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int] + self.transactions: dict[ + Hashable, tuple[ObservableDeferred[tuple[int, JsonDict]], int] ] = {} # Try to clean entries every 30 mins. This means entries will exist # for at *LEAST* 30 mins, and at *MOST* 60 mins. @@ -103,10 +103,10 @@ class HttpTransactionCache: self, request: IRequest, requester: Requester, - fn: Callable[P, Awaitable[Tuple[int, JsonDict]]], + fn: Callable[P, Awaitable[tuple[int, JsonDict]]], *args: P.args, **kwargs: P.kwargs, - ) -> "Deferred[Tuple[int, JsonDict]]": + ) -> "Deferred[tuple[int, JsonDict]]": """Fetches the response for this transaction, or executes the given function to produce a response for this transaction. 
diff --git a/synapse/rest/client/user_directory.py b/synapse/rest/client/user_directory.py index 94fcb11c0c..0f561c2e61 100644 --- a/synapse/rest/client/user_directory.py +++ b/synapse/rest/client/user_directory.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.errors import SynapseError from synapse.http.server import HttpServer @@ -46,7 +46,7 @@ class UserDirectorySearchRestServlet(RestServlet): self.auth = hs.get_auth() self.user_directory_handler = hs.get_user_directory_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonMapping]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonMapping]: """Searches for users in directory Returns: diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 20395430d7..dee2cdb637 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -23,7 +23,7 @@ import logging import re -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import RoomCreationPreset from synapse.http.server import HttpServer @@ -62,7 +62,7 @@ class VersionsRestServlet(RestServlet): in self.config.room.encryption_enabled_by_default_for_room_presets ) - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: msc3881_enabled = self.config.experimental.msc3881_enabled msc3575_enabled = self.config.experimental.msc3575_enabled diff --git a/synapse/rest/client/voip.py b/synapse/rest/client/voip.py index fbed3a3bae..581829a790 100644 --- a/synapse/rest/client/voip.py +++ b/synapse/rest/client/voip.py @@ -22,7 +22,7 @@ import base64 import hashlib import hmac -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet @@ -43,7 +43,7 @@ class VoipRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req( request, self.hs.config.voip.turn_allow_guests ) diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 3961f82894..a1d2364bed 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -23,7 +23,7 @@ import logging from hashlib import sha256 from http import HTTPStatus from os import path -from typing import TYPE_CHECKING, Any, Dict, List +from typing import TYPE_CHECKING, Any import jinja2 from jinja2 import TemplateNotFound @@ -121,7 +121,7 @@ class ConsentResource(DirectServeHtmlResource): has_consented = False public_version = username == "" if not public_version: - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore userhmac_bytes = parse_bytes_from_args(args, "h", required=True) self._check_hash(username, userhmac_bytes) @@ -154,7 +154,7 @@ class ConsentResource(DirectServeHtmlResource): async def _async_render_POST(self, request: Request) -> None: version = parse_string(request, "v", required=True) username = parse_string(request, "u", required=True) - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore userhmac = 
parse_bytes_from_args(args, "h", required=True) self._check_hash(username, userhmac) diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py index 608da25a6c..f783acdb83 100644 --- a/synapse/rest/key/v2/local_key_resource.py +++ b/synapse/rest/key/v2/local_key_resource.py @@ -21,7 +21,7 @@ import logging import re -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from signedjson.sign import sign_json from unpaddedbase64 import encode_base64 @@ -108,7 +108,7 @@ class LocalKey(RestServlet): def on_GET( self, request: Request, key_id: Optional[str] = None - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: # Matrix 1.6 drops support for passing the key_id, this is incompatible # with earlier versions and is allowed in order to support both. # A warning is issued to help determine when it is safe to drop this. diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 94c679b9e7..51cb077496 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -21,7 +21,7 @@ import logging import re -from typing import TYPE_CHECKING, Dict, Mapping, Optional, Set, Tuple +from typing import TYPE_CHECKING, Mapping, Optional from signedjson.sign import sign_json @@ -113,7 +113,7 @@ class RemoteKey(RestServlet): CATEGORY = "Federation requests" class PostBody(RequestBodyModel): - server_keys: Dict[StrictStr, Dict[StrictStr, _KeyQueryCriteriaDataModel]] + server_keys: dict[StrictStr, dict[StrictStr, _KeyQueryCriteriaDataModel]] def __init__(self, hs: "HomeServer"): self.fetcher = ServerKeyFetcher(hs) @@ -144,7 +144,7 @@ class RemoteKey(RestServlet): async def on_GET( self, request: Request, server: str, key_id: Optional[str] = None - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: if server and key_id: # Matrix 1.6 drops support for passing the key_id, this is incompatible # with earlier versions and is allowed in order to support both. @@ -168,7 +168,7 @@ class RemoteKey(RestServlet): return 200, await self.query_keys(query, query_remote_on_cache_miss=True) - async def on_POST(self, request: Request) -> Tuple[int, JsonDict]: + async def on_POST(self, request: Request) -> tuple[int, JsonDict]: content = parse_and_validate_json_object_from_request(request, self.PostBody) query = content.server_keys @@ -177,12 +177,12 @@ class RemoteKey(RestServlet): async def query_keys( self, - query: Dict[str, Dict[str, _KeyQueryCriteriaDataModel]], + query: dict[str, dict[str, _KeyQueryCriteriaDataModel]], query_remote_on_cache_miss: bool = False, ) -> JsonDict: logger.info("Handling query for keys %r", query) - server_keys: Dict[Tuple[str, str], Optional[FetchKeyResultForRemote]] = {} + server_keys: dict[tuple[str, str], Optional[FetchKeyResultForRemote]] = {} for server_name, key_ids in query.items(): if key_ids: results: Mapping[ @@ -199,13 +199,13 @@ class RemoteKey(RestServlet): ((server_name, key_id), res) for key_id, res in results.items() ) - json_results: Set[bytes] = set() + json_results: set[bytes] = set() time_now_ms = self.clock.time_msec() # Map server_name->key_id->int. Note that the value of the int is unused. # XXX: why don't we just use a set? - cache_misses: Dict[str, Dict[str, int]] = {} + cache_misses: dict[str, dict[str, int]] = {} for (server_name, key_id), key_result in server_keys.items(): if not query[server_name]: # all keys were requested. 
Just return what we have without worrying diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py index 74d8280582..484749dbe6 100644 --- a/synapse/rest/media/upload_resource.py +++ b/synapse/rest/media/upload_resource.py @@ -22,7 +22,7 @@ import logging import re -from typing import IO, TYPE_CHECKING, Dict, List, Optional, Tuple +from typing import IO, TYPE_CHECKING, Optional from synapse.api.errors import Codes, SynapseError from synapse.http.server import respond_with_json @@ -56,7 +56,7 @@ class BaseUploadServlet(RestServlet): async def _get_file_metadata( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, Optional[str], str]: + ) -> tuple[int, Optional[str], str]: raw_content_length = request.getHeader("Content-Length") if raw_content_length is None: raise SynapseError(msg="Request must specify a Content-Length", code=400) @@ -78,7 +78,7 @@ class BaseUploadServlet(RestServlet): code=413, errcode=Codes.TOO_LARGE, ) - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: dict[bytes, list[bytes]] = request.args # type: ignore upload_name_bytes = parse_bytes_from_args(args, "filename") if upload_name_bytes: try: diff --git a/synapse/rest/synapse/client/federation_whitelist.py b/synapse/rest/synapse/client/federation_whitelist.py index f59daf8428..0382fef1e2 100644 --- a/synapse/rest/synapse/client/federation_whitelist.py +++ b/synapse/rest/synapse/client/federation_whitelist.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.http.server import DirectServeJsonResource from synapse.http.site import SynapseRequest @@ -50,7 +50,7 @@ class FederationWhitelistResource(DirectServeJsonResource): self._auth = hs.get_auth() - async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await self._auth.get_user_by_req(request) whitelist = [] diff --git a/synapse/rest/synapse/client/jwks.py b/synapse/rest/synapse/client/jwks.py index e9a7c24e3b..15ff6f47c1 100644 --- a/synapse/rest/synapse/client/jwks.py +++ b/synapse/rest/synapse/client/jwks.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from synapse.http.server import DirectServeJsonResource from synapse.http.site import SynapseRequest @@ -73,5 +73,5 @@ class JwksResource(DirectServeJsonResource): "keys": keys, } - async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: return 200, self.res diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py index 377578ef8a..1ccdf23da8 100644 --- a/synapse/rest/synapse/client/password_reset.py +++ b/synapse/rest/synapse/client/password_reset.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -65,7 +65,7 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): # This resource should only be mounted if email validation is enabled assert hs.config.email.can_verify_email - async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]: + async def _async_render_GET(self, request: Request) -> tuple[int, bytes]: sid = parse_string(request, "sid", required=True) token = parse_string(request, "token", required=True) 
client_secret = parse_string(request, "client_secret", required=True) @@ -83,7 +83,7 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): self._confirmation_email_template.render(**template_vars).encode("utf-8"), ) - async def _async_render_POST(self, request: Request) -> Tuple[int, bytes]: + async def _async_render_POST(self, request: Request) -> tuple[int, bytes]: sid = parse_string(request, "sid", required=True) token = parse_string(request, "token", required=True) client_secret = parse_string(request, "client_secret", required=True) diff --git a/synapse/rest/synapse/client/pick_username.py b/synapse/rest/synapse/client/pick_username.py index 1727bb63b7..867ea1866d 100644 --- a/synapse/rest/synapse/client/pick_username.py +++ b/synapse/rest/synapse/client/pick_username.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Generator, List, Tuple +from typing import TYPE_CHECKING, Generator from twisted.web.resource import Resource from twisted.web.server import Request @@ -65,7 +65,7 @@ class AvailabilityCheckResource(DirectServeJsonResource): super().__init__(clock=hs.get_clock()) self._sso_handler = hs.get_sso_handler() - async def _async_render_GET(self, request: Request) -> Tuple[int, JsonDict]: + async def _async_render_GET(self, request: Request) -> tuple[int, JsonDict]: localpart = parse_string(request, "username", required=True) session_id = get_username_mapping_session_cookie_from_request(request) @@ -138,7 +138,7 @@ class AccountDetailsResource(DirectServeHtmlResource): use_avatar = parse_boolean(request, "use_avatar", default=False) try: - emails_to_use: List[str] = [ + emails_to_use: list[str] = [ val.decode("utf-8") for val in request.args.get(b"use_email", []) ] except ValueError: diff --git a/synapse/rest/synapse/client/rendezvous.py b/synapse/rest/synapse/client/rendezvous.py index 5278c35572..24c10dee82 100644 --- a/synapse/rest/synapse/client/rendezvous.py +++ b/synapse/rest/synapse/client/rendezvous.py @@ -14,7 +14,7 @@ # import logging -from typing import TYPE_CHECKING, List +from typing import TYPE_CHECKING from synapse.api.errors import UnrecognizedRequestError from synapse.http.server import DirectServeJsonResource @@ -34,7 +34,7 @@ class MSC4108RendezvousSessionResource(DirectServeJsonResource): self._handler = hs.get_rendezvous_handler() async def _async_render_GET(self, request: SynapseRequest) -> None: - postpath: List[bytes] = request.postpath # type: ignore + postpath: list[bytes] = request.postpath # type: ignore if len(postpath) != 1: raise UnrecognizedRequestError() session_id = postpath[0].decode("ascii") @@ -42,7 +42,7 @@ class MSC4108RendezvousSessionResource(DirectServeJsonResource): self._handler.handle_get(request, session_id) def _async_render_PUT(self, request: SynapseRequest) -> None: - postpath: List[bytes] = request.postpath # type: ignore + postpath: list[bytes] = request.postpath # type: ignore if len(postpath) != 1: raise UnrecognizedRequestError() session_id = postpath[0].decode("ascii") @@ -50,7 +50,7 @@ class MSC4108RendezvousSessionResource(DirectServeJsonResource): self._handler.handle_put(request, session_id) def _async_render_DELETE(self, request: SynapseRequest) -> None: - postpath: List[bytes] = request.postpath # type: ignore + postpath: list[bytes] = request.postpath # type: ignore if len(postpath) != 1: raise UnrecognizedRequestError() session_id = postpath[0].decode("ascii") diff --git a/synapse/rest/synapse/mas/devices.py b/synapse/rest/synapse/mas/devices.py index 6cc1153590..654fed8c03 100644 
--- a/synapse/rest/synapse/mas/devices.py +++ b/synapse/rest/synapse/mas/devices.py @@ -15,7 +15,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from synapse._pydantic_compat import StrictStr from synapse.api.errors import NotFoundError @@ -56,7 +56,7 @@ class MasUpsertDeviceResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) @@ -97,7 +97,7 @@ class MasDeleteDeviceResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) @@ -138,7 +138,7 @@ class MasUpdateDeviceDisplayNameResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) @@ -180,7 +180,7 @@ class MasSyncDevicesResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) diff --git a/synapse/rest/synapse/mas/users.py b/synapse/rest/synapse/mas/users.py index 09aa13bebb..a802887270 100644 --- a/synapse/rest/synapse/mas/users.py +++ b/synapse/rest/synapse/mas/users.py @@ -15,7 +15,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Optional, Tuple, TypedDict +from typing import TYPE_CHECKING, Any, Optional, TypedDict from synapse._pydantic_compat import StrictBool, StrictStr, root_validator from synapse.api.errors import NotFoundError, SynapseError @@ -58,7 +58,7 @@ class MasQueryUserResource(MasBaseResource): async def _async_render_GET( self, request: "SynapseRequest" - ) -> Tuple[int, Response]: + ) -> tuple[int, Response]: self.assert_request_is_from_mas(request) localpart = parse_string(request, "localpart", required=True) @@ -128,7 +128,7 @@ class MasProvisionUserResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) @@ -239,7 +239,7 @@ class MasIsLocalpartAvailableResource(MasBaseResource): async def _async_render_GET( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) localpart = parse_string(request, "localpart") if localpart is None: @@ -272,7 +272,7 @@ class MasDeleteUserResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) @@ -312,7 +312,7 @@ class MasReactivateUserResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = 
parse_and_validate_json_object_from_request(request, self.PostBody) @@ -350,7 +350,7 @@ class MasSetDisplayNameResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) @@ -394,7 +394,7 @@ class MasUnsetDisplayNameResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) @@ -440,7 +440,7 @@ class MasAllowCrossSigningResetResource(MasBaseResource): async def _async_render_POST( self, request: "SynapseRequest" - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: self.assert_request_is_from_mas(request) body = parse_and_validate_json_object_from_request(request, self.PostBody) diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index ae8c6a8fc0..00965cfb82 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional from twisted.web.resource import Resource from twisted.web.server import Request @@ -97,7 +97,7 @@ class ClientWellKnownResource(DirectServeJsonResource): super().__init__(clock=hs.get_clock()) self._well_known_builder = WellKnownBuilder(hs) - async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: r = await self._well_known_builder.get_well_known() if not r: raise NotFoundError(".well-known not available") diff --git a/synapse/server.py b/synapse/server.py index b63a11273a..2c252ce86f 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -34,11 +34,7 @@ from typing import ( Any, Awaitable, Callable, - Dict, - List, Optional, - Tuple, - Type, TypeVar, cast, ) @@ -278,7 +274,7 @@ class ShutdownInfo: func: Callable[..., Any] trigger_id: _SystemEventID - kwargs: Dict[str, object] + kwargs: dict[str, object] class HomeServer(metaclass=abc.ABCMeta): @@ -313,7 +309,7 @@ class HomeServer(metaclass=abc.ABCMeta): @property @abc.abstractmethod - def DATASTORE_CLASS(self) -> Type["SQLBaseStore"]: + def DATASTORE_CLASS(self) -> type["SQLBaseStore"]: # This is overridden in derived application classes # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be # instantiated during setup() for future return by get_datastores() @@ -341,8 +337,8 @@ class HomeServer(metaclass=abc.ABCMeta): # the key we use to sign events and requests self.signing_key = config.key.signing_key[0] self.config = config - self._listening_services: List[Port] = [] - self._metrics_listeners: List[Tuple[WSGIServer, Thread]] = [] + self._listening_services: list[Port] = [] + self._metrics_listeners: list[tuple[WSGIServer, Thread]] = [] self.start_time: Optional[int] = None self._instance_id = random_string(5) @@ -352,15 +348,15 @@ class HomeServer(metaclass=abc.ABCMeta): self.datastores: Optional[Databases] = None - self._module_web_resources: Dict[str, Resource] = {} + self._module_web_resources: dict[str, Resource] = {} self._module_web_resources_consumed = False # This attribute is set by the free function `refresh_certificate`. 
self.tls_server_context_factory: Optional[IOpenSSLContextFactory] = None self._is_shutdown = False - self._async_shutdown_handlers: List[ShutdownInfo] = [] - self._sync_shutdown_handlers: List[ShutdownInfo] = [] + self._async_shutdown_handlers: list[ShutdownInfo] = [] + self._sync_shutdown_handlers: list[ShutdownInfo] = [] self._background_processes: set[defer.Deferred[Optional[Any]]] = set() def run_as_background_process( @@ -1108,7 +1104,7 @@ class HomeServer(metaclass=abc.ABCMeta): return ReplicationDataHandler(self) @cache_in_self - def get_replication_streams(self) -> Dict[str, Stream]: + def get_replication_streams(self) -> dict[str, Stream]: return {stream.NAME: stream(self) for stream in STREAMS_MAP.values()} @cache_in_self diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index d937a3034e..99b362f5ff 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, Any, Set +from typing import TYPE_CHECKING, Any from synapse.api.errors import SynapseError from synapse.api.urls import ConsentURIBuilder @@ -40,7 +40,7 @@ class ConsentServerNotices: self._server_notices_manager = hs.get_server_notices_manager() self._store = hs.get_datastores().main - self._users_in_progress: Set[str] = set() + self._users_in_progress: set[str] = set() self._current_consent_version = hs.config.consent.user_consent_version self._server_notice_content = ( diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index e88e8c9b45..493b8cb62b 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, List, Tuple +from typing import TYPE_CHECKING from synapse.api.constants import ( EventTypes, @@ -127,7 +127,7 @@ class ResourceLimitsServerNotices: logger.error("Error sending resource limits server notice: %s", e) async def _remove_limit_block_notification( - self, user_id: str, ref_events: List[str] + self, user_id: str, ref_events: list[str] ) -> None: """Utility method to remove limit block notifications from the server notices room. @@ -170,7 +170,7 @@ class ResourceLimitsServerNotices: user_id, content, EventTypes.Pinned, "" ) - async def _is_room_currently_blocked(self, room_id: str) -> Tuple[bool, List[str]]: + async def _is_room_currently_blocked(self, room_id: str) -> tuple[bool, list[str]]: """ Determines if the room is currently blocked @@ -198,7 +198,7 @@ class ResourceLimitsServerNotices: # The user has yet to join the server notices room pass - referenced_events: List[str] = [] + referenced_events: list[str] = [] if pinned_state_event is not None: referenced_events = list(pinned_state_event.content.get("pinned", [])) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 394dc72fa6..991e1f847a 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -26,15 +26,9 @@ from typing import ( Any, Awaitable, Callable, - DefaultDict, - Dict, - FrozenSet, - List, Mapping, Optional, Sequence, - Set, - Tuple, ) import attr @@ -246,7 +240,7 @@ class StateHandler: async def get_current_user_ids_in_room( self, room_id: str, latest_event_ids: StrCollection - ) -> Set[str]: + ) -> set[str]: """ Get the users IDs who are currently in a room. 
@@ -271,7 +265,7 @@ class StateHandler: async def get_hosts_in_room_at_events( self, room_id: str, event_ids: StrCollection - ) -> FrozenSet[str]: + ) -> frozenset[str]: """Get the hosts that were in a room at the given event ids Args: @@ -647,7 +641,7 @@ class StateResolutionHandler: ) # dict of set of event_ids -> _StateCacheEntry. - self._state_cache: ExpiringCache[FrozenSet[int], _StateCacheEntry] = ( + self._state_cache: ExpiringCache[frozenset[int], _StateCacheEntry] = ( ExpiringCache( cache_name="state_cache", server_name=self.server_name, @@ -665,7 +659,7 @@ class StateResolutionHandler: # # tracks the amount of work done on state res per room - self._state_res_metrics: DefaultDict[str, _StateResMetrics] = defaultdict( + self._state_res_metrics: defaultdict[str, _StateResMetrics] = defaultdict( _StateResMetrics ) @@ -676,7 +670,7 @@ class StateResolutionHandler: room_id: str, room_version: str, state_groups_ids: Mapping[int, StateMap[str]], - event_map: Optional[Dict[str, EventBase]], + event_map: Optional[dict[str, EventBase]], state_res_store: "StateResolutionStore", ) -> _StateCacheEntry: """Resolves conflicts between a set of state groups @@ -776,7 +770,7 @@ class StateResolutionHandler: room_id: str, room_version: str, state_sets: Sequence[StateMap[str]], - event_map: Optional[Dict[str, EventBase]], + event_map: Optional[dict[str, EventBase]], state_res_store: "StateResolutionStore", ) -> StateMap[str]: """ @@ -884,7 +878,7 @@ class StateResolutionHandler: items = self._state_res_metrics.items() # log the N biggest rooms - biggest: List[Tuple[str, _StateResMetrics]] = heapq.nlargest( + biggest: list[tuple[str, _StateResMetrics]] = heapq.nlargest( n_to_log, items, key=lambda i: extract_key(i[1]) ) metrics_logger.debug( @@ -975,7 +969,7 @@ class StateResolutionStore: def get_events( self, event_ids: StrCollection, allow_rejected: bool = False - ) -> Awaitable[Dict[str, EventBase]]: + ) -> Awaitable[dict[str, EventBase]]: """Get events from the database Args: @@ -996,9 +990,9 @@ class StateResolutionStore: def get_auth_chain_difference( self, room_id: str, - state_sets: List[Set[str]], - conflicted_state: Optional[Set[str]], - additional_backwards_reachable_conflicted_events: Optional[Set[str]], + state_sets: list[set[str]], + conflicted_state: Optional[set[str]], + additional_backwards_reachable_conflicted_events: Optional[set[str]], ) -> Awaitable[StateDifference]: """ "Given sets of state events figure out the auth chain difference (as per state res v2 algorithm). 
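(Similarly, the state-resolution hunks above drop the `Set`, `FrozenSet` and `DefaultDict` aliases. A short illustrative sketch of the builtin and `collections` spellings they map onto, again assuming Python 3.9+; the variable names are invented for the example and nothing here is part of the patch.)

    # Illustrative only, not part of this patch: the less obvious typing aliases
    # map onto concrete classes that are themselves subscriptable on Python 3.9+
    # (typing.DefaultDict -> collections.defaultdict,
    #  typing.FrozenSet -> frozenset, typing.Set -> set).
    from collections import defaultdict

    metrics_per_room: defaultdict[str, int] = defaultdict(int)
    conflicted_ids: set[str] = {"$event_a", "$event_b"}
    cache_key: frozenset[int] = frozenset({1, 2, 3})

    metrics_per_room["!room:example.org"] += 1
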
diff --git a/synapse/state/v1.py b/synapse/state/v1.py index a2e9eb0a42..a219347264 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -23,13 +23,9 @@ import logging from typing import ( Awaitable, Callable, - Dict, Iterable, - List, Optional, Sequence, - Set, - Tuple, ) from synapse import event_auth @@ -49,8 +45,8 @@ async def resolve_events_with_store( room_id: str, room_version: RoomVersion, state_sets: Sequence[StateMap[str]], - event_map: Optional[Dict[str, EventBase]], - state_map_factory: Callable[[StrCollection], Awaitable[Dict[str, EventBase]]], + event_map: Optional[dict[str, EventBase]], + state_map_factory: Callable[[StrCollection], Awaitable[dict[str, EventBase]]], ) -> StateMap[str]: """ Args: @@ -145,7 +141,7 @@ async def resolve_events_with_store( def _seperate( state_sets: Iterable[StateMap[str]], -) -> Tuple[MutableStateMap[str], MutableStateMap[Set[str]]]: +) -> tuple[MutableStateMap[str], MutableStateMap[set[str]]]: """Takes the state_sets and figures out which keys are conflicted and which aren't. i.e., which have multiple different event_ids associated with them in different state sets. @@ -166,7 +162,7 @@ def _seperate( """ state_set_iterator = iter(state_sets) unconflicted_state = dict(next(state_set_iterator)) - conflicted_state: MutableStateMap[Set[str]] = {} + conflicted_state: MutableStateMap[set[str]] = {} for state_set in state_set_iterator: for key, value in state_set.items(): @@ -196,8 +192,8 @@ def _seperate( def _create_auth_events_from_maps( room_version: RoomVersion, unconflicted_state: StateMap[str], - conflicted_state: StateMap[Set[str]], - state_map: Dict[str, EventBase], + conflicted_state: StateMap[set[str]], + state_map: dict[str, EventBase], ) -> StateMap[str]: """ @@ -228,9 +224,9 @@ def _create_auth_events_from_maps( def _resolve_with_state( room_version: RoomVersion, unconflicted_state_ids: MutableStateMap[str], - conflicted_state_ids: StateMap[Set[str]], + conflicted_state_ids: StateMap[set[str]], auth_event_ids: StateMap[str], - state_map: Dict[str, EventBase], + state_map: dict[str, EventBase], ) -> MutableStateMap[str]: conflicted_state = {} for key, event_ids in conflicted_state_ids.items(): @@ -263,7 +259,7 @@ def _resolve_with_state( def _resolve_state_events( room_version: RoomVersion, - conflicted_state: StateMap[List[EventBase]], + conflicted_state: StateMap[list[EventBase]], auth_events: MutableStateMap[EventBase], ) -> StateMap[EventBase]: """This is where we actually decide which of the conflicted state to @@ -312,7 +308,7 @@ def _resolve_state_events( def _resolve_auth_events( - room_version: RoomVersion, events: List[EventBase], auth_events: StateMap[EventBase] + room_version: RoomVersion, events: list[EventBase], auth_events: StateMap[EventBase] ) -> EventBase: reverse = list(reversed(_ordered_events(events))) @@ -347,7 +343,7 @@ def _resolve_auth_events( def _resolve_normal_events( - events: List[EventBase], auth_events: StateMap[EventBase] + events: list[EventBase], auth_events: StateMap[EventBase] ) -> EventBase: for event in _ordered_events(events): try: @@ -365,8 +361,8 @@ def _resolve_normal_events( return event -def _ordered_events(events: Iterable[EventBase]) -> List[EventBase]: - def key_func(e: EventBase) -> Tuple[int, str]: +def _ordered_events(events: Iterable[EventBase]) -> list[EventBase]: + def key_func(e: EventBase) -> tuple[int, str]: # we have to use utf-8 rather than ascii here because it turns out we allow # people to send us events with non-ascii event IDs :/ return -int(e.depth), 
hashlib.sha1(e.event_id.encode("utf-8")).hexdigest() diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 8bf6706434..683f0c1dcc 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -25,16 +25,12 @@ from typing import ( Any, Awaitable, Callable, - Dict, Generator, Iterable, - List, Literal, Optional, Protocol, Sequence, - Set, - Tuple, overload, ) @@ -61,13 +57,13 @@ class StateResolutionStore(Protocol): # TestStateResolutionStore in tests. def get_events( self, event_ids: StrCollection, allow_rejected: bool = False - ) -> Awaitable[Dict[str, EventBase]]: ... + ) -> Awaitable[dict[str, EventBase]]: ... def get_auth_chain_difference( self, room_id: str, - state_sets: List[Set[str]], - conflicted_state: Optional[Set[str]], + state_sets: list[set[str]], + conflicted_state: Optional[set[str]], additional_backwards_reachable_conflicted_events: Optional[set[str]], ) -> Awaitable[StateDifference]: ... @@ -88,7 +84,7 @@ async def resolve_events_with_store( room_id: str, room_version: RoomVersion, state_sets: Sequence[StateMap[str]], - event_map: Optional[Dict[str, EventBase]], + event_map: Optional[dict[str, EventBase]], state_res_store: StateResolutionStore, ) -> StateMap[str]: """Resolves the state using the v2 state resolution algorithm @@ -128,7 +124,7 @@ async def resolve_events_with_store( logger.debug("%d conflicted state entries", len(conflicted_state)) logger.debug("Calculating auth chain difference") - conflicted_set: Optional[Set[str]] = None + conflicted_set: Optional[set[str]] = None if room_version.state_res == StateResolutionVersions.V2_1: # calculate the conflicted subgraph conflicted_set = set(itertools.chain.from_iterable(conflicted_state.values())) @@ -242,7 +238,7 @@ async def resolve_events_with_store( async def _get_power_level_for_sender( room_id: str, event_id: str, - event_map: Dict[str, EventBase], + event_map: dict[str, EventBase], state_res_store: StateResolutionStore, ) -> int: """Return the power level of the sender of the given event according to @@ -315,10 +311,10 @@ async def _get_power_level_for_sender( async def _get_auth_chain_difference( room_id: str, state_sets: Sequence[StateMap[str]], - unpersisted_events: Dict[str, EventBase], + unpersisted_events: dict[str, EventBase], state_res_store: StateResolutionStore, - conflicted_state: Optional[Set[str]], -) -> Set[str]: + conflicted_state: Optional[set[str]], +) -> set[str]: """Compare the auth chains of each state set and return the set of events that only appear in some, but not all of the auth chains. @@ -356,10 +352,10 @@ async def _get_auth_chain_difference( # event IDs if they appear in the `unpersisted_events`. This is the intersection of # the event's auth chain with the events in `unpersisted_events` *plus* their # auth event IDs. - events_to_auth_chain: Dict[str, Set[str]] = {} + events_to_auth_chain: dict[str, set[str]] = {} # remember the forward links when doing the graph traversal, we'll need it for v2.1 checks # This is a map from an event to the set of events that contain it as an auth event. - event_to_next_event: Dict[str, Set[str]] = {} + event_to_next_event: dict[str, set[str]] = {} for event in unpersisted_events.values(): chain = {event.event_id} events_to_auth_chain[event.event_id] = chain @@ -379,8 +375,8 @@ async def _get_auth_chain_difference( # # Note: If there are no `unpersisted_events` (which is the common case), we can do a # much simpler calculation. 
- additional_backwards_reachable_conflicted_events: Set[str] = set() - unpersisted_conflicted_events: Set[str] = set() + additional_backwards_reachable_conflicted_events: set[str] = set() + unpersisted_conflicted_events: set[str] = set() if unpersisted_events: # The list of state sets to pass to the store, where each state set is a set # of the event ids making up the state. This is similar to `state_sets`, @@ -388,17 +384,17 @@ async def _get_auth_chain_difference( # ((type, state_key)->event_id) mappings; and (b) we have stripped out # unpersisted events and replaced them with the persisted events in # their auth chain. - state_sets_ids: List[Set[str]] = [] + state_sets_ids: list[set[str]] = [] # For each state set, the unpersisted event IDs reachable (by their auth # chain) from the events in that set. - unpersisted_set_ids: List[Set[str]] = [] + unpersisted_set_ids: list[set[str]] = [] for state_set in state_sets: - set_ids: Set[str] = set() + set_ids: set[str] = set() state_sets_ids.append(set_ids) - unpersisted_ids: Set[str] = set() + unpersisted_ids: set[str] = set() unpersisted_set_ids.append(unpersisted_ids) for event_id in state_set.values(): @@ -479,7 +475,7 @@ async def _get_auth_chain_difference( # but NOT the backwards conflicted set. This mirrors what the DB layer does but in reverse: # we supplied events which are backwards reachable to the DB and now the DB is providing # forwards reachable events from the DB. - forwards_conflicted_set: Set[str] = set() + forwards_conflicted_set: set[str] = set() # we include unpersisted conflicted events here to process exclusive unpersisted subgraphs search_queue = subgraph_frontier.union(unpersisted_conflicted_events) while search_queue: @@ -490,7 +486,7 @@ async def _get_auth_chain_difference( # we've already calculated the backwards form as this is the auth chain for each # unpersisted conflicted event. - backwards_conflicted_set: Set[str] = set() + backwards_conflicted_set: set[str] = set() for uce in unpersisted_conflicted_events: backwards_conflicted_set.update(events_to_auth_chain.get(uce, [])) @@ -526,7 +522,7 @@ async def _get_auth_chain_difference( def _seperate( state_sets: Iterable[StateMap[str]], -) -> Tuple[StateMap[str], StateMap[Set[str]]]: +) -> tuple[StateMap[str], StateMap[set[str]]]: """Return the unconflicted and conflicted state. This is different than in the original algorithm, as this defines a key to be conflicted if one of the state sets doesn't have that key. @@ -550,7 +546,7 @@ def _seperate( conflicted_state[key] = event_ids # mypy doesn't understand that discarding None above means that conflicted - # state is StateMap[Set[str]], not StateMap[Set[Optional[Str]]]. + # state is StateMap[set[str]], not StateMap[set[Optional[Str]]]. 
return unconflicted_state, conflicted_state # type: ignore[return-value] @@ -579,12 +575,12 @@ def _is_power_event(event: EventBase) -> bool: async def _add_event_and_auth_chain_to_graph( - graph: Dict[str, Set[str]], + graph: dict[str, set[str]], room_id: str, event_id: str, - event_map: Dict[str, EventBase], + event_map: dict[str, EventBase], state_res_store: StateResolutionStore, - full_conflicted_set: Set[str], + full_conflicted_set: set[str], ) -> None: """Helper function for _reverse_topological_power_sort that add the event and its auth chain (that is in the auth diff) to the graph @@ -616,10 +612,10 @@ async def _reverse_topological_power_sort( clock: Clock, room_id: str, event_ids: Iterable[str], - event_map: Dict[str, EventBase], + event_map: dict[str, EventBase], state_res_store: StateResolutionStore, - full_conflicted_set: Set[str], -) -> List[str]: + full_conflicted_set: set[str], +) -> list[str]: """Returns a list of the event_ids sorted by reverse topological ordering, and then by power level and origin_server_ts @@ -635,7 +631,7 @@ async def _reverse_topological_power_sort( The sorted list """ - graph: Dict[str, Set[str]] = {} + graph: dict[str, set[str]] = {} for idx, event_id in enumerate(event_ids, start=1): await _add_event_and_auth_chain_to_graph( graph, room_id, event_id, event_map, state_res_store, full_conflicted_set @@ -658,7 +654,7 @@ async def _reverse_topological_power_sort( if idx % _AWAIT_AFTER_ITERATIONS == 0: await clock.sleep(0) - def _get_power_order(event_id: str) -> Tuple[int, int, str]: + def _get_power_order(event_id: str) -> tuple[int, int, str]: ev = event_map[event_id] pl = event_to_pl[event_id] @@ -675,9 +671,9 @@ async def _iterative_auth_checks( clock: Clock, room_id: str, room_version: RoomVersion, - event_ids: List[str], + event_ids: list[str], base_state: StateMap[str], - event_map: Dict[str, EventBase], + event_map: dict[str, EventBase], state_res_store: StateResolutionStore, ) -> MutableStateMap[str]: """Sequentially apply auth checks to each event in given list, updating the @@ -758,11 +754,11 @@ async def _iterative_auth_checks( async def _mainline_sort( clock: Clock, room_id: str, - event_ids: List[str], + event_ids: list[str], resolved_power_event_id: Optional[str], - event_map: Dict[str, EventBase], + event_map: dict[str, EventBase], state_res_store: StateResolutionStore, -) -> List[str]: +) -> list[str]: """Returns a sorted list of event_ids sorted by mainline ordering based on the given event resolved_power_event_id @@ -829,8 +825,8 @@ async def _mainline_sort( async def _get_mainline_depth_for_event( clock: Clock, event: EventBase, - mainline_map: Dict[str, int], - event_map: Dict[str, EventBase], + mainline_map: dict[str, int], + event_map: dict[str, EventBase], state_res_store: StateResolutionStore, ) -> int: """Get the mainline depths for the given event based on the mainline map @@ -880,7 +876,7 @@ async def _get_mainline_depth_for_event( async def _get_event( room_id: str, event_id: str, - event_map: Dict[str, EventBase], + event_map: dict[str, EventBase], state_res_store: StateResolutionStore, allow_none: Literal[False] = False, ) -> EventBase: ... @@ -890,7 +886,7 @@ async def _get_event( async def _get_event( room_id: str, event_id: str, - event_map: Dict[str, EventBase], + event_map: dict[str, EventBase], state_res_store: StateResolutionStore, allow_none: Literal[True], ) -> Optional[EventBase]: ... 
@@ -899,7 +895,7 @@ async def _get_event( async def _get_event( room_id: str, event_id: str, - event_map: Dict[str, EventBase], + event_map: dict[str, EventBase], state_res_store: StateResolutionStore, allow_none: bool = False, ) -> Optional[EventBase]: @@ -936,7 +932,7 @@ async def _get_event( def lexicographical_topological_sort( - graph: Dict[str, Set[str]], key: Callable[[str], Any] + graph: dict[str, set[str]], key: Callable[[str], Any] ) -> Generator[str, None, None]: """Performs a lexicographic reverse topological sort on the graph. @@ -960,7 +956,7 @@ def lexicographical_topological_sort( # outgoing edges, c.f. # https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm outdegree_map = graph - reverse_graph: Dict[str, Set[str]] = {} + reverse_graph: dict[str, set[str]] = {} # Lists of nodes with zero out degree. Is actually a tuple of # `(key(node), node)` so that sorting does the right thing diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 1fddcc0799..b6958ef06b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -21,7 +21,7 @@ # import logging from abc import ABCMeta -from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, Optional, Union +from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional, Union from synapse.storage.database import ( DatabasePool, @@ -60,7 +60,7 @@ class SQLBaseStore(metaclass=ABCMeta): self.database_engine = database.engine self.db_pool = database - self.external_cached_functions: Dict[str, CachedFunction] = {} + self.external_cached_functions: dict[str, CachedFunction] = {} def process_replication_rows( # noqa: B027 (no-op by design) self, diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index e3e793d5f5..ce213050a9 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -28,13 +28,9 @@ from typing import ( AsyncContextManager, Awaitable, Callable, - Dict, Iterable, - List, Optional, Sequence, - Tuple, - Type, cast, ) @@ -96,7 +92,7 @@ class ForeignKeyConstraint(Constraint): """ referenced_table: str - columns: Sequence[Tuple[str, str]] + columns: Sequence[tuple[str, str]] deferred: bool def make_check_clause(self, table: str) -> str: @@ -173,7 +169,7 @@ class _BackgroundUpdateContextManager: async def __aexit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> None: @@ -260,8 +256,8 @@ class BackgroundUpdater: self._default_batch_size_callback: Optional[DEFAULT_BATCH_SIZE_CALLBACK] = None self._min_batch_size_callback: Optional[MIN_BATCH_SIZE_CALLBACK] = None - self._background_update_performance: Dict[str, BackgroundUpdatePerformance] = {} - self._background_update_handlers: Dict[str, _BackgroundUpdateHandler] = {} + self._background_update_performance: dict[str, BackgroundUpdatePerformance] = {} + self._background_update_handlers: dict[str, _BackgroundUpdateHandler] = {} # TODO: all these bool flags make me feel icky---can we combine into a status # enum? 
self._all_done = False @@ -530,14 +526,14 @@ class BackgroundUpdater: True if we have finished running all the background updates, otherwise False """ - def get_background_updates_txn(txn: Cursor) -> List[Tuple[str, Optional[str]]]: + def get_background_updates_txn(txn: Cursor) -> list[tuple[str, Optional[str]]]: txn.execute( """ SELECT update_name, depends_on FROM background_updates ORDER BY ordering, update_name """ ) - return cast(List[Tuple[str, Optional[str]]], txn.fetchall()) + return cast(list[tuple[str, Optional[str]]], txn.fetchall()) if not self._current_background_update: all_pending_updates = await self.db_pool.runInteraction( @@ -965,7 +961,7 @@ class BackgroundUpdater: order_columns = ", ".join(unique_columns) where_clause = "" - args: List[Any] = [] + args: list[Any] = [] if parsed_progress.lower_bound: where_clause = f"""WHERE ({order_columns}) > ({", ".join("?" for _ in unique_columns)})""" args.extend(parsed_progress.lower_bound) diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 646e2cf115..0daf4830d9 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -31,15 +31,10 @@ from typing import ( Callable, ClassVar, Collection, - Deque, - Dict, Generator, Generic, Iterable, - List, Optional, - Set, - Tuple, TypeVar, Union, ) @@ -143,7 +138,7 @@ class _PersistEventsTask: name: ClassVar[str] = "persist_event_batch" # used for opentracing - events_and_contexts: List[EventPersistencePair] + events_and_contexts: list[EventPersistencePair] backfilled: bool def try_merge(self, task: "_EventPersistQueueTask") -> bool: @@ -178,7 +173,7 @@ class _EventPersistQueueItem(Generic[_PersistResult]): task: _EventPersistQueueTask deferred: ObservableDeferred[_PersistResult] - parent_opentracing_span_contexts: List = attr.ib(factory=list) + parent_opentracing_span_contexts: list = attr.ib(factory=list) """A list of opentracing spans waiting for this batch""" opentracing_span_context: Any = None @@ -208,8 +203,8 @@ class _EventPeristenceQueue(Generic[_PersistResult]): """ self.server_name = server_name self.hs = hs - self._event_persist_queues: Dict[str, Deque[_EventPersistQueueItem]] = {} - self._currently_persisting_rooms: Set[str] = set() + self._event_persist_queues: dict[str, deque[_EventPersistQueueItem]] = {} + self._currently_persisting_rooms: set[str] = set() self._per_item_callback = per_item_callback async def add_to_queue( @@ -365,7 +360,7 @@ class EventsPersistenceStorageController: self, room_id: str, task: _EventPersistQueueTask, - ) -> Dict[str, str]: + ) -> dict[str, str]: """Callback for the _event_persist_queue Returns: @@ -394,7 +389,7 @@ class EventsPersistenceStorageController: self, events_and_contexts: Iterable[EventPersistencePair], backfilled: bool = False, - ) -> Tuple[List[EventBase], RoomStreamToken]: + ) -> tuple[list[EventBase], RoomStreamToken]: """ Write events to the database Args: @@ -414,8 +409,8 @@ class EventsPersistenceStorageController: PartialStateConflictError: if attempting to persist a partial state event in a room that has been un-partial stated. 
""" - event_ids: List[str] = [] - partitioned: Dict[str, List[EventPersistencePair]] = {} + event_ids: list[str] = [] + partitioned: dict[str, list[EventPersistencePair]] = {} for event, ctx in events_and_contexts: partitioned.setdefault(event.room_id, []).append((event, ctx)) event_ids.append(event.event_id) @@ -431,8 +426,8 @@ class EventsPersistenceStorageController: set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled)) async def enqueue( - item: Tuple[str, List[EventPersistencePair]], - ) -> Dict[str, str]: + item: tuple[str, list[EventPersistencePair]], + ) -> dict[str, str]: room_id, evs_ctxs = item return await self._event_persist_queue.add_to_queue( room_id, @@ -447,7 +442,7 @@ class EventsPersistenceStorageController: # # Since we use `yieldable_gather_results` we need to merge the returned list # of dicts into one. - replaced_events: Dict[str, str] = {} + replaced_events: dict[str, str] = {} for d in ret_vals: replaced_events.update(d) @@ -469,7 +464,7 @@ class EventsPersistenceStorageController: @trace async def persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False - ) -> Tuple[EventBase, PersistedEventPosition, RoomStreamToken]: + ) -> tuple[EventBase, PersistedEventPosition, RoomStreamToken]: """ Returns: The event, stream ordering of `event`, and the stream ordering of the @@ -573,7 +568,7 @@ class EventsPersistenceStorageController: async def _persist_event_batch( self, room_id: str, task: _PersistEventsTask - ) -> Dict[str, str]: + ) -> dict[str, str]: """Callback for the _event_persist_queue Calculates the change to current state and forward extremities, and @@ -592,7 +587,7 @@ class EventsPersistenceStorageController: events_and_contexts = task.events_and_contexts backfilled = task.backfilled - replaced_events: Dict[str, str] = {} + replaced_events: dict[str, str] = {} if not events_and_contexts: return replaced_events @@ -678,8 +673,8 @@ class EventsPersistenceStorageController: return replaced_events async def _calculate_new_forward_extremities_and_state_delta( - self, room_id: str, ev_ctx_rm: List[EventPersistencePair] - ) -> Tuple[Optional[Set[str]], Optional[DeltaState]]: + self, room_id: str, ev_ctx_rm: list[EventPersistencePair] + ) -> tuple[Optional[set[str]], Optional[DeltaState]]: """Calculates the new forward extremities and state delta for a room given events to persist. @@ -803,9 +798,9 @@ class EventsPersistenceStorageController: async def _calculate_new_extremities( self, room_id: str, - event_contexts: List[EventPersistencePair], + event_contexts: list[EventPersistencePair], latest_event_ids: AbstractSet[str], - ) -> Set[str]: + ) -> set[str]: """Calculates the new forward extremities for a room given events to persist. 
@@ -863,10 +858,10 @@ class EventsPersistenceStorageController: async def _get_new_state_after_events( self, room_id: str, - events_context: List[EventPersistencePair], + events_context: list[EventPersistencePair], old_latest_event_ids: AbstractSet[str], - new_latest_event_ids: Set[str], - ) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]], Set[str]]: + new_latest_event_ids: set[str], + ) -> tuple[Optional[StateMap[str]], Optional[StateMap[str]], set[str]]: """Calculate the current state dict after adding some new events to a room @@ -1037,11 +1032,11 @@ class EventsPersistenceStorageController: async def _prune_extremities( self, room_id: str, - new_latest_event_ids: Set[str], + new_latest_event_ids: set[str], resolved_state_group: int, - event_id_to_state_group: Dict[str, int], - events_context: List[EventPersistencePair], - ) -> Set[str]: + event_id_to_state_group: dict[str, int], + events_context: list[EventPersistencePair], + ) -> set[str]: """See if we can prune any of the extremities after calculating the resolved state. """ @@ -1108,7 +1103,7 @@ class EventsPersistenceStorageController: # as a first cut. events_to_check: Collection[EventBase] = [event] while events_to_check: - new_events: Set[str] = set() + new_events: set[str] = set() for event_to_check in events_to_check: if self.is_mine_id(event_to_check.sender): if event_to_check.type != EventTypes.Dummy: @@ -1177,7 +1172,7 @@ class EventsPersistenceStorageController: async def _is_server_still_joined( self, room_id: str, - ev_ctx_rm: List[EventPersistencePair], + ev_ctx_rm: list[EventPersistencePair], delta: DeltaState, ) -> bool: """Check if the server will still be joined after the given events have diff --git a/synapse/storage/controllers/purge_events.py b/synapse/storage/controllers/purge_events.py index ded9cb0567..6606fdcc30 100644 --- a/synapse/storage/controllers/purge_events.py +++ b/synapse/storage/controllers/purge_events.py @@ -26,7 +26,6 @@ from typing import ( Collection, Mapping, Optional, - Set, ) from synapse.logging.context import nested_logging_context @@ -99,7 +98,7 @@ class PurgeEventsStorageController: async def _find_unreferenced_groups( self, state_groups: Collection[int], - ) -> Set[int]: + ) -> set[int]: """Used when purging history to figure out which state groups can be deleted. @@ -316,7 +315,7 @@ class PurgeEventsStorageController: self, last_checked_state_group: int, batch_size: int, - ) -> tuple[Set[int], int, bool]: + ) -> tuple[set[int], int, bool]: """Used when deleting unreferenced state groups in the background to figure out which state groups can be deleted. To avoid increased DB usage due to de-deltaing state groups, this returns only diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 76978402b9..690a0dde2e 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -25,13 +25,9 @@ from typing import ( AbstractSet, Callable, Collection, - Dict, - FrozenSet, Iterable, - List, Mapping, Optional, - Tuple, Union, ) @@ -95,7 +91,7 @@ class StateStorageController: @tag_args async def get_state_group_delta( self, state_group: int - ) -> Tuple[Optional[int], Optional[StateMap[str]]]: + ) -> tuple[Optional[int], Optional[StateMap[str]]]: """Given a state group try to return a previous group and a delta between the old and the new. 
@@ -114,7 +110,7 @@ class StateStorageController: @tag_args async def get_state_groups_ids( self, _room_id: str, event_ids: Collection[str], await_full_state: bool = True - ) -> Dict[int, MutableStateMap[str]]: + ) -> dict[int, MutableStateMap[str]]: """Get the event IDs of all the state for the state groups for the given events Args: @@ -164,7 +160,7 @@ class StateStorageController: @tag_args async def get_state_groups( self, room_id: str, event_ids: Collection[str] - ) -> Dict[int, List[EventBase]]: + ) -> dict[int, list[EventBase]]: """Get the state groups for the given list of event_ids Args: @@ -200,8 +196,8 @@ class StateStorageController: @trace @tag_args async def _get_state_groups_from_groups( - self, groups: List[int], state_filter: StateFilter - ) -> Dict[int, StateMap[str]]: + self, groups: list[int], state_filter: StateFilter + ) -> dict[int, StateMap[str]]: """Returns the state groups for a given set of groups, filtering on types of state events. @@ -222,7 +218,7 @@ class StateStorageController: @tag_args async def get_state_for_events( self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None - ) -> Dict[str, StateMap[EventBase]]: + ) -> dict[str, StateMap[EventBase]]: """Given a list of event_ids and type tuples, return a list of state dicts for each event. @@ -277,7 +273,7 @@ class StateStorageController: event_ids: Collection[str], state_filter: Optional[StateFilter] = None, await_full_state: bool = True, - ) -> Dict[str, StateMap[str]]: + ) -> dict[str, StateMap[str]]: """ Get the room states after each of a list of events. @@ -505,7 +501,7 @@ class StateStorageController: @tag_args async def get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None - ) -> Dict[int, MutableStateMap[str]]: + ) -> dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -671,7 +667,7 @@ class StateStorageController: @tag_args async def get_current_state_deltas( self, prev_stream_id: int, max_stream_id: int - ) -> Tuple[int, List[StateDelta]]: + ) -> tuple[int, list[StateDelta]]: """Fetch a list of room state changes since the given stream id Args: @@ -745,7 +741,7 @@ class StateStorageController: @trace @tag_args - async def get_current_hosts_in_room_ordered(self, room_id: str) -> Tuple[str, ...]: + async def get_current_hosts_in_room_ordered(self, room_id: str) -> tuple[str, ...]: """Get current hosts in room based on current state. Blocks until we have full state for the given room. This only happens for rooms @@ -807,7 +803,7 @@ class StateStorageController: async def get_joined_hosts( self, room_id: str, state_entry: "_StateCacheEntry" - ) -> FrozenSet[str]: + ) -> frozenset[str]: state_group: Union[object, int] = state_entry.state_group if not state_group: # If state_group is None it means it has yet to be assigned a @@ -828,7 +824,7 @@ class StateStorageController: room_id: str, state_group: Union[object, int], state_entry: "_StateCacheEntry", - ) -> FrozenSet[str]: + ) -> frozenset[str]: # We don't use `state_group`, it's there so that we can cache based on # it. However, its important that its never None, since two # current_state's with a state_group of None are likely to be different. 
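(One thing the hunks above leave untouched: abstract interfaces such as `Mapping`, `Iterable` and `Callable` are still imported from `typing`; only the concrete containers are rewritten. For reference, `collections.abc` provides subscriptable equivalents of those interfaces on Python 3.9+ as well. The sketch below is hypothetical, not something this patch does, and all names in it are invented.)

    # Illustrative only, not part of this patch: the abstract interfaces that the
    # hunks above still import from `typing` (Mapping, Iterable, Callable, ...)
    # have subscriptable collections.abc equivalents on Python 3.9+.
    from collections.abc import Callable, Iterable, Mapping

    def count_types(
        state: Mapping[tuple[str, str], str],
        wanted_types: Iterable[str],
        report: Callable[[int], None],
    ) -> dict[str, int]:
        wanted = set(wanted_types)
        counts: dict[str, int] = {}
        for (event_type, _state_key), _event_id in state.items():
            if event_type in wanted:
                counts[event_type] = counts.get(event_type, 0) + 1
        report(len(counts))
        return counts
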
diff --git a/synapse/storage/controllers/stats.py b/synapse/storage/controllers/stats.py index 9445a86240..18e27e0878 100644 --- a/synapse/storage/controllers/stats.py +++ b/synapse/storage/controllers/stats.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Collection, Counter, List, Tuple +from typing import TYPE_CHECKING, Collection, Counter from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction @@ -39,7 +39,7 @@ class StatsController: def __init__(self, hs: "HomeServer", stores: Databases): self.stores = stores - async def get_room_db_size_estimate(self) -> List[Tuple[str, int]]: + async def get_room_db_size_estimate(self) -> list[tuple[str, int]]: """Get an estimate of the largest rooms and how much database space they use, in bytes. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a4b2b26795..764ca9f229 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -31,16 +31,12 @@ from typing import ( Awaitable, Callable, Collection, - Dict, Iterable, Iterator, - List, Literal, Mapping, Optional, Sequence, - Tuple, - Type, TypeVar, cast, overload, @@ -218,9 +214,9 @@ class LoggingDatabaseConnection: self, *, txn_name: Optional[str] = None, - after_callbacks: Optional[List["_CallbackListEntry"]] = None, - async_after_callbacks: Optional[List["_AsyncCallbackListEntry"]] = None, - exception_callbacks: Optional[List["_CallbackListEntry"]] = None, + after_callbacks: Optional[list["_CallbackListEntry"]] = None, + async_after_callbacks: Optional[list["_AsyncCallbackListEntry"]] = None, + exception_callbacks: Optional[list["_CallbackListEntry"]] = None, ) -> "LoggingTransaction": if not txn_name: txn_name = self.default_txn_name @@ -250,7 +246,7 @@ class LoggingDatabaseConnection: def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[types.TracebackType], ) -> Optional[bool]: @@ -262,9 +258,9 @@ class LoggingDatabaseConnection: # The type of entry which goes on our after_callbacks and exception_callbacks lists. 
-_CallbackListEntry = Tuple[Callable[..., object], Tuple[object, ...], Dict[str, object]] -_AsyncCallbackListEntry = Tuple[ - Callable[..., Awaitable], Tuple[object, ...], Dict[str, object] +_CallbackListEntry = tuple[Callable[..., object], tuple[object, ...], dict[str, object]] +_AsyncCallbackListEntry = tuple[ + Callable[..., Awaitable], tuple[object, ...], dict[str, object] ] P = ParamSpec("P") @@ -311,9 +307,9 @@ class LoggingTransaction: name: str, server_name: str, database_engine: BaseDatabaseEngine, - after_callbacks: Optional[List[_CallbackListEntry]] = None, - async_after_callbacks: Optional[List[_AsyncCallbackListEntry]] = None, - exception_callbacks: Optional[List[_CallbackListEntry]] = None, + after_callbacks: Optional[list[_CallbackListEntry]] = None, + async_after_callbacks: Optional[list[_AsyncCallbackListEntry]] = None, + exception_callbacks: Optional[list[_CallbackListEntry]] = None, ): self.txn = txn self.name = name @@ -383,16 +379,16 @@ class LoggingTransaction: assert self.exception_callbacks is not None self.exception_callbacks.append((callback, args, kwargs)) - def fetchone(self) -> Optional[Tuple]: + def fetchone(self) -> Optional[tuple]: return self.txn.fetchone() - def fetchmany(self, size: Optional[int] = None) -> List[Tuple]: + def fetchmany(self, size: Optional[int] = None) -> list[tuple]: return self.txn.fetchmany(size=size) - def fetchall(self) -> List[Tuple]: + def fetchall(self) -> list[tuple]: return self.txn.fetchall() - def __iter__(self) -> Iterator[Tuple]: + def __iter__(self) -> Iterator[tuple]: return self.txn.__iter__() @property @@ -435,7 +431,7 @@ class LoggingTransaction: values: Iterable[Iterable[Any]], template: Optional[str] = None, fetch: bool = True, - ) -> List[Tuple]: + ) -> list[tuple]: """Corresponds to psycopg2.extras.execute_values. Only available when using postgres. @@ -540,7 +536,7 @@ class LoggingTransaction: def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[types.TracebackType], ) -> None: @@ -549,8 +545,8 @@ class LoggingTransaction: class PerformanceCounters: def __init__(self) -> None: - self.current_counters: Dict[str, Tuple[int, float]] = {} - self.previous_counters: Dict[str, Tuple[int, float]] = {} + self.current_counters: dict[str, tuple[int, float]] = {} + self.previous_counters: dict[str, tuple[int, float]] = {} def update(self, key: str, duration_secs: float) -> None: count, cum_time = self.current_counters.get(key, (0, 0.0)) @@ -616,7 +612,7 @@ class DatabasePool: self._previous_loop_ts = 0.0 # Transaction counter: key is the twisted thread id, value is the current count - self._txn_counters: Dict[int, int] = defaultdict(int) + self._txn_counters: dict[int, int] = defaultdict(int) # TODO(paul): These can eventually be removed once the metrics code # is running in mainline, and we have some nice monitoring frontends @@ -666,7 +662,7 @@ class DatabasePool: If the background updates have not completed, wait 15 sec and check again. 
""" updates = cast( - List[Tuple[str]], + list[tuple[str]], await self.simple_select_list( "background_updates", keyvalues=None, @@ -717,9 +713,9 @@ class DatabasePool: self, conn: LoggingDatabaseConnection, desc: str, - after_callbacks: List[_CallbackListEntry], - async_after_callbacks: List[_AsyncCallbackListEntry], - exception_callbacks: List[_CallbackListEntry], + after_callbacks: list[_CallbackListEntry], + async_after_callbacks: list[_AsyncCallbackListEntry], + exception_callbacks: list[_CallbackListEntry], func: Callable[Concatenate[LoggingTransaction, P], R], *args: P.args, **kwargs: P.kwargs, @@ -956,9 +952,9 @@ class DatabasePool: """ async def _runInteraction() -> R: - after_callbacks: List[_CallbackListEntry] = [] - async_after_callbacks: List[_AsyncCallbackListEntry] = [] - exception_callbacks: List[_CallbackListEntry] = [] + after_callbacks: list[_CallbackListEntry] = [] + async_after_callbacks: list[_AsyncCallbackListEntry] = [] + exception_callbacks: list[_CallbackListEntry] = [] if not current_context(): logger.warning("Starting db txn '%s' from sentinel context", desc) @@ -1105,7 +1101,7 @@ class DatabasePool: self._db_pool.runWithConnection(inner_func, *args, **kwargs) ) - async def execute(self, desc: str, query: str, *args: Any) -> List[Tuple[Any, ...]]: + async def execute(self, desc: str, query: str, *args: Any) -> list[tuple[Any, ...]]: """Runs a single query for a result set. Args: @@ -1116,7 +1112,7 @@ class DatabasePool: The result of decoder(results) """ - def interaction(txn: LoggingTransaction) -> List[Tuple[Any, ...]]: + def interaction(txn: LoggingTransaction) -> list[tuple[Any, ...]]: txn.execute(query, args) return txn.fetchall() @@ -1128,7 +1124,7 @@ class DatabasePool: async def simple_insert( self, table: str, - values: Dict[str, Any], + values: dict[str, Any], desc: str = "simple_insert", ) -> None: """Executes an INSERT query on the named table. @@ -1142,7 +1138,7 @@ class DatabasePool: @staticmethod def simple_insert_txn( - txn: LoggingTransaction, table: str, values: Dict[str, Any] + txn: LoggingTransaction, table: str, values: dict[str, Any] ) -> None: keys, vals = zip(*values.items()) @@ -1158,9 +1154,9 @@ class DatabasePool: def simple_insert_returning_txn( txn: LoggingTransaction, table: str, - values: Dict[str, Any], + values: dict[str, Any], returning: StrCollection, - ) -> Tuple[Any, ...]: + ) -> tuple[Any, ...]: """Executes a `INSERT INTO... RETURNING...` statement (or equivalent for SQLite versions that don't support it). """ @@ -1261,9 +1257,9 @@ class DatabasePool: async def simple_upsert( self, table: str, - keyvalues: Dict[str, Any], - values: Dict[str, Any], - insertion_values: Optional[Dict[str, Any]] = None, + keyvalues: dict[str, Any], + values: dict[str, Any], + insertion_values: Optional[dict[str, Any]] = None, where_clause: Optional[str] = None, desc: str = "simple_upsert", ) -> bool: @@ -1463,7 +1459,7 @@ class DatabasePool: return True # We didn't find any existing rows, so insert a new one - allvalues: Dict[str, Any] = {} + allvalues: dict[str, Any] = {} allvalues.update(keyvalues) allvalues.update(values) allvalues.update(insertion_values) @@ -1500,7 +1496,7 @@ class DatabasePool: Returns True if a row was inserted or updated (i.e. if `values` is not empty then this always returns True) """ - allvalues: Dict[str, Any] = {} + allvalues: dict[str, Any] = {} allvalues.update(keyvalues) allvalues.update(insertion_values or {}) @@ -1694,7 +1690,7 @@ class DatabasePool: value_values: A list of each row's value column values. 
Ignored if value_names is empty. """ - allnames: List[str] = [] + allnames: list[str] = [] allnames.extend(key_names) allnames.extend(value_names) @@ -1737,30 +1733,30 @@ class DatabasePool: async def simple_select_one( self, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcols: Collection[str], allow_none: Literal[False] = False, desc: str = "simple_select_one", - ) -> Tuple[Any, ...]: ... + ) -> tuple[Any, ...]: ... @overload async def simple_select_one( self, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcols: Collection[str], allow_none: Literal[True] = True, desc: str = "simple_select_one", - ) -> Optional[Tuple[Any, ...]]: ... + ) -> Optional[tuple[Any, ...]]: ... async def simple_select_one( self, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcols: Collection[str], allow_none: bool = False, desc: str = "simple_select_one", - ) -> Optional[Tuple[Any, ...]]: + ) -> Optional[tuple[Any, ...]]: """Executes a SELECT query on the named table, which is expected to return a single row, returning multiple columns from it. @@ -1786,7 +1782,7 @@ class DatabasePool: async def simple_select_one_onecol( self, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcol: str, allow_none: Literal[False] = False, desc: str = "simple_select_one_onecol", @@ -1796,7 +1792,7 @@ class DatabasePool: async def simple_select_one_onecol( self, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcol: str, allow_none: Literal[True] = True, desc: str = "simple_select_one_onecol", @@ -1805,7 +1801,7 @@ class DatabasePool: async def simple_select_one_onecol( self, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcol: str, allow_none: bool = False, desc: str = "simple_select_one_onecol", @@ -1837,7 +1833,7 @@ class DatabasePool: cls, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcol: str, allow_none: Literal[False] = False, ) -> Any: ... @@ -1848,7 +1844,7 @@ class DatabasePool: cls, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcol: str, allow_none: Literal[True] = True, ) -> Optional[Any]: ... @@ -1858,7 +1854,7 @@ class DatabasePool: cls, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcol: str, allow_none: bool = False, ) -> Optional[Any]: @@ -1878,9 +1874,9 @@ class DatabasePool: def simple_select_onecol_txn( txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcol: str, - ) -> List[Any]: + ) -> list[Any]: sql = ("SELECT %(retcol)s FROM %(table)s") % {"retcol": retcol, "table": table} if keyvalues: @@ -1894,10 +1890,10 @@ class DatabasePool: async def simple_select_onecol( self, table: str, - keyvalues: Optional[Dict[str, Any]], + keyvalues: Optional[dict[str, Any]], retcol: str, desc: str = "simple_select_onecol", - ) -> List[Any]: + ) -> list[Any]: """Executes a SELECT query on the named table, which returns a list comprising of the values of the named column from the selected rows. 
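The `simple_select_one` overloads above rely on `Literal[False]` / `Literal[True]` for `allow_none` so the type checker knows whether `None` can come back; this patch only rewrites their `Tuple[...]` return types as `tuple[...]`. A toy reproduction of that overload pattern (`first_row` is made up for illustration, not a Synapse helper):

    from typing import Literal, Optional, overload

    @overload
    def first_row(
        rows: list[tuple[int, str]], allow_none: Literal[False] = False
    ) -> tuple[int, str]: ...

    @overload
    def first_row(
        rows: list[tuple[int, str]], allow_none: Literal[True] = True
    ) -> Optional[tuple[int, str]]: ...

    def first_row(
        rows: list[tuple[int, str]], allow_none: bool = False
    ) -> Optional[tuple[int, str]]:
        # Callers passing allow_none=True see Optional[...] in their types;
        # everyone else gets the non-optional type and an error on empty input.
        if rows:
            return rows[0]
        if allow_none:
            return None
        raise ValueError("expected at least one row")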
@@ -1922,10 +1918,10 @@ class DatabasePool: async def simple_select_list( self, table: str, - keyvalues: Optional[Dict[str, Any]], + keyvalues: Optional[dict[str, Any]], retcols: Collection[str], desc: str = "simple_select_list", - ) -> List[Tuple[Any, ...]]: + ) -> list[tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or more rows, returning the result as a list of tuples. @@ -1954,9 +1950,9 @@ class DatabasePool: cls, txn: LoggingTransaction, table: str, - keyvalues: Optional[Dict[str, Any]], + keyvalues: Optional[dict[str, Any]], retcols: Iterable[str], - ) -> List[Tuple[Any, ...]]: + ) -> list[tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or more rows, returning the result as a list of tuples. @@ -1990,10 +1986,10 @@ class DatabasePool: column: str, iterable: Iterable[Any], retcols: Collection[str], - keyvalues: Optional[Dict[str, Any]] = None, + keyvalues: Optional[dict[str, Any]] = None, desc: str = "simple_select_many_batch", batch_size: int = 100, - ) -> List[Tuple[Any, ...]]: + ) -> list[tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or more rows. @@ -2013,7 +2009,7 @@ class DatabasePool: """ keyvalues = keyvalues or {} - results: List[Tuple[Any, ...]] = [] + results: list[tuple[Any, ...]] = [] for chunk in batch_iter(iterable, batch_size): rows = await self.runInteraction( @@ -2038,9 +2034,9 @@ class DatabasePool: table: str, column: str, iterable: Collection[Any], - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcols: Iterable[str], - ) -> List[Tuple[Any, ...]]: + ) -> list[tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or more rows. @@ -2080,8 +2076,8 @@ class DatabasePool: async def simple_update( self, table: str, - keyvalues: Dict[str, Any], - updatevalues: Dict[str, Any], + keyvalues: dict[str, Any], + updatevalues: dict[str, Any], desc: str, ) -> int: """ @@ -2217,8 +2213,8 @@ class DatabasePool: async def simple_update_one( self, table: str, - keyvalues: Dict[str, Any], - updatevalues: Dict[str, Any], + keyvalues: dict[str, Any], + updatevalues: dict[str, Any], desc: str = "simple_update_one", ) -> None: """Executes an UPDATE query on the named table, setting new values for @@ -2244,8 +2240,8 @@ class DatabasePool: cls, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - updatevalues: Dict[str, Any], + keyvalues: dict[str, Any], + updatevalues: dict[str, Any], ) -> None: rowcount = cls.simple_update_txn(txn, table, keyvalues, updatevalues) @@ -2259,29 +2255,29 @@ class DatabasePool: def simple_select_one_txn( txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcols: Collection[str], allow_none: Literal[False] = False, - ) -> Tuple[Any, ...]: ... + ) -> tuple[Any, ...]: ... @overload @staticmethod def simple_select_one_txn( txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcols: Collection[str], allow_none: Literal[True] = True, - ) -> Optional[Tuple[Any, ...]]: ... + ) -> Optional[tuple[Any, ...]]: ... 
@staticmethod def simple_select_one_txn( txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], retcols: Collection[str], allow_none: bool = False, - ) -> Optional[Tuple[Any, ...]]: + ) -> Optional[tuple[Any, ...]]: select_sql = "SELECT %s FROM %s" % (", ".join(retcols), table) if keyvalues: @@ -2302,7 +2298,7 @@ class DatabasePool: return row async def simple_delete_one( - self, table: str, keyvalues: Dict[str, Any], desc: str = "simple_delete_one" + self, table: str, keyvalues: dict[str, Any], desc: str = "simple_delete_one" ) -> None: """Executes a DELETE query on the named table, expecting to delete a single row. @@ -2322,7 +2318,7 @@ class DatabasePool: @staticmethod def simple_delete_one_txn( - txn: LoggingTransaction, table: str, keyvalues: Dict[str, Any] + txn: LoggingTransaction, table: str, keyvalues: dict[str, Any] ) -> None: """Executes a DELETE query on the named table, expecting to delete a single row. @@ -2343,7 +2339,7 @@ class DatabasePool: raise StoreError(500, "More than one row matched (%s)" % (table,)) async def simple_delete( - self, table: str, keyvalues: Dict[str, Any], desc: str + self, table: str, keyvalues: dict[str, Any], desc: str ) -> int: """Executes a DELETE query on the named table. @@ -2363,7 +2359,7 @@ class DatabasePool: @staticmethod def simple_delete_txn( - txn: LoggingTransaction, table: str, keyvalues: Dict[str, Any] + txn: LoggingTransaction, table: str, keyvalues: dict[str, Any] ) -> int: """Executes a DELETE query on the named table. @@ -2389,7 +2385,7 @@ class DatabasePool: table: str, column: str, iterable: Collection[Any], - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], desc: str, ) -> int: """Executes a DELETE query on the named table. @@ -2423,7 +2419,7 @@ class DatabasePool: table: str, column: str, values: Collection[Any], - keyvalues: Dict[str, Any], + keyvalues: dict[str, Any], ) -> int: """Executes a DELETE query on the named table. @@ -2503,7 +2499,7 @@ class DatabasePool: stream_column: str, max_value: int, limit: int = 100000, - ) -> Tuple[Dict[Any, int], int]: + ) -> tuple[dict[Any, int], int]: """Gets roughly the last N changes in the given stream table as a map from entity to the stream ID of the most recent change. @@ -2528,7 +2524,7 @@ class DatabasePool: # The rows come out in reverse stream ID order, so we want to keep the # stream ID of the first row for each entity. - cache: Dict[Any, int] = {} + cache: dict[Any, int] = {} for row in txn: cache.setdefault(row[0], int(row[1])) @@ -2552,11 +2548,11 @@ class DatabasePool: start: int, limit: int, retcols: Iterable[str], - filters: Optional[Dict[str, Any]] = None, - keyvalues: Optional[Dict[str, Any]] = None, - exclude_keyvalues: Optional[Dict[str, Any]] = None, + filters: Optional[dict[str, Any]] = None, + keyvalues: Optional[dict[str, Any]] = None, + exclude_keyvalues: Optional[dict[str, Any]] = None, order_direction: str = "ASC", - ) -> List[Tuple[Any, ...]]: + ) -> list[tuple[Any, ...]]: """ Executes a SELECT query on the named table with start and limit, of row numbers, which may return zero or number of rows from start to limit, @@ -2591,7 +2587,7 @@ class DatabasePool: raise ValueError("order_direction must be one of 'ASC' or 'DESC'.") where_clause = "WHERE " if filters or keyvalues or exclude_keyvalues else "" - arg_list: List[Any] = [] + arg_list: list[Any] = [] if filters: where_clause += " AND ".join("%s LIKE ?" 
% (k,) for k in filters) arg_list += list(filters.values()) @@ -2621,7 +2617,7 @@ def make_in_list_sql_clause( iterable: Collection[Any], *, negative: bool = False, -) -> Tuple[str, list]: +) -> tuple[str, list]: """Returns an SQL clause that checks the given column is in the iterable. On SQLite this expands to `column IN (?, ?, ...)`, whereas on Postgres @@ -2661,24 +2657,24 @@ def make_in_list_sql_clause( @overload def make_tuple_in_list_sql_clause( database_engine: BaseDatabaseEngine, - columns: Tuple[str, str], - iterable: Collection[Tuple[Any, Any]], -) -> Tuple[str, list]: ... + columns: tuple[str, str], + iterable: Collection[tuple[Any, Any]], +) -> tuple[str, list]: ... @overload def make_tuple_in_list_sql_clause( database_engine: BaseDatabaseEngine, - columns: Tuple[str, str, str], - iterable: Collection[Tuple[Any, Any, Any]], -) -> Tuple[str, list]: ... + columns: tuple[str, str, str], + iterable: Collection[tuple[Any, Any, Any]], +) -> tuple[str, list]: ... def make_tuple_in_list_sql_clause( database_engine: BaseDatabaseEngine, - columns: Tuple[str, ...], - iterable: Collection[Tuple[Any, ...]], -) -> Tuple[str, list]: + columns: tuple[str, ...], + iterable: Collection[tuple[Any, ...]], +) -> tuple[str, list]: """Returns an SQL clause that checks the given tuple of columns is in the iterable. Args: @@ -2726,7 +2722,7 @@ def make_tuple_in_list_sql_clause( KV = TypeVar("KV") -def make_tuple_comparison_clause(keys: List[Tuple[str, KV]]) -> Tuple[str, List[KV]]: +def make_tuple_comparison_clause(keys: list[tuple[str, KV]]) -> tuple[str, list[KV]]: """Returns a tuple comparison SQL clause Builds a SQL clause that looks like "(a, b) > (?, ?)" diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index a4aba96686..f145d21096 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Generic, List, Optional, Type, TypeVar +from typing import TYPE_CHECKING, Generic, Optional, TypeVar from synapse.metrics import SERVER_NAME_LABEL, LaterGauge from synapse.storage._base import SQLBaseStore @@ -61,13 +61,13 @@ class Databases(Generic[DataStoreT]): state_deletion """ - databases: List[DatabasePool] + databases: list[DatabasePool] main: "DataStore" # FIXME: https://github.com/matrix-org/synapse/issues/11165: actually an instance of `main_store_class` state: StateGroupDataStore persist_events: Optional[PersistEventsStore] state_deletion: StateDeletionDataStore - def __init__(self, main_store_class: Type[DataStoreT], hs: "HomeServer"): + def __init__(self, main_store_class: type[DataStoreT], hs: "HomeServer"): # Note we pass in the main store class here as workers use a different main # store. 
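`make_tuple_comparison_clause` at the end of the `database.py` changes keeps its documented behaviour of building a clause shaped like `(a, b) > (?, ?)`; only its annotations move to `list`/`tuple`. A rough stand-alone sketch of what such a helper returns (illustrative only, not the Synapse implementation):

    def make_tuple_comparison_clause_sketch(
        keys: list[tuple[str, int]],
    ) -> tuple[str, list[int]]:
        """Build "(a, b) > (?, ?)" plus the bind values for the placeholders."""
        columns = ", ".join(name for name, _ in keys)
        placeholders = ", ".join("?" for _ in keys)
        return f"({columns}) > ({placeholders})", [value for _, value in keys]

    clause, args = make_tuple_comparison_clause_sketch([("a", 10), ("b", 3)])
    assert clause == "(a, b) > (?, ?)"
    assert args == [10, 3]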
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 83b480adaf..9f23c1a4e0 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Optional, Union, cast import attr @@ -188,9 +188,9 @@ class DataStore( order_by: str = UserSortOrder.NAME.value, direction: Direction = Direction.FORWARDS, approved: bool = True, - not_user_types: Optional[List[str]] = None, + not_user_types: Optional[list[str]] = None, locked: bool = False, - ) -> Tuple[List[UserPaginateResponse], int]: + ) -> tuple[list[UserPaginateResponse], int]: """Function to retrieve a paginated list of users from users list. This will return a json list of users and the total number of users matching the filter criteria. @@ -216,7 +216,7 @@ class DataStore( def get_users_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[UserPaginateResponse], int]: + ) -> tuple[list[UserPaginateResponse], int]: filters = [] args: list = [] @@ -311,7 +311,7 @@ class DataStore( """ sql = "SELECT COUNT(*) as total_users " + sql_base txn.execute(sql, args) - count = cast(Tuple[int], txn.fetchone())[0] + count = cast(tuple[int], txn.fetchone())[0] sql = f""" SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, @@ -351,8 +351,8 @@ class DataStore( async def search_users( self, term: str - ) -> List[ - Tuple[str, Optional[str], Union[int, bool], Union[int, bool], Optional[str]] + ) -> list[ + tuple[str, Optional[str], Union[int, bool], Union[int, bool], Optional[str]] ]: """Function to search users list for one or more users with the matched term. @@ -366,8 +366,8 @@ class DataStore( def search_users( txn: LoggingTransaction, - ) -> List[ - Tuple[str, Optional[str], Union[int, bool], Union[int, bool], Optional[str]] + ) -> list[ + tuple[str, Optional[str], Union[int, bool], Union[int, bool], Optional[str]] ]: search_term = "%%" + term + "%%" @@ -379,8 +379,8 @@ class DataStore( txn.execute(sql, (search_term,)) return cast( - List[ - Tuple[ + list[ + tuple[ str, Optional[str], Union[int, bool], diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 16876e5461..f1fb5fe188 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -23,13 +23,9 @@ import logging from typing import ( TYPE_CHECKING, Any, - Dict, - FrozenSet, Iterable, - List, Mapping, Optional, - Tuple, cast, ) @@ -140,7 +136,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_global_account_data_for_user( txn: LoggingTransaction, - ) -> Dict[str, JsonDict]: + ) -> dict[str, JsonDict]: # The 'content != '{}' condition below prevents us from using # `simple_select_list_txn` here, as it doesn't support conditions # other than 'equals'. @@ -185,7 +181,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_room_account_data_for_user_txn( txn: LoggingTransaction, - ) -> Dict[str, Dict[str, JsonMapping]]: + ) -> dict[str, dict[str, JsonMapping]]: # The 'content != '{}' condition below prevents us from using # `simple_select_list_txn` here, as it doesn't support conditions # other than 'equals'. 
@@ -202,7 +198,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) txn.execute(sql, (user_id,)) - by_room: Dict[str, Dict[str, JsonMapping]] = {} + by_room: dict[str, dict[str, JsonMapping]] = {} for room_id, account_data_type, content in txn: room_data = by_room.setdefault(room_id, {}) @@ -281,9 +277,9 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_account_data_for_room_txn( txn: LoggingTransaction, - ) -> Dict[str, JsonMapping]: + ) -> dict[str, JsonMapping]: rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], self.db_pool.simple_select_list_txn( txn, table="room_account_data", @@ -338,7 +334,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_updated_global_account_data( self, last_id: int, current_id: int, limit: int - ) -> List[Tuple[int, str, str]]: + ) -> list[tuple[int, str, str]]: """Get the global account_data that has changed, for the account_data stream Args: @@ -355,14 +351,14 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_updated_global_account_data_txn( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str]]: + ) -> list[tuple[int, str, str]]: sql = ( "SELECT stream_id, user_id, account_data_type" " FROM account_data WHERE ? < stream_id AND stream_id <= ?" " ORDER BY stream_id ASC LIMIT ?" ) txn.execute(sql, (last_id, current_id, limit)) - return cast(List[Tuple[int, str, str]], txn.fetchall()) + return cast(list[tuple[int, str, str]], txn.fetchall()) return await self.db_pool.runInteraction( "get_updated_global_account_data", get_updated_global_account_data_txn @@ -370,7 +366,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_updated_room_account_data( self, last_id: int, current_id: int, limit: int - ) -> List[Tuple[int, str, str, str]]: + ) -> list[tuple[int, str, str, str]]: """Get the global account_data that has changed, for the account_data stream Args: @@ -387,14 +383,14 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_updated_room_account_data_txn( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, str]]: + ) -> list[tuple[int, str, str, str]]: sql = ( "SELECT stream_id, user_id, room_id, account_data_type" " FROM room_account_data WHERE ? < stream_id AND stream_id <= ?" " ORDER BY stream_id ASC LIMIT ?" ) txn.execute(sql, (last_id, current_id, limit)) - return cast(List[Tuple[int, str, str, str]], txn.fetchall()) + return cast(list[tuple[int, str, str, str]], txn.fetchall()) return await self.db_pool.runInteraction( "get_updated_room_account_data", get_updated_room_account_data_txn @@ -402,7 +398,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_updated_global_account_data_for_user( self, user_id: str, stream_id: int - ) -> Dict[str, JsonMapping]: + ) -> dict[str, JsonMapping]: """Get all the global account_data that's changed for a user. Args: @@ -415,7 +411,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_updated_global_account_data_for_user( txn: LoggingTransaction, - ) -> Dict[str, JsonMapping]: + ) -> dict[str, JsonMapping]: sql = """ SELECT account_data_type, content FROM account_data WHERE user_id = ? AND stream_id > ? 
@@ -437,7 +433,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_updated_room_account_data_for_user( self, user_id: str, stream_id: int - ) -> Dict[str, Dict[str, JsonMapping]]: + ) -> dict[str, dict[str, JsonMapping]]: """Get all the room account_data that's changed for a user. Args: @@ -450,14 +446,14 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_updated_room_account_data_for_user_txn( txn: LoggingTransaction, - ) -> Dict[str, Dict[str, JsonMapping]]: + ) -> dict[str, dict[str, JsonMapping]]: sql = """ SELECT room_id, account_data_type, content FROM room_account_data WHERE user_id = ? AND stream_id > ? """ txn.execute(sql, (user_id, stream_id)) - account_data_by_room: Dict[str, Dict[str, JsonMapping]] = {} + account_data_by_room: dict[str, dict[str, JsonMapping]] = {} for row in txn: room_account_data = account_data_by_room.setdefault(row[0], {}) room_account_data[row[1]] = db_to_json(row[2]) @@ -484,7 +480,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) room_id: str, from_stream_id: int, to_stream_id: int, - ) -> Dict[str, JsonMapping]: + ) -> dict[str, JsonMapping]: """Get the room account_data that's changed for a user in a room. (> `from_stream_id` and <= `to_stream_id`) @@ -501,14 +497,14 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_updated_room_account_data_for_user_for_room_txn( txn: LoggingTransaction, - ) -> Dict[str, JsonMapping]: + ) -> dict[str, JsonMapping]: sql = """ SELECT account_data_type, content FROM room_account_data WHERE user_id = ? AND room_id = ? AND stream_id > ? AND stream_id <= ? """ txn.execute(sql, (user_id, room_id, from_stream_id, to_stream_id)) - room_account_data: Dict[str, JsonMapping] = {} + room_account_data: dict[str, JsonMapping] = {} for row in txn: room_account_data[row[0]] = db_to_json(row[1]) @@ -526,7 +522,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) @cached(max_entries=5000, iterable=True) - async def ignored_by(self, user_id: str) -> FrozenSet[str]: + async def ignored_by(self, user_id: str) -> frozenset[str]: """ Get users which ignore the given user. @@ -546,7 +542,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) @cached(max_entries=5000, iterable=True) - async def ignored_users(self, user_id: str) -> FrozenSet[str]: + async def ignored_users(self, user_id: str) -> frozenset[str]: """ Get users which the given user ignores. diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 97dbbb1493..7558672905 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -20,7 +20,7 @@ # import logging import re -from typing import TYPE_CHECKING, List, Optional, Pattern, Sequence, Tuple, cast +from typing import TYPE_CHECKING, Optional, Pattern, Sequence, cast from synapse.appservice import ( ApplicationService, @@ -52,7 +52,7 @@ logger = logging.getLogger(__name__) def _make_exclusive_regex( - services_cache: List[ApplicationService], + services_cache: list[ApplicationService], ) -> Optional[Pattern]: # We precompile a regex constructed from all the regexes that the AS's # have registered for exclusive users. 
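`_make_exclusive_regex` above precompiles a single regex out of every exclusive-user regex that the application services have registered. A hedged sketch of that precompilation idea, with made-up namespace patterns (`make_exclusive_regex_sketch` is an illustration, not the real function):

    import re
    from typing import Optional, Pattern

    def make_exclusive_regex_sketch(
        exclusive_user_regexes: list[str],
    ) -> Optional[Pattern]:
        if not exclusive_user_regexes:
            # No exclusive namespaces registered, so there is nothing to match.
            return None
        # Joining with "|" lets one compiled pattern test every namespace at once.
        return re.compile(
            "|".join(f"(?:{regex})" for regex in exclusive_user_regexes)
        )

    pattern = make_exclusive_regex_sketch(
        [r"@irc_.*:example\.org", r"@gitter_.*:example\.org"]
    )
    assert pattern is not None and pattern.match("@irc_alice:example.org")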
@@ -93,7 +93,7 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore): txn.execute( "SELECT COALESCE(max(txn_id), 0) FROM application_services_txns" ) - return cast(Tuple[int], txn.fetchone())[0] + return cast(tuple[int], txn.fetchone())[0] self._as_txn_seq_gen = build_sequence_generator( db_conn, @@ -106,7 +106,7 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore): super().__init__(database, db_conn, hs) - def get_app_services(self) -> List[ApplicationService]: + def get_app_services(self) -> list[ApplicationService]: return self.services_cache def get_if_app_services_interested_in_user(self, user_id: str) -> bool: @@ -199,7 +199,7 @@ class ApplicationServiceTransactionWorkerStore( ): async def get_appservices_by_state( self, state: ApplicationServiceState - ) -> List[ApplicationService]: + ) -> list[ApplicationService]: """Get a list of application services based on their state. Args: @@ -208,7 +208,7 @@ class ApplicationServiceTransactionWorkerStore( A list of ApplicationServices, which may be empty. """ results = cast( - List[Tuple[str]], + list[tuple[str]], await self.db_pool.simple_select_list( table="application_services_state", keyvalues={"state": state.value}, @@ -273,8 +273,8 @@ class ApplicationServiceTransactionWorkerStore( self, service: ApplicationService, events: Sequence[EventBase], - ephemeral: List[JsonMapping], - to_device_messages: List[JsonMapping], + ephemeral: list[JsonMapping], + to_device_messages: list[JsonMapping], one_time_keys_count: TransactionOneTimeKeysCount, unused_fallback_keys: TransactionUnusedFallbackKeys, device_list_summary: DeviceListUpdates, @@ -358,7 +358,7 @@ class ApplicationServiceTransactionWorkerStore( def _get_oldest_unsent_txn( txn: LoggingTransaction, - ) -> Optional[Tuple[int, str]]: + ) -> Optional[tuple[int, str]]: # Monotonically increasing txn ids, so just select the smallest # one in the txns table (we delete them when they are sent) txn.execute( @@ -366,7 +366,7 @@ class ApplicationServiceTransactionWorkerStore( " ORDER BY txn_id ASC LIMIT 1", (service.id,), ) - return cast(Optional[Tuple[int, str]], txn.fetchone()) + return cast(Optional[tuple[int, str]], txn.fetchone()) entry = await self.db_pool.runInteraction( "get_oldest_unsent_appservice_txn", _get_oldest_unsent_txn diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 674c6b921e..5a96510b13 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -23,7 +23,7 @@ import itertools import json import logging -from typing import TYPE_CHECKING, Any, Collection, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional from synapse.api.constants import EventTypes from synapse.config._base import Config @@ -145,7 +145,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): async def get_all_updated_caches( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: """Get updates for caches replication stream. Args: @@ -172,7 +172,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): def get_all_updated_caches_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: # We purposefully don't bound by the current token, as we want to # send across cache invalidations as quickly as possible. Cache # invalidations are idempotent, so duplicates are fine. 
@@ -597,7 +597,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._invalidate_state_caches_all(room_id) async def invalidate_cache_and_stream( - self, cache_name: str, keys: Tuple[Any, ...] + self, cache_name: str, keys: tuple[Any, ...] ) -> None: """Invalidates the cache and adds it to the cache stream so other workers will know to invalidate their caches. @@ -620,7 +620,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self, txn: LoggingTransaction, cache_func: CachedFunction, - keys: Tuple[Any, ...], + keys: tuple[Any, ...], ) -> None: """Invalidates the cache and adds it to the cache stream so other workers will know to invalidate their caches. @@ -636,7 +636,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self, txn: LoggingTransaction, cache_func: CachedFunction, - key_tuples: Collection[Tuple[Any, ...]], + key_tuples: Collection[tuple[Any, ...]], ) -> None: """A bulk version of _invalidate_cache_and_stream. @@ -759,7 +759,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self, txn: LoggingTransaction, cache_name: str, - key_tuples: Collection[Tuple[Any, ...]], + key_tuples: Collection[tuple[Any, ...]], ) -> None: """Announce the invalidation of multiple (but not all) cache entries. diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index dc6ab99a6c..1033d85a40 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -22,11 +22,8 @@ import logging from typing import ( TYPE_CHECKING, - Dict, - List, Mapping, Optional, - Tuple, TypedDict, Union, cast, @@ -190,7 +187,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): """, (begin_last_seen, batch_size), ) - row = cast(Optional[Tuple[int]], txn.fetchone()) + row = cast(Optional[tuple[int]], txn.fetchone()) if row: return row[0] else: @@ -222,7 +219,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): # Define the search space, which requires handling the last batch in # a different way - args: Tuple[int, ...] + args: tuple[int, ...] if last: clause = "? <= last_seen" args = (begin_last_seen,) @@ -251,7 +248,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): args, ) res = cast( - List[Tuple[str, str, str, Optional[str], str, int, int]], txn.fetchall() + list[tuple[str, str, str, Optional[str], str, int, int]], txn.fetchall() ) # We've got some duplicates @@ -361,7 +358,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): # we'll just end up updating the same device row multiple # times, which is fine. 
- where_args: List[Union[str, int]] + where_args: list[Union[str, int]] where_clause, where_args = make_tuple_comparison_clause( [("user_id", last_user_id), ("device_id", last_device_id)], ) @@ -383,7 +380,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): """ % {"where_clause": where_clause} txn.execute(sql, where_args + [batch_size]) - rows = cast(List[Tuple[int, str, str, str, str]], txn.fetchall()) + rows = cast(list[tuple[int, str, str, str, str]], txn.fetchall()) if not rows: return 0 @@ -434,7 +431,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke self.user_ips_max_age = hs.config.server.user_ips_max_age # (user_id, access_token, ip,) -> last_seen - self.client_ip_last_seen = LruCache[Tuple[str, str, str], int]( + self.client_ip_last_seen = LruCache[tuple[str, str, str], int]( cache_name="client_ip_last_seen", server_name=self.server_name, max_size=50000, @@ -449,8 +446,8 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke # tables. # (user_id, access_token, ip,) -> (user_agent, device_id, last_seen) - self._batch_row_update: Dict[ - Tuple[str, str, str], Tuple[str, Optional[str], int] + self._batch_row_update: dict[ + tuple[str, str, str], tuple[str, Optional[str], int] ] = {} self.clock.looping_call(self._update_client_ips_batch, 5 * 1000) @@ -504,7 +501,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke async def _get_last_client_ip_by_device_from_database( self, user_id: str, device_id: Optional[str] - ) -> Dict[Tuple[str, str], DeviceLastConnectionInfo]: + ) -> dict[tuple[str, str], DeviceLastConnectionInfo]: """For each device_id listed, give the user_ip it was last seen on. The result might be slightly out of date as client IPs are inserted in batches. @@ -522,7 +519,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke keyvalues["device_id"] = device_id res = cast( - List[Tuple[str, Optional[str], Optional[str], str, Optional[int]]], + list[tuple[str, Optional[str], Optional[str], str, Optional[int]]], await self.db_pool.simple_select_list( table="devices", keyvalues=keyvalues, @@ -543,7 +540,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke async def _get_user_ip_and_agents_from_database( self, user: UserID, since_ts: int = 0 - ) -> List[LastConnectionInfo]: + ) -> list[LastConnectionInfo]: """Fetch the IPs and user agents for a user since the given timestamp. The result might be slightly out of date as client IPs are inserted in batches. 
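The client-IP hunks above keep the existing batching design: updates are staged in a dict keyed by `(user_id, access_token, ip)` and flushed on a timer, which is why reads "might be slightly out of date". A small sketch of that staging idea with hypothetical names (`pending`, `stage_client_ip` and `flush` are illustrations, not the real store methods):

    from typing import Optional

    # (user_id, access_token, ip) -> (user_agent, device_id, last_seen_ms)
    pending: dict[tuple[str, str, str], tuple[str, Optional[str], int]] = {}

    def stage_client_ip(
        user_id: str,
        access_token: str,
        ip: str,
        user_agent: str,
        device_id: Optional[str],
        now_ms: int,
    ) -> None:
        # Re-staging the same key overwrites it, so repeated hits from one client
        # collapse into a single entry carrying the newest last_seen timestamp.
        pending[(user_id, access_token, ip)] = (user_agent, device_id, now_ms)

    def flush() -> dict[tuple[str, str, str], tuple[str, Optional[str], int]]:
        # Here the flush just hands the batch back and clears it; the real store
        # writes the batch to the database when its timer fires.
        batch = dict(pending)
        pending.clear()
        return batch

    stage_client_ip("@alice:test", "token", "10.0.0.1", "client/1.0", "DEV1", 1000)
    stage_client_ip("@alice:test", "token", "10.0.0.1", "client/1.0", "DEV1", 2000)
    assert flush() == {
        ("@alice:test", "token", "10.0.0.1"): ("client/1.0", "DEV1", 2000)
    }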
@@ -567,7 +564,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke """ user_id = user.to_string() - def get_recent(txn: LoggingTransaction) -> List[Tuple[str, str, str, int]]: + def get_recent(txn: LoggingTransaction) -> list[tuple[str, str, str, int]]: txn.execute( """ SELECT access_token, ip, user_agent, last_seen FROM user_ips @@ -577,7 +574,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke """, (since_ts, user_id), ) - return cast(List[Tuple[str, str, str, int]], txn.fetchall()) + return cast(list[tuple[str, str, str, int]], txn.fetchall()) rows = await self.db_pool.runInteraction( desc="get_user_ip_and_agents", func=get_recent @@ -673,7 +670,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke def _update_client_ips_batch_txn( self, txn: LoggingTransaction, - to_update: Mapping[Tuple[str, str, str], Tuple[str, Optional[str], int]], + to_update: Mapping[tuple[str, str, str], tuple[str, Optional[str], int]], ) -> None: assert self._update_on_this_worker, ( "This worker is not designated to update client IPs" @@ -719,7 +716,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke async def get_last_client_ip_by_device( self, user_id: str, device_id: Optional[str] - ) -> Dict[Tuple[str, str], DeviceLastConnectionInfo]: + ) -> dict[tuple[str, str], DeviceLastConnectionInfo]: """For each device_id listed, give the user_ip it was last seen on Args: @@ -759,7 +756,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke async def get_user_ip_and_agents( self, user: UserID, since_ts: int = 0 - ) -> List[LastConnectionInfo]: + ) -> list[LastConnectionInfo]: """Fetch the IPs and user agents for a user since the given timestamp. Args: @@ -786,7 +783,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke # the result return rows_from_db - results: Dict[Tuple[str, str], LastConnectionInfo] = { + results: dict[tuple[str, str], LastConnectionInfo] = { (connection["access_token"], connection["ip"]): connection for connection in rows_from_db } diff --git a/synapse/storage/databases/main/delayed_events.py b/synapse/storage/databases/main/delayed_events.py index 78f55b983f..33101327f5 100644 --- a/synapse/storage/databases/main/delayed_events.py +++ b/synapse/storage/databases/main/delayed_events.py @@ -13,7 +13,7 @@ # import logging -from typing import List, NewType, Optional, Tuple +from typing import NewType, Optional import attr @@ -93,7 +93,7 @@ class DelayedEventsStore(SQLBaseStore): origin_server_ts: Optional[int], content: JsonDict, delay: int, - ) -> Tuple[DelayID, Timestamp]: + ) -> tuple[DelayID, Timestamp]: """ Inserts a new delayed event in the DB. 
@@ -201,7 +201,7 @@ class DelayedEventsStore(SQLBaseStore): async def get_all_delayed_events_for_user( self, user_localpart: str, - ) -> List[JsonDict]: + ) -> list[JsonDict]: """Returns all pending delayed events owned by the given user.""" # TODO: Support Pagination stream API ("next_batch" field) rows = await self.db_pool.execute( @@ -236,8 +236,8 @@ class DelayedEventsStore(SQLBaseStore): async def process_timeout_delayed_events( self, current_ts: Timestamp - ) -> Tuple[ - List[DelayedEventDetails], + ) -> tuple[ + list[DelayedEventDetails], Optional[Timestamp], ]: """ @@ -250,8 +250,8 @@ class DelayedEventsStore(SQLBaseStore): def process_timeout_delayed_events_txn( txn: LoggingTransaction, - ) -> Tuple[ - List[DelayedEventDetails], + ) -> tuple[ + list[DelayedEventDetails], Optional[Timestamp], ]: sql_cols = ", ".join( @@ -322,7 +322,7 @@ class DelayedEventsStore(SQLBaseStore): *, delay_id: str, user_localpart: str, - ) -> Tuple[ + ) -> tuple[ EventDetails, Optional[Timestamp], ]: @@ -343,7 +343,7 @@ class DelayedEventsStore(SQLBaseStore): def process_target_delayed_event_txn( txn: LoggingTransaction, - ) -> Tuple[ + ) -> tuple[ EventDetails, Optional[Timestamp], ]: diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index a66e11f738..49a82b98d3 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -24,12 +24,8 @@ import logging from typing import ( TYPE_CHECKING, Collection, - Dict, Iterable, - List, Optional, - Set, - Tuple, cast, ) @@ -92,7 +88,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): # Map of (user_id, device_id) to the last stream_id that has been # deleted up to. This is so that we can no op deletions. self._last_device_delete_cache: ExpiringCache[ - Tuple[str, Optional[str]], int + tuple[str, Optional[str]], int ] = ExpiringCache( cache_name="last_device_delete_cache", server_name=self.server_name, @@ -203,7 +199,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): user_ids: Collection[str], from_stream_id: int, to_stream_id: int, - ) -> Dict[Tuple[str, str], List[JsonDict]]: + ) -> dict[tuple[str, str], list[JsonDict]]: """ Retrieve to-device messages for a given set of users. @@ -242,7 +238,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): from_stream_id: int, to_stream_id: int, limit: int = 100, - ) -> Tuple[List[JsonDict], int]: + ) -> tuple[list[JsonDict], int]: """ Retrieve to-device messages for a single user device. @@ -271,7 +267,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): def get_device_messages_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> tuple[list[JsonDict], int]: sql = """ SELECT stream_id, message_json FROM device_inbox WHERE user_id = ? AND device_id = ? @@ -284,7 +280,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): # Create and fill a dictionary of (user ID, device ID) -> list of messages # intended for each device. last_processed_stream_pos = to_stream_id - to_device_messages: List[JsonDict] = [] + to_device_messages: list[JsonDict] = [] rowcount = 0 for row in txn: rowcount += 1 @@ -331,7 +327,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): user_ids: Collection[str], from_stream_id: int, to_stream_id: int, - ) -> Tuple[Dict[Tuple[str, str], List[JsonDict]], int]: + ) -> tuple[dict[tuple[str, str], list[JsonDict]], int]: """ Retrieve pending to-device messages for a collection of user devices. 
@@ -363,7 +359,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): logger.warning("No users provided upon querying for device IDs") return {}, to_stream_id - user_ids_to_query: Set[str] = set() + user_ids_to_query: set[str] = set() # Determine which users have devices with pending messages for user_id in user_ids: @@ -378,7 +374,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): def get_device_messages_txn( txn: LoggingTransaction, - ) -> Tuple[Dict[Tuple[str, str], List[JsonDict]], int]: + ) -> tuple[dict[tuple[str, str], list[JsonDict]], int]: # Build a query to select messages from any of the given devices that # are between the given stream id bounds. @@ -389,7 +385,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): # since device_inbox has an index on `(user_id, device_id, stream_id)` user_device_dicts = cast( - List[Tuple[str]], + list[tuple[str]], self.db_pool.simple_select_many_txn( txn, table="devices", @@ -436,7 +432,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): # Create and fill a dictionary of (user ID, device ID) -> list of messages # intended for each device. - recipient_device_to_messages: Dict[Tuple[str, str], List[JsonDict]] = {} + recipient_device_to_messages: dict[tuple[str, str], list[JsonDict]] = {} rowcount = 0 for row in txn: rowcount += 1 @@ -535,7 +531,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): from_stream_id: Optional[int], to_stream_id: int, limit: int, - ) -> Tuple[Optional[int], int]: + ) -> tuple[Optional[int], int]: """Delete N device messages between the stream IDs, returning the highest stream ID deleted (or None if all messages in the range have been deleted) and the number of messages deleted. @@ -555,7 +551,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): def delete_messages_for_device_between_txn( txn: LoggingTransaction, - ) -> Tuple[Optional[int], int]: + ) -> tuple[Optional[int], int]: txn.execute( """ SELECT MAX(stream_id) FROM ( @@ -598,7 +594,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): @trace async def get_new_device_msgs_for_remote( self, destination: str, last_stream_id: int, current_stream_id: int, limit: int - ) -> Tuple[List[JsonDict], int]: + ) -> tuple[list[JsonDict], int]: """ Args: destination: The name of the remote server. @@ -628,7 +624,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): @trace def get_new_messages_for_remote_destination_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> tuple[list[JsonDict], int]: sql = ( "SELECT stream_id, messages_json FROM device_federation_outbox" " WHERE destination = ?" @@ -684,7 +680,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): async def get_all_new_device_messages( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: """Get updates for to device replication stream. Args: @@ -711,7 +707,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): def get_all_new_device_messages_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: # We limit like this as we might have multiple rows per stream_id, and # we want to make sure we always get all entries for any stream_id # we return. 
@@ -746,8 +742,8 @@ class DeviceInboxWorkerStore(SQLBaseStore): @trace async def add_messages_to_device_inbox( self, - local_messages_by_user_then_device: Dict[str, Dict[str, JsonDict]], - remote_messages_by_destination: Dict[str, JsonDict], + local_messages_by_user_then_device: dict[str, dict[str, JsonDict]], + remote_messages_by_destination: dict[str, JsonDict], ) -> int: """Used to send messages from this server. @@ -844,7 +840,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): self, origin: str, message_id: str, - local_messages_by_user_then_device: Dict[str, Dict[str, JsonDict]], + local_messages_by_user_then_device: dict[str, dict[str, JsonDict]], ) -> int: assert self._can_write_to_device @@ -898,7 +894,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): self, txn: LoggingTransaction, stream_id: int, - messages_by_user_then_device: Dict[str, Dict[str, JsonDict]], + messages_by_user_then_device: dict[str, dict[str, JsonDict]], ) -> None: assert self._can_write_to_device @@ -929,7 +925,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): # We exclude hidden devices (such as cross-signing keys) here as they are # not expected to receive to-device messages. rows = cast( - List[Tuple[str]], + list[tuple[str]], self.db_pool.simple_select_many_txn( txn, table="devices", @@ -1055,7 +1051,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): txn.execute(sql, args) return {row[0] for row in txn} - results: Set[str] = set() + results: set[str] = set() for batch_device_ids in batch_iter(device_ids, 1000): batch_results = await self.db_pool.runInteraction( "get_devices_with_messages", @@ -1143,7 +1139,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): def _remove_dead_devices_from_device_inbox_txn( txn: LoggingTransaction, - ) -> Tuple[int, bool]: + ) -> tuple[int, bool]: if "max_stream_id" in progress: max_stream_id = progress["max_stream_id"] else: @@ -1151,7 +1147,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): # There's a type mismatch here between how we want to type the row and # what fetchone says it returns, but we silence it because we know that # res can't be None. - res = cast(Tuple[Optional[int]], txn.fetchone()) + res = cast(tuple[Optional[int]], txn.fetchone()) if res[0] is None: # this can only happen if the `device_inbox` table is empty, in which # case we have no work to do. @@ -1214,7 +1210,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): max_stream_id = progress["max_stream_id"] else: txn.execute("SELECT max(stream_id) FROM device_federation_outbox") - res = cast(Tuple[Optional[int]], txn.fetchone()) + res = cast(tuple[Optional[int]], txn.fetchone()) if res[0] is None: # this can only happen if the `device_inbox` table is empty, in which # case we have no work to do. 
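The to-device hunks above describe filling "a dictionary of (user ID, device ID) -> list of messages intended for each device". A stand-alone sketch of that grouping step over toy rows (`bucket_to_device_messages` is an illustration, not the real transaction code):

    import json

    def bucket_to_device_messages(
        rows: list[tuple[str, str, str]],
    ) -> dict[tuple[str, str], list[dict]]:
        """Bucket (user_id, device_id, message_json) rows by recipient device."""
        messages: dict[tuple[str, str], list[dict]] = {}
        for user_id, device_id, message_json in rows:
            messages.setdefault((user_id, device_id), []).append(
                json.loads(message_json)
            )
        return messages

    bucketed = bucket_to_device_messages(
        [
            ("@alice:test", "DEV1", '{"type": "m.room_key_request"}'),
            ("@alice:test", "DEV1", '{"type": "m.dummy"}'),
        ]
    )
    assert len(bucketed[("@alice:test", "DEV1")]) == 2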
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index d4b9ce0ea0..bf5e05ea51 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -24,13 +24,9 @@ from typing import ( TYPE_CHECKING, Any, Collection, - Dict, Iterable, - List, Mapping, Optional, - Set, - Tuple, cast, ) @@ -284,7 +280,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) txn.execute(sql + clause, args) - return cast(Tuple[int], txn.fetchone())[0] + return cast(tuple[int], txn.fetchone())[0] if not user_ids: return 0 @@ -381,7 +377,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): device_ids: The IDs of the devices to delete """ - def _delete_devices_txn(txn: LoggingTransaction, device_ids: List[str]) -> None: + def _delete_devices_txn(txn: LoggingTransaction, device_ids: list[str]) -> None: self.db_pool.simple_delete_many_txn( txn, table="devices", @@ -497,7 +493,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_devices_by_user( self, user_id: str - ) -> Dict[str, Dict[str, Optional[str]]]: + ) -> dict[str, dict[str, Optional[str]]]: """Retrieve all of a user's registered devices. Only returns devices that are not marked as hidden. @@ -508,7 +504,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): and "display_name" for each device. Display name may be null. """ devices = cast( - List[Tuple[str, str, Optional[str]]], + list[tuple[str, str, Optional[str]]], await self.db_pool.simple_select_list( table="devices", keyvalues={"user_id": user_id, "hidden": False}, @@ -524,7 +520,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_devices_by_auth_provider_session_id( self, auth_provider_id: str, auth_provider_session_id: str - ) -> List[Tuple[str, str]]: + ) -> list[tuple[str, str]]: """Retrieve the list of devices associated with a SSO IdP session ID. Args: @@ -534,7 +530,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): A list of dicts containing the device_id and the user_id of each device """ return cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="device_auth_providers", keyvalues={ @@ -549,7 +545,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): @trace async def get_device_updates_by_remote( self, destination: str, from_stream_id: int, limit: int - ) -> Tuple[int, List[Tuple[str, JsonDict]]]: + ) -> tuple[int, list[tuple[str, JsonDict]]]: """Get a stream of device updates to send to the given remote server. Args: @@ -659,8 +655,8 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): last_processed_stream_id = from_stream_id # A map of (user ID, device ID) to (stream ID, context). - query_map: Dict[Tuple[str, str], Tuple[int, Optional[str]]] = {} - cross_signing_keys_by_user: Dict[str, Dict[str, object]] = {} + query_map: dict[tuple[str, str], tuple[int, Optional[str]]] = {} + cross_signing_keys_by_user: dict[str, dict[str, object]] = {} for user_id, device_id, update_stream_id, update_context in updates: # Calculate the remaining length budget. 
# Note that, for now, each entry in `cross_signing_keys_by_user` @@ -766,7 +762,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): from_stream_id: int, now_stream_id: int, limit: int, - ) -> List[Tuple[str, str, int, Optional[str]]]: + ) -> list[tuple[str, str, int, Optional[str]]]: """Return device update information for a given remote destination Args: @@ -792,14 +788,14 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): """ txn.execute(sql, (destination, from_stream_id, now_stream_id, limit)) - return cast(List[Tuple[str, str, int, Optional[str]]], txn.fetchall()) + return cast(list[tuple[str, str, int, Optional[str]]], txn.fetchall()) async def _get_device_update_edus_by_remote( self, destination: str, from_stream_id: int, - query_map: Dict[Tuple[str, str], Tuple[int, Optional[str]]], - ) -> List[Tuple[str, dict]]: + query_map: dict[tuple[str, str], tuple[int, Optional[str]]], + ) -> list[tuple[str, dict]]: """Returns a list of device update EDUs as well as E2EE keys Args: @@ -933,7 +929,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): txn.execute(sql, (destination, stream_id)) async def add_user_signature_change_to_streams( - self, from_user_id: str, user_ids: List[str] + self, from_user_id: str, user_ids: list[str] ) -> int: """Persist that a user has made new signatures @@ -962,7 +958,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): self, txn: LoggingTransaction, from_user_id: str, - user_ids: List[str], + user_ids: list[str], stream_id: int, ) -> None: txn.call_after( @@ -984,8 +980,8 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): @trace @cancellable async def get_user_devices_from_cache( - self, user_ids: Set[str], user_and_device_ids: List[Tuple[str, str]] - ) -> Tuple[Set[str], Dict[str, Mapping[str, JsonMapping]]]: + self, user_ids: set[str], user_and_device_ids: list[tuple[str, str]] + ) -> tuple[set[str], dict[str, Mapping[str, JsonMapping]]]: """Get the devices (and keys if any) for remote users from the cache. Args: @@ -1005,13 +1001,13 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): user_ids_not_in_cache = unique_user_ids - user_ids_in_cache # First fetch all the users which all devices are to be returned. - results: Dict[str, Mapping[str, JsonMapping]] = {} + results: dict[str, Mapping[str, JsonMapping]] = {} for user_id in user_ids: if user_id in user_ids_in_cache: results[user_id] = await self.get_cached_devices_for_user(user_id) # Then fetch all device-specific requests, but skip users we've already # fetched all devices for. 
- device_specific_results: Dict[str, Dict[str, JsonMapping]] = {} + device_specific_results: dict[str, dict[str, JsonMapping]] = {} for user_id, device_id in user_and_device_ids: if user_id in user_ids_in_cache and user_id not in user_ids: device = await self._get_cached_user_device(user_id, device_id) @@ -1025,7 +1021,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_users_whose_devices_are_cached( self, user_ids: StrCollection - ) -> Set[str]: + ) -> set[str]: """Checks which of the given users we have cached the devices for.""" user_map = await self.get_device_list_last_stream_id_for_remotes(user_ids) @@ -1056,7 +1052,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): self, user_id: str ) -> Mapping[str, JsonMapping]: devices = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="device_lists_remote_cache", keyvalues={"user_id": user_id}, @@ -1071,7 +1067,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): self, from_key: MultiWriterStreamToken, to_key: MultiWriterStreamToken, - ) -> Set[str]: + ) -> set[str]: """Get all users whose devices have changed in the given range. Args: @@ -1131,7 +1127,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): from_key: MultiWriterStreamToken, user_ids: Collection[str], to_key: Optional[MultiWriterStreamToken] = None, - ) -> Set[str]: + ) -> set[str]: """Get set of users whose devices have changed since `from_key` that are in the given list of user_ids. @@ -1164,14 +1160,14 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): txn: LoggingTransaction, from_key: MultiWriterStreamToken, to_key: MultiWriterStreamToken, - ) -> Set[str]: + ) -> set[str]: sql = """ SELECT user_id, stream_id, instance_name FROM device_lists_stream WHERE ? < stream_id AND stream_id <= ? AND %s """ - changes: Set[str] = set() + changes: set[str] = set() # Query device changes with a batch of users at a time for chunk in batch_iter(user_ids_to_check, 100): @@ -1204,7 +1200,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_users_whose_signatures_changed( self, user_id: str, from_key: MultiWriterStreamToken - ) -> Set[str]: + ) -> set[str]: """Get the users who have new cross-signing signatures made by `user_id` since `from_key`. @@ -1243,7 +1239,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_all_device_list_changes_for_remotes( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: """Get updates for device lists replication stream. Args: @@ -1270,7 +1266,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def _get_all_device_list_changes_for_remotes( txn: Cursor, - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: # This query Does The Right Thing where it'll correctly apply the # bounds to the inner queries. 
sql = """ @@ -1322,7 +1318,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): self, user_ids: Iterable[str] ) -> Mapping[str, Optional[str]]: rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_many_batch( table="device_lists_remote_extremeties", column="user_id", @@ -1332,7 +1328,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ), ) - results: Dict[str, Optional[str]] = dict.fromkeys(user_ids) + results: dict[str, Optional[str]] = dict.fromkeys(user_ids) results.update(rows) return results @@ -1340,7 +1336,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_user_ids_requiring_device_list_resync( self, user_ids: Optional[Collection[str]] = None, - ) -> Set[str]: + ) -> set[str]: """Given a list of remote users return the list of users that we should resync the device lists for. If None is given instead of a list, return every user that we should resync the device lists for. @@ -1350,7 +1346,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): """ if user_ids: rows = cast( - List[Tuple[str]], + list[tuple[str]], await self.db_pool.simple_select_many_batch( table="device_lists_remote_resync", column="user_id", @@ -1361,7 +1357,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) else: rows = cast( - List[Tuple[str]], + list[tuple[str]], await self.db_pool.simple_select_list( table="device_lists_remote_resync", keyvalues=None, @@ -1406,7 +1402,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): desc="mark_remote_user_device_cache_as_valid", ) - async def handle_potentially_left_users(self, user_ids: Set[str]) -> None: + async def handle_potentially_left_users(self, user_ids: set[str]) -> None: """Given a set of remote users check if the server still shares a room with them. If not then mark those users' device cache as stale. """ @@ -1423,7 +1419,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def handle_potentially_left_users_txn( self, txn: LoggingTransaction, - user_ids: Set[str], + user_ids: set[str], ) -> None: """Given a set of remote users check if the server still shares a room with them. If not then mark those users' device cache as stale. @@ -1463,7 +1459,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_dehydrated_device( self, user_id: str - ) -> Optional[Tuple[str, JsonDict]]: + ) -> Optional[tuple[str, JsonDict]]: """Retrieve the information for a dehydrated device. Args: @@ -1672,7 +1668,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_local_devices_not_accessed_since( self, since_ms: int - ) -> Dict[str, List[str]]: + ) -> dict[str, list[str]]: """Retrieves local devices that haven't been accessed since a given date. Args: @@ -1687,20 +1683,20 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def get_devices_not_accessed_since_txn( txn: LoggingTransaction, - ) -> List[Tuple[str, str]]: + ) -> list[tuple[str, str]]: sql = """ SELECT user_id, device_id FROM devices WHERE last_seen < ? 
AND hidden = FALSE """ txn.execute(sql, (since_ms,)) - return cast(List[Tuple[str, str]], txn.fetchall()) + return cast(list[tuple[str, str]], txn.fetchall()) rows = await self.db_pool.runInteraction( "get_devices_not_accessed_since", get_devices_not_accessed_since_txn, ) - devices: Dict[str, List[str]] = {} + devices: dict[str, list[str]] = {} for user_id, device_id in rows: # Remote devices are never stale from our point of view. if self.hs.is_mine_id(user_id): @@ -1728,7 +1724,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): room_ids: Collection[str], from_token: MultiWriterStreamToken, to_token: MultiWriterStreamToken, - ) -> Optional[Set[str]]: + ) -> Optional[set[str]]: """Return the set of users whose devices have changed in the given rooms since the given stream ID. @@ -1759,7 +1755,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def _get_device_list_changes_in_rooms_txn( txn: LoggingTransaction, chunk: list[str], - ) -> Set[str]: + ) -> set[str]: clause, args = make_in_list_sql_clause( self.database_engine, "room_id", chunk ) @@ -1788,7 +1784,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): return changes - async def get_all_device_list_changes(self, from_id: int, to_id: int) -> Set[str]: + async def get_all_device_list_changes(self, from_id: int, to_id: int) -> set[str]: """Return the set of rooms where devices have changed since the given stream ID. @@ -1807,7 +1803,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def _get_all_device_list_changes_txn( txn: LoggingTransaction, - ) -> Set[str]: + ) -> set[str]: txn.execute(sql, (from_id, to_id)) return {room_id for (room_id,) in txn} @@ -1818,7 +1814,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_device_list_changes_in_room( self, room_id: str, min_stream_id: int - ) -> Collection[Tuple[str, str]]: + ) -> Collection[tuple[str, str]]: """Get all device list changes that happened in the room since the given stream ID. @@ -1834,9 +1830,9 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def get_device_list_changes_in_room_txn( txn: LoggingTransaction, - ) -> Collection[Tuple[str, str]]: + ) -> Collection[tuple[str, str]]: txn.execute(sql, (room_id, min_stream_id)) - return cast(Collection[Tuple[str, str]], txn.fetchall()) + return cast(Collection[tuple[str, str]], txn.fetchall()) return await self.db_pool.runInteraction( "get_device_list_changes_in_room", @@ -1911,7 +1907,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) async def update_remote_device_list_cache( - self, user_id: str, devices: List[dict], stream_id: int + self, user_id: str, devices: list[dict], stream_id: int ) -> None: """Replace the entire cache of the remote user's devices. 
@@ -1932,7 +1928,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) def _update_remote_device_list_cache_txn( - self, txn: LoggingTransaction, user_id: str, devices: List[dict], stream_id: int + self, txn: LoggingTransaction, user_id: str, devices: list[dict], stream_id: int ) -> None: """Replace the list of cached devices for this user with the given list.""" self.db_pool.simple_delete_txn( @@ -2031,7 +2027,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): txn: LoggingTransaction, user_id: str, device_ids: Collection[str], - stream_ids: List[int], + stream_ids: list[int], ) -> None: txn.call_after( self._device_list_stream_cache.entity_has_changed, @@ -2076,7 +2072,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): device_id: str, hosts: Collection[str], stream_id: int, - context: Optional[Dict[str, str]], + context: Optional[dict[str, str]], ) -> None: if self._device_list_federation_stream_cache: for host in hosts: @@ -2163,8 +2159,8 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): user_id: str, device_ids: StrCollection, room_ids: StrCollection, - stream_ids: List[int], - context: Dict[str, str], + stream_ids: list[int], + context: dict[str, str], ) -> None: """Record the user in the room has updated their device.""" @@ -2208,7 +2204,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_uncoverted_outbound_room_pokes( self, start_stream_id: int, start_room_id: str, limit: int = 10 - ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]: + ) -> list[tuple[str, str, str, int, Optional[dict[str, str]]]]: """Get device list changes by room that have not yet been handled and written to `device_lists_outbound_pokes`. @@ -2236,7 +2232,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def get_uncoverted_outbound_room_pokes_txn( txn: LoggingTransaction, - ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]: + ) -> list[tuple[str, str, str, int, Optional[dict[str, str]]]]: txn.execute( sql, ( @@ -2270,7 +2266,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): device_id: str, room_id: str, hosts: Collection[str], - context: Optional[Dict[str, str]], + context: Optional[dict[str, str]], ) -> None: """Queue the device update to be sent to the given set of hosts, calculated from the room ID. @@ -2327,7 +2323,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_pending_remote_device_list_updates_for_room( self, room_id: str - ) -> Collection[Tuple[str, str]]: + ) -> Collection[tuple[str, str]]: """Get the set of remote device list updates from the pending table for the room. 
""" @@ -2361,16 +2357,16 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def get_pending_remote_device_list_updates_for_room_txn( txn: LoggingTransaction, - ) -> Collection[Tuple[str, str]]: + ) -> Collection[tuple[str, str]]: txn.execute(sql, (room_id, min_device_stream_id)) - return cast(Collection[Tuple[str, str]], txn.fetchall()) + return cast(Collection[tuple[str, str]], txn.fetchall()) return await self.db_pool.runInteraction( "get_pending_remote_device_list_updates_for_room", get_pending_remote_device_list_updates_for_room_txn, ) - async def get_device_change_last_converted_pos(self) -> Tuple[int, str]: + async def get_device_change_last_converted_pos(self) -> tuple[int, str]: """ Get the position of the last row in `device_list_changes_in_room` that has been converted to `device_lists_outbound_pokes`. @@ -2388,7 +2384,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): retcols=["stream_id", "room_id"], desc="get_device_change_last_converted_pos", ) - return cast(Tuple[int, str], min(rows)) + return cast(tuple[int, str], min(rows)) async def set_device_change_last_converted_pos( self, diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index 49c0575aca..99a951ca4a 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -19,7 +19,7 @@ # # -from typing import Iterable, List, Optional, Sequence, Tuple +from typing import Iterable, Optional, Sequence import attr @@ -34,7 +34,7 @@ from synapse.util.caches.descriptors import cached class RoomAliasMapping: room_id: str room_alias: str - servers: List[str] + servers: list[str] class DirectoryWorkerStore(CacheInvalidationWorkerStore): @@ -187,7 +187,7 @@ class DirectoryWorkerStore(CacheInvalidationWorkerStore): def _update_aliases_for_room_txn(txn: LoggingTransaction) -> None: update_creator_sql = "" - sql_params: Tuple[str, ...] = (new_room_id, old_room_id) + sql_params: tuple[str, ...] = (new_room_id, old_room_id) if creator: update_creator_sql = ", creator = ?" sql_params = (new_room_id, creator, old_room_id) diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index d978e115e4..a4d03d1d90 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -21,13 +21,10 @@ from typing import ( TYPE_CHECKING, - Dict, Iterable, - List, Literal, Mapping, Optional, - Tuple, TypedDict, cast, ) @@ -195,7 +192,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): ) async def add_e2e_room_keys( - self, user_id: str, version: str, room_keys: Iterable[Tuple[str, str, RoomKey]] + self, user_id: str, version: str, room_keys: Iterable[tuple[str, str, RoomKey]] ) -> None: """Bulk add room keys to a given backup. @@ -257,8 +254,8 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): version: str, room_id: Optional[str] = None, session_id: Optional[str] = None, - ) -> Dict[ - Literal["rooms"], Dict[str, Dict[Literal["sessions"], Dict[str, RoomKey]]] + ) -> dict[ + Literal["rooms"], dict[str, dict[Literal["sessions"], dict[str, RoomKey]]] ]: """Bulk get the E2E room keys for a given backup, optionally filtered to a given room, or a given session. 
@@ -290,7 +287,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): keyvalues["session_id"] = session_id rows = cast( - List[Tuple[str, str, int, int, int, str]], + list[tuple[str, str, int, int, int, str]], await self.db_pool.simple_select_list( table="e2e_room_keys", keyvalues=keyvalues, @@ -306,8 +303,8 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): ), ) - sessions: Dict[ - Literal["rooms"], Dict[str, Dict[Literal["sessions"], Dict[str, RoomKey]]] + sessions: dict[ + Literal["rooms"], dict[str, dict[Literal["sessions"], dict[str, RoomKey]]] ] = {"rooms": {}} for ( room_id, @@ -333,7 +330,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): user_id: str, version: str, room_keys: Mapping[str, Mapping[Literal["sessions"], Iterable[str]]], - ) -> Dict[str, Dict[str, RoomKey]]: + ) -> dict[str, dict[str, RoomKey]]: """Get multiple room keys at a time. The difference between this function and get_e2e_room_keys is that this function can be used to retrieve multiple specific keys at a time, whereas get_e2e_room_keys is used for @@ -370,7 +367,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): user_id: str, version: int, room_keys: Mapping[str, Mapping[Literal["sessions"], Iterable[str]]], - ) -> Dict[str, Dict[str, RoomKey]]: + ) -> dict[str, dict[str, RoomKey]]: if not room_keys: return {} @@ -400,7 +397,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): txn.execute(sql, params) - ret: Dict[str, Dict[str, RoomKey]] = {} + ret: dict[str, dict[str, RoomKey]] = {} for row in txn: room_id = row[0] @@ -483,7 +480,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): ) # `SELECT MAX() FROM ...` will always return 1 row. The value in that row will # be `NULL` when there are no available versions. - row = cast(Tuple[Optional[int]], txn.fetchone()) + row = cast(tuple[Optional[int]], txn.fetchone()) if row[0] is None: raise StoreError(404, "No current backup version") return row[0] @@ -559,7 +556,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", (user_id,), ) - current_version = cast(Tuple[Optional[int]], txn.fetchone())[0] + current_version = cast(tuple[Optional[int]], txn.fetchone())[0] if current_version is None: current_version = 0 @@ -600,7 +597,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): version_etag: etag of the keys in the backup. If None, then the etag is not updated. 
""" - updatevalues: Dict[str, object] = {} + updatevalues: dict[str, object] = {} if info is not None and "auth_data" in info: updatevalues["auth_data"] = json_encoder.encode(info["auth_data"]) diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 2d3d0c0036..991d64db44 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -25,15 +25,11 @@ from typing import ( TYPE_CHECKING, Any, Collection, - Dict, Iterable, - List, Literal, Mapping, Optional, Sequence, - Set, - Tuple, Union, cast, overload, @@ -155,7 +151,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def get_e2e_device_keys_for_federation_query( self, user_id: str - ) -> Tuple[int, Sequence[JsonMapping]]: + ) -> tuple[int, Sequence[JsonMapping]]: """Get all devices (with any device keys) for a user Returns: @@ -241,9 +237,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @cancellable async def get_e2e_device_keys_for_cs_api( self, - query_list: Collection[Tuple[str, Optional[str]]], + query_list: Collection[tuple[str, Optional[str]]], include_displaynames: bool = True, - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> dict[str, dict[str, JsonDict]]: """Fetch a list of device keys, formatted suitably for the C/S API. Args: query_list: List of pairs of user_ids and device_ids. @@ -262,7 +258,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker # Build the result structure, un-jsonify the results, and add the # "unsigned" section - rv: Dict[str, Dict[str, JsonDict]] = {} + rv: dict[str, dict[str, JsonDict]] = {} for user_id, device_keys in results.items(): rv[user_id] = {} for device_id, device_info in device_keys.items(): @@ -284,36 +280,36 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @overload async def get_e2e_device_keys_and_signatures( self, - query_list: Collection[Tuple[str, Optional[str]]], + query_list: Collection[tuple[str, Optional[str]]], include_all_devices: Literal[False] = False, - ) -> Dict[str, Dict[str, DeviceKeyLookupResult]]: ... + ) -> dict[str, dict[str, DeviceKeyLookupResult]]: ... @overload async def get_e2e_device_keys_and_signatures( self, - query_list: Collection[Tuple[str, Optional[str]]], + query_list: Collection[tuple[str, Optional[str]]], include_all_devices: bool = False, include_deleted_devices: Literal[False] = False, - ) -> Dict[str, Dict[str, DeviceKeyLookupResult]]: ... + ) -> dict[str, dict[str, DeviceKeyLookupResult]]: ... @overload async def get_e2e_device_keys_and_signatures( self, - query_list: Collection[Tuple[str, Optional[str]]], + query_list: Collection[tuple[str, Optional[str]]], include_all_devices: Literal[True], include_deleted_devices: Literal[True], - ) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]: ... + ) -> dict[str, dict[str, Optional[DeviceKeyLookupResult]]]: ... 
@trace @cancellable async def get_e2e_device_keys_and_signatures( self, - query_list: Collection[Tuple[str, Optional[str]]], + query_list: Collection[tuple[str, Optional[str]]], include_all_devices: bool = False, include_deleted_devices: bool = False, ) -> Union[ - Dict[str, Dict[str, DeviceKeyLookupResult]], - Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]], + dict[str, dict[str, DeviceKeyLookupResult]], + dict[str, dict[str, Optional[DeviceKeyLookupResult]]], ]: """Fetch a list of device keys @@ -388,18 +384,18 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def _get_e2e_device_keys( self, - query_list: Collection[Tuple[str, Optional[str]]], + query_list: Collection[tuple[str, Optional[str]]], include_all_devices: bool = False, include_deleted_devices: bool = False, - ) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]: + ) -> dict[str, dict[str, Optional[DeviceKeyLookupResult]]]: """Get information on devices from the database The results include the device's keys and self-signatures, but *not* any cross-signing signatures which have been added subsequently (for which, see get_e2e_device_keys_and_signatures) """ - query_clauses: List[str] = [] - query_params_list: List[List[object]] = [] + query_clauses: list[str] = [] + query_params_list: list[list[object]] = [] if include_all_devices is False: include_deleted_devices = False @@ -437,7 +433,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker query_clauses.append(user_device_id_in_list_clause) query_params_list.append(user_device_args) - result: Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]] = {} + result: dict[str, dict[str, Optional[DeviceKeyLookupResult]]] = {} def get_e2e_device_keys_txn( txn: LoggingTransaction, query_clause: str, query_params: list @@ -490,8 +486,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @cached() def _get_e2e_cross_signing_signatures_for_device( self, - user_id_and_device_id: Tuple[str, str], - ) -> Sequence[Tuple[str, str]]: + user_id_and_device_id: tuple[str, str], + ) -> Sequence[tuple[str, str]]: """ The single-item version of `_get_e2e_cross_signing_signatures_for_devices`. See @cachedList for why a separate method is needed. @@ -503,8 +499,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker list_name="device_query", ) async def _get_e2e_cross_signing_signatures_for_devices( - self, device_query: Iterable[Tuple[str, str]] - ) -> Mapping[Tuple[str, str], Sequence[Tuple[str, str]]]: + self, device_query: Iterable[tuple[str, str]] + ) -> Mapping[tuple[str, str], Sequence[tuple[str, str]]]: """Get cross-signing signatures for a given list of user IDs and devices. 
Args: @@ -524,8 +520,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """ def _get_e2e_cross_signing_signatures_for_devices_txn( - txn: LoggingTransaction, device_query: Iterable[Tuple[str, str]] - ) -> Mapping[Tuple[str, str], Sequence[Tuple[str, str]]]: + txn: LoggingTransaction, device_query: Iterable[tuple[str, str]] + ) -> Mapping[tuple[str, str], Sequence[tuple[str, str]]]: where_clause_sql, where_clause_params = make_tuple_in_list_sql_clause( self.database_engine, columns=("target_user_id", "target_device_id", "user_id"), @@ -541,7 +537,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker txn.execute(signature_sql, where_clause_params) - devices_and_signatures: Dict[Tuple[str, str], List[Tuple[str, str]]] = {} + devices_and_signatures: dict[tuple[str, str], list[tuple[str, str]]] = {} # `@cachedList` requires we return one key for every item in `device_query`. # Pre-populate `devices_and_signatures` with each key so that none are missing. @@ -567,8 +563,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ) async def get_e2e_one_time_keys( - self, user_id: str, device_id: str, key_ids: List[str] - ) -> Dict[Tuple[str, str], str]: + self, user_id: str, device_id: str, key_ids: list[str] + ) -> dict[tuple[str, str], str]: """Retrieve a number of one-time keys for a user Args: @@ -581,7 +577,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """ rows = cast( - List[Tuple[str, str, str]], + list[tuple[str, str, str]], await self.db_pool.simple_select_many_batch( table="e2e_one_time_keys_json", column="key_id", @@ -600,7 +596,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker user_id: str, device_id: str, time_now: int, - new_keys: Iterable[Tuple[str, str, str]], + new_keys: Iterable[tuple[str, str, str]], ) -> None: """Insert some new one time keys for a device. Errors if any of the keys already exist. @@ -627,7 +623,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker user_id: str, device_id: str, time_now: int, - new_keys: Iterable[Tuple[str, str, str]], + new_keys: Iterable[tuple[str, str, str]], ) -> None: """Insert some new one time keys for a device. Errors if any of the keys already exist. @@ -674,7 +670,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker A mapping from algorithm to number of keys for that algorithm. """ - def _count_e2e_one_time_keys(txn: LoggingTransaction) -> Dict[str, int]: + def _count_e2e_one_time_keys(txn: LoggingTransaction) -> dict[str, int]: sql = ( "SELECT algorithm, COUNT(key_id) FROM e2e_one_time_keys_json" " WHERE user_id = ? AND device_id = ?" @@ -962,7 +958,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker self, txn: LoggingTransaction, user_ids: Iterable[str], - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> dict[str, dict[str, JsonDict]]: """Returns the cross-signing keys for a set of users. The output of this function should be passed to _get_e2e_cross_signing_signatures_txn if the signatures for the calling user need to be fetched. @@ -977,7 +973,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker the dict. 
""" - result: Dict[str, Dict[str, JsonDict]] = {} + result: dict[str, dict[str, JsonDict]] = {} for user_chunk in batch_iter(user_ids, 100): clause, params = make_in_list_sql_clause( @@ -1017,9 +1013,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker def _get_e2e_cross_signing_signatures_txn( self, txn: LoggingTransaction, - keys: Dict[str, Optional[Dict[str, JsonDict]]], + keys: dict[str, Optional[dict[str, JsonDict]]], from_user_id: str, - ) -> Dict[str, Optional[Dict[str, JsonDict]]]: + ) -> dict[str, Optional[dict[str, JsonDict]]]: """Returns the cross-signing signatures made by a user on a set of keys. Args: @@ -1037,7 +1033,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker # find out what cross-signing keys (a.k.a. devices) we need to get # signatures for. This is a map of (user_id, device_id) to key type # (device_id is the key's public part). - devices: Dict[Tuple[str, str], str] = {} + devices: dict[tuple[str, str], str] = {} for user_id, user_keys in keys.items(): if user_keys is None: @@ -1100,7 +1096,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @cancellable async def get_e2e_cross_signing_keys_bulk( - self, user_ids: List[str], from_user_id: Optional[str] = None + self, user_ids: list[str], from_user_id: Optional[str] = None ) -> Mapping[str, Optional[Mapping[str, JsonMapping]]]: """Returns the cross-signing keys for a set of users. @@ -1118,7 +1114,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker if from_user_id: result = cast( - Dict[str, Optional[Mapping[str, JsonMapping]]], + dict[str, Optional[Mapping[str, JsonMapping]]], await self.db_pool.runInteraction( "get_e2e_cross_signing_signatures", self._get_e2e_cross_signing_signatures_txn, @@ -1131,7 +1127,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def get_all_user_signature_changes_for_remotes( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: """Get updates for groups replication stream. Note that the user signature stream represents when a user signs their @@ -1163,7 +1159,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker def _get_all_user_signature_changes_for_remotes_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: sql = """ SELECT stream_id, from_user_id AS user_id FROM user_signature_stream @@ -1194,9 +1190,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ... async def claim_e2e_one_time_keys( - self, query_list: Collection[Tuple[str, str, str, int]] - ) -> Tuple[ - Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]] + self, query_list: Collection[tuple[str, str, str, int]] + ) -> tuple[ + dict[str, dict[str, dict[str, JsonDict]]], list[tuple[str, str, str, int]] ]: """Take a list of one time keys out of the database. @@ -1211,12 +1207,12 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker may be less than the input counts. In this case, the returned counts are the number of claims that were not fulfilled. 
""" - results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} - missing: List[Tuple[str, str, str, int]] = [] + results: dict[str, dict[str, dict[str, JsonDict]]] = {} + missing: list[tuple[str, str, str, int]] = [] if isinstance(self.database_engine, PostgresEngine): # If we can use execute_values we can use a single batch query # in autocommit mode. - unfulfilled_claim_counts: Dict[Tuple[str, str, str], int] = {} + unfulfilled_claim_counts: dict[tuple[str, str, str], int] = {} for user_id, device_id, algorithm, count in query_list: unfulfilled_claim_counts[user_id, device_id, algorithm] = count @@ -1265,8 +1261,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker return results, missing async def claim_e2e_fallback_keys( - self, query_list: Iterable[Tuple[str, str, str, bool]] - ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + self, query_list: Iterable[tuple[str, str, str, bool]] + ) -> dict[str, dict[str, dict[str, JsonDict]]]: """Take a list of fallback keys out of the database. Args: @@ -1293,13 +1289,13 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker def _claim_e2e_fallback_keys_bulk_txn( self, txn: LoggingTransaction, - query_list: Iterable[Tuple[str, str, str, bool]], - ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + query_list: Iterable[tuple[str, str, str, bool]], + ) -> dict[str, dict[str, dict[str, JsonDict]]]: """Efficient implementation of claim_e2e_fallback_keys for Postgres. Safe to autocommit: this is a single query. """ - results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + results: dict[str, dict[str, dict[str, JsonDict]]] = {} sql = """ WITH claims(user_id, device_id, algorithm, mark_as_used) AS ( @@ -1312,11 +1308,11 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker RETURNING k.user_id, k.device_id, k.algorithm, k.key_id, k.key_json; """ claimed_keys = cast( - List[Tuple[str, str, str, str, str]], + list[tuple[str, str, str, str, str]], txn.execute_values(sql, query_list), ) - seen_user_device: Set[Tuple[str, str]] = set() + seen_user_device: set[tuple[str, str]] = set() for user_id, device_id, algorithm, key_id, key_json in claimed_keys: device_results = results.setdefault(user_id, {}).setdefault(device_id, {}) device_results[f"{algorithm}:{key_id}"] = json_decoder.decode(key_json) @@ -1330,10 +1326,10 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def _claim_e2e_fallback_keys_simple( self, - query_list: Iterable[Tuple[str, str, str, bool]], - ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + query_list: Iterable[tuple[str, str, str, bool]], + ) -> dict[str, dict[str, dict[str, JsonDict]]]: """Naive, inefficient implementation of claim_e2e_fallback_keys for SQLite.""" - results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + results: dict[str, dict[str, dict[str, JsonDict]]] = {} for user_id, device_id, algorithm, mark_as_used in query_list: row = await self.db_pool.simple_select_one( table="e2e_fallback_keys_json", @@ -1381,7 +1377,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker device_id: str, algorithm: str, count: int, - ) -> List[Tuple[str, str]]: + ) -> list[tuple[str, str]]: """Claim OTK for device for DBs that don't support RETURNING. 
Returns: @@ -1426,8 +1422,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker def _claim_e2e_one_time_keys_bulk( self, txn: LoggingTransaction, - query_list: Iterable[Tuple[str, str, str, int]], - ) -> List[Tuple[str, str, str, str, str]]: + query_list: Iterable[tuple[str, str, str, int]], + ) -> list[tuple[str, str, str, str, str]]: """Bulk claim OTKs, for DBs that support DELETE FROM... RETURNING. Args: @@ -1466,7 +1462,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker RETURNING user_id, device_id, algorithm, key_id, key_json; """ otk_rows = cast( - List[Tuple[str, str, str, str, str]], txn.execute_values(sql, query_list) + list[tuple[str, str, str, str, str]], txn.execute_values(sql, query_list) ) seen_user_device = { @@ -1482,7 +1478,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def get_master_cross_signing_key_updatable_before( self, user_id: str - ) -> Tuple[bool, Optional[int]]: + ) -> tuple[bool, Optional[int]]: """Get time before which a master cross-signing key may be replaced without UIA. (UIA means "User-Interactive Auth".) @@ -1503,7 +1499,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """ - def impl(txn: LoggingTransaction) -> Tuple[bool, Optional[int]]: + def impl(txn: LoggingTransaction) -> tuple[bool, Optional[int]]: # We want to distinguish between three cases: txn.execute( """ @@ -1515,7 +1511,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """, (user_id,), ) - row = cast(Optional[Tuple[Optional[int]]], txn.fetchone()) + row = cast(Optional[tuple[Optional[int]]], txn.fetchone()) if row is None: return False, None return True, row[0] @@ -1527,7 +1523,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def delete_old_otks_for_next_user_batch( self, after_user_id: str, number_of_users: int - ) -> Tuple[List[str], int]: + ) -> tuple[list[str], int]: """Deletes old OTKs belonging to the next batch of users Returns: @@ -1536,7 +1532,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker * `rows` is the number of deleted rows """ - def impl(txn: LoggingTransaction) -> Tuple[List[str], int]: + def impl(txn: LoggingTransaction) -> tuple[list[str], int]: # Find a batch of users txn.execute( """ diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index d77420ff47..d889e8eceb 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -25,15 +25,10 @@ from queue import Empty, PriorityQueue from typing import ( TYPE_CHECKING, Collection, - Dict, - FrozenSet, Generator, Iterable, - List, Optional, Sequence, - Set, - Tuple, cast, ) @@ -132,9 +127,9 @@ class BackfillQueueNavigationItem: @attr.s(frozen=True, slots=True, auto_attribs=True) class StateDifference: # The event IDs in the auth difference. - auth_difference: Set[str] + auth_difference: set[str] # The event IDs in the conflicted state subgraph. Used in v2.1 only. - conflicted_subgraph: Optional[Set[str]] + conflicted_subgraph: Optional[set[str]] class _NoChainCoverIndex(Exception): @@ -165,7 +160,7 @@ class EventFederationWorkerStore( ) # Cache of event ID to list of auth event IDs and their depths. 
- self._event_auth_cache: LruCache[str, List[Tuple[str, int]]] = LruCache( + self._event_auth_cache: LruCache[str, list[tuple[str, int]]] = LruCache( max_size=500000, clock=self.hs.get_clock(), server_name=self.server_name, @@ -199,7 +194,7 @@ class EventFederationWorkerStore( async def get_auth_chain( self, room_id: str, event_ids: Collection[str], include_given: bool = False - ) -> List[EventBase]: + ) -> list[EventBase]: """Get auth events for given event_ids. The events *must* be state events. Args: @@ -222,7 +217,7 @@ class EventFederationWorkerStore( room_id: str, event_ids: Collection[str], include_given: bool = False, - ) -> Set[str]: + ) -> set[str]: """Get auth events for given event_ids. The events *must* be state events. Args: @@ -267,7 +262,7 @@ class EventFederationWorkerStore( room_id: str, event_ids: Collection[str], include_given: bool, - ) -> Set[str]: + ) -> set[str]: """Calculates the auth chain IDs using the chain index.""" # First we look up the chain ID/sequence numbers for the given events. @@ -275,10 +270,10 @@ class EventFederationWorkerStore( initial_events = set(event_ids) # All the events that we've found that are reachable from the events. - seen_events: Set[str] = set() + seen_events: set[str] = set() # A map from chain ID to max sequence number of the given events. - event_chains: Dict[int, int] = {} + event_chains: dict[int, int] = {} sql = """ SELECT event_id, chain_id, sequence_number @@ -313,7 +308,7 @@ class EventFederationWorkerStore( # are reachable from any event. # A map from chain ID to max sequence number *reachable* from any event ID. - chains: Dict[int, int] = {} + chains: dict[int, int] = {} for links in self._get_chain_links(txn, set(event_chains.keys())): for chain_id in links: if chain_id not in event_chains: @@ -366,8 +361,8 @@ class EventFederationWorkerStore( @classmethod def _get_chain_links( - cls, txn: LoggingTransaction, chains_to_fetch: Set[int] - ) -> Generator[Dict[int, List[Tuple[int, int, int]]], None, None]: + cls, txn: LoggingTransaction, chains_to_fetch: set[int] + ) -> Generator[dict[int, list[tuple[int, int, int]]], None, None]: """Fetch all auth chain links from the given set of chains, and all links from those chains, recursively. @@ -410,7 +405,7 @@ class EventFederationWorkerStore( ) txn.execute(sql % (clause,), args) - links: Dict[int, List[Tuple[int, int, int]]] = {} + links: dict[int, list[tuple[int, int, int]]] = {} for ( origin_chain_id, @@ -428,7 +423,7 @@ class EventFederationWorkerStore( def _get_auth_chain_ids_txn( self, txn: LoggingTransaction, event_ids: Collection[str], include_given: bool - ) -> Set[str]: + ) -> set[str]: """Calculates the auth chain IDs. This is used when we don't have a cover index for the room. @@ -449,10 +444,10 @@ class EventFederationWorkerStore( front = set(event_ids) while front: - new_front: Set[str] = set() + new_front: set[str] = set() for chunk in batch_iter(front, 100): # Pull the auth events either from the cache or DB. - to_fetch: List[str] = [] # Event IDs to fetch from DB + to_fetch: list[str] = [] # Event IDs to fetch from DB for event_id in chunk: res = self._event_auth_cache.get(event_id) if res is None: @@ -468,7 +463,7 @@ class EventFederationWorkerStore( # Note we need to batch up the results by event ID before # adding to the cache. 
- to_cache: Dict[str, List[Tuple[str, int]]] = {} + to_cache: dict[str, list[tuple[str, int]]] = {} for event_id, auth_event_id, auth_event_depth in txn: to_cache.setdefault(event_id, []).append( (auth_event_id, auth_event_depth) @@ -488,8 +483,8 @@ class EventFederationWorkerStore( async def get_auth_chain_difference( self, room_id: str, - state_sets: List[Set[str]], - ) -> Set[str]: + state_sets: list[set[str]], + ) -> set[str]: state_diff = await self.get_auth_chain_difference_extended( room_id, state_sets, None, None ) @@ -498,9 +493,9 @@ class EventFederationWorkerStore( async def get_auth_chain_difference_extended( self, room_id: str, - state_sets: List[Set[str]], - conflicted_set: Optional[Set[str]], - additional_backwards_reachable_conflicted_events: Optional[Set[str]], + state_sets: list[set[str]], + conflicted_set: Optional[set[str]], + additional_backwards_reachable_conflicted_events: Optional[set[str]], ) -> StateDifference: """ "Given sets of state events figure out the auth chain difference (as per state res v2 algorithm). @@ -560,9 +555,9 @@ class EventFederationWorkerStore( self, txn: LoggingTransaction, room_id: str, - state_sets: List[Set[str]], - conflicted_set: Optional[Set[str]] = None, - additional_backwards_reachable_conflicted_events: Optional[Set[str]] = None, + state_sets: list[set[str]], + conflicted_set: Optional[set[str]] = None, + additional_backwards_reachable_conflicted_events: Optional[set[str]] = None, ) -> StateDifference: """Calculates the auth chain difference using the chain index. @@ -587,14 +582,14 @@ class EventFederationWorkerStore( ) # Map from event_id -> (chain ID, seq no) - chain_info: Dict[str, Tuple[int, int]] = {} + chain_info: dict[str, tuple[int, int]] = {} # Map from chain ID -> seq no -> event Id - chain_to_event: Dict[int, Dict[int, str]] = {} + chain_to_event: dict[int, dict[int, str]] = {} # All the chains that we've found that are reachable from the state # sets. - seen_chains: Set[int] = set() + seen_chains: set[int] = set() # Fetch the chain cover index for the initial set of events we're # considering. @@ -621,7 +616,7 @@ class EventFederationWorkerStore( events_missing_chain_info = initial_events.difference(chain_info) # The result set to return, i.e. the auth chain difference. - auth_difference_result: Set[str] = set() + auth_difference_result: set[str] = set() if events_missing_chain_info: # For some reason we have events we haven't calculated the chain @@ -652,21 +647,21 @@ class EventFederationWorkerStore( # A subset of chain_info for conflicted events only, as we need to # loop all conflicted chain positions. Map from event_id -> (chain ID, seq no) - conflicted_chain_positions: Dict[str, Tuple[int, int]] = {} + conflicted_chain_positions: dict[str, tuple[int, int]] = {} # For each chain, remember the positions where conflicted events are. # We need this for calculating the forward reachable events. - conflicted_chain_to_seq: Dict[int, Set[int]] = {} # chain_id => {seq_num} + conflicted_chain_to_seq: dict[int, set[int]] = {} # chain_id => {seq_num} # A subset of chain_info for additional backwards reachable events only, as we need to # loop all additional backwards reachable events for calculating backwards reachable events. - additional_backwards_reachable_positions: Dict[ - str, Tuple[int, int] + additional_backwards_reachable_positions: dict[ + str, tuple[int, int] ] = {} # event_id => (chain_id, seq_num) # These next two fields are critical as the intersection of them is the conflicted subgraph. 
# We'll populate them when we walk the chain links. # chain_id => max(seq_num) backwards reachable (e.g 4 means 1,2,3,4 are backwards reachable) - conflicted_backwards_reachable: Dict[int, int] = {} + conflicted_backwards_reachable: dict[int, int] = {} # chain_id => min(seq_num) forwards reachable (e.g 4 means 4,5,6..n are forwards reachable) - conflicted_forwards_reachable: Dict[int, int] = {} + conflicted_forwards_reachable: dict[int, int] = {} # populate the v2.1 data structures if is_state_res_v21: @@ -688,9 +683,9 @@ class EventFederationWorkerStore( # Corresponds to `state_sets`, except as a map from chain ID to max # sequence number reachable from the state set. - set_to_chain: List[Dict[int, int]] = [] + set_to_chain: list[dict[int, int]] = [] for state_set in state_sets: - chains: Dict[int, int] = {} + chains: dict[int, int] = {} set_to_chain.append(chains) for state_id in state_set: @@ -802,7 +797,7 @@ class EventFederationWorkerStore( # Mapping from chain ID to the range of sequence numbers that should be # pulled from the database. - auth_diff_chain_to_gap: Dict[int, Tuple[int, int]] = {} + auth_diff_chain_to_gap: dict[int, tuple[int, int]] = {} for chain_id in seen_chains: min_seq_no = min(chains.get(chain_id, 0) for chains in set_to_chain) @@ -820,10 +815,10 @@ class EventFederationWorkerStore( auth_diff_chain_to_gap[chain_id] = (min_seq_no, max_seq_no) break - conflicted_subgraph_result: Set[str] = set() + conflicted_subgraph_result: set[str] = set() # Mapping from chain ID to the range of sequence numbers that should be # pulled from the database. - conflicted_subgraph_chain_to_gap: Dict[int, Tuple[int, int]] = {} + conflicted_subgraph_chain_to_gap: dict[int, tuple[int, int]] = {} if is_state_res_v21: # also include the conflicted subgraph using backward/forward reachability info from all # the conflicted events. To calculate this, we want to extract the intersection between @@ -882,9 +877,9 @@ class EventFederationWorkerStore( ) def _fetch_event_ids_from_chains_txn( - self, txn: LoggingTransaction, chains: Dict[int, Tuple[int, int]] - ) -> Set[str]: - result: Set[str] = set() + self, txn: LoggingTransaction, chains: dict[int, tuple[int, int]] + ) -> set[str]: + result: set[str] = set() if isinstance(self.database_engine, PostgresEngine): # We can use `execute_values` to efficiently fetch the gaps when # using postgres. @@ -918,10 +913,10 @@ class EventFederationWorkerStore( self, txn: LoggingTransaction, room_id: str, - state_sets: List[Set[str]], - events_missing_chain_info: Set[str], + state_sets: list[set[str]], + events_missing_chain_info: set[str], events_that_have_chain_index: Collection[str], - ) -> Set[str]: + ) -> set[str]: """Helper for `_get_auth_chain_difference_using_cover_index_txn` to handle the case where we haven't calculated the chain cover index for all events. @@ -962,7 +957,7 @@ class EventFederationWorkerStore( WHERE tc.room_id = ? """ txn.execute(sql, (room_id,)) - event_to_auth_ids: Dict[str, Set[str]] = {} + event_to_auth_ids: dict[str, set[str]] = {} events_that_have_chain_index = set(events_that_have_chain_index) for event_id, auth_id, auth_id_has_chain in txn: s = event_to_auth_ids.setdefault(event_id, set()) @@ -982,7 +977,7 @@ class EventFederationWorkerStore( raise _NoChainCoverIndex(room_id) # Create a map from event IDs we care about to their partial auth chain. 
- event_id_to_partial_auth_chain: Dict[str, Set[str]] = {} + event_id_to_partial_auth_chain: dict[str, set[str]] = {} for event_id, auth_ids in event_to_auth_ids.items(): if not any(event_id in state_set for state_set in state_sets): continue @@ -1005,7 +1000,7 @@ class EventFederationWorkerStore( # 1. Update the state sets to only include indexed events; and # 2. Create a new list containing the auth chains of the un-indexed # events - unindexed_state_sets: List[Set[str]] = [] + unindexed_state_sets: list[set[str]] = [] for state_set in state_sets: unindexed_state_set = set() for event_id, auth_chain in event_id_to_partial_auth_chain.items(): @@ -1031,8 +1026,8 @@ class EventFederationWorkerStore( return union - intersection def _get_auth_chain_difference_txn( - self, txn: LoggingTransaction, state_sets: List[Set[str]] - ) -> Set[str]: + self, txn: LoggingTransaction, state_sets: list[set[str]] + ) -> set[str]: """Calculates the auth chain difference using a breadth first search. This is used when we don't have a cover index for the room. @@ -1087,7 +1082,7 @@ class EventFederationWorkerStore( } # The sorted list of events whose auth chains we should walk. - search: List[Tuple[int, str]] = [] + search: list[tuple[int, str]] = [] # We need to get the depth of the initial events for sorting purposes. sql = """ @@ -1104,13 +1099,13 @@ class EventFederationWorkerStore( # I think building a temporary list with fetchall is more efficient than # just `search.extend(txn)`, but this is unconfirmed - search.extend(cast(List[Tuple[int, str]], txn.fetchall())) + search.extend(cast(list[tuple[int, str]], txn.fetchall())) # sort by depth search.sort() # Map from event to its auth events - event_to_auth_events: Dict[str, Set[str]] = {} + event_to_auth_events: dict[str, set[str]] = {} base_sql = """ SELECT a.event_id, auth_id, depth @@ -1129,8 +1124,8 @@ class EventFederationWorkerStore( # currently walking, either from cache or DB. search, chunk = search[:-100], search[-100:] - found: List[Tuple[str, str, int]] = [] # Results found - to_fetch: List[str] = [] # Event IDs to fetch from DB + found: list[tuple[str, str, int]] = [] # Results found + to_fetch: list[str] = [] # Event IDs to fetch from DB for _, event_id in chunk: res = self._event_auth_cache.get(event_id) if res is None: @@ -1147,7 +1142,7 @@ class EventFederationWorkerStore( # We parse the results and add the to the `found` set and the # cache (note we need to batch up the results by event ID before # adding to the cache). - to_cache: Dict[str, List[Tuple[str, int]]] = {} + to_cache: dict[str, list[tuple[str, int]]] = {} for event_id, auth_event_id, auth_event_depth in txn: to_cache.setdefault(event_id, []).append( (auth_event_id, auth_event_depth) @@ -1204,7 +1199,7 @@ class EventFederationWorkerStore( room_id: str, current_depth: int, limit: int, - ) -> List[Tuple[str, int]]: + ) -> list[tuple[str, int]]: """ Get the backward extremities to backfill from in the room along with the approximate depth. @@ -1235,7 +1230,7 @@ class EventFederationWorkerStore( def get_backfill_points_in_room_txn( txn: LoggingTransaction, room_id: str - ) -> List[Tuple[str, int]]: + ) -> list[tuple[str, int]]: # Assemble a tuple lookup of event_id -> depth for the oldest events # we know of in the room. 
Backwards extremeties are the oldest # events we know of in the room but we only know of them because @@ -1336,7 +1331,7 @@ class EventFederationWorkerStore( ), ) - return cast(List[Tuple[str, int]], txn.fetchall()) + return cast(list[tuple[str, int]], txn.fetchall()) return await self.db_pool.runInteraction( "get_backfill_points_in_room", @@ -1346,14 +1341,14 @@ class EventFederationWorkerStore( async def get_max_depth_of( self, event_ids: Collection[str] - ) -> Tuple[Optional[str], int]: + ) -> tuple[Optional[str], int]: """Returns the event ID and depth for the event that has the max depth from a set of event IDs Args: event_ids: The event IDs to calculate the max depth of. """ rows = cast( - List[Tuple[str, int]], + list[tuple[str, int]], await self.db_pool.simple_select_many_batch( table="events", column="event_id", @@ -1378,14 +1373,14 @@ class EventFederationWorkerStore( return max_depth_event_id, current_max_depth - async def get_min_depth_of(self, event_ids: List[str]) -> Tuple[Optional[str], int]: + async def get_min_depth_of(self, event_ids: list[str]) -> tuple[Optional[str], int]: """Returns the event ID and depth for the event that has the min depth from a set of event IDs Args: event_ids: The event IDs to calculate the max depth of. """ rows = cast( - List[Tuple[str, int]], + list[tuple[str, int]], await self.db_pool.simple_select_many_batch( table="events", column="event_id", @@ -1410,7 +1405,7 @@ class EventFederationWorkerStore( return min_depth_event_id, current_min_depth - async def get_prev_events_for_room(self, room_id: str) -> List[str]: + async def get_prev_events_for_room(self, room_id: str) -> list[str]: """ Gets a subset of the current forward extremities in the given room. @@ -1431,7 +1426,7 @@ class EventFederationWorkerStore( def _get_prev_events_for_room_txn( self, txn: LoggingTransaction, room_id: str - ) -> List[str]: + ) -> list[str]: # we just use the 10 newest events. Older events will become # prev_events of future events. @@ -1449,7 +1444,7 @@ class EventFederationWorkerStore( async def get_rooms_with_many_extremities( self, min_count: int, limit: int, room_id_filter: Iterable[str] - ) -> List[str]: + ) -> list[str]: """Get the top rooms with at least N extremities. Args: @@ -1462,7 +1457,7 @@ class EventFederationWorkerStore( sorted by extremity count. """ - def _get_rooms_with_many_extremities_txn(txn: LoggingTransaction) -> List[str]: + def _get_rooms_with_many_extremities_txn(txn: LoggingTransaction) -> list[str]: where_clause = "1=1" if room_id_filter: where_clause = "room_id NOT IN (%s)" % ( @@ -1487,7 +1482,7 @@ class EventFederationWorkerStore( ) @cached(max_entries=5000, iterable=True) - async def get_latest_event_ids_in_room(self, room_id: str) -> FrozenSet[str]: + async def get_latest_event_ids_in_room(self, room_id: str) -> frozenset[str]: event_ids = await self.db_pool.simple_select_onecol( table="event_forward_extremities", keyvalues={"room_id": room_id}, @@ -1610,7 +1605,7 @@ class EventFederationWorkerStore( WHERE room_id = ? 
""" - def get_forward_extremeties_for_room_txn(txn: LoggingTransaction) -> List[str]: + def get_forward_extremeties_for_room_txn(txn: LoggingTransaction) -> list[str]: txn.execute(sql, (stream_ordering, room_id)) return [event_id for (event_id,) in txn] @@ -1627,7 +1622,7 @@ class EventFederationWorkerStore( def _get_connected_prev_event_backfill_results_txn( self, txn: LoggingTransaction, event_id: str, limit: int - ) -> List[BackfillQueueNavigationItem]: + ) -> list[BackfillQueueNavigationItem]: """ Find any events connected by prev_event the specified event_id. @@ -1675,8 +1670,8 @@ class EventFederationWorkerStore( ] async def get_backfill_events( - self, room_id: str, seed_event_id_list: List[str], limit: int - ) -> List[EventBase]: + self, room_id: str, seed_event_id_list: list[str], limit: int + ) -> list[EventBase]: """Get a list of Events for a given topic that occurred before (and including) the events in seed_event_id_list. Return a list of max size `limit` @@ -1704,9 +1699,9 @@ class EventFederationWorkerStore( self, txn: LoggingTransaction, room_id: str, - seed_event_id_list: List[str], + seed_event_id_list: list[str], limit: int, - ) -> Set[str]: + ) -> set[str]: """ We want to make sure that we do a breadth-first, "depth" ordered search. We also handle navigating historical branches of history connected by @@ -1719,7 +1714,7 @@ class EventFederationWorkerStore( limit, ) - event_id_results: Set[str] = set() + event_id_results: set[str] = set() # In a PriorityQueue, the lowest valued entries are retrieved first. # We're using depth as the priority in the queue and tie-break based on @@ -1727,7 +1722,7 @@ class EventFederationWorkerStore( # highest and newest-in-time message. We add events to the queue with a # negative depth so that we process the newest-in-time messages first # going backwards in time. stream_ordering follows the same pattern. - queue: "PriorityQueue[Tuple[int, int, str, str]]" = PriorityQueue() + queue: "PriorityQueue[tuple[int, int, str, str]]" = PriorityQueue() for seed_event_id in seed_event_id_list: event_lookup_result = self.db_pool.simple_select_one_txn( @@ -1847,7 +1842,7 @@ class EventFederationWorkerStore( @trace async def get_event_ids_with_failed_pull_attempts( self, event_ids: StrCollection - ) -> Set[str]: + ) -> set[str]: """ Filter the given list of `event_ids` and return events which have any failed pull attempts. @@ -1860,7 +1855,7 @@ class EventFederationWorkerStore( """ rows = cast( - List[Tuple[str]], + list[tuple[str]], await self.db_pool.simple_select_many_batch( table="event_failed_pull_attempts", column="event_id", @@ -1877,7 +1872,7 @@ class EventFederationWorkerStore( self, room_id: str, event_ids: Collection[str], - ) -> Dict[str, int]: + ) -> dict[str, int]: """ Filter down the events to ones that we've failed to pull before recently. Uses exponential backoff. @@ -1891,7 +1886,7 @@ class EventFederationWorkerStore( next timestamp at which we may try pulling them again. 
""" event_failed_pull_attempts = cast( - List[Tuple[str, int, int]], + list[tuple[str, int, int]], await self.db_pool.simple_select_many_batch( table="event_failed_pull_attempts", column="event_id", @@ -1932,10 +1927,10 @@ class EventFederationWorkerStore( async def get_missing_events( self, room_id: str, - earliest_events: List[str], - latest_events: List[str], + earliest_events: list[str], + latest_events: list[str], limit: int, - ) -> List[EventBase]: + ) -> list[EventBase]: ids = await self.db_pool.runInteraction( "get_missing_events", self._get_missing_events, @@ -1950,13 +1945,13 @@ class EventFederationWorkerStore( self, txn: LoggingTransaction, room_id: str, - earliest_events: List[str], - latest_events: List[str], + earliest_events: list[str], + latest_events: list[str], limit: int, - ) -> List[str]: + ) -> list[str]: seen_events = set(earliest_events) front = set(latest_events) - seen_events - event_results: List[str] = [] + event_results: list[str] = [] query = ( "SELECT prev_event_id FROM event_edges " @@ -1983,7 +1978,7 @@ class EventFederationWorkerStore( @trace @tag_args - async def get_successor_events(self, event_id: str) -> List[str]: + async def get_successor_events(self, event_id: str) -> list[str]: """Fetch all events that have the given event as a prev event Args: @@ -2057,7 +2052,7 @@ class EventFederationWorkerStore( """ txn.execute(sql, (origin, event_id)) - row = cast(Optional[Tuple[int]], txn.fetchone()) + row = cast(Optional[tuple[int]], txn.fetchone()) if row is None: return None @@ -2104,7 +2099,7 @@ class EventFederationWorkerStore( async def get_next_staged_event_id_for_room( self, room_id: str, - ) -> Optional[Tuple[str, str]]: + ) -> Optional[tuple[str, str]]: """ Get the next event ID in the staging area for the given room. @@ -2114,7 +2109,7 @@ class EventFederationWorkerStore( def _get_next_staged_event_id_for_room_txn( txn: LoggingTransaction, - ) -> Optional[Tuple[str, str]]: + ) -> Optional[tuple[str, str]]: sql = """ SELECT origin, event_id FROM federation_inbound_events_staging @@ -2125,7 +2120,7 @@ class EventFederationWorkerStore( txn.execute(sql, (room_id,)) - return cast(Optional[Tuple[str, str]], txn.fetchone()) + return cast(Optional[tuple[str, str]], txn.fetchone()) return await self.db_pool.runInteraction( "get_next_staged_event_id_for_room", _get_next_staged_event_id_for_room_txn @@ -2135,12 +2130,12 @@ class EventFederationWorkerStore( self, room_id: str, room_version: RoomVersion, - ) -> Optional[Tuple[str, EventBase]]: + ) -> Optional[tuple[str, EventBase]]: """Get the next event in the staging area for the given room.""" def _get_next_staged_event_for_room_txn( txn: LoggingTransaction, - ) -> Optional[Tuple[str, str, str]]: + ) -> Optional[tuple[str, str, str]]: sql = """ SELECT event_json, internal_metadata, origin FROM federation_inbound_events_staging @@ -2150,7 +2145,7 @@ class EventFederationWorkerStore( """ txn.execute(sql, (room_id,)) - return cast(Optional[Tuple[str, str, str]], txn.fetchone()) + return cast(Optional[tuple[str, str, str]], txn.fetchone()) row = await self.db_pool.runInteraction( "get_next_staged_event_for_room", _get_next_staged_event_for_room_txn @@ -2199,7 +2194,7 @@ class EventFederationWorkerStore( # by other events in the queue). We do this so that we can always # backpaginate in all the events we have dropped. 
rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="federation_inbound_events_staging", keyvalues={"room_id": room_id}, @@ -2210,8 +2205,8 @@ class EventFederationWorkerStore( # Find the set of events referenced by those in the queue, as well as # collecting all the event IDs in the queue. - referenced_events: Set[str] = set() - seen_events: Set[str] = set() + referenced_events: set[str] = set() + seen_events: set[str] = set() for event_id, event_json in rows: seen_events.add(event_id) event_d = db_to_json(event_json) @@ -2272,7 +2267,7 @@ class EventFederationWorkerStore( return True - async def get_all_rooms_with_staged_incoming_events(self) -> List[str]: + async def get_all_rooms_with_staged_incoming_events(self) -> list[str]: """Get the room IDs of all events currently staged.""" return await self.db_pool.simple_select_onecol( table="federation_inbound_events_staging", @@ -2287,15 +2282,15 @@ class EventFederationWorkerStore( def _get_stats_for_federation_staging_txn( txn: LoggingTransaction, - ) -> Tuple[int, int]: + ) -> tuple[int, int]: txn.execute("SELECT count(*) FROM federation_inbound_events_staging") - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) txn.execute( "SELECT min(received_ts) FROM federation_inbound_events_staging" ) - (received_ts,) = cast(Tuple[Optional[int]], txn.fetchone()) + (received_ts,) = cast(tuple[Optional[int]], txn.fetchone()) # If there is nothing in the staging area default it to 0. age = 0 @@ -2409,8 +2404,8 @@ class EventFederationStore(EventFederationWorkerStore): def _materialize( origin_chain_id: int, origin_sequence_number: int, - links: Dict[int, List[Tuple[int, int, int]]], - materialized: Dict[int, int], + links: dict[int, list[tuple[int, int, int]]], + materialized: dict[int, int], backwards: bool = True, ) -> None: """Helper function for fetching auth chain links. For a given origin chain @@ -2468,10 +2463,10 @@ def _materialize( def _generate_forward_links( - links: Dict[int, List[Tuple[int, int, int]]], -) -> Dict[int, List[Tuple[int, int, int]]]: + links: dict[int, list[tuple[int, int, int]]], +) -> dict[int, list[tuple[int, int, int]]]: """Reverse the input links from the given backwards links""" - new_links: Dict[int, List[Tuple[int, int, int]]] = {} + new_links: dict[int, list[tuple[int, int, int]]] = {} for origin_chain_id, chain_links in links.items(): for origin_seq_num, target_chain_id, target_seq_num in chain_links: new_links.setdefault(target_chain_id, []).append( @@ -2481,9 +2476,9 @@ def _generate_forward_links( def accumulate_forwards_reachable_events( - conflicted_forwards_reachable: Dict[int, int], - back_links: Dict[int, List[Tuple[int, int, int]]], - conflicted_chain_positions: Dict[str, Tuple[int, int]], + conflicted_forwards_reachable: dict[int, int], + back_links: dict[int, list[tuple[int, int, int]]], + conflicted_chain_positions: dict[str, tuple[int, int]], ) -> None: """Accumulate new forwards reachable events using the back_links provided. 
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index ec26aedc6b..d65ab82fff 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -84,11 +84,8 @@ from collections import defaultdict from typing import ( TYPE_CHECKING, Collection, - Dict, - List, Mapping, Optional, - Tuple, Union, cast, ) @@ -118,11 +115,11 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -DEFAULT_NOTIF_ACTION: List[Union[dict, str]] = [ +DEFAULT_NOTIF_ACTION: list[Union[dict, str]] = [ "notify", {"set_tweak": "highlight", "value": False}, ] -DEFAULT_HIGHLIGHT_ACTION: List[Union[dict, str]] = [ +DEFAULT_HIGHLIGHT_ACTION: list[Union[dict, str]] = [ "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}, @@ -138,7 +135,7 @@ class _RoomReceipt: unthreaded_stream_ordering: int = 0 # threaded_stream_ordering includes the main pseudo-thread. - threaded_stream_ordering: Dict[str, int] = attr.Factory(dict) + threaded_stream_ordering: dict[str, int] = attr.Factory(dict) def is_unread(self, thread_id: str, stream_ordering: int) -> bool: """Returns True if the stream ordering is unread according to the receipt information.""" @@ -165,7 +162,7 @@ class HttpPushAction: event_id: str room_id: str stream_ordering: int - actions: List[Union[dict, str]] + actions: list[Union[dict, str]] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -244,7 +241,7 @@ def _serialize_action( return json_encoder.encode(actions) -def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, str]]: +def _deserialize_action(actions: str, is_highlight: bool) -> list[Union[dict, str]]: """Custom deserializer for actions. This allows us to "compress" common actions""" if actions: return db_to_json(actions) @@ -256,7 +253,7 @@ def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, st class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore): - _background_tasks: List[LoopingCall] = [] + _background_tasks: list[LoopingCall] = [] def __init__( self, @@ -351,7 +348,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ) return 0 - async def get_unread_counts_by_room_for_user(self, user_id: str) -> Dict[str, int]: + async def get_unread_counts_by_room_for_user(self, user_id: str) -> dict[str, int]: """Get the notification count by room for a user. Only considers notifications, not highlight or unread counts, and threads are currently aggregated under their room. 
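`_RoomReceipt` above keeps its mutable default via `attr.Factory(dict)` while only the annotation changes to a builtin generic. A small self-contained sketch of that attrs pattern, assuming Python 3.9+ and the `attrs` package (already a dependency of the code shown); the class name is illustrative, not from the patch:

    import attr

    @attr.s(slots=True, auto_attribs=True)
    class RoomReceiptSketch:
        # Builtin generics are accepted as attrs annotations; mutable defaults
        # still need attr.Factory so each instance gets its own dict.
        unthreaded_stream_ordering: int = 0
        threaded_stream_ordering: dict[str, int] = attr.Factory(dict)

    receipt = RoomReceiptSketch()
    receipt.threaded_stream_ordering["$thread_root:example.org"] = 42
    assert RoomReceiptSketch().threaded_stream_ordering == {}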
@@ -373,7 +370,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas def _get_unread_counts_by_room_for_user_txn( self, txn: LoggingTransaction, user_id: str - ) -> Dict[str, int]: + ) -> dict[str, int]: receipt_types_clause, args = make_in_list_sql_clause( self.database_engine, "receipt_type", @@ -440,7 +437,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas txn.execute(sql, args) seen_thread_ids = set() - room_to_count: Dict[str, int] = defaultdict(int) + room_to_count: dict[str, int] = defaultdict(int) for room_id, thread_id, notif_count in txn: room_to_count[room_id] += notif_count @@ -585,7 +582,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas """ main_counts = NotifCounts() - thread_counts: Dict[str, NotifCounts] = {} + thread_counts: dict[str, NotifCounts] = {} def _get_thread(thread_id: str) -> NotifCounts: if thread_id == MAIN_TIMELINE: @@ -778,7 +775,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas stream_ordering: int, max_stream_ordering: Optional[int] = None, thread_id: Optional[str] = None, - ) -> List[Tuple[int, int, str]]: + ) -> list[tuple[int, int, str]]: """Returns the notify and unread counts from `event_push_actions` for the given user/room in the given range. @@ -840,12 +837,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas """ txn.execute(sql, args) - return cast(List[Tuple[int, int, str]], txn.fetchall()) + return cast(list[tuple[int, int, str]], txn.fetchall()) async def get_push_action_users_in_range( self, min_stream_ordering: int, max_stream_ordering: int - ) -> List[str]: - def f(txn: LoggingTransaction) -> List[str]: + ) -> list[str]: + def f(txn: LoggingTransaction) -> list[str]: sql = ( "SELECT DISTINCT(user_id) FROM event_push_actions WHERE" " stream_ordering >= ? AND stream_ordering <= ? AND notif = 1" @@ -861,7 +858,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas user_id: str, room_ids: StrCollection, thread_ids: StrCollection, - ) -> Dict[str, _RoomReceipt]: + ) -> dict[str, _RoomReceipt]: """ Get (private) read receipts for a user in each of the given room IDs and thread IDs. @@ -936,7 +933,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas txn.execute(sql, args) - result: Dict[str, _RoomReceipt] = {} + result: dict[str, _RoomReceipt] = {} for room_id, thread_id, stream_ordering in txn: room_receipt = result.setdefault(room_id, _RoomReceipt()) if thread_id is None: @@ -952,7 +949,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas min_stream_ordering: int, max_stream_ordering: int, limit: int = 20, - ) -> List[HttpPushAction]: + ) -> list[HttpPushAction]: """Get a list of the most recent unread push actions for a given user, within the given stream ordering range. Called by the httppusher. @@ -971,7 +968,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas def get_push_actions_txn( txn: LoggingTransaction, - ) -> List[Tuple[str, str, str, int, str, bool]]: + ) -> list[tuple[str, str, str, int, str, bool]]: sql = """ SELECT ep.event_id, ep.room_id, ep.thread_id, ep.stream_ordering, ep.actions, ep.highlight @@ -984,7 +981,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ORDER BY ep.stream_ordering ASC LIMIT ? 
""" txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit)) - return cast(List[Tuple[str, str, str, int, str, bool]], txn.fetchall()) + return cast(list[tuple[str, str, str, int, str, bool]], txn.fetchall()) push_actions = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_http", get_push_actions_txn @@ -1040,7 +1037,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas min_stream_ordering: int, max_stream_ordering: int, limit: int = 20, - ) -> List[EmailPushAction]: + ) -> list[EmailPushAction]: """Get a list of the most recent unread push actions for a given user, within the given stream ordering range. Called by the emailpusher @@ -1059,7 +1056,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas def get_push_actions_txn( txn: LoggingTransaction, - ) -> List[Tuple[str, str, str, int, str, bool, int]]: + ) -> list[tuple[str, str, str, int, str, bool, int]]: sql = """ SELECT ep.event_id, ep.room_id, ep.thread_id, ep.stream_ordering, ep.actions, ep.highlight, e.received_ts @@ -1073,7 +1070,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ORDER BY ep.stream_ordering DESC LIMIT ? """ txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit)) - return cast(List[Tuple[str, str, str, int, str, bool, int]], txn.fetchall()) + return cast(list[tuple[str, str, str, int, str, bool, int]], txn.fetchall()) push_actions = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_email", get_push_actions_txn @@ -1159,7 +1156,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas async def add_push_actions_to_staging( self, event_id: str, - user_id_actions: Dict[str, Collection[Union[Mapping, str]]], + user_id_actions: dict[str, Collection[Union[Mapping, str]]], count_as_unread: bool, thread_id: str, ) -> None: @@ -1179,7 +1176,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # can be used to insert into the `event_push_actions_staging` table. 
def _gen_entry( user_id: str, actions: Collection[Union[Mapping, str]] - ) -> Tuple[str, str, str, int, int, int, str, int]: + ) -> tuple[str, str, str, int, int, int, str, int]: is_highlight = 1 if _action_has_highlight(actions) else 0 notif = 1 if "notify" in actions else 0 return ( @@ -1296,7 +1293,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas The stream ordering """ txn.execute("SELECT MAX(stream_ordering) FROM events") - max_stream_ordering = cast(Tuple[Optional[int]], txn.fetchone())[0] + max_stream_ordering = cast(tuple[Optional[int]], txn.fetchone())[0] if max_stream_ordering is None: return 0 @@ -1355,7 +1352,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas async def get_time_of_last_push_action_before( self, stream_ordering: int ) -> Optional[int]: - def f(txn: LoggingTransaction) -> Optional[Tuple[int]]: + def f(txn: LoggingTransaction) -> Optional[tuple[int]]: sql = """ SELECT e.received_ts FROM event_push_actions AS ep @@ -1365,7 +1362,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas LIMIT 1 """ txn.execute(sql, (stream_ordering,)) - return cast(Optional[Tuple[int]], txn.fetchone()) + return cast(Optional[tuple[int]], txn.fetchone()) result = await self.db_pool.runInteraction( "get_time_of_last_push_action_before", f @@ -1457,7 +1454,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas limit, ), ) - rows = cast(List[Tuple[int, str, str, Optional[str], int]], txn.fetchall()) + rows = cast(list[tuple[int, str, str, Optional[str], int]], txn.fetchall()) # For each new read receipt we delete push actions from before it and # recalculate the summary. @@ -1469,7 +1466,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas continue thread_clause = "" - thread_args: Tuple = () + thread_args: tuple = () if thread_id is not None: thread_clause = "AND thread_id = ?" thread_args = (thread_id,) @@ -1654,7 +1651,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # object because we might not have the same amount of rows in each of them. To do # this, we use a dict indexed on the user ID and room ID to make it easier to # populate. - summaries: Dict[Tuple[str, str, str], _EventPushSummary] = {} + summaries: dict[tuple[str, str, str], _EventPushSummary] = {} for row in txn: summaries[(row[0], row[1], row[2])] = _EventPushSummary( unread_count=row[3], @@ -1832,10 +1829,10 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas before: Optional[int] = None, limit: int = 50, only_highlight: bool = False, - ) -> List[UserPushAction]: + ) -> list[UserPushAction]: def f( txn: LoggingTransaction, - ) -> List[Tuple[str, str, int, int, str, bool, str, int]]: + ) -> list[tuple[str, str, int, int, str, bool, str, int]]: before_clause = "" if before: before_clause = "AND epa.stream_ordering < ?" 
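Several hunks above keep the same `cast(...)` idiom around cursor results, e.g. `cast(tuple[Optional[int]], txn.fetchone())`: DB-API rows come back loosely typed, so the result is cast to a precise builtin-generic tuple for the type checker. A standalone sketch of the idiom using sqlite3 rather than Synapse's `LoggingTransaction`, purely for illustration:

    import sqlite3
    from typing import Optional, cast

    def max_stream_ordering(conn: sqlite3.Connection) -> int:
        cur = conn.execute("SELECT MAX(stream_ordering) FROM events")
        # An aggregate always yields one row, but the value is NULL when the
        # table is empty, hence tuple[Optional[int]].
        row = cast(tuple[Optional[int]], cur.fetchone())
        return row[0] if row[0] is not None else 0

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE events (stream_ordering INTEGER)")
    assert max_stream_ordering(conn) == 0
    conn.execute("INSERT INTO events VALUES (7)")
    assert max_stream_ordering(conn) == 7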
@@ -1863,7 +1860,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas """ % (before_clause,) txn.execute(sql, args) return cast( - List[Tuple[str, str, int, int, str, bool, str, int]], txn.fetchall() + list[tuple[str, str, int, int, str, bool, str, int]], txn.fetchall() ) push_actions = await self.db_pool.runInteraction("get_push_actions_for_user", f) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index b6037468b3..da9ecfbdb9 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -27,14 +27,10 @@ from typing import ( TYPE_CHECKING, Any, Collection, - Dict, Generator, Iterable, - List, Optional, Sequence, - Set, - Tuple, TypedDict, cast, ) @@ -129,7 +125,7 @@ class DeltaState: should e.g. be removed from `current_state_events` table. """ - to_delete: List[Tuple[str, str]] + to_delete: list[tuple[str, str]] to_insert: StateMap[str] no_longer_in_room: bool = False @@ -207,9 +203,9 @@ class SlidingSyncTableChanges: SlidingSyncMembershipSnapshotSharedInsertValues ) # List of membership to insert into `sliding_sync_membership_snapshots` - to_insert_membership_snapshots: List[SlidingSyncMembershipInfo] + to_insert_membership_snapshots: list[SlidingSyncMembershipInfo] # List of user_id to delete from `sliding_sync_membership_snapshots` - to_delete_membership_snapshots: List[str] + to_delete_membership_snapshots: list[str] @attr.s(slots=True, auto_attribs=True) @@ -226,7 +222,7 @@ class NewEventChainLinks: chain_id: int sequence_number: int - links: List[Tuple[int, int]] = attr.Factory(list) + links: list[tuple[int, int]] = attr.Factory(list) class PersistEventsStore: @@ -274,11 +270,11 @@ class PersistEventsStore: async def _persist_events_and_state_updates( self, room_id: str, - events_and_contexts: List[EventPersistencePair], + events_and_contexts: list[EventPersistencePair], *, state_delta_for_room: Optional[DeltaState], - new_forward_extremities: Optional[Set[str]], - new_event_links: Dict[str, NewEventChainLinks], + new_forward_extremities: Optional[set[str]], + new_event_links: dict[str, NewEventChainLinks], use_negative_stream_ordering: bool = False, inhibit_local_membership_updates: bool = False, ) -> None: @@ -585,21 +581,21 @@ class PersistEventsStore: ] membership_snapshot_shared_insert_values: SlidingSyncMembershipSnapshotSharedInsertValues = {} - membership_infos_to_insert_membership_snapshots: List[ + membership_infos_to_insert_membership_snapshots: list[ SlidingSyncMembershipInfo ] = [] if to_insert: - membership_event_id_to_user_id_map: Dict[str, str] = {} + membership_event_id_to_user_id_map: dict[str, str] = {} for state_key, event_id in to_insert.items(): if state_key[0] == EventTypes.Member and self.is_mine_id(state_key[1]): membership_event_id_to_user_id_map[event_id] = state_key[1] - membership_event_map: Dict[str, EventBase] = {} + membership_event_map: dict[str, EventBase] = {} # In normal event persist scenarios, we should be able to find the # membership events in the `events_and_contexts` given to us but it's # possible a state reset happened which added us to the room without a # corresponding new membership event (reset back to a previous membership). 
- missing_membership_event_ids: Set[str] = set() + missing_membership_event_ids: set[str] = set() for membership_event_id in membership_event_id_to_user_id_map.keys(): membership_event = event_map.get(membership_event_id) if membership_event: @@ -668,7 +664,7 @@ class PersistEventsStore: # these state events in `events_and_contexts` since we don't generally # batch up local membership changes with other events, but it can # happen. - missing_state_event_ids: Set[str] = set() + missing_state_event_ids: set[str] = set() for state_key, event_id in current_state_ids_map.items(): event = event_map.get(event_id) if event: @@ -780,7 +776,7 @@ class PersistEventsStore: # events in the `events_and_contexts` given to us but it's possible a state # reset happened which that reset back to a previous state. current_state_map = {} - missing_event_ids: Set[str] = set() + missing_event_ids: set[str] = set() for state_key, event_id in current_state_ids_map.items(): event = event_map.get(event_id) if event: @@ -826,7 +822,7 @@ class PersistEventsStore: async def calculate_chain_cover_index_for_events( self, room_id: str, events: Collection[EventBase] - ) -> Dict[str, NewEventChainLinks]: + ) -> dict[str, NewEventChainLinks]: # Filter to state events, and ensure there are no duplicates. state_events = [] seen_events = set() @@ -849,7 +845,7 @@ class PersistEventsStore: def calculate_chain_cover_index_for_events_txn( self, txn: LoggingTransaction, room_id: str, state_events: Collection[EventBase] - ) -> Dict[str, NewEventChainLinks]: + ) -> dict[str, NewEventChainLinks]: # We now calculate chain ID/sequence numbers for any state events we're # persisting. We ignore out of band memberships as we're not in the room # and won't have their auth chain (we'll fix it up later if we join the @@ -905,7 +901,7 @@ class PersistEventsStore: event_to_auth_chain, ) - async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]: + async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> list[str]: """Filter the supplied list of event_ids to get those which are prev_events of existing (non-outlier/rejected) events. @@ -915,7 +911,7 @@ class PersistEventsStore: Returns: Filtered event ids """ - results: List[str] = [] + results: list[str] = [] def _get_events_which_are_prevs_txn( txn: LoggingTransaction, batch: Collection[str] @@ -946,7 +942,7 @@ class PersistEventsStore: return results - async def _get_prevs_before_rejected(self, event_ids: Iterable[str]) -> Set[str]: + async def _get_prevs_before_rejected(self, event_ids: Iterable[str]) -> set[str]: """Get soft-failed ancestors to remove from the extremities. Given a set of events, find all those that have been soft-failed or @@ -967,7 +963,7 @@ class PersistEventsStore: # The set of event_ids to return. This includes all soft-failed events # and their prev events. 
- existing_prevs: Set[str] = set() + existing_prevs: set[str] = set() def _get_prevs_before_rejected_txn( txn: LoggingTransaction, batch: Collection[str] @@ -1016,11 +1012,11 @@ class PersistEventsStore: txn: LoggingTransaction, *, room_id: str, - events_and_contexts: List[EventPersistencePair], + events_and_contexts: list[EventPersistencePair], inhibit_local_membership_updates: bool, state_delta_for_room: Optional[DeltaState], - new_forward_extremities: Optional[Set[str]], - new_event_links: Dict[str, NewEventChainLinks], + new_forward_extremities: Optional[set[str]], + new_event_links: dict[str, NewEventChainLinks], sliding_sync_table_changes: Optional[SlidingSyncTableChanges], ) -> None: """Insert some number of room events into the necessary database tables. @@ -1178,8 +1174,8 @@ class PersistEventsStore: def _persist_event_auth_chain_txn( self, txn: LoggingTransaction, - events: List[EventBase], - new_event_links: Dict[str, NewEventChainLinks], + events: list[EventBase], + new_event_links: dict[str, NewEventChainLinks], ) -> None: if new_event_links: self._persist_chain_cover_index(txn, self.db_pool, new_event_links) @@ -1212,9 +1208,9 @@ class PersistEventsStore: txn: LoggingTransaction, db_pool: DatabasePool, event_chain_id_gen: SequenceGenerator, - event_to_room_id: Dict[str, str], - event_to_types: Dict[str, Tuple[str, str]], - event_to_auth_chain: Dict[str, StrCollection], + event_to_room_id: dict[str, str], + event_to_types: dict[str, tuple[str, str]], + event_to_auth_chain: dict[str, StrCollection], ) -> None: """Calculate and persist the chain cover index for the given events. @@ -1241,10 +1237,10 @@ class PersistEventsStore: txn: LoggingTransaction, db_pool: DatabasePool, event_chain_id_gen: SequenceGenerator, - event_to_room_id: Dict[str, str], - event_to_types: Dict[str, Tuple[str, str]], - event_to_auth_chain: Dict[str, StrCollection], - ) -> Dict[str, NewEventChainLinks]: + event_to_room_id: dict[str, str], + event_to_types: dict[str, tuple[str, str]], + event_to_auth_chain: dict[str, StrCollection], + ) -> dict[str, NewEventChainLinks]: """Calculate the chain cover index for the given events. Args: @@ -1259,7 +1255,7 @@ class PersistEventsStore: """ # Map from event ID to chain ID/sequence number. - chain_map: Dict[str, Tuple[int, int]] = {} + chain_map: dict[str, tuple[int, int]] = {} # Set of event IDs to calculate chain ID/seq numbers for. events_to_calc_chain_id_for = set(event_to_room_id) @@ -1268,7 +1264,7 @@ class PersistEventsStore: # we're looking at. These should just be out of band memberships, where # we didn't have the auth chain when we first persisted. 
auth_chain_to_calc_rows = cast( - List[Tuple[str, str, str]], + list[tuple[str, str, str]], db_pool.simple_select_many_txn( txn, table="event_auth_chain_to_calculate", @@ -1490,7 +1486,7 @@ class PersistEventsStore: cls, txn: LoggingTransaction, db_pool: DatabasePool, - new_event_links: Dict[str, NewEventChainLinks], + new_event_links: dict[str, NewEventChainLinks], ) -> None: db_pool.simple_insert_many_txn( txn, @@ -1536,12 +1532,12 @@ class PersistEventsStore: txn: LoggingTransaction, db_pool: DatabasePool, event_chain_id_gen: SequenceGenerator, - event_to_room_id: Dict[str, str], - event_to_types: Dict[str, Tuple[str, str]], - event_to_auth_chain: Dict[str, StrCollection], - events_to_calc_chain_id_for: Set[str], - chain_map: Dict[str, Tuple[int, int]], - ) -> Dict[str, Tuple[int, int]]: + event_to_room_id: dict[str, str], + event_to_types: dict[str, tuple[str, str]], + event_to_auth_chain: dict[str, StrCollection], + events_to_calc_chain_id_for: set[str], + chain_map: dict[str, tuple[int, int]], + ) -> dict[str, tuple[int, int]]: """Allocates, but does not persist, chain ID/sequence numbers for the events in `events_to_calc_chain_id_for`. (c.f. _add_chain_cover_index for info on args) @@ -1573,8 +1569,8 @@ class PersistEventsStore: # new chain if the sequence number has already been allocated. # - existing_chains: Set[int] = set() - tree: List[Tuple[str, Optional[str]]] = [] + existing_chains: set[int] = set() + tree: list[tuple[str, Optional[str]]] = [] # We need to do this in a topologically sorted order as we want to # generate chain IDs/sequence numbers of an event's auth events before @@ -1604,7 +1600,7 @@ class PersistEventsStore: ) txn.execute(sql % (clause,), args) - chain_to_max_seq_no: Dict[Any, int] = {row[0]: row[1] for row in txn} + chain_to_max_seq_no: dict[Any, int] = {row[0]: row[1] for row in txn} # Allocate the new events chain ID/sequence numbers. # @@ -1614,8 +1610,8 @@ class PersistEventsStore: # number of new chain IDs in one call, replacing all temporary # objects with real allocated chain IDs. - unallocated_chain_ids: Set[object] = set() - new_chain_tuples: Dict[str, Tuple[Any, int]] = {} + unallocated_chain_ids: set[object] = set() + new_chain_tuples: dict[str, tuple[Any, int]] = {} for event_id, auth_event_id in tree: # If we reference an auth_event_id we fetch the allocated chain ID, # either from the existing `chain_map` or the newly generated @@ -1626,7 +1622,7 @@ class PersistEventsStore: if not existing_chain_id: existing_chain_id = chain_map[auth_event_id] - new_chain_tuple: Optional[Tuple[Any, int]] = None + new_chain_tuple: Optional[tuple[Any, int]] = None if existing_chain_id: # We found a chain ID/sequence number candidate, check its # not already taken. 
@@ -1653,7 +1649,7 @@ class PersistEventsStore: ) # Map from potentially temporary chain ID to real chain ID - chain_id_to_allocated_map: Dict[Any, int] = dict( + chain_id_to_allocated_map: dict[Any, int] = dict( zip(unallocated_chain_ids, newly_allocated_chain_ids) ) chain_id_to_allocated_map.update((c, c) for c in existing_chains) @@ -1666,12 +1662,12 @@ class PersistEventsStore: def _persist_transaction_ids_txn( self, txn: LoggingTransaction, - events_and_contexts: List[EventPersistencePair], + events_and_contexts: list[EventPersistencePair], ) -> None: """Persist the mapping from transaction IDs to event IDs (if defined).""" inserted_ts = self._clock.time_msec() - to_insert_device_id: List[Tuple[str, str, str, str, str, int]] = [] + to_insert_device_id: list[tuple[str, str, str, str, str, int]] = [] for event, _ in events_and_contexts: txn_id = getattr(event.internal_metadata, "txn_id", None) device_id = getattr(event.internal_metadata, "device_id", None) @@ -1899,7 +1895,7 @@ class PersistEventsStore: sliding_sync_table_changes.joined_room_updates.values() ) - args: List[Any] = [ + args: list[Any] = [ room_id, room_id, sliding_sync_table_changes.joined_room_bump_stamp_to_fully_insert, @@ -2316,7 +2312,7 @@ class PersistEventsStore: self, txn: LoggingTransaction, room_id: str, - events_and_contexts: List[EventPersistencePair], + events_and_contexts: list[EventPersistencePair], ) -> None: """ Update the latest `event_stream_ordering`/`bump_stamp` columns in the @@ -2427,7 +2423,7 @@ class PersistEventsStore: self, txn: LoggingTransaction, room_id: str, - new_forward_extremities: Set[str], + new_forward_extremities: set[str], max_stream_order: int, ) -> None: self.db_pool.simple_delete_txn( @@ -2456,8 +2452,8 @@ class PersistEventsStore: @classmethod def _filter_events_and_contexts_for_duplicates( - cls, events_and_contexts: List[EventPersistencePair] - ) -> List[EventPersistencePair]: + cls, events_and_contexts: list[EventPersistencePair] + ) -> list[EventPersistencePair]: """Ensure that we don't have the same event twice. Pick the earliest non-outlier if there is one, else the earliest one. @@ -2486,7 +2482,7 @@ class PersistEventsStore: self, txn: LoggingTransaction, room_id: str, - events_and_contexts: List[EventPersistencePair], + events_and_contexts: list[EventPersistencePair], ) -> None: """Update min_depth for each room @@ -2528,8 +2524,8 @@ class PersistEventsStore: def _update_outliers_txn( self, txn: LoggingTransaction, - events_and_contexts: List[EventPersistencePair], - ) -> List[EventPersistencePair]: + events_and_contexts: list[EventPersistencePair], + ) -> list[EventPersistencePair]: """Update any outliers with new event info. This turns outliers into ex-outliers (unless the new event was rejected), and @@ -2547,7 +2543,7 @@ class PersistEventsStore: a room that has been un-partial stated. 
""" rows = cast( - List[Tuple[str, bool]], + list[tuple[str, bool]], self.db_pool.simple_select_many_txn( txn, "events", @@ -2740,8 +2736,8 @@ class PersistEventsStore: def _store_rejected_events_txn( self, txn: LoggingTransaction, - events_and_contexts: List[EventPersistencePair], - ) -> List[EventPersistencePair]: + events_and_contexts: list[EventPersistencePair], + ) -> list[EventPersistencePair]: """Add rows to the 'rejections' table for received events which were rejected @@ -2768,8 +2764,8 @@ class PersistEventsStore: self, txn: LoggingTransaction, *, - events_and_contexts: List[EventPersistencePair], - all_events_and_contexts: List[EventPersistencePair], + events_and_contexts: list[EventPersistencePair], + all_events_and_contexts: list[EventPersistencePair], inhibit_local_membership_updates: bool = False, ) -> None: """Update all the miscellaneous tables for new events @@ -2863,9 +2859,9 @@ class PersistEventsStore: def _add_to_cache( self, txn: LoggingTransaction, - events_and_contexts: List[EventPersistencePair], + events_and_contexts: list[EventPersistencePair], ) -> None: - to_prefill: List[EventCacheEntry] = [] + to_prefill: list[EventCacheEntry] = [] ev_map = {e.event_id: e for e, _ in events_and_contexts} if not ev_map: @@ -2925,7 +2921,7 @@ class PersistEventsStore: self, txn: LoggingTransaction, event_id: str, - labels: List[str], + labels: list[str], room_id: str, topological_ordering: int, ) -> None: @@ -2967,7 +2963,7 @@ class PersistEventsStore: def _store_room_members_txn( self, txn: LoggingTransaction, - events: List[EventBase], + events: list[EventBase], *, inhibit_local_membership_updates: bool = False, ) -> None: @@ -3336,8 +3332,8 @@ class PersistEventsStore: def _set_push_actions_for_event_and_users_txn( self, txn: LoggingTransaction, - events_and_contexts: List[EventPersistencePair], - all_events_and_contexts: List[EventPersistencePair], + events_and_contexts: list[EventPersistencePair], + all_events_and_contexts: list[EventPersistencePair], ) -> None: """Handles moving push actions from staging table to main event_push_actions table for all events in `events_and_contexts`. @@ -3517,7 +3513,7 @@ class PersistEventsStore: ) def _handle_mult_prev_events( - self, txn: LoggingTransaction, events: List[EventBase] + self, txn: LoggingTransaction, events: list[EventBase] ) -> None: """ For the given event, update the event edges table and forward and @@ -3535,7 +3531,7 @@ class PersistEventsStore: self._update_backward_extremeties(txn, events) def _update_backward_extremeties( - self, txn: LoggingTransaction, events: List[EventBase] + self, txn: LoggingTransaction, events: list[EventBase] ) -> None: """Updates the event_backward_extremities tables based on the new/updated events being persisted. @@ -3637,16 +3633,16 @@ class _LinkMap: # Stores the set of links as nested maps: source chain ID -> target chain ID # -> source sequence number -> target sequence number. 
- maps: Dict[int, Dict[int, Dict[int, int]]] = attr.Factory(dict) + maps: dict[int, dict[int, dict[int, int]]] = attr.Factory(dict) # Stores the links that have been added (with new set to true), as tuples of # `(source chain ID, source sequence no, target chain ID, target sequence no.)` - additions: Set[Tuple[int, int, int, int]] = attr.Factory(set) + additions: set[tuple[int, int, int, int]] = attr.Factory(set) def add_link( self, - src_tuple: Tuple[int, int], - target_tuple: Tuple[int, int], + src_tuple: tuple[int, int], + target_tuple: tuple[int, int], new: bool = True, ) -> bool: """Add a new link between two chains, ensuring no redundant links are added. @@ -3701,7 +3697,7 @@ class _LinkMap: current_links[src_seq] = target_seq return True - def get_additions(self) -> Generator[Tuple[int, int, int, int], None, None]: + def get_additions(self) -> Generator[tuple[int, int, int, int], None, None]: """Gets any newly added links. Yields: @@ -3715,8 +3711,8 @@ class _LinkMap: def exists_path_from( self, - src_tuple: Tuple[int, int], - target_tuple: Tuple[int, int], + src_tuple: tuple[int, int], + target_tuple: tuple[int, int], ) -> bool: """Checks if there is a path between the source chain ID/sequence and target chain ID/sequence. @@ -3728,7 +3724,7 @@ class _LinkMap: return target_seq <= src_seq # We have to graph traverse the links to check for indirect paths. - visited_chains: Dict[int, int] = collections.Counter() + visited_chains: dict[int, int] = collections.Counter() search = [(src_chain, src_seq)] while search: chain, seq = search.pop() diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 37dd8e48d5..637b9104c0 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast +from typing import TYPE_CHECKING, Optional, cast import attr @@ -97,7 +97,7 @@ class _CalculateChainCover: # Map from room_id to last depth/stream processed for each room that we have # processed all events for (i.e. the rooms we can flip the # `has_auth_chain_index` for) - finished_room_map: Dict[str, Tuple[int, int]] + finished_room_map: dict[str, tuple[int, int]] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -451,7 +451,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] for chunk in chunks: ev_rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], self.db_pool.simple_select_many_txn( txn, table="event_json", @@ -527,8 +527,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # The set of extremity event IDs that we're checking this round original_set = set() - # A dict[str, Set[str]] of event ID to their prev events. - graph: Dict[str, Set[str]] = {} + # A dict[str, set[str]] of event ID to their prev events. + graph: dict[str, set[str]] = {} # The set of descendants of the original set that are not rejected # nor soft-failed. 
Ancestors of these events should be removed @@ -647,7 +647,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS if deleted: # We now need to invalidate the caches of these rooms rows = cast( - List[Tuple[str]], + list[tuple[str]], self.db_pool.simple_select_many_txn( txn, table="events", @@ -851,7 +851,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS def get_rejected_events( txn: Cursor, - ) -> List[Tuple[str, str, JsonDict, bool, bool]]: + ) -> list[tuple[str, str, JsonDict, bool, bool]]: # Fetch rejected event json, their room version and whether we have # inserted them into the state_events or auth_events tables. # @@ -883,7 +883,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) return cast( - List[Tuple[str, str, JsonDict, bool, bool]], + list[tuple[str, str, JsonDict, bool, bool]], [(row[0], row[1], db_to_json(row[2]), row[3], row[4]) for row in txn], ) @@ -1126,7 +1126,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # We also need to fetch the auth events for them. auth_events = cast( - List[Tuple[str, str]], + list[tuple[str, str]], self.db_pool.simple_select_many_txn( txn, table="event_auth", @@ -1137,7 +1137,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ), ) - event_to_auth_chain: Dict[str, List[str]] = {} + event_to_auth_chain: dict[str, list[str]] = {} for event_id, auth_id in auth_events: event_to_auth_chain.setdefault(event_id, []).append(auth_id) @@ -1151,7 +1151,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS self.event_chain_id_gen, event_to_room_id, event_to_types, - cast(Dict[str, StrCollection], event_to_auth_chain), + cast(dict[str, StrCollection], event_to_auth_chain), ) return _CalculateChainCover( @@ -1256,7 +1256,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS results = list(txn) # (event_id, parent_id, rel_type) for each relation - relations_to_insert: List[Tuple[str, str, str, str]] = [] + relations_to_insert: list[tuple[str, str, str, str]] = [] for event_id, event_json_raw in results: try: event_json = db_to_json(event_json_raw) @@ -1636,7 +1636,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # We don't need to fetch any progress state because we just grab the next N # events in `sliding_sync_joined_rooms_to_recalculate` - def _get_rooms_to_update_txn(txn: LoggingTransaction) -> List[Tuple[str]]: + def _get_rooms_to_update_txn(txn: LoggingTransaction) -> list[tuple[str]]: """ Returns: A list of room ID's to update along with the progress value @@ -1658,7 +1658,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS (batch_size,), ) - rooms_to_update_rows = cast(List[Tuple[str]], txn.fetchall()) + rooms_to_update_rows = cast(list[tuple[str]], txn.fetchall()) return rooms_to_update_rows @@ -1674,9 +1674,9 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS return 0 # Map from room_id to insert/update state values in the `sliding_sync_joined_rooms` table. 
- joined_room_updates: Dict[str, SlidingSyncStateInsertValues] = {} + joined_room_updates: dict[str, SlidingSyncStateInsertValues] = {} # Map from room_id to stream_ordering/bump_stamp, etc values - joined_room_stream_ordering_updates: Dict[ + joined_room_stream_ordering_updates: dict[ str, _JoinedRoomStreamOrderingUpdate ] = {} # As long as we get this value before we fetch the current state, we can use it @@ -1886,8 +1886,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS def _find_memberships_to_update_txn( txn: LoggingTransaction, - ) -> List[ - Tuple[ + ) -> list[ + tuple[ str, Optional[str], Optional[str], @@ -1979,8 +1979,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS raise Exception("last_event_stream_ordering should not be None") memberships_to_update_rows = cast( - List[ - Tuple[ + list[ + tuple[ str, Optional[str], Optional[str], @@ -2023,7 +2023,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS def _find_previous_invite_or_knock_membership_txn( txn: LoggingTransaction, room_id: str, user_id: str, event_id: str - ) -> Optional[Tuple[str, str]]: + ) -> Optional[tuple[str, str]]: # Find the previous invite/knock event before the leave event # # Here are some notes on how we landed on this query: @@ -2085,11 +2085,11 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS return event_id, membership # Map from (room_id, user_id) to ... - to_insert_membership_snapshots: Dict[ - Tuple[str, str], SlidingSyncMembershipSnapshotSharedInsertValues + to_insert_membership_snapshots: dict[ + tuple[str, str], SlidingSyncMembershipSnapshotSharedInsertValues ] = {} - to_insert_membership_infos: Dict[ - Tuple[str, str], SlidingSyncMembershipInfoWithEventPos + to_insert_membership_infos: dict[ + tuple[str, str], SlidingSyncMembershipInfoWithEventPos ] = {} for ( room_id, @@ -2510,7 +2510,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) memberships_to_update_rows = cast( - List[Tuple[str, str, str, int, int]], + list[tuple[str, str, str, int, int]], txn.fetchall(), ) if not memberships_to_update_rows: @@ -2519,9 +2519,9 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # Assemble the values to update # # (room_id, user_id) - key_values: List[Tuple[str, str]] = [] + key_values: list[tuple[str, str]] = [] # (forgotten,) - value_values: List[Tuple[int]] = [] + value_values: list[tuple[int]] = [] for ( room_id, user_id, @@ -2585,7 +2585,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS room_id_bound = progress.get("room_id", "") - def redo_max_depth_bg_update_txn(txn: LoggingTransaction) -> Tuple[bool, int]: + def redo_max_depth_bg_update_txn(txn: LoggingTransaction) -> tuple[bool, int]: txn.execute( """ SELECT room_id, room_version FROM rooms @@ -2597,7 +2597,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) # Find the next room ID to process, with a relevant room version. 
- room_ids: List[str] = [] + room_ids: list[str] = [] max_room_id: Optional[str] = None for room_id, room_version_str in txn: max_room_id = room_id @@ -2704,7 +2704,7 @@ def _resolve_stale_data_in_sliding_sync_joined_rooms_table( # If we have nothing written to the `sliding_sync_joined_rooms` table, there is # nothing to clean up - row = cast(Optional[Tuple[int]], txn.fetchone()) + row = cast(Optional[tuple[int]], txn.fetchone()) max_stream_ordering_sliding_sync_joined_rooms_table = None depends_on = None if row is not None: @@ -2830,7 +2830,7 @@ def _resolve_stale_data_in_sliding_sync_membership_snapshots_table( # If we have nothing written to the `sliding_sync_membership_snapshots` table, # there is nothing to clean up - row = cast(Optional[Tuple[int]], txn.fetchone()) + row = cast(Optional[tuple[int]], txn.fetchone()) max_stream_ordering_sliding_sync_membership_snapshots_table = None if row is not None: (max_stream_ordering_sliding_sync_membership_snapshots_table,) = row diff --git a/synapse/storage/databases/main/events_forward_extremities.py b/synapse/storage/databases/main/events_forward_extremities.py index bd763885d7..d43fb443fd 100644 --- a/synapse/storage/databases/main/events_forward_extremities.py +++ b/synapse/storage/databases/main/events_forward_extremities.py @@ -20,7 +20,7 @@ # import logging -from typing import List, Optional, Tuple, cast +from typing import Optional, cast from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction @@ -98,7 +98,7 @@ class EventForwardExtremitiesStore( async def get_forward_extremities_for_room( self, room_id: str - ) -> List[Tuple[str, int, int, Optional[int]]]: + ) -> list[tuple[str, int, int, Optional[int]]]: """ Get list of forward extremities for a room. @@ -108,7 +108,7 @@ class EventForwardExtremitiesStore( def get_forward_extremities_for_room_txn( txn: LoggingTransaction, - ) -> List[Tuple[str, int, int, Optional[int]]]: + ) -> list[tuple[str, int, int, Optional[int]]]: sql = """ SELECT event_id, state_group, depth, received_ts FROM event_forward_extremities @@ -118,7 +118,7 @@ class EventForwardExtremitiesStore( """ txn.execute(sql, (room_id,)) - return cast(List[Tuple[str, int, int, Optional[int]]], txn.fetchall()) + return cast(list[tuple[str, int, int, Optional[int]]], txn.fetchall()) return await self.db_pool.runInteraction( "get_forward_extremities_for_room", diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 4f9a1a4f78..005f75a2d8 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -27,15 +27,11 @@ from typing import ( TYPE_CHECKING, Any, Collection, - Dict, Iterable, - List, Literal, Mapping, MutableMapping, Optional, - Set, - Tuple, cast, overload, ) @@ -191,7 +187,7 @@ class _EventRow: format_version: Optional[int] room_version_id: Optional[str] rejected_reason: Optional[str] - redactions: List[str] + redactions: list[str] outlier: bool @@ -286,7 +282,7 @@ class EventsWorkerStore(SQLBaseStore): 5 * 60 * 1000, ) - self._get_event_cache: AsyncLruCache[Tuple[str], EventCacheEntry] = ( + self._get_event_cache: AsyncLruCache[tuple[str], EventCacheEntry] = ( AsyncLruCache( clock=hs.get_clock(), server_name=self.server_name, @@ -300,8 +296,8 @@ class EventsWorkerStore(SQLBaseStore): # Map from event ID to a deferred that will result in a map from event # ID to cache entry. 
Note that the returned dict may not have the # requested event in it if the event isn't in the DB. - self._current_event_fetches: Dict[ - str, ObservableDeferred[Dict[str, EventCacheEntry]] + self._current_event_fetches: dict[ + str, ObservableDeferred[dict[str, EventCacheEntry]] ] = {} # We keep track of the events we have currently loaded in memory so that @@ -311,8 +307,8 @@ class EventsWorkerStore(SQLBaseStore): self._event_ref: MutableMapping[str, EventBase] = weakref.WeakValueDictionary() self._event_fetch_lock = threading.Condition() - self._event_fetch_list: List[ - Tuple[Iterable[str], "defer.Deferred[Dict[str, _EventRow]]"] + self._event_fetch_list: list[ + tuple[Iterable[str], "defer.Deferred[dict[str, _EventRow]]"] ] = [] self._event_fetch_ongoing = 0 event_fetch_ongoing_gauge.labels(**{SERVER_NAME_LABEL: self.server_name}).set( @@ -323,7 +319,7 @@ class EventsWorkerStore(SQLBaseStore): # the DataStore and PersistEventStore. def get_chain_id_txn(txn: Cursor) -> int: txn.execute("SELECT COALESCE(max(chain_id), 0) FROM event_auth_chains") - return cast(Tuple[int], txn.fetchone())[0] + return cast(tuple[int], txn.fetchone())[0] self.event_chain_id_gen = build_sequence_generator( db_conn, @@ -387,7 +383,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_un_partial_stated_events_from_stream( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, Tuple[str, bool]]], int, bool]: + ) -> tuple[list[tuple[int, tuple[str, bool]]], int, bool]: """Get updates for the un-partial-stated events replication stream. Args: @@ -414,7 +410,7 @@ class EventsWorkerStore(SQLBaseStore): def get_un_partial_stated_events_from_stream_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[int, Tuple[str, bool]]], int, bool]: + ) -> tuple[list[tuple[int, tuple[str, bool]]], int, bool]: sql = """ SELECT stream_id, event_id, rejection_status_changed FROM un_partial_stated_event_stream @@ -585,7 +581,7 @@ class EventsWorkerStore(SQLBaseStore): redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact, get_prev_content: bool = False, allow_rejected: bool = False, - ) -> Dict[str, EventBase]: + ) -> dict[str, EventBase]: """Get events from the database Unknown events will be omitted from the response. @@ -633,7 +629,7 @@ class EventsWorkerStore(SQLBaseStore): redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact, get_prev_content: bool = False, allow_rejected: bool = False, - ) -> List[EventBase]: + ) -> list[EventBase]: """Get events from the database and return in a list in the same order as given by `event_ids` arg. @@ -792,7 +788,7 @@ class EventsWorkerStore(SQLBaseStore): self, event_ids: Collection[str], allow_rejected: bool = False, - ) -> Dict[str, EventCacheEntry]: + ) -> dict[str, EventCacheEntry]: """Fetch a bunch of events from the cache or the database. Note that the events pulled by this function will not have any redactions @@ -836,9 +832,9 @@ class EventsWorkerStore(SQLBaseStore): # avoid extraneous work (if we don't do this we can end up in a n^2 mode # when we wait on the same Deferred N times, then try and merge the # same dict into itself N times). 
- already_fetching_ids: Set[str] = set() - already_fetching_deferreds: Set[ - ObservableDeferred[Dict[str, EventCacheEntry]] + already_fetching_ids: set[str] = set() + already_fetching_deferreds: set[ + ObservableDeferred[dict[str, EventCacheEntry]] ] = set() for event_id in missing_events_ids: @@ -853,7 +849,7 @@ class EventsWorkerStore(SQLBaseStore): if missing_events_ids: - async def get_missing_events_from_cache_or_db() -> Dict[ + async def get_missing_events_from_cache_or_db() -> dict[ str, EventCacheEntry ]: """Fetches the events in `missing_event_ids` from the database. @@ -869,7 +865,7 @@ class EventsWorkerStore(SQLBaseStore): # to all the events we pulled from the DB (this will result in this # function returning more events than requested, but that can happen # already due to `_get_events_from_db`). - fetching_deferred: ObservableDeferred[Dict[str, EventCacheEntry]] = ( + fetching_deferred: ObservableDeferred[dict[str, EventCacheEntry]] = ( ObservableDeferred(defer.Deferred(), consumeErrors=True) ) for event_id in missing_events_ids: @@ -908,7 +904,7 @@ class EventsWorkerStore(SQLBaseStore): # We must allow the database fetch to complete in the presence of # cancellations, since multiple `_get_events_from_cache_or_db` calls can # reuse the same fetch. - missing_events: Dict[str, EventCacheEntry] = await delay_cancellation( + missing_events: dict[str, EventCacheEntry] = await delay_cancellation( get_missing_events_from_cache_or_db() ) event_entry_map.update(missing_events) @@ -999,7 +995,7 @@ class EventsWorkerStore(SQLBaseStore): async def _get_events_from_cache( self, events: Iterable[str], update_metrics: bool = True - ) -> Dict[str, EventCacheEntry]: + ) -> dict[str, EventCacheEntry]: """Fetch events from the caches, both in memory and any external. May return rejected events. @@ -1025,7 +1021,7 @@ class EventsWorkerStore(SQLBaseStore): @trace async def _get_events_from_external_cache( self, events: Collection[str], update_metrics: bool = True - ) -> Dict[str, EventCacheEntry]: + ) -> dict[str, EventCacheEntry]: """Fetch events from any configured external cache. May return rejected events. @@ -1051,7 +1047,7 @@ class EventsWorkerStore(SQLBaseStore): def _get_events_from_local_cache( self, events: Iterable[str], update_metrics: bool = True - ) -> Dict[str, EventCacheEntry]: + ) -> dict[str, EventCacheEntry]: """Fetch events from the local, in memory, caches. May return rejected events. @@ -1095,7 +1091,7 @@ class EventsWorkerStore(SQLBaseStore): context: EventContext, state_keys_to_include: StateFilter, membership_user_id: Optional[str] = None, - ) -> List[JsonDict]: + ) -> list[JsonDict]: """ Retrieve the stripped state from a room, given an event context to retrieve state from as well as the state types to include. Optionally, include the membership @@ -1257,7 +1253,7 @@ class EventsWorkerStore(SQLBaseStore): def _fetch_event_list( self, conn: LoggingDatabaseConnection, - event_list: List[Tuple[Iterable[str], "defer.Deferred[Dict[str, _EventRow]]"]], + event_list: list[tuple[Iterable[str], "defer.Deferred[dict[str, _EventRow]]"]], ) -> None: """Handle a load of requests from the _event_fetch_list queue @@ -1312,7 +1308,7 @@ class EventsWorkerStore(SQLBaseStore): @trace async def _get_events_from_db( self, event_ids: Collection[str] - ) -> Dict[str, EventCacheEntry]: + ) -> dict[str, EventCacheEntry]: """Fetch a bunch of events from the database. May return rejected events. 
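The `_event_ref` map shown above is a `weakref.WeakValueDictionary`, so an event only stays in it while something else still holds a strong reference. A minimal sketch of that behaviour with a stand-in event class (CPython's reference counting drops the entry immediately; other interpreters may collect later):

    import weakref

    class FakeEvent:
        def __init__(self, event_id: str) -> None:
            self.event_id = event_id

    event_ref: "weakref.WeakValueDictionary[str, FakeEvent]" = weakref.WeakValueDictionary()

    ev = FakeEvent("$abc:example.org")
    event_ref[ev.event_id] = ev
    assert "$abc:example.org" in event_ref

    del ev  # drop the last strong reference
    assert "$abc:example.org" not in event_ref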
@@ -1333,8 +1329,8 @@ class EventsWorkerStore(SQLBaseStore): str(len(event_ids)), ) - fetched_event_ids: Set[str] = set() - fetched_events: Dict[str, _EventRow] = {} + fetched_event_ids: set[str] = set() + fetched_events: dict[str, _EventRow] = {} @trace async def _fetch_event_ids_and_get_outstanding_redactions( @@ -1351,7 +1347,7 @@ class EventsWorkerStore(SQLBaseStore): row_map = await self._enqueue_events(event_ids_to_fetch) # we need to recursively fetch any redactions of those events - redaction_ids: Set[str] = set() + redaction_ids: set[str] = set() for event_id in event_ids_to_fetch: row = row_map.get(event_id) fetched_event_ids.add(event_id) @@ -1378,7 +1374,7 @@ class EventsWorkerStore(SQLBaseStore): ) # build a map from event_id to EventBase - event_map: Dict[str, EventBase] = {} + event_map: dict[str, EventBase] = {} for event_id, row in fetched_events.items(): assert row.event_id == event_id @@ -1491,7 +1487,7 @@ class EventsWorkerStore(SQLBaseStore): # finally, we can decide whether each one needs redacting, and build # the cache entries. - result_map: Dict[str, EventCacheEntry] = {} + result_map: dict[str, EventCacheEntry] = {} for event_id, original_ev in event_map.items(): redactions = fetched_events[event_id].redactions redacted_event = self._maybe_redact_event_row( @@ -1511,7 +1507,7 @@ class EventsWorkerStore(SQLBaseStore): return result_map - async def _enqueue_events(self, events: Collection[str]) -> Dict[str, _EventRow]: + async def _enqueue_events(self, events: Collection[str]) -> dict[str, _EventRow]: """Fetches events from the database using the _event_fetch_list. This allows batch and bulk fetching of events - it allows us to fetch events without having to create a new transaction for each request for events. @@ -1524,7 +1520,7 @@ class EventsWorkerStore(SQLBaseStore): that weren't requested. """ - events_d: "defer.Deferred[Dict[str, _EventRow]]" = defer.Deferred() + events_d: "defer.Deferred[dict[str, _EventRow]]" = defer.Deferred() with self._event_fetch_lock: self._event_fetch_list.append((events, events_d)) self._event_fetch_lock.notify() @@ -1540,7 +1536,7 @@ class EventsWorkerStore(SQLBaseStore): def _fetch_event_rows( self, txn: LoggingTransaction, event_ids: Iterable[str] - ) -> Dict[str, _EventRow]: + ) -> dict[str, _EventRow]: """Fetch event rows from the database Events which are not found are omitted from the result. @@ -1607,7 +1603,7 @@ class EventsWorkerStore(SQLBaseStore): # check for MSC4932 redactions to_check = [] - events: List[_EventRow] = [] + events: list[_EventRow] = [] for e in evs: event = event_dict.get(e) if not event: @@ -1656,7 +1652,7 @@ class EventsWorkerStore(SQLBaseStore): self, original_ev: EventBase, redactions: Iterable[str], - event_map: Dict[str, EventBase], + event_map: dict[str, EventBase], ) -> Optional[EventBase]: """Given an event object and a list of possible redacting event ids, determine whether to honour any of those redactions and if so return a redacted @@ -1727,12 +1723,12 @@ class EventsWorkerStore(SQLBaseStore): # no valid redaction found for this event return None - async def have_events_in_timeline(self, event_ids: Iterable[str]) -> Set[str]: + async def have_events_in_timeline(self, event_ids: Iterable[str]) -> set[str]: """Given a list of event ids, check if we have already processed and stored them as non outliers. 
""" rows = cast( - List[Tuple[str]], + list[tuple[str]], await self.db_pool.simple_select_many_batch( table="events", retcols=("event_id",), @@ -1749,7 +1745,7 @@ class EventsWorkerStore(SQLBaseStore): @tag_args async def have_seen_events( self, room_id: str, event_ids: Iterable[str] - ) -> Set[str]: + ) -> set[str]: """Given a list of event ids, check if we have already processed them. The room_id is only used to structure the cache (so that it can later be @@ -1768,7 +1764,7 @@ class EventsWorkerStore(SQLBaseStore): # we break it down. However, each batch requires its own index scan, so we make # the batches as big as possible. - results: Set[str] = set() + results: set[str] = set() for event_ids_chunk in batch_iter(event_ids, 500): events_seen_dict = await self._have_seen_events_dict( room_id, event_ids_chunk @@ -1798,7 +1794,7 @@ class EventsWorkerStore(SQLBaseStore): # not being invalidated when purging events from a room. The optimisation can # be re-added after https://github.com/matrix-org/synapse/issues/13476 - def have_seen_events_txn(txn: LoggingTransaction) -> Dict[str, bool]: + def have_seen_events_txn(txn: LoggingTransaction) -> dict[str, bool]: # we deliberately do *not* query the database for room_id, to make the # query an index-only lookup on `events_event_id_key`. # @@ -1850,7 +1846,7 @@ class EventsWorkerStore(SQLBaseStore): room_id, ) - async def get_room_complexity(self, room_id: str) -> Dict[str, float]: + async def get_room_complexity(self, room_id: str) -> dict[str, float]: """ Get a rough approximation of the complexity of the room. This is used by remote servers to decide whether they wish to join the room or not. @@ -1873,7 +1869,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_all_new_forward_event_rows( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: + ) -> list[tuple[int, str, str, str, str, str, str, str, bool, bool]]: """Returns new events, for the Events replication stream Args: @@ -1889,7 +1885,7 @@ class EventsWorkerStore(SQLBaseStore): def get_all_new_forward_event_rows( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: + ) -> list[tuple[int, str, str, str, str, str, str, str, bool, bool]]: sql = ( "SELECT e.stream_ordering, e.event_id, e.room_id, e.type," " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL," @@ -1907,7 +1903,7 @@ class EventsWorkerStore(SQLBaseStore): ) txn.execute(sql, (last_id, current_id, instance_name, limit)) return cast( - List[Tuple[int, str, str, str, str, str, str, str, bool, bool]], + list[tuple[int, str, str, str, str, str, str, str, bool, bool]], txn.fetchall(), ) @@ -1917,7 +1913,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_ex_outlier_stream_rows( self, instance_name: str, last_id: int, current_id: int - ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: + ) -> list[tuple[int, str, str, str, str, str, str, str, bool, bool]]: """Returns de-outliered events, for the Events replication stream Args: @@ -1932,7 +1928,7 @@ class EventsWorkerStore(SQLBaseStore): def get_ex_outlier_stream_rows_txn( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: + ) -> list[tuple[int, str, str, str, str, str, str, str, bool, bool]]: sql = ( "SELECT out.event_stream_ordering, e.event_id, e.room_id, e.type," " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL," @@ 
-1954,7 +1950,7 @@ class EventsWorkerStore(SQLBaseStore): txn.execute(sql, (last_id, current_id, instance_name)) return cast( - List[Tuple[int, str, str, str, str, str, str, str, bool, bool]], + list[tuple[int, str, str, str, str, str, str, str, bool, bool]], txn.fetchall(), ) @@ -1964,7 +1960,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_all_new_backfill_event_rows( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, Tuple[str, str, str, str, str, str]]], int, bool]: + ) -> tuple[list[tuple[int, tuple[str, str, str, str, str, str]]], int, bool]: """Get updates for backfill replication stream, including all new backfilled events and events that have gone from being outliers to not. @@ -1994,7 +1990,7 @@ class EventsWorkerStore(SQLBaseStore): def get_all_new_backfill_event_rows( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[int, Tuple[str, str, str, str, str, str]]], int, bool]: + ) -> tuple[list[tuple[int, tuple[str, str, str, str, str, str]]], int, bool]: sql = ( "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type," " se.state_key, redacts, relates_to_id" @@ -2008,10 +2004,10 @@ class EventsWorkerStore(SQLBaseStore): " LIMIT ?" ) txn.execute(sql, (-last_id, -current_id, instance_name, limit)) - new_event_updates: List[ - Tuple[int, Tuple[str, str, str, str, str, str]] + new_event_updates: list[ + tuple[int, tuple[str, str, str, str, str, str]] ] = [] - row: Tuple[int, str, str, str, str, str, str] + row: tuple[int, str, str, str, str, str, str] # Type safety: iterating over `txn` yields `Tuple`, i.e. # `Tuple[Any, ...]` of arbitrary length. Mypy detects assigning a # variadic tuple to a fixed length tuple and flags it up as an error. @@ -2057,7 +2053,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_all_updated_current_state_deltas( self, instance_name: str, from_token: int, to_token: int, target_row_count: int - ) -> Tuple[List[Tuple[int, str, str, str, str]], int, bool]: + ) -> tuple[list[tuple[int, str, str, str, str]], int, bool]: """Fetch updates from current_state_delta_stream Args: @@ -2079,7 +2075,7 @@ class EventsWorkerStore(SQLBaseStore): def get_all_updated_current_state_deltas_txn( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, str, str]]: + ) -> list[tuple[int, str, str, str, str]]: sql = """ SELECT stream_id, room_id, type, state_key, event_id FROM current_state_delta_stream @@ -2088,23 +2084,23 @@ class EventsWorkerStore(SQLBaseStore): ORDER BY stream_id ASC LIMIT ? """ txn.execute(sql, (from_token, to_token, instance_name, target_row_count)) - return cast(List[Tuple[int, str, str, str, str]], txn.fetchall()) + return cast(list[tuple[int, str, str, str, str]], txn.fetchall()) def get_deltas_for_stream_id_txn( txn: LoggingTransaction, stream_id: int - ) -> List[Tuple[int, str, str, str, str]]: + ) -> list[tuple[int, str, str, str, str]]: sql = """ SELECT stream_id, room_id, type, state_key, event_id FROM current_state_delta_stream WHERE stream_id = ? """ txn.execute(sql, [stream_id]) - return cast(List[Tuple[int, str, str, str, str]], txn.fetchall()) + return cast(list[tuple[int, str, str, str, str]], txn.fetchall()) # we need to make sure that, for every stream id in the results, we get *all* # the rows with that stream id. 
- rows: List[Tuple[int, str, str, str, str]] = await self.db_pool.runInteraction( + rows: list[tuple[int, str, str, str, str]] = await self.db_pool.runInteraction( "get_all_updated_current_state_deltas", get_all_updated_current_state_deltas_txn, ) @@ -2135,7 +2131,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_senders_for_event_ids( self, event_ids: Collection[str] - ) -> Dict[str, Optional[str]]: + ) -> dict[str, Optional[str]]: """ Given a sequence of event IDs, return the sender associated with each. @@ -2151,7 +2147,7 @@ class EventsWorkerStore(SQLBaseStore): def _get_senders_for_event_ids( txn: LoggingTransaction, - ) -> Dict[str, Optional[str]]: + ) -> dict[str, Optional[str]]: rows = self.db_pool.simple_select_many_txn( txn=txn, table="events", @@ -2167,7 +2163,7 @@ class EventsWorkerStore(SQLBaseStore): ) @cached(max_entries=5000) - async def get_event_ordering(self, event_id: str, room_id: str) -> Tuple[int, int]: + async def get_event_ordering(self, event_id: str, room_id: str) -> tuple[int, int]: res = await self.db_pool.simple_select_one( table="events", retcols=["topological_ordering", "stream_ordering"], @@ -2182,7 +2178,7 @@ class EventsWorkerStore(SQLBaseStore): return int(res[0]), int(res[1]) - async def get_next_event_to_expire(self) -> Optional[Tuple[str, int]]: + async def get_next_event_to_expire(self) -> Optional[tuple[str, int]]: """Retrieve the entry with the lowest expiry timestamp in the event_expiry table, or None if there's no more event to expire. @@ -2194,7 +2190,7 @@ class EventsWorkerStore(SQLBaseStore): def get_next_event_to_expire_txn( txn: LoggingTransaction, - ) -> Optional[Tuple[str, int]]: + ) -> Optional[tuple[str, int]]: txn.execute( """ SELECT event_id, expiry_ts FROM event_expiry @@ -2202,7 +2198,7 @@ class EventsWorkerStore(SQLBaseStore): """ ) - return cast(Optional[Tuple[str, int]], txn.fetchone()) + return cast(Optional[tuple[str, int]], txn.fetchone()) return await self.db_pool.runInteraction( desc="get_next_event_to_expire", func=get_next_event_to_expire_txn @@ -2229,7 +2225,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_already_persisted_events( self, events: Iterable[EventBase] - ) -> Dict[str, str]: + ) -> dict[str, str]: """Look up if we have already persisted an event for the transaction ID, returning a mapping from event ID in the given list to the event ID of an existing event. @@ -2239,7 +2235,7 @@ class EventsWorkerStore(SQLBaseStore): """ mapping = {} - txn_id_to_event: Dict[Tuple[str, str, str, str], str] = {} + txn_id_to_event: dict[tuple[str, str, str, str], str] = {} for event in events: device_id = getattr(event.internal_metadata, "device_id", None) @@ -2516,7 +2512,7 @@ class EventsWorkerStore(SQLBaseStore): any of the events which are unknown (or are outliers). 
""" result = cast( - List[Tuple[str]], + list[tuple[str]], await self.db_pool.simple_select_many_batch( table="partial_state_events", column="event_id", @@ -2541,7 +2537,7 @@ class EventsWorkerStore(SQLBaseStore): ) return result is not None - async def get_partial_state_events_batch(self, room_id: str) -> List[str]: + async def get_partial_state_events_batch(self, room_id: str) -> list[str]: """ Get a list of events in the given room that: - have partial state; and @@ -2560,7 +2556,7 @@ class EventsWorkerStore(SQLBaseStore): @staticmethod def _get_partial_state_events_batch_txn( txn: LoggingTransaction, room_id: str - ) -> List[str]: + ) -> list[str]: # we want to work through the events from oldest to newest, so # we only want events whose prev_events do *not* have partial state - hence # the 'NOT EXISTS' clause in the below. @@ -2644,8 +2640,8 @@ class EventsWorkerStore(SQLBaseStore): self.invalidate_get_event_cache_after_txn(txn, event_id) async def get_events_sent_by_user_in_room( - self, user_id: str, room_id: str, limit: int, filter: Optional[List[str]] = None - ) -> Optional[List[str]]: + self, user_id: str, room_id: str, limit: int, filter: Optional[list[str]] = None + ) -> Optional[list[str]]: """ Get a list of event ids of events sent by the user in the specified room @@ -2660,10 +2656,10 @@ class EventsWorkerStore(SQLBaseStore): txn: LoggingTransaction, user_id: str, room_id: str, - filter: Optional[List[str]], + filter: Optional[list[str]], batch_size: int, offset: int, - ) -> Tuple[Optional[List[str]], int]: + ) -> tuple[Optional[list[str]], int]: if filter: base_clause, args = make_in_list_sql_clause( txn.database_engine, "type", filter @@ -2696,7 +2692,7 @@ class EventsWorkerStore(SQLBaseStore): if batch_size > limit: batch_size = limit - selected_ids: List[str] = [] + selected_ids: list[str] = [] while offset < limit: res, offset = await self.db_pool.runInteraction( "get_events_by_user", diff --git a/synapse/storage/databases/main/experimental_features.py b/synapse/storage/databases/main/experimental_features.py index d980c57fa8..77b6c36884 100644 --- a/synapse/storage/databases/main/experimental_features.py +++ b/synapse/storage/databases/main/experimental_features.py @@ -19,7 +19,7 @@ # # -from typing import TYPE_CHECKING, Dict, FrozenSet, List, Tuple, cast +from typing import TYPE_CHECKING, cast from synapse.storage.database import ( DatabasePool, @@ -44,7 +44,7 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore): super().__init__(database, db_conn, hs) @cached() - async def list_enabled_features(self, user_id: str) -> FrozenSet[str]: + async def list_enabled_features(self, user_id: str) -> frozenset[str]: """ Checks to see what features are enabled for a given user Args: @@ -54,7 +54,7 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore): the features currently enabled for the user """ enabled = cast( - List[Tuple[str]], + list[tuple[str]], await self.db_pool.simple_select_list( table="per_user_experimental_features", keyvalues={"user_id": user_id, "enabled": True}, @@ -67,7 +67,7 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore): async def set_features_for_user( self, user: str, - features: Dict["ExperimentalFeature", bool], + features: dict["ExperimentalFeature", bool], ) -> None: """ Enables or disables features for a given user diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index af9634bad4..4b3bc69d20 100644 --- a/synapse/storage/databases/main/filtering.py +++ 
b/synapse/storage/databases/main/filtering.py @@ -20,7 +20,7 @@ # # -from typing import TYPE_CHECKING, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Optional, Union, cast from canonicaljson import encode_canonical_json @@ -187,7 +187,7 @@ class FilteringWorkerStore(SQLBaseStore): sql = "SELECT MAX(filter_id) FROM user_filters WHERE full_user_id = ?" txn.execute(sql, (user_id.to_string(),)) - max_id = cast(Tuple[Optional[int]], txn.fetchone())[0] + max_id = cast(tuple[Optional[int]], txn.fetchone())[0] if max_id is None: filter_id = 0 else: diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 2a99a97dd6..9833565095 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -22,7 +22,7 @@ import itertools import json import logging -from typing import Dict, Iterable, List, Mapping, Optional, Tuple, Union, cast +from typing import Iterable, Mapping, Optional, Union, cast from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes @@ -50,7 +50,7 @@ class KeyStore(CacheInvalidationWorkerStore): server_name: str, from_server: str, ts_added_ms: int, - verify_keys: Dict[str, FetchKeyResult], + verify_keys: dict[str, FetchKeyResult], response_json: JsonDict, ) -> None: """Stores the keys for the given server that we got from `from_server`. @@ -130,7 +130,7 @@ class KeyStore(CacheInvalidationWorkerStore): @cached() def _get_server_keys_json( - self, server_name_and_key_id: Tuple[str, str] + self, server_name_and_key_id: tuple[str, str] ) -> FetchKeyResult: raise NotImplementedError() @@ -138,8 +138,8 @@ class KeyStore(CacheInvalidationWorkerStore): cached_method_name="_get_server_keys_json", list_name="server_name_and_key_ids" ) async def get_server_keys_json( - self, server_name_and_key_ids: Iterable[Tuple[str, str]] - ) -> Mapping[Tuple[str, str], FetchKeyResult]: + self, server_name_and_key_ids: Iterable[tuple[str, str]] + ) -> Mapping[tuple[str, str], FetchKeyResult]: """ Args: server_name_and_key_ids: @@ -151,7 +151,7 @@ class KeyStore(CacheInvalidationWorkerStore): """ keys = {} - def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str], ...]) -> None: + def _get_keys(txn: Cursor, batch: tuple[tuple[str, str], ...]) -> None: """Processes a batch of keys to fetch, and adds the result to `keys`.""" # batch_iter always returns tuples so it's safe to do len(batch) @@ -189,7 +189,7 @@ class KeyStore(CacheInvalidationWorkerStore): valid_until_ts=ts_valid_until_ms, ) - def _txn(txn: Cursor) -> Dict[Tuple[str, str], FetchKeyResult]: + def _txn(txn: Cursor) -> dict[tuple[str, str], FetchKeyResult]: for batch in batch_iter(server_name_and_key_ids, 50): _get_keys(txn, batch) return keys @@ -215,7 +215,7 @@ class KeyStore(CacheInvalidationWorkerStore): If we have multiple entries for a given key ID, returns the most recent. """ rows = cast( - List[Tuple[str, str, int, int, Union[bytes, memoryview]]], + list[tuple[str, str, int, int, Union[bytes, memoryview]]], await self.db_pool.simple_select_many_batch( table="server_keys_json", column="key_id", @@ -252,13 +252,13 @@ class KeyStore(CacheInvalidationWorkerStore): async def get_all_server_keys_json_for_remote( self, server_name: str, - ) -> Dict[str, FetchKeyResultForRemote]: + ) -> dict[str, FetchKeyResultForRemote]: """Fetch the cached keys for the given server. If we have multiple entries for a given key ID, returns the most recent. 
""" rows = cast( - List[Tuple[str, str, int, int, Union[bytes, memoryview]]], + list[tuple[str, str, int, int, Union[bytes, memoryview]]], await self.db_pool.simple_select_list( table="server_keys_json", keyvalues={"server_name": server_name}, diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index e2b15eaf6a..9dd2cae344 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -21,7 +21,7 @@ import logging from contextlib import AsyncExitStack from types import TracebackType -from typing import TYPE_CHECKING, Collection, Optional, Set, Tuple, Type +from typing import TYPE_CHECKING, Collection, Optional from weakref import WeakValueDictionary from twisted.internet import defer @@ -82,7 +82,7 @@ class LockStore(SQLBaseStore): # A map from `(lock_name, lock_key)` to lock that we think we # currently hold. - self._live_lock_tokens: WeakValueDictionary[Tuple[str, str], Lock] = ( + self._live_lock_tokens: WeakValueDictionary[tuple[str, str], Lock] = ( WeakValueDictionary() ) @@ -91,7 +91,7 @@ class LockStore(SQLBaseStore): # multiple read locks at a time but only one write lock (no mixing read # and write locks at the same time). self._live_read_write_lock_tokens: WeakValueDictionary[ - Tuple[str, str, str], Lock + tuple[str, str, str], Lock ] = WeakValueDictionary() # When we shut down we want to remove the locks. Technically this can @@ -104,7 +104,7 @@ class LockStore(SQLBaseStore): shutdown_func=self._on_shutdown, ) - self._acquiring_locks: Set[Tuple[str, str]] = set() + self._acquiring_locks: set[tuple[str, str]] = set() self.clock.looping_call( self._reap_stale_read_write_locks, _LOCK_TIMEOUT_MS / 10.0 @@ -288,7 +288,7 @@ class LockStore(SQLBaseStore): async def try_acquire_multi_read_write_lock( self, - lock_names: Collection[Tuple[str, str]], + lock_names: Collection[tuple[str, str]], write: bool, ) -> Optional[AsyncExitStack]: """Try to acquire multiple locks for the given names/keys. 
Will return @@ -318,7 +318,7 @@ class LockStore(SQLBaseStore): def _try_acquire_multi_read_write_lock_txn( self, txn: LoggingTransaction, - lock_names: Collection[Tuple[str, str]], + lock_names: Collection[tuple[str, str]], write: bool, ) -> Collection["Lock"]: locks = [] @@ -497,7 +497,7 @@ class Lock: async def __aexit__( self, - _exctype: Optional[Type[BaseException]], + _exctype: Optional[type[BaseException]], _excinst: Optional[BaseException], _exctb: Optional[TracebackType], ) -> bool: diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index b8bd0042d7..b9f882662e 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -25,9 +25,7 @@ from typing import ( TYPE_CHECKING, Collection, Iterable, - List, Optional, - Tuple, Union, cast, ) @@ -275,7 +273,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): user_id: str, order_by: str = MediaSortOrder.CREATED_TS.value, direction: Direction = Direction.FORWARDS, - ) -> Tuple[List[LocalMedia], int]: + ) -> tuple[list[LocalMedia], int]: """Get a paginated list of metadata for a local piece of media which an user_id has uploaded @@ -292,7 +290,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): def get_local_media_by_user_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[LocalMedia], int]: + ) -> tuple[list[LocalMedia], int]: # Set ordering order_by_column = MediaSortOrder(order_by).value @@ -301,14 +299,14 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): else: order = "ASC" - args: List[Union[str, int]] = [user_id] + args: list[Union[str, int]] = [user_id] sql = """ SELECT COUNT(*) as total_media FROM local_media_repository WHERE user_id = ? """ txn.execute(sql, args) - count = cast(Tuple[int], txn.fetchone())[0] + count = cast(tuple[int], txn.fetchone())[0] sql = """ SELECT @@ -365,7 +363,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): keep_profiles: bool, include_quarantined_media: bool, include_protected_media: bool, - ) -> List[str]: + ) -> list[str]: """ Retrieve a list of media IDs from the local media store. @@ -437,7 +435,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): AND NOT safe_from_quarantine """ - def _get_local_media_ids_txn(txn: LoggingTransaction) -> List[str]: + def _get_local_media_ids_txn(txn: LoggingTransaction) -> list[str]: txn.execute(sql, (before_ts, before_ts, size_gt)) return [row[0] for row in txn] @@ -544,7 +542,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): desc="mark_local_media_as_safe", ) - async def count_pending_media(self, user_id: UserID) -> Tuple[int, int]: + async def count_pending_media(self, user_id: UserID) -> tuple[int, int]: """Count the number of pending media for a user. Returns: @@ -552,7 +550,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): expiration timestamp. 
""" - def get_pending_media_txn(txn: LoggingTransaction) -> Tuple[int, int]: + def get_pending_media_txn(txn: LoggingTransaction) -> tuple[int, int]: sql = """ SELECT COUNT(*), MIN(created_ts) FROM local_media_repository @@ -637,9 +635,9 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): desc="store_url_cache", ) - async def get_local_media_thumbnails(self, media_id: str) -> List[ThumbnailInfo]: + async def get_local_media_thumbnails(self, media_id: str) -> list[ThumbnailInfo]: rows = cast( - List[Tuple[int, int, str, str, int]], + list[tuple[int, int, str, str, int]], await self.db_pool.simple_select_list( "local_media_repository_thumbnails", {"media_id": media_id}, @@ -755,7 +753,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): async def update_cached_last_access_time( self, local_media: Iterable[str], - remote_media: Iterable[Tuple[str, str]], + remote_media: Iterable[tuple[str, str]], time_ms: int, ) -> None: """Updates the last access time of the given media @@ -793,9 +791,9 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): async def get_remote_media_thumbnails( self, origin: str, media_id: str - ) -> List[ThumbnailInfo]: + ) -> list[ThumbnailInfo]: rows = cast( - List[Tuple[int, int, str, str, int]], + list[tuple[int, int, str, str, int]], await self.db_pool.simple_select_list( "remote_media_cache_thumbnails", {"media_origin": origin, "media_id": media_id}, @@ -881,7 +879,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): async def get_remote_media_ids( self, before_ts: int, include_quarantined_media: bool - ) -> List[Tuple[str, str, str]]: + ) -> list[tuple[str, str, str]]: """ Retrieve a list of server name, media ID tuples from the remote media cache. @@ -911,7 +909,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): """ return cast( - List[Tuple[str, str, str]], + list[tuple[str, str, str]], await self.db_pool.execute("get_remote_media_ids", sql, before_ts), ) @@ -932,7 +930,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "delete_remote_media", delete_remote_media_txn ) - async def get_expired_url_cache(self, now_ts: int) -> List[str]: + async def get_expired_url_cache(self, now_ts: int) -> list[str]: sql = ( "SELECT media_id FROM local_media_repository_url_cache" " WHERE expires_ts < ?" @@ -940,7 +938,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): " LIMIT 500" ) - def _get_expired_url_cache_txn(txn: LoggingTransaction) -> List[str]: + def _get_expired_url_cache_txn(txn: LoggingTransaction) -> list[str]: txn.execute(sql, (now_ts,)) return [row[0] for row in txn] @@ -959,7 +957,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): await self.db_pool.runInteraction("delete_url_cache", _delete_url_cache_txn) - async def get_url_cache_media_before(self, before_ts: int) -> List[str]: + async def get_url_cache_media_before(self, before_ts: int) -> list[str]: sql = ( "SELECT media_id FROM local_media_repository" " WHERE created_ts < ? 
AND url_cache IS NOT NULL" @@ -967,7 +965,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): " LIMIT 500" ) - def _get_url_cache_media_before_txn(txn: LoggingTransaction) -> List[str]: + def _get_url_cache_media_before_txn(txn: LoggingTransaction) -> list[str]: txn.execute(sql, (before_ts,)) return [row[0] for row in txn] diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 49411ed034..dc8e2c1616 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -21,7 +21,7 @@ import calendar import logging import time -from typing import TYPE_CHECKING, Dict, List, Tuple, cast +from typing import TYPE_CHECKING, cast from synapse.metrics import SERVER_NAME_LABEL, GaugeBucketCollector from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -85,7 +85,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): @wrap_as_background_process("read_forward_extremities") async def _read_forward_extremities(self) -> None: - def fetch(txn: LoggingTransaction) -> List[Tuple[int, int]]: + def fetch(txn: LoggingTransaction) -> list[tuple[int, int]]: txn.execute( """ SELECT t1.c, t2.c @@ -98,7 +98,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): ) t2 ON t1.room_id = t2.room_id """ ) - return cast(List[Tuple[int, int]], txn.fetchall()) + return cast(list[tuple[int, int]], txn.fetchall()) res = await self.db_pool.runInteraction("read_forward_extremities", fetch) @@ -125,7 +125,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): AND stream_ordering > ? """ txn.execute(sql, (self.stream_ordering_day_ago,)) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction("count_e2ee_messages", _count_messages) @@ -144,7 +144,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): """ txn.execute(sql, (like_clause, self.stream_ordering_day_ago)) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction( @@ -159,7 +159,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): AND stream_ordering > ? """ txn.execute(sql, (self.stream_ordering_day_ago,)) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction( @@ -181,7 +181,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): AND stream_ordering > ? """ txn.execute(sql, (self.stream_ordering_day_ago,)) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction("count_messages", _count_messages) @@ -200,7 +200,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): """ txn.execute(sql, (like_clause, self.stream_ordering_day_ago)) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction( @@ -215,7 +215,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): AND stream_ordering > ? 
""" txn.execute(sql, (self.stream_ordering_day_ago,)) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction("count_daily_active_rooms", _count) @@ -256,10 +256,10 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): # Mypy knows that fetchone() might return None if there are no rows. # We know better: "SELECT COUNT(...) FROM ..." without any GROUP BY always # returns exactly one row. - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return count - async def count_r30v2_users(self) -> Dict[str, int]: + async def count_r30v2_users(self) -> dict[str, int]: """ Counts the number of 30 day retained users, defined as users that: - Appear more than once in the past 60 days @@ -279,7 +279,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): - "web" (any web application -- it's not possible to distinguish Element Web here) """ - def _count_r30v2_users(txn: LoggingTransaction) -> Dict[str, int]: + def _count_r30v2_users(txn: LoggingTransaction) -> dict[str, int]: thirty_days_in_secs = 86400 * 30 now = int(self.clock.time()) sixty_days_ago_in_secs = now - 2 * thirty_days_in_secs @@ -376,7 +376,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): thirty_days_in_secs * 1000, ), ) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) results["all"] = count return results diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 86744f616c..bf8e540ffb 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, cast +from typing import TYPE_CHECKING, Mapping, Optional, cast from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage.database import ( @@ -94,7 +94,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): WHERE (users.appservice_id IS NULL OR users.appservice_id = ''); """ txn.execute(sql) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction("count_users", _count_users) @@ -112,7 +112,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): """ - def _count_users_by_service(txn: LoggingTransaction) -> Dict[str, int]: + def _count_users_by_service(txn: LoggingTransaction) -> dict[str, int]: sql = """ SELECT COALESCE(appservice_id, 'native'), COUNT(*) FROM monthly_active_users @@ -121,7 +121,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): """ txn.execute(sql) - result = cast(List[Tuple[str, int]], txn.fetchall()) + result = cast(list[tuple[str, int]], txn.fetchall()) return dict(result) return await self.db_pool.runInteraction( @@ -130,7 +130,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): async def get_monthly_active_users_by_service( self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None - ) -> List[Tuple[str, str]]: + ) -> list[tuple[str, str]]: """Generates list of monthly active users and their services. Please see "get_monthly_active_count_by_service" docstring for more details about services. 
@@ -160,7 +160,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): where_clause = "" query_params = [] - def _list_users(txn: LoggingTransaction) -> List[Tuple[str, str]]: + def _list_users(txn: LoggingTransaction) -> list[tuple[str, str]]: sql = f""" SELECT COALESCE(appservice_id, 'native'), user_id FROM monthly_active_users @@ -169,11 +169,11 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): """ txn.execute(sql, query_params) - return cast(List[Tuple[str, str]], txn.fetchall()) + return cast(list[tuple[str, str]], txn.fetchall()) return await self.db_pool.runInteraction("list_users", _list_users) - async def get_registered_reserved_users(self) -> List[str]: + async def get_registered_reserved_users(self) -> list[str]: """Of the reserved threepids defined in config, retrieve those that are associated with registered users @@ -219,7 +219,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): entries exist. """ - def _reap_users(txn: LoggingTransaction, reserved_users: List[str]) -> None: + def _reap_users(txn: LoggingTransaction, reserved_users: list[str]) -> None: """ Args: reserved_users: reserved users to preserve @@ -294,7 +294,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): ) def _initialise_reserved_users( - self, txn: LoggingTransaction, threepids: List[dict] + self, txn: LoggingTransaction, threepids: list[dict] ) -> None: """Ensures that reserved threepids are accounted for in the MAU table, should be called on start up. diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 587f51df2c..fec94f4e5a 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -21,12 +21,9 @@ from typing import ( TYPE_CHECKING, Any, - Dict, Iterable, - List, Mapping, Optional, - Tuple, Union, cast, ) @@ -116,8 +113,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) ) async def update_presence( - self, presence_states: List[UserPresenceState] - ) -> Tuple[int, int]: + self, presence_states: list[UserPresenceState] + ) -> tuple[int, int]: assert self._can_persist_presence stream_ordering_manager = self._presence_id_gen.get_next_mult( @@ -142,8 +139,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) def _update_presence_txn( self, txn: LoggingTransaction, - stream_orderings: List[int], - presence_states: List[UserPresenceState], + stream_orderings: list[int], + presence_states: list[UserPresenceState], ) -> None: for stream_id, state in zip(stream_orderings, presence_states): txn.call_after( @@ -193,7 +190,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) async def get_all_presence_updates( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, list]], int, bool]: + ) -> tuple[list[tuple[int, list]], int, bool]: """Get updates for presence replication stream. 
Args: @@ -220,7 +217,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) def get_all_presence_updates_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[int, list]], int, bool]: + ) -> tuple[list[tuple[int, list]], int, bool]: sql = """ SELECT stream_id, user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, @@ -232,7 +229,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) """ txn.execute(sql, (last_id, current_id, limit)) updates = cast( - List[Tuple[int, list]], + list[tuple[int, list]], [(row[0], row[1:]) for row in txn], ) @@ -263,7 +260,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) # TODO All these columns are nullable, but we don't expect that: # https://github.com/matrix-org/synapse/issues/16467 rows = cast( - List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], + list[tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], await self.db_pool.simple_select_many_batch( table="presence_stream", column="user_id", @@ -375,7 +372,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) async def get_presence_for_all_users( self, include_offline: bool = True, - ) -> Dict[str, UserPresenceState]: + ) -> dict[str, UserPresenceState]: """Retrieve the current presence state for all users. Note that the presence_stream table is culled frequently, so it should only @@ -402,7 +399,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) # TODO All these columns are nullable, but we don't expect that: # https://github.com/matrix-org/synapse/issues/16467 rows = cast( - List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], + list[tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], await self.db_pool.runInteraction( "get_presence_for_all_users", self.db_pool.simple_select_list_paginate_txn, @@ -457,7 +454,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) def get_presence_stream_id_gen(self) -> MultiWriterIdGenerator: return self._presence_id_gen - def _get_active_presence(self, db_conn: Connection) -> List[UserPresenceState]: + def _get_active_presence(self, db_conn: Connection) -> list[UserPresenceState]: """Fetch non-offline presence from the database so that we can register the appropriate time outs. """ @@ -488,7 +485,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, currently_active in rows ] - def take_presence_startup_info(self) -> List[UserPresenceState]: + def take_presence_startup_info(self) -> list[UserPresenceState]: active_on_startup = self._presence_on_startup self._presence_on_startup = [] return active_on_startup diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 30d8a58d96..71f01a597b 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -19,7 +19,7 @@ # # import json -from typing import TYPE_CHECKING, Dict, Optional, Tuple, cast +from typing import TYPE_CHECKING, Optional, cast from canonicaljson import encode_canonical_json @@ -240,7 +240,7 @@ class ProfileWorkerStore(SQLBaseStore): # Test exists first since value being None is used for both # missing and a null JSON value. 
- exists, value = cast(Tuple[bool, JsonValue], txn.fetchone()) + exists, value = cast(tuple[bool, JsonValue], txn.fetchone()) if not exists: raise StoreError(404, "No row found") return value @@ -258,7 +258,7 @@ class ProfileWorkerStore(SQLBaseStore): # If value_type is None, then the value did not exist. value_type, value = cast( - Tuple[Optional[str], JsonValue], txn.fetchone() + tuple[Optional[str], JsonValue], txn.fetchone() ) if not value_type: raise StoreError(404, "No row found") @@ -271,7 +271,7 @@ class ProfileWorkerStore(SQLBaseStore): return await self.db_pool.runInteraction("get_profile_field", get_profile_field) - async def get_profile_fields(self, user_id: UserID) -> Dict[str, str]: + async def get_profile_fields(self, user_id: UserID) -> dict[str, str]: """ Get all custom profile fields for a user. @@ -346,7 +346,7 @@ class ProfileWorkerStore(SQLBaseStore): # possible due to the grammar. (f'$."{new_field_name}"', user_id.localpart), ) - row = cast(Tuple[Optional[int], Optional[int], Optional[int]], txn.fetchone()) + row = cast(tuple[Optional[int], Optional[int], Optional[int]], txn.fetchone()) # The values return null if the column is null. total_bytes = ( diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index d4642a1309..10de1b35a6 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -20,7 +20,7 @@ # import logging -from typing import Any, Set, Tuple, cast +from typing import Any, cast from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction @@ -103,7 +103,7 @@ The tables with a `room_id` column regardless of whether they have a useful inde class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): async def purge_history( self, room_id: str, token: str, delete_local_events: bool - ) -> Set[int]: + ) -> set[int]: """Deletes room history before a certain point. Note that only a single purge can occur at once, this is guaranteed via @@ -137,7 +137,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): room_id: str, token: RoomStreamToken, delete_local_events: bool, - ) -> Set[int]: + ) -> set[int]: # Tables that should be pruned: # event_auth # event_backward_extremities @@ -204,7 +204,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): logger.info("[purge] looking for events to delete") should_delete_expr = "state_events.state_key IS NULL" - should_delete_params: Tuple[Any, ...] = () + should_delete_params: tuple[Any, ...] = () if not delete_local_events: should_delete_expr += " AND event_id NOT LIKE ?" 
@@ -355,7 +355,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): """, (room_id,), ) - (min_depth,) = cast(Tuple[int], txn.fetchone()) + (min_depth,) = cast(tuple[int], txn.fetchone()) logger.info("[purge] updating room_depth to %d", min_depth) diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 1860be1713..ecab19eb2e 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -23,13 +23,10 @@ from typing import ( TYPE_CHECKING, Any, Collection, - Dict, Iterable, - List, Mapping, Optional, Sequence, - Tuple, Union, cast, ) @@ -69,8 +66,8 @@ logger = logging.getLogger(__name__) def _load_rules( - rawrules: List[Tuple[str, int, str, str]], - enabled_map: Dict[str, bool], + rawrules: list[tuple[str, int, str, str]], + enabled_map: dict[str, bool], experimental_config: ExperimentalConfig, ) -> FilteredPushRules: """Take the DB rows returned from the DB and convert them into a full @@ -206,7 +203,7 @@ class PushRulesWorkerStore( @cached(max_entries=5000) async def get_push_rules_for_user(self, user_id: str) -> FilteredPushRules: rows = cast( - List[Tuple[str, int, int, str, str]], + list[tuple[str, int, int, str, str]], await self.db_pool.simple_select_list( table="push_rules", keyvalues={"user_name": user_id}, @@ -232,9 +229,9 @@ class PushRulesWorkerStore( self.hs.config.experimental, ) - async def get_push_rules_enabled_for_user(self, user_id: str) -> Dict[str, bool]: + async def get_push_rules_enabled_for_user(self, user_id: str) -> dict[str, bool]: results = cast( - List[Tuple[str, Optional[Union[int, bool]]]], + list[tuple[str, Optional[Union[int, bool]]]], await self.db_pool.simple_select_list( table="push_rules_enable", keyvalues={"user_name": user_id}, @@ -257,7 +254,7 @@ class PushRulesWorkerStore( " WHERE user_id = ? AND ? 
< stream_id" ) txn.execute(sql, (user_id, last_id)) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return bool(count) return await self.db_pool.runInteraction( @@ -271,7 +268,7 @@ class PushRulesWorkerStore( if not user_ids: return {} - raw_rules: Dict[str, List[Tuple[str, int, str, str]]] = { + raw_rules: dict[str, list[tuple[str, int, str, str]]] = { user_id: [] for user_id in user_ids } @@ -280,7 +277,7 @@ class PushRulesWorkerStore( gather_results( ( cast( - "defer.Deferred[List[Tuple[str, str, int, int, str, str]]]", + "defer.Deferred[list[tuple[str, str, int, int, str, str]]]", run_in_background( self.db_pool.simple_select_many_batch, table="push_rules", @@ -312,7 +309,7 @@ class PushRulesWorkerStore( (rule_id, priority_class, conditions, actions) ) - results: Dict[str, FilteredPushRules] = {} + results: dict[str, FilteredPushRules] = {} for user_id, rules in raw_rules.items(): results[user_id] = _load_rules( @@ -323,14 +320,14 @@ class PushRulesWorkerStore( async def bulk_get_push_rules_enabled( self, user_ids: Collection[str] - ) -> Dict[str, Dict[str, bool]]: + ) -> dict[str, dict[str, bool]]: if not user_ids: return {} - results: Dict[str, Dict[str, bool]] = {user_id: {} for user_id in user_ids} + results: dict[str, dict[str, bool]] = {user_id: {} for user_id in user_ids} rows = cast( - List[Tuple[str, str, Optional[int]]], + list[tuple[str, str, Optional[int]]], await self.db_pool.simple_select_many_batch( table="push_rules_enable", column="user_name", @@ -346,7 +343,7 @@ class PushRulesWorkerStore( async def get_all_push_rule_updates( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]: + ) -> tuple[list[tuple[int, tuple[str]]], int, bool]: """Get updates for push_rules replication stream. Args: @@ -373,7 +370,7 @@ class PushRulesWorkerStore( def get_all_push_rule_updates_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]: + ) -> tuple[list[tuple[int, tuple[str]]], int, bool]: sql = """ SELECT stream_id, user_id FROM push_rules_stream @@ -383,7 +380,7 @@ class PushRulesWorkerStore( """ txn.execute(sql, (last_id, current_id, limit)) updates = cast( - List[Tuple[int, Tuple[str]]], + list[tuple[int, tuple[str]]], [(stream_id, (user_id,)) for stream_id, user_id in txn], ) @@ -794,7 +791,7 @@ class PushRulesWorkerStore( self, user_id: str, rule_id: str, - actions: List[Union[dict, str]], + actions: list[Union[dict, str]], is_default_rule: bool, ) -> None: """ diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 1b2aa79ab1..c8f049536a 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -23,12 +23,9 @@ import logging from typing import ( TYPE_CHECKING, Any, - Dict, Iterable, Iterator, - List, Optional, - Tuple, cast, ) @@ -51,7 +48,7 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) # The type of a row in the pushers table. -PusherRow = Tuple[ +PusherRow = tuple[ int, # id str, # user_name Optional[int], # access_token @@ -192,7 +189,7 @@ class PusherWorkerStore(SQLBaseStore): async def get_pushers_by_user_id(self, user_id: str) -> Iterator[PusherConfig]: return await self.get_pushers_by({"user_name": user_id}) - async def get_pushers_by(self, keyvalues: Dict[str, Any]) -> Iterator[PusherConfig]: + async def get_pushers_by(self, keyvalues: dict[str, Any]) -> Iterator[PusherConfig]: """Retrieve pushers that match the given criteria. 
Args: @@ -202,7 +199,7 @@ class PusherWorkerStore(SQLBaseStore): The pushers for which the given columns have the given values. """ - def get_pushers_by_txn(txn: LoggingTransaction) -> List[PusherRow]: + def get_pushers_by_txn(txn: LoggingTransaction) -> list[PusherRow]: # We could technically use simple_select_list here, but we need to call # COALESCE on the 'enabled' column. While it is technically possible to give # simple_select_list the whole `COALESCE(...) AS ...` as a column name, it @@ -220,7 +217,7 @@ class PusherWorkerStore(SQLBaseStore): txn.execute(sql, list(keyvalues.values())) - return cast(List[PusherRow], txn.fetchall()) + return cast(list[PusherRow], txn.fetchall()) ret = await self.db_pool.runInteraction( desc="get_pushers_by", @@ -230,7 +227,7 @@ class PusherWorkerStore(SQLBaseStore): return self._decode_pushers_rows(ret) async def get_enabled_pushers(self) -> Iterator[PusherConfig]: - def get_enabled_pushers_txn(txn: LoggingTransaction) -> List[PusherRow]: + def get_enabled_pushers_txn(txn: LoggingTransaction) -> list[PusherRow]: txn.execute( """ SELECT id, user_name, access_token, profile_tag, kind, app_id, @@ -240,7 +237,7 @@ class PusherWorkerStore(SQLBaseStore): FROM pushers WHERE COALESCE(enabled, TRUE) """ ) - return cast(List[PusherRow], txn.fetchall()) + return cast(list[PusherRow], txn.fetchall()) return self._decode_pushers_rows( await self.db_pool.runInteraction( @@ -250,7 +247,7 @@ class PusherWorkerStore(SQLBaseStore): async def get_all_updated_pushers_rows( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: """Get updates for pushers replication stream. Args: @@ -277,7 +274,7 @@ class PusherWorkerStore(SQLBaseStore): def get_all_updated_pushers_rows_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + ) -> tuple[list[tuple[int, tuple]], int, bool]: sql = """ SELECT id, user_name, app_id, pushkey FROM pushers @@ -286,7 +283,7 @@ class PusherWorkerStore(SQLBaseStore): """ txn.execute(sql, (last_id, current_id, limit)) updates = cast( - List[Tuple[int, tuple]], + list[tuple[int, tuple]], [ (stream_id, (user_name, app_id, pushkey, False)) for stream_id, user_name, app_id, pushkey in txn @@ -379,9 +376,9 @@ class PusherWorkerStore(SQLBaseStore): async def get_throttle_params_by_room( self, pusher_id: int - ) -> Dict[str, ThrottleParams]: + ) -> dict[str, ThrottleParams]: res = cast( - List[Tuple[str, Optional[int], Optional[int]]], + list[tuple[str, Optional[int], Optional[int]]], await self.db_pool.simple_select_list( "pusher_throttle", {"pusher": pusher_id}, @@ -610,7 +607,7 @@ class PusherBackgroundUpdatesStore(SQLBaseStore): (last_pusher_id, batch_size), ) - rows = cast(List[Tuple[int, Optional[str], Optional[str]]], txn.fetchall()) + rows = cast(list[tuple[int, Optional[str], Optional[str]]], txn.fetchall()) if len(rows) == 0: return 0 @@ -764,7 +761,7 @@ class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore): # account. 
pushers = list(await self.get_pushers_by_user_id(user_id)) - def delete_pushers_txn(txn: LoggingTransaction, stream_ids: List[int]) -> None: + def delete_pushers_txn(txn: LoggingTransaction, stream_ids: list[int]) -> None: self._invalidate_cache_and_stream( # type: ignore[attr-defined] txn, self.get_if_user_has_pusher, (user_id,) ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index f1dbf68971..63d4e1f68c 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -24,14 +24,10 @@ from typing import ( TYPE_CHECKING, Any, Collection, - Dict, Iterable, - List, Mapping, Optional, Sequence, - Set, - Tuple, cast, ) @@ -92,14 +88,14 @@ class ReceiptInRoom: # matching threaded receipts. # Set of (user_id, event_id) - unthreaded_receipts: Set[Tuple[str, str]] = { + unthreaded_receipts: set[tuple[str, str]] = { (receipt.user_id, receipt.event_id) for receipt in receipts if receipt.thread_id is None } # event_id -> receipt_type -> user_id -> receipt data - content: Dict[str, Dict[str, Dict[str, JsonMapping]]] = {} + content: dict[str, dict[str, dict[str, JsonMapping]]] = {} for receipt in receipts: data = receipt.data if receipt.thread_id is not None: @@ -180,7 +176,7 @@ class ReceiptsWorkerStore(SQLBaseStore): user_id: str, room_id: str, receipt_types: Collection[str], - ) -> Optional[Tuple[str, int]]: + ) -> Optional[tuple[str, int]]: """ Fetch the event ID and stream_ordering for the latest unthreaded receipt in a room with one of the given receipt types. @@ -212,11 +208,11 @@ class ReceiptsWorkerStore(SQLBaseStore): args.extend((user_id, room_id)) txn.execute(sql, args) - return cast(Optional[Tuple[str, int]], txn.fetchone()) + return cast(Optional[tuple[str, int]], txn.fetchone()) async def get_receipts_for_user( self, user_id: str, receipt_types: Iterable[str] - ) -> Dict[str, str]: + ) -> dict[str, str]: """ Fetch the event IDs for the latest receipts sent by the given user. @@ -285,7 +281,7 @@ class ReceiptsWorkerStore(SQLBaseStore): A map of room ID to the latest receipt information. """ - def f(txn: LoggingTransaction) -> List[Tuple[str, str, int, int]]: + def f(txn: LoggingTransaction) -> list[tuple[str, str, int, int]]: sql = ( "SELECT rl.room_id, rl.event_id," " e.topological_ordering, e.stream_ordering" @@ -297,7 +293,7 @@ class ReceiptsWorkerStore(SQLBaseStore): " AND receipt_type = ?" ) txn.execute(sql, (user_id, receipt_type)) - return cast(List[Tuple[str, str, int, int]], txn.fetchall()) + return cast(list[tuple[str, str, int, int]], txn.fetchall()) rows = await self.db_pool.runInteraction( "get_receipts_for_user_with_orderings", f @@ -316,7 +312,7 @@ class ReceiptsWorkerStore(SQLBaseStore): room_ids: Iterable[str], to_key: MultiWriterStreamToken, from_key: Optional[MultiWriterStreamToken] = None, - ) -> List[JsonMapping]: + ) -> list[JsonMapping]: """Get receipts for multiple rooms for sending to clients. 
Args: @@ -379,7 +375,7 @@ class ReceiptsWorkerStore(SQLBaseStore): ) -> Sequence[JsonMapping]: """See get_linearized_receipts_for_room""" - def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str]]: + def f(txn: LoggingTransaction) -> list[tuple[str, str, str, str]]: if from_key: sql = """ SELECT stream_id, instance_name, receipt_type, user_id, event_id, data @@ -466,7 +462,7 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql + clause, [to_key.get_max_stream_pos()] + list(args)) - results: Dict[str, List[ReceiptInRoom]] = {} + results: dict[str, list[ReceiptInRoom]] = {} for ( stream_id, instance_name, @@ -515,7 +511,7 @@ class ReceiptsWorkerStore(SQLBaseStore): async def get_linearized_receipts_for_events( self, - room_and_event_ids: Collection[Tuple[str, str]], + room_and_event_ids: Collection[tuple[str, str]], ) -> Mapping[str, Sequence[ReceiptInRoom]]: """Get all receipts for the given set of events. @@ -531,8 +527,8 @@ class ReceiptsWorkerStore(SQLBaseStore): def get_linearized_receipts_for_events_txn( txn: LoggingTransaction, - room_id_event_id_tuples: Collection[Tuple[str, str]], - ) -> List[Tuple[str, str, str, str, Optional[str], str]]: + room_id_event_id_tuples: Collection[tuple[str, str]], + ) -> list[tuple[str, str, str, str, Optional[str], str]]: clause, args = make_tuple_in_list_sql_clause( self.database_engine, ("room_id", "event_id"), room_id_event_id_tuples ) @@ -548,7 +544,7 @@ class ReceiptsWorkerStore(SQLBaseStore): return txn.fetchall() # room_id -> receipts - room_to_receipts: Dict[str, List[ReceiptInRoom]] = {} + room_to_receipts: dict[str, list[ReceiptInRoom]] = {} for batch in batch_iter(room_and_event_ids, 1000): batch_results = await self.db_pool.runInteraction( "get_linearized_receipts_for_events", @@ -596,7 +592,7 @@ class ReceiptsWorkerStore(SQLBaseStore): A dictionary of roomids to a list of receipts. """ - def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str, str]]: + def f(txn: LoggingTransaction) -> list[tuple[str, str, str, str, str]]: if from_key: sql = """ SELECT stream_id, instance_name, room_id, receipt_type, user_id, event_id, data @@ -659,7 +655,7 @@ class ReceiptsWorkerStore(SQLBaseStore): def get_linearized_receipts_for_user_in_rooms_txn( txn: LoggingTransaction, batch_room_ids: StrCollection, - ) -> List[Tuple[str, str, str, str, Optional[str], str]]: + ) -> list[tuple[str, str, str, str, Optional[str], str]]: clause, args = make_in_list_sql_clause( self.database_engine, "room_id", batch_room_ids ) @@ -687,7 +683,7 @@ class ReceiptsWorkerStore(SQLBaseStore): ] # room_id -> receipts - room_to_receipts: Dict[str, List[ReceiptInRoom]] = {} + room_to_receipts: dict[str, list[ReceiptInRoom]] = {} for batch in batch_iter(room_ids, 1000): batch_results = await self.db_pool.runInteraction( "get_linearized_receipts_for_events", @@ -746,7 +742,7 @@ class ReceiptsWorkerStore(SQLBaseStore): return [room_id for (room_id,) in txn] - results: List[str] = [] + results: list[str] = [] for batch in batch_iter(room_ids, 1000): batch_result = await self.db_pool.runInteraction( "get_rooms_with_receipts_between", f, batch @@ -757,7 +753,7 @@ class ReceiptsWorkerStore(SQLBaseStore): async def get_users_sent_receipts_between( self, last_id: int, current_id: int - ) -> List[str]: + ) -> list[str]: """Get all users who sent receipts between `last_id` exclusive and `current_id` inclusive. 
@@ -768,7 +764,7 @@ class ReceiptsWorkerStore(SQLBaseStore): if last_id == current_id: return [] - def _get_users_sent_receipts_between_txn(txn: LoggingTransaction) -> List[str]: + def _get_users_sent_receipts_between_txn(txn: LoggingTransaction) -> list[str]: sql = """ SELECT DISTINCT user_id FROM receipts_linearized WHERE ? < stream_id AND stream_id <= ? @@ -783,8 +779,8 @@ class ReceiptsWorkerStore(SQLBaseStore): async def get_all_updated_receipts( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[ - List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]], int, bool + ) -> tuple[ + list[tuple[int, tuple[str, str, str, str, Optional[str], JsonDict]]], int, bool ]: """Get updates for receipts replication stream. @@ -812,8 +808,8 @@ class ReceiptsWorkerStore(SQLBaseStore): def get_all_updated_receipts_txn( txn: LoggingTransaction, - ) -> Tuple[ - List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]], + ) -> tuple[ + list[tuple[int, tuple[str, str, str, str, Optional[str], JsonDict]]], int, bool, ]: @@ -828,7 +824,7 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql, (last_id, current_id, instance_name, limit)) updates = cast( - List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]], + list[tuple[int, tuple[str, str, str, str, Optional[str], JsonDict]]], [(r[0], r[1:6] + (db_to_json(r[6]),)) for r in txn], ) @@ -917,7 +913,7 @@ class ReceiptsWorkerStore(SQLBaseStore): if stream_ordering is not None: if thread_id is None: thread_clause = "r.thread_id IS NULL" - thread_args: Tuple[str, ...] = () + thread_args: tuple[str, ...] = () else: thread_clause = "r.thread_id = ?" thread_args = (thread_id,) @@ -986,7 +982,7 @@ class ReceiptsWorkerStore(SQLBaseStore): return rx_ts def _graph_to_linear( - self, txn: LoggingTransaction, room_id: str, event_ids: List[str] + self, txn: LoggingTransaction, room_id: str, event_ids: list[str] ) -> str: """ Generate a linearized event from a list of events (i.e. a list of forward @@ -1026,7 +1022,7 @@ class ReceiptsWorkerStore(SQLBaseStore): room_id: str, receipt_type: str, user_id: str, - event_ids: List[str], + event_ids: list[str], thread_id: Optional[str], data: dict, ) -> Optional[PersistedPosition]: @@ -1098,7 +1094,7 @@ class ReceiptsWorkerStore(SQLBaseStore): room_id: str, receipt_type: str, user_id: str, - event_ids: List[str], + event_ids: list[str], thread_id: Optional[str], data: JsonDict, ) -> None: @@ -1237,7 +1233,7 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore): HAVING COUNT(*) > 1 """ txn.execute(sql) - duplicate_keys = cast(List[Tuple[int, str, str, str]], list(txn)) + duplicate_keys = cast(list[tuple[int, str, str, str]], list(txn)) # Then remove duplicate receipts, keeping the one with the highest # `stream_id`. Since there might be duplicate rows with the same @@ -1255,7 +1251,7 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore): LIMIT 1 """ txn.execute(sql, (room_id, receipt_type, user_id, stream_id)) - row_id = cast(Tuple[str], txn.fetchone())[0] + row_id = cast(tuple[str], txn.fetchone())[0] sql = f""" DELETE FROM receipts_linearized @@ -1306,7 +1302,7 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore): HAVING COUNT(*) > 1 """ txn.execute(sql) - duplicate_keys = cast(List[Tuple[str, str, str]], list(txn)) + duplicate_keys = cast(list[tuple[str, str, str]], list(txn)) # Then remove all duplicate receipts. 
# We could be clever and try to keep the latest receipt out of every set of diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 906d1a91f6..7ce9bf43e6 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -22,7 +22,7 @@ import logging import random import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Any, Optional, Union, cast import attr @@ -576,7 +576,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): async def get_user_from_renewal_token( self, renewal_token: str - ) -> Tuple[str, int, Optional[int]]: + ) -> tuple[str, int, Optional[int]]: """Get a user ID and renewal status from a renewal token. Args: @@ -592,7 +592,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): has not been renewed using the current token yet. """ return cast( - Tuple[str, int, Optional[int]], + tuple[str, int, Optional[int]], await self.db_pool.simple_select_one( table="account_validity", keyvalues={"renewal_token": renewal_token}, @@ -617,7 +617,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): desc="get_renewal_token_for_user", ) - async def get_users_expiring_soon(self) -> List[Tuple[str, int]]: + async def get_users_expiring_soon(self) -> list[tuple[str, int]]: """Selects users whose account will expire in the [now, now + renew_at] time window (see configuration for account_validity for information on what renew_at refers to). @@ -628,14 +628,14 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): def select_users_txn( txn: LoggingTransaction, now_ms: int, renew_at: int - ) -> List[Tuple[str, int]]: + ) -> list[tuple[str, int]]: sql = ( "SELECT user_id, expiration_ts_ms FROM account_validity" " WHERE email_sent = FALSE AND (expiration_ts_ms - ?) <= ?" ) values = [now_ms, renew_at] txn.execute(sql, values) - return cast(List[Tuple[str, int]], txn.fetchall()) + return cast(list[tuple[str, int]], txn.fetchall()) return await self.db_pool.runInteraction( "get_users_expiring_soon", @@ -858,17 +858,17 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): ) return True if res == UserTypes.SUPPORT else False - async def get_users_by_id_case_insensitive(self, user_id: str) -> Dict[str, str]: + async def get_users_by_id_case_insensitive(self, user_id: str) -> dict[str, str]: """Gets users that match user_id case insensitively. Returns: A mapping of user_id -> password_hash. """ - def f(txn: LoggingTransaction) -> Dict[str, str]: + def f(txn: LoggingTransaction) -> dict[str, str]: sql = "SELECT name, password_hash FROM users WHERE lower(name) = lower(?)" txn.execute(sql, (user_id,)) - result = cast(List[Tuple[str, str]], txn.fetchall()) + result = cast(list[tuple[str, str]], txn.fetchall()) return dict(result) return await self.db_pool.runInteraction("get_users_by_id_case_insensitive", f) @@ -978,7 +978,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): async def replace_user_external_id( self, - record_external_ids: List[Tuple[str, str]], + record_external_ids: list[tuple[str, str]], user_id: str, ) -> None: """Replace mappings from external user ids to a mxid in a single transaction. 
@@ -1045,7 +1045,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): desc="get_user_by_external_id", ) - async def get_external_ids_by_user(self, mxid: str) -> List[Tuple[str, str]]: + async def get_external_ids_by_user(self, mxid: str) -> list[tuple[str, str]]: """Look up external ids for the given user Args: @@ -1055,7 +1055,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): Tuples of (auth_provider, external_id) """ return cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="user_external_ids", keyvalues={"user_id": mxid}, @@ -1075,7 +1075,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): return await self.db_pool.runInteraction("count_users", _count_users) - async def count_daily_user_type(self) -> Dict[str, int]: + async def count_daily_user_type(self) -> dict[str, int]: """ Counts 1) native non guest users 2) native guests users @@ -1083,7 +1083,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): who registered on the homeserver in the past 24 hours """ - def _count_daily_user_type(txn: LoggingTransaction) -> Dict[str, int]: + def _count_daily_user_type(txn: LoggingTransaction) -> dict[str, int]: yesterday = int(self.clock.time()) - (60 * 60 * 24) sql = """ @@ -1116,7 +1116,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): WHERE appservice_id IS NULL """ ) - (count,) = cast(Tuple[int], txn.fetchone()) + (count,) = cast(tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction("count_users", _count_users) @@ -1196,9 +1196,9 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): {"user_id": user_id, "validated_at": validated_at, "added_at": added_at}, ) - async def user_get_threepids(self, user_id: str) -> List[ThreepidResult]: + async def user_get_threepids(self, user_id: str) -> list[ThreepidResult]: results = cast( - List[Tuple[str, str, int, int]], + list[tuple[str, str, int, int]], await self.db_pool.simple_select_list( "user_threepids", keyvalues={"user_id": user_id}, @@ -1253,7 +1253,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): desc="add_user_bound_threepid", ) - async def user_get_bound_threepids(self, user_id: str) -> List[Tuple[str, str]]: + async def user_get_bound_threepids(self, user_id: str) -> list[tuple[str, str]]: """Get the threepids that a user has bound to an identity server through the homeserver The homeserver remembers where binds to an identity server occurred. Using this method can retrieve those threepids. @@ -1267,7 +1267,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): address: The address of the threepid (e.g "bob@example.com") """ return cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="user_threepid_id_server", keyvalues={"user_id": user_id}, @@ -1302,7 +1302,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): async def get_id_servers_user_bound( self, user_id: str, medium: str, address: str - ) -> List[str]: + ) -> list[str]: """Get the list of identity servers that the server proxied bind requests to for given user and threepid @@ -1686,7 +1686,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): """ def _use_registration_token_txn(txn: LoggingTransaction) -> None: - # Normally, res is Optional[Dict[str, Any]]. + # Normally, res is Optional[dict[str, Any]]. 
# Override type because the return type is only optional if # allow_none is True, and we don't want mypy throwing errors # about None not being indexable. @@ -1716,7 +1716,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): async def get_registration_tokens( self, valid: Optional[bool] = None - ) -> List[Tuple[str, Optional[int], int, int, Optional[int]]]: + ) -> list[tuple[str, Optional[int], int, int, Optional[int]]]: """List all registration tokens. Used by the admin API. Args: @@ -1735,7 +1735,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): def select_registration_tokens_txn( txn: LoggingTransaction, now: int, valid: Optional[bool] - ) -> List[Tuple[str, Optional[int], int, int, Optional[int]]]: + ) -> list[tuple[str, Optional[int], int, int, Optional[int]]]: if valid is None: # Return all tokens regardless of validity txn.execute( @@ -1765,7 +1765,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): txn.execute(sql, [now]) return cast( - List[Tuple[str, Optional[int], int, int, Optional[int]]], txn.fetchall() + list[tuple[str, Optional[int], int, int, Optional[int]]], txn.fetchall() ) return await self.db_pool.runInteraction( @@ -1775,7 +1775,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): valid, ) - async def get_one_registration_token(self, token: str) -> Optional[Dict[str, Any]]: + async def get_one_registration_token(self, token: str) -> Optional[dict[str, Any]]: """Get info about the given registration token. Used by the admin API. Args: @@ -1892,8 +1892,8 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): ) async def update_registration_token( - self, token: str, updatevalues: Dict[str, Optional[int]] - ) -> Optional[Dict[str, Any]]: + self, token: str, updatevalues: dict[str, Optional[int]] + ) -> Optional[dict[str, Any]]: """Update a registration token. Used by the admin API. Args: @@ -1909,7 +1909,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): def _update_registration_token_txn( txn: LoggingTransaction, - ) -> Optional[Dict[str, Any]]: + ) -> Optional[dict[str, Any]]: try: self.db_pool.simple_update_one_txn( txn, @@ -2457,7 +2457,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): user_id: str, except_token_id: Optional[int] = None, device_id: Optional[str] = None, - ) -> List[Tuple[str, int, Optional[str]]]: + ) -> list[tuple[str, int, Optional[str]]]: """ Invalidate access and refresh tokens belonging to a user @@ -2471,14 +2471,14 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): A tuple of (token, token id, device id) for each of the deleted tokens """ - def f(txn: LoggingTransaction) -> List[Tuple[str, int, Optional[str]]]: + def f(txn: LoggingTransaction) -> list[tuple[str, int, Optional[str]]]: keyvalues = {"user_id": user_id} if device_id is not None: keyvalues["device_id"] = device_id items = keyvalues.items() where_clause = " AND ".join(k + " = ?" for k, _ in items) - values: List[Union[str, int]] = [v for _, v in items] + values: list[Union[str, int]] = [v for _, v in items] # Conveniently, refresh_tokens and access_tokens both use the user_id and device_id fields. Only caveat # is the `except_token_id` param that is tricky to get right, so for now we're just using the same where # clause and values before we handle that. This seems to be only used in the "set password" handler. 
@@ -2517,7 +2517,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): self, user_id: str, device_ids: StrCollection, - ) -> List[Tuple[str, int, Optional[str]]]: + ) -> list[tuple[str, int, Optional[str]]]: """ Invalidate access and refresh tokens belonging to a user @@ -2530,7 +2530,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): def user_delete_access_tokens_for_devices_txn( txn: LoggingTransaction, batch_device_ids: StrCollection - ) -> List[Tuple[str, int, Optional[str]]]: + ) -> list[tuple[str, int, Optional[str]]]: self.db_pool.simple_delete_many_txn( txn, table="refresh_tokens", @@ -2686,7 +2686,7 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): def _background_update_set_deactivated_flag_txn( txn: LoggingTransaction, - ) -> Tuple[bool, int]: + ) -> tuple[bool, int]: txn.execute( """ SELECT diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index ea746e0511..529102c245 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -22,15 +22,10 @@ import logging from typing import ( TYPE_CHECKING, Collection, - Dict, - FrozenSet, Iterable, - List, Mapping, Optional, Sequence, - Set, - Tuple, Union, cast, ) @@ -179,7 +174,7 @@ class RelationsWorkerStore(SQLBaseStore): from_token: Optional[StreamToken] = None, to_token: Optional[StreamToken] = None, recurse: bool = False, - ) -> Tuple[Sequence[_RelatedEvent], Optional[StreamToken]]: + ) -> tuple[Sequence[_RelatedEvent], Optional[StreamToken]]: """Get a list of relations for an event, ordered by topological ordering. Args: @@ -209,7 +204,7 @@ class RelationsWorkerStore(SQLBaseStore): assert limit >= 0 where_clause = ["room_id = ?"] - where_args: List[Union[str, int]] = [room_id] + where_args: list[Union[str, int]] = [room_id] is_redacted = event.internal_metadata.is_redacted() if relation_type is not None: @@ -281,14 +276,14 @@ class RelationsWorkerStore(SQLBaseStore): def _get_recent_references_for_event_txn( txn: LoggingTransaction, - ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]: + ) -> tuple[list[_RelatedEvent], Optional[StreamToken]]: txn.execute(sql, [event.event_id] + where_args + [limit + 1]) events = [] - topo_orderings: List[int] = [] - stream_orderings: List[int] = [] + topo_orderings: list[int] = [] + stream_orderings: list[int] = [] for event_id, relation_type, sender, topo_ordering, stream_ordering in cast( - List[Tuple[str, str, str, int, int]], txn + list[tuple[str, str, str, int, int]], txn ): # Do not include edits for redacted events as they leak event # content. @@ -329,8 +324,8 @@ class RelationsWorkerStore(SQLBaseStore): async def get_all_relations_for_event_with_types( self, event_id: str, - relation_types: List[str], - ) -> List[str]: + relation_types: list[str], + ) -> list[str]: """Get the event IDs of all events that have a relation to the given event with one of the given relation types. @@ -345,9 +340,9 @@ class RelationsWorkerStore(SQLBaseStore): def get_all_relation_ids_for_event_with_types_txn( txn: LoggingTransaction, - ) -> List[str]: + ) -> list[str]: rows = cast( - List[Tuple[str]], + list[tuple[str]], self.db_pool.simple_select_many_txn( txn=txn, table="event_relations", @@ -368,7 +363,7 @@ class RelationsWorkerStore(SQLBaseStore): async def get_all_relations_for_event( self, event_id: str, - ) -> List[str]: + ) -> list[str]: """Get the event IDs of all events that have a relation to the given event. 
Args: @@ -380,9 +375,9 @@ class RelationsWorkerStore(SQLBaseStore): def get_all_relation_ids_for_event_txn( txn: LoggingTransaction, - ) -> List[str]: + ) -> list[str]: rows = cast( - List[Tuple[str]], + list[tuple[str]], self.db_pool.simple_select_list_txn( txn=txn, table="event_relations", @@ -462,7 +457,7 @@ class RelationsWorkerStore(SQLBaseStore): return result is not None @cached() # type: ignore[synapse-@cached-mutable] - async def get_references_for_event(self, event_id: str) -> List[JsonDict]: + async def get_references_for_event(self, event_id: str) -> list[JsonDict]: raise NotImplementedError() @cachedList(cached_method_name="get_references_for_event", list_name="event_ids") @@ -498,12 +493,12 @@ class RelationsWorkerStore(SQLBaseStore): def _get_references_for_events_txn( txn: LoggingTransaction, - ) -> Mapping[str, List[_RelatedEvent]]: + ) -> Mapping[str, list[_RelatedEvent]]: txn.execute(sql, args) - result: Dict[str, List[_RelatedEvent]] = {} + result: dict[str, list[_RelatedEvent]] = {} for relates_to_id, event_id, sender in cast( - List[Tuple[str, str, str]], txn + list[tuple[str, str, str]], txn ): result.setdefault(relates_to_id, []).append( _RelatedEvent(event_id, sender) @@ -578,14 +573,14 @@ class RelationsWorkerStore(SQLBaseStore): ORDER by edit.origin_server_ts, edit.event_id """ - def _get_applicable_edits_txn(txn: LoggingTransaction) -> Dict[str, str]: + def _get_applicable_edits_txn(txn: LoggingTransaction) -> dict[str, str]: clause, args = make_in_list_sql_clause( txn.database_engine, "relates_to_id", event_ids ) args.append(RelationTypes.REPLACE) txn.execute(sql % (clause,), args) - return dict(cast(Iterable[Tuple[str, str]], txn.fetchall())) + return dict(cast(Iterable[tuple[str, str]], txn.fetchall())) edit_ids = await self.db_pool.runInteraction( "get_applicable_edits", _get_applicable_edits_txn @@ -603,14 +598,14 @@ class RelationsWorkerStore(SQLBaseStore): } @cached() # type: ignore[synapse-@cached-mutable] - def get_thread_summary(self, event_id: str) -> Optional[Tuple[int, EventBase]]: + def get_thread_summary(self, event_id: str) -> Optional[tuple[int, EventBase]]: raise NotImplementedError() # TODO: This returns a mutable object, which is generally bad. @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") # type: ignore[synapse-@cached-mutable] async def get_thread_summaries( self, event_ids: Collection[str] - ) -> Mapping[str, Optional[Tuple[int, EventBase]]]: + ) -> Mapping[str, Optional[tuple[int, EventBase]]]: """Get the number of threaded replies and the latest reply (if any) for the given events. Args: @@ -627,7 +622,7 @@ class RelationsWorkerStore(SQLBaseStore): def _get_thread_summaries_txn( txn: LoggingTransaction, - ) -> Tuple[Dict[str, int], Dict[str, str]]: + ) -> tuple[dict[str, int], dict[str, str]]: # Fetch the count of threaded events and the latest event ID. # TODO Should this only allow m.room.message events. 
if isinstance(self.database_engine, PostgresEngine): @@ -698,7 +693,7 @@ class RelationsWorkerStore(SQLBaseStore): args.append(RelationTypes.THREAD) txn.execute(sql % (clause,), args) - counts = dict(cast(List[Tuple[str, int]], txn.fetchall())) + counts = dict(cast(list[tuple[str, int]], txn.fetchall())) return counts, latest_event_ids @@ -726,8 +721,8 @@ class RelationsWorkerStore(SQLBaseStore): async def get_threaded_messages_per_user( self, event_ids: Collection[str], - users: FrozenSet[str] = frozenset(), - ) -> Dict[Tuple[str, str], int]: + users: frozenset[str] = frozenset(), + ) -> dict[tuple[str, str], int]: """Get the number of threaded replies for a set of users. This is used, in conjunction with get_thread_summaries, to calculate an @@ -759,7 +754,7 @@ class RelationsWorkerStore(SQLBaseStore): def _get_threaded_messages_per_user_txn( txn: LoggingTransaction, - ) -> Dict[Tuple[str, str], int]: + ) -> dict[tuple[str, str], int]: users_sql, users_args = make_in_list_sql_clause( self.database_engine, "child.sender", users ) @@ -799,7 +794,7 @@ class RelationsWorkerStore(SQLBaseStore): user participated in that event's thread, otherwise false. """ - def _get_threads_participated_txn(txn: LoggingTransaction) -> Set[str]: + def _get_threads_participated_txn(txn: LoggingTransaction) -> set[str]: # Fetch whether the requester has participated or not. sql = """ SELECT DISTINCT relates_to_id @@ -830,10 +825,10 @@ class RelationsWorkerStore(SQLBaseStore): async def events_have_relations( self, - parent_ids: List[str], - relation_senders: Optional[List[str]], - relation_types: Optional[List[str]], - ) -> List[str]: + parent_ids: list[str], + relation_senders: Optional[list[str]], + relation_types: Optional[list[str]], + ) -> list[str]: """Check which events have a relationship from the given senders of the given types. @@ -856,8 +851,8 @@ class RelationsWorkerStore(SQLBaseStore): %s; """ - def _get_if_events_have_relations(txn: LoggingTransaction) -> List[str]: - clauses: List[str] = [] + def _get_if_events_have_relations(txn: LoggingTransaction) -> list[str]: + clauses: list[str] = [] clause, args = make_in_list_sql_clause( txn.database_engine, "relates_to_id", parent_ids ) @@ -936,7 +931,7 @@ class RelationsWorkerStore(SQLBaseStore): room_id: str, limit: int = 5, from_token: Optional[ThreadsNextBatch] = None, - ) -> Tuple[Sequence[str], Optional[ThreadsNextBatch]]: + ) -> tuple[Sequence[str], Optional[ThreadsNextBatch]]: """Get a list of thread IDs, ordered by topological ordering of their latest reply. 
@@ -976,10 +971,10 @@ class RelationsWorkerStore(SQLBaseStore): def _get_threads_txn( txn: LoggingTransaction, - ) -> Tuple[List[str], Optional[ThreadsNextBatch]]: + ) -> tuple[list[str], Optional[ThreadsNextBatch]]: txn.execute(sql, (room_id, *pagination_args, limit + 1)) - rows = cast(List[Tuple[str, int, int]], txn.fetchall()) + rows = cast(list[tuple[str, int, int]], txn.fetchall()) thread_ids = [r[0] for r in rows] # If there are more events, generate the next pagination key from the diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 9f03c084a5..7a294de558 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -27,12 +27,8 @@ from typing import ( AbstractSet, Any, Collection, - Dict, - List, Mapping, Optional, - Set, - Tuple, Union, cast, ) @@ -139,7 +135,7 @@ class RoomSortOrder(Enum): @attr.s(slots=True, frozen=True, auto_attribs=True) class PartialStateResyncInfo: joined_via: Optional[str] - servers_in_room: Set[str] = attr.ib(factory=set) + servers_in_room: set[str] = attr.ib(factory=set) class RoomWorkerStore(CacheInvalidationWorkerStore): @@ -209,7 +205,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): logger.error("store_room with room_id=%s failed: %s", room_id, e) raise StoreError(500, "Problem creating room.") - async def get_room(self, room_id: str) -> Optional[Tuple[bool, bool]]: + async def get_room(self, room_id: str) -> Optional[tuple[bool, bool]]: """Retrieve a room. Args: @@ -222,7 +218,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): or None if the room is unknown. """ row = cast( - Optional[Tuple[Optional[Union[int, bool]], Optional[Union[int, bool]]]], + Optional[tuple[Optional[Union[int, bool]], Optional[Union[int, bool]]]], await self.db_pool.simple_select_one( table="rooms", keyvalues={"room_id": room_id}, @@ -287,7 +283,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): "get_room_with_stats", get_room_with_stats_txn, room_id ) - async def get_public_room_ids(self) -> List[str]: + async def get_public_room_ids(self) -> list[str]: return await self.db_pool.simple_select_onecol( table="rooms", keyvalues={"is_public": True}, @@ -296,8 +292,8 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): ) def _construct_room_type_where_clause( - self, room_types: Union[List[Union[str, None]], None] - ) -> Tuple[Union[str, None], list]: + self, room_types: Union[list[Union[str, None]], None] + ) -> tuple[Union[str, None], list]: if not room_types: return None, [] @@ -387,7 +383,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): """ txn.execute(sql, query_args) - return cast(Tuple[int], txn.fetchone())[0] + return cast(tuple[int], txn.fetchone())[0] return await self.db_pool.runInteraction( "count_public_rooms", _count_public_rooms_txn @@ -399,7 +395,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def f(txn: LoggingTransaction) -> int: sql = "SELECT count(*) FROM rooms" txn.execute(sql) - row = cast(Tuple[int], txn.fetchone()) + row = cast(tuple[int], txn.fetchone()) return row[0] return await self.db_pool.runInteraction("get_rooms", f) @@ -409,10 +405,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): network_tuple: Optional[ThirdPartyInstanceID], search_filter: Optional[dict], limit: Optional[int], - bounds: Optional[Tuple[int, str]], + bounds: Optional[tuple[int, str]], forwards: bool, ignore_non_federatable: bool = False, - ) -> List[LargestRoomStats]: + ) -> list[LargestRoomStats]: """Gets the largest public rooms (where largest 
is in terms of joined members, as tracked in the statistics table). @@ -433,7 +429,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): """ where_clauses = [] - query_args: List[Union[str, int]] = [] + query_args: list[Union[str, int]] = [] if network_tuple: if network_tuple.appservice_id: @@ -549,7 +545,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_largest_public_rooms_txn( txn: LoggingTransaction, - ) -> List[LargestRoomStats]: + ) -> list[LargestRoomStats]: txn.execute(sql, query_args) results = [ @@ -611,7 +607,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): search_term: Optional[str], public_rooms: Optional[bool], empty_rooms: Optional[bool], - ) -> Tuple[List[Dict[str, Any]], int]: + ) -> tuple[list[dict[str, Any]], int]: """Function to retrieve a paginated list of rooms as json. Args: @@ -760,7 +756,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_rooms_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[Dict[str, Any]], int]: + ) -> tuple[list[dict[str, Any]], int]: # Add the search term into the WHERE clause # and execute the data query txn.execute(info_sql, where_args + [limit, start]) @@ -795,7 +791,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): # Add the search term into the WHERE clause if present txn.execute(count_sql, where_args) - room_count = cast(Tuple[int], txn.fetchone()) + room_count = cast(tuple[int], txn.fetchone()) return rooms, room_count[0] return await self.db_pool.runInteraction( @@ -909,7 +905,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def get_retention_policy_for_room_txn( txn: LoggingTransaction, - ) -> Optional[Tuple[Optional[int], Optional[int]]]: + ) -> Optional[tuple[Optional[int], Optional[int]]]: txn.execute( """ SELECT min_lifetime, max_lifetime FROM room_retention @@ -919,7 +915,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): (room_id,), ) - return cast(Optional[Tuple[Optional[int], Optional[int]]], txn.fetchone()) + return cast(Optional[tuple[Optional[int], Optional[int]]], txn.fetchone()) ret = await self.db_pool.runInteraction( "get_retention_policy_for_room", @@ -951,7 +947,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): max_lifetime=max_lifetime, ) - async def get_media_mxcs_in_room(self, room_id: str) -> Tuple[List[str], List[str]]: + async def get_media_mxcs_in_room(self, room_id: str) -> tuple[list[str], list[str]]: """Retrieves all the local and remote media MXC URIs in a given room Args: @@ -963,7 +959,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_media_mxcs_in_room_txn( txn: LoggingTransaction, - ) -> Tuple[List[str], List[str]]: + ) -> tuple[list[str], list[str]]: local_mxcs, remote_mxcs = self._get_media_mxcs_in_room_txn(txn, room_id) local_media_mxcs = [] remote_media_mxcs = [] @@ -1001,7 +997,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_media_mxcs_in_room_txn( self, txn: LoggingTransaction, room_id: str - ) -> Tuple[List[str], List[Tuple[str, str]]]: + ) -> tuple[list[str], list[tuple[str, str]]]: """Retrieves all the local and remote media MXC URIs in a given room Returns: @@ -1107,7 +1103,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_media_ids_by_user_txn( self, txn: LoggingTransaction, user_id: str, filter_quarantined: bool = True - ) -> List[str]: + ) -> list[str]: """Retrieves local media IDs by a given user Args: @@ -1137,8 +1133,8 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _quarantine_local_media_txn( self, txn: LoggingTransaction, - hashes: 
Set[str], - media_ids: Set[str], + hashes: set[str], + media_ids: set[str], quarantined_by: Optional[str], ) -> int: """Quarantine and unquarantine local media items. @@ -1192,8 +1188,8 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _quarantine_remote_media_txn( self, txn: LoggingTransaction, - hashes: Set[str], - media: Set[Tuple[str, str]], + hashes: set[str], + media: set[tuple[str, str]], quarantined_by: Optional[str], ) -> int: """Quarantine and unquarantine remote items @@ -1240,8 +1236,8 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _quarantine_media_txn( self, txn: LoggingTransaction, - local_mxcs: List[str], - remote_mxcs: List[Tuple[str, str]], + local_mxcs: list[str], + remote_mxcs: list[tuple[str, str]], quarantined_by: Optional[str], ) -> int: """Quarantine and unquarantine local and remote media items @@ -1346,7 +1342,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): async def get_rooms_for_retention_period_in_range( self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False - ) -> Dict[str, RetentionPolicy]: + ) -> dict[str, RetentionPolicy]: """Retrieves all of the rooms within the given retention range. Optionally includes the rooms which don't have a retention policy. @@ -1368,7 +1364,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def get_rooms_for_retention_period_in_range_txn( txn: LoggingTransaction, - ) -> Dict[str, RetentionPolicy]: + ) -> dict[str, RetentionPolicy]: range_conditions = [] args = [] @@ -1464,10 +1460,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): A dictionary of rooms with partial state, with room IDs as keys and lists of servers in rooms as values. """ - room_servers: Dict[str, PartialStateResyncInfo] = {} + room_servers: dict[str, PartialStateResyncInfo] = {} rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="partial_state_rooms", keyvalues={}, @@ -1480,7 +1476,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): room_servers[room_id] = PartialStateResyncInfo(joined_via=joined_via) rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( "partial_state_rooms_servers", keyvalues=None, @@ -1533,7 +1529,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): """ rows = cast( - List[Tuple[str]], + list[tuple[str]], await self.db_pool.simple_select_many_batch( table="partial_state_rooms", column="room_id", @@ -1571,7 +1567,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): async def get_join_event_id_and_device_lists_stream_id_for_partial_state( self, room_id: str - ) -> Tuple[str, int]: + ) -> tuple[str, int]: """Get the event ID of the initial join that started the partial join, and the device list stream ID at the point we started the partial join. @@ -1583,7 +1579,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): """ return cast( - Tuple[str, int], + tuple[str, int], await self.db_pool.simple_select_one( table="partial_state_rooms", keyvalues={"room_id": room_id}, @@ -1602,7 +1598,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): async def get_un_partial_stated_rooms_between( self, last_id: int, current_id: int, room_ids: Collection[str] - ) -> Set[str]: + ) -> set[str]: """Get all rooms that got un partial stated between `last_id` exclusive and `current_id` inclusive. 
@@ -1615,7 +1611,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_un_partial_stated_rooms_between_txn( txn: LoggingTransaction, - ) -> Set[str]: + ) -> set[str]: sql = """ SELECT DISTINCT room_id FROM un_partial_stated_room_stream WHERE ? < stream_id AND stream_id <= ? AND @@ -1636,7 +1632,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): async def get_un_partial_stated_rooms_from_stream( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]: + ) -> tuple[list[tuple[int, tuple[str]]], int, bool]: """Get updates for un partial stated rooms replication stream. Args: @@ -1663,7 +1659,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def get_un_partial_stated_rooms_from_stream_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]: + ) -> tuple[list[tuple[int, tuple[str]]], int, bool]: sql = """ SELECT stream_id, room_id FROM un_partial_stated_room_stream @@ -1686,7 +1682,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): get_un_partial_stated_rooms_from_stream_txn, ) - async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]: + async def get_event_report(self, report_id: int) -> Optional[dict[str, Any]]: """Retrieve an event report Args: @@ -1698,7 +1694,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_event_report_txn( txn: LoggingTransaction, report_id: int - ) -> Optional[Dict[str, Any]]: + ) -> Optional[dict[str, Any]]: sql = """ SELECT er.id, @@ -1755,7 +1751,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): user_id: Optional[str] = None, room_id: Optional[str] = None, event_sender_user_id: Optional[str] = None, - ) -> Tuple[List[Dict[str, Any]], int]: + ) -> tuple[list[dict[str, Any]], int]: """Retrieve a paginated list of event reports Args: @@ -1775,9 +1771,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_event_reports_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[Dict[str, Any]], int]: + ) -> tuple[list[dict[str, Any]], int]: filters = [] - args: List[object] = [] + args: list[object] = [] if user_id: filters.append("er.user_id LIKE ?") @@ -1810,7 +1806,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): {} """.format(where_clause) txn.execute(sql, args) - count = cast(Tuple[int], txn.fetchone())[0] + count = cast(tuple[int], txn.fetchone())[0] sql = """ SELECT @@ -2214,7 +2210,7 @@ class RoomBackgroundUpdateStore(RoomWorkerStore): last_room = progress.get("room_id", "") - def _get_rooms(txn: LoggingTransaction) -> List[str]: + def _get_rooms(txn: LoggingTransaction) -> list[str]: txn.execute( """ SELECT room_id @@ -2460,7 +2456,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): self._instance_name = hs.get_instance_name() async def upsert_room_on_join( - self, room_id: str, room_version: RoomVersion, state_events: List[EventBase] + self, room_id: str, room_version: RoomVersion, state_events: list[EventBase] ) -> None: """Ensure that the room is stored in the table diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 65caf4b1ea..1e22ab4e6d 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -24,15 +24,10 @@ from typing import ( TYPE_CHECKING, AbstractSet, Collection, - Dict, - FrozenSet, Iterable, - List, Mapping, Optional, Sequence, - Set, - Tuple, Union, cast, ) @@ -187,7 +182,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, 
CacheInvalidationWorkerStore): desc="get_users_in_room", ) - def get_users_in_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[str]: + def get_users_in_room_txn(self, txn: LoggingTransaction, room_id: str) -> list[str]: """Returns a list of users in the room.""" return self.db_pool.simple_select_onecol_txn( @@ -242,7 +237,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def _get_subset_users_in_room_with_profiles( txn: LoggingTransaction, - ) -> Dict[str, ProfileInfo]: + ) -> dict[str, ProfileInfo]: clause, ids = make_in_list_sql_clause( self.database_engine, "c.state_key", user_ids ) @@ -287,7 +282,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def _get_users_in_room_with_profiles( txn: LoggingTransaction, - ) -> Dict[str, ProfileInfo]: + ) -> dict[str, ProfileInfo]: sql = """ SELECT state_key, display_name, avatar_url FROM room_memberships as m INNER JOIN current_state_events as c @@ -328,14 +323,14 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def _get_room_summary_txn( txn: LoggingTransaction, - ) -> Dict[str, MemberSummary]: + ) -> dict[str, MemberSummary]: # first get counts. # We do this all in one transaction to keep the cache small. # FIXME: get rid of this when we have room_stats counts = self._get_member_counts_txn(txn, room_id) - res: Dict[str, MemberSummary] = {} + res: dict[str, MemberSummary] = {} for membership, count in counts.items(): res.setdefault(membership, MemberSummary([], count)) @@ -392,7 +387,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def _get_member_counts_txn( self, txn: LoggingTransaction, room_id: str - ) -> Dict[str, int]: + ) -> dict[str, int]: """Get a mapping of number of users by membership""" # Note, rejected events will have a null membership field, so @@ -473,7 +468,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): user_id: str, membership_list: Collection[str], excluded_rooms: StrCollection = (), - ) -> List[RoomsForUser]: + ) -> list[RoomsForUser]: """Get all the rooms for this *local* user where the membership for this user matches one in the membership list. @@ -536,8 +531,8 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): self, txn: LoggingTransaction, user_id: str, - membership_list: List[str], - ) -> List[RoomsForUser]: + membership_list: list[str], + ) -> list[RoomsForUser]: """Get all the rooms for this *local* user where the membership for this user matches one in the membership list. @@ -603,12 +598,12 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_local_users_related_to_room( self, room_id: str - ) -> List[Tuple[str, str]]: + ) -> list[tuple[str, str]]: """ Retrieves a list of the current roommembers who are local to the server and their membership status. """ return cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="local_current_membership", keyvalues={"room_id": room_id}, @@ -660,7 +655,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_local_current_membership_for_user_in_room( self, user_id: str, room_id: str - ) -> Tuple[Optional[str], Optional[str]]: + ) -> tuple[Optional[str], Optional[str]]: """Retrieve the current local membership state and event ID for a user in a room. 
Args: @@ -677,7 +672,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.BAD_JSON) results = cast( - Optional[Tuple[str, str]], + Optional[tuple[str, str]], await self.db_pool.simple_select_one( "local_current_membership", {"room_id": room_id, "user_id": user_id}, @@ -693,7 +688,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_users_server_still_shares_room_with( self, user_ids: Collection[str] - ) -> Set[str]: + ) -> set[str]: """Given a list of users return the set that the server still share a room with. """ @@ -711,7 +706,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): self, txn: LoggingTransaction, user_ids: Collection[str], - ) -> Set[str]: + ) -> set[str]: if not user_ids: return set() @@ -734,7 +729,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_rooms_user_currently_banned_from( self, user_id: str - ) -> FrozenSet[str]: + ) -> frozenset[str]: """Returns a set of room_ids the user is currently banned from. If a remote user only returns rooms this server is currently @@ -754,7 +749,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): return frozenset(room_ids) @cached(max_entries=500000, iterable=True) - async def get_rooms_for_user(self, user_id: str) -> FrozenSet[str]: + async def get_rooms_for_user(self, user_id: str) -> frozenset[str]: """Returns a set of room_ids the user is currently joined to. If a remote user only returns rooms this server is currently @@ -780,7 +775,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) async def _get_rooms_for_users( self, user_ids: Collection[str] - ) -> Mapping[str, FrozenSet[str]]: + ) -> Mapping[str, frozenset[str]]: """A batched version of `get_rooms_for_user`. Returns: @@ -788,7 +783,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_many_batch( table="current_state_events", column="state_key", @@ -805,7 +800,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ), ) - user_rooms: Dict[str, Set[str]] = {user_id: set() for user_id in user_ids} + user_rooms: dict[str, set[str]] = {user_id: set() for user_id in user_ids} for state_key, room_id in rows: user_rooms[state_key].add(room_id) @@ -814,11 +809,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_rooms_for_users( self, user_ids: Collection[str] - ) -> Dict[str, FrozenSet[str]]: + ) -> dict[str, frozenset[str]]: """A batched wrapper around `_get_rooms_for_users`, to prevent locking other calls to `get_rooms_for_user` for large user lists. """ - all_user_rooms: Dict[str, FrozenSet[str]] = {} + all_user_rooms: dict[str, frozenset[str]] = {} # 250 users is pretty arbitrary but the data can be quite large if users # are in many rooms. 
@@ -848,7 +843,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def do_users_share_a_room_txn( txn: LoggingTransaction, user_ids: Collection[str] - ) -> Dict[str, bool]: + ) -> dict[str, bool]: clause, args = make_in_list_sql_clause( self.database_engine, "state_key", user_ids ) @@ -882,7 +877,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def do_users_share_a_room( self, user_id: str, other_user_ids: Collection[str] - ) -> Set[str]: + ) -> set[str]: """Return the set of users who share a room with the first users""" user_dict = await self._do_users_share_a_room(user_id, other_user_ids) @@ -911,7 +906,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def do_users_share_a_room_joined_or_invited_txn( txn: LoggingTransaction, user_ids: Collection[str] - ) -> Dict[str, bool]: + ) -> dict[str, bool]: clause, args = make_in_list_sql_clause( self.database_engine, "state_key", user_ids ) @@ -947,7 +942,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def do_users_share_a_room_joined_or_invited( self, user_id: str, other_user_ids: Collection[str] - ) -> Set[str]: + ) -> set[str]: """Return the set of users who share a room with the first users via being either joined or invited""" user_dict = await self._do_users_share_a_room_joined_or_invited( @@ -956,11 +951,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): return {u for u, share_room in user_dict.items() if share_room} - async def get_users_who_share_room_with_user(self, user_id: str) -> Set[str]: + async def get_users_who_share_room_with_user(self, user_id: str) -> set[str]: """Returns the set of users who share a room with `user_id`""" room_ids = await self.get_rooms_for_user(user_id) - user_who_share_room: Set[str] = set() + user_who_share_room: set[str] = set() for room_id in room_ids: user_ids = await self.get_users_in_room(room_id) user_who_share_room.update(user_ids) @@ -969,8 +964,8 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): @cached(cache_context=True, iterable=True) async def get_mutual_rooms_between_users( - self, user_ids: FrozenSet[str], cache_context: _CacheContext - ) -> FrozenSet[str]: + self, user_ids: frozenset[str], cache_context: _CacheContext + ) -> frozenset[str]: """ Returns the set of rooms that all users in `user_ids` share. @@ -979,7 +974,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): overlapping joined rooms for. cache_context """ - shared_room_ids: Optional[FrozenSet[str]] = None + shared_room_ids: Optional[frozenset[str]] = None for user_id in user_ids: room_ids = await self.get_rooms_for_user( user_id, on_invalidate=cache_context.invalidate @@ -993,7 +988,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_joined_user_ids_from_state( self, room_id: str, state: StateMap[str] - ) -> Set[str]: + ) -> set[str]: """ For a given set of state IDs, get a set of user IDs in the room. 
@@ -1050,7 +1045,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) def _get_user_id_from_membership_event_id( self, event_id: str - ) -> Optional[Tuple[str, ProfileInfo]]: + ) -> Optional[tuple[str, ProfileInfo]]: raise NotImplementedError() @cachedList( @@ -1071,7 +1066,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_many_batch( table="room_memberships", column="event_id", @@ -1148,7 +1143,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): # For PostgreSQL we can use a regex to pull out the domains from the # joined users in `current_state_events` via regex. - def get_current_hosts_in_room_txn(txn: LoggingTransaction) -> Set[str]: + def get_current_hosts_in_room_txn(txn: LoggingTransaction) -> set[str]: sql = """ SELECT DISTINCT substring(state_key FROM '@[^:]*:(.*)$') FROM current_state_events @@ -1165,7 +1160,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) @cached(iterable=True, max_entries=10000) - async def get_current_hosts_in_room_ordered(self, room_id: str) -> Tuple[str, ...]: + async def get_current_hosts_in_room_ordered(self, room_id: str) -> tuple[str, ...]: """ Get current hosts in room based on current state. @@ -1201,7 +1196,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def get_current_hosts_in_room_ordered_txn( txn: LoggingTransaction, - ) -> Tuple[str, ...]: + ) -> tuple[str, ...]: # Returns a list of servers currently joined in the room sorted by # longest in the room first (aka. with the lowest depth). The # heuristic of sorting by servers who have been in the room the @@ -1245,7 +1240,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ rows = cast( - List[Tuple[str, Optional[str]]], + list[tuple[str, Optional[str]]], await self.db_pool.simple_select_list( "current_state_events", keyvalues={"room_id": room_id}, @@ -1297,7 +1292,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): The forgotten rooms. """ - def _get_forgotten_rooms_for_user_txn(txn: LoggingTransaction) -> Set[str]: + def _get_forgotten_rooms_for_user_txn(txn: LoggingTransaction) -> set[str]: # This is a slightly convoluted query that first looks up all rooms # that the user has forgotten in the past, then rechecks that list # to see if any have subsequently been updated. This is done so that @@ -1348,7 +1343,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): # If any rows still exist it means someone has not forgotten this room yet return not rows[0][0] - async def get_rooms_user_has_been_in(self, user_id: str) -> Set[str]: + async def get_rooms_user_has_been_in(self, user_id: str) -> set[str]: """Get all rooms that the user has ever been in. Args: @@ -1369,7 +1364,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_membership_event_ids_for_user( self, user_id: str, room_id: str - ) -> Set[str]: + ) -> set[str]: """Get all event_ids for the given user and room. 
Args: @@ -1409,7 +1404,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ rows = cast( - List[Tuple[str, str, str]], + list[tuple[str, str, str]], await self.db_pool.simple_select_many_batch( table="room_memberships", column="event_id", @@ -1533,7 +1528,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def _txn( txn: LoggingTransaction, - ) -> Dict[str, RoomsForUserSlidingSync]: + ) -> dict[str, RoomsForUserSlidingSync]: # XXX: If you use any new columns that can change (like from # `sliding_sync_joined_rooms` or `forgotten`), make sure to bust the # `get_sliding_sync_rooms_for_user_from_membership_snapshots` cache in the @@ -1582,7 +1577,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): self, user_id: str, to_token: StreamToken, - ) -> Dict[str, RoomsForUserSlidingSync]: + ) -> dict[str, RoomsForUserSlidingSync]: """ Get all the self-leave rooms for a user after the `to_token` (outside the token range) that are potentially relevant[1] and needed to handle a sliding sync @@ -1614,7 +1609,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def _txn( txn: LoggingTransaction, - ) -> Dict[str, RoomsForUserSlidingSync]: + ) -> dict[str, RoomsForUserSlidingSync]: sql = """ SELECT m.room_id, m.sender, m.membership, m.membership_event_id, r.room_version, @@ -1641,7 +1636,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): txn.execute(sql, (user_id, min_to_token_position)) # Map from room_id to membership info - room_membership_for_user_map: Dict[str, RoomsForUserSlidingSync] = {} + room_membership_for_user_map: dict[str, RoomsForUserSlidingSync] = {} for row in txn: room_for_user = RoomsForUserSlidingSync( room_id=row[0], @@ -1728,7 +1723,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_sliding_sync_room_for_user_batch( self, user_id: str, room_ids: StrCollection - ) -> Dict[str, RoomsForUserSlidingSync]: + ) -> dict[str, RoomsForUserSlidingSync]: """Get the sliding sync room entry for the given user and rooms.""" if not room_ids: @@ -1736,7 +1731,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def get_sliding_sync_room_for_user_batch_txn( txn: LoggingTransaction, - ) -> Dict[str, RoomsForUserSlidingSync]: + ) -> dict[str, RoomsForUserSlidingSync]: clause, args = make_in_list_sql_clause( self.database_engine, "m.room_id", room_ids ) @@ -1779,7 +1774,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_rooms_for_user_by_date( self, user_id: str, from_ts: int - ) -> FrozenSet[str]: + ) -> frozenset[str]: """ Fetch a list of rooms that the user has joined at or after the given timestamp, including those they subsequently have left/been banned from. @@ -1993,7 +1988,7 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore): def _background_current_state_membership_txn( txn: LoggingTransaction, last_processed_room: str - ) -> Tuple[int, bool]: + ) -> tuple[int, bool]: processed = 0 while processed < batch_size: txn.execute( @@ -2063,7 +2058,7 @@ class RoomMemberStore( def extract_heroes_from_room_summary( details: Mapping[str, MemberSummary], me: str -) -> List[str]: +) -> list[str]: """Determine the users that represent a room, from the perspective of the `me` user. 
This function expects `MemberSummary.members` to already be sorted by @@ -2105,7 +2100,7 @@ class _JoinedHostsCache: """The cached data used by the `_get_joined_hosts_cache`.""" # Dict of host to the set of their users in the room at the state group. - hosts_to_joined_users: Dict[str, Set[str]] = attr.Factory(dict) + hosts_to_joined_users: dict[str, set[str]] = attr.Factory(dict) # The state group `hosts_to_joined_users` is derived from. Will be an object # if the instance is newly created or if the state is not based on a state diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 47dfdf64e5..63489f5c27 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -28,10 +28,7 @@ from typing import ( Any, Collection, Iterable, - List, Optional, - Set, - Tuple, Union, cast, ) @@ -362,7 +359,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): pg, ) - def reindex_search_txn(txn: LoggingTransaction) -> Tuple[int, bool]: + def reindex_search_txn(txn: LoggingTransaction) -> tuple[int, bool]: sql = """ UPDATE event_search AS es SET stream_ordering = e.stream_ordering, origin_server_ts = e.origin_server_ts @@ -451,7 +448,7 @@ class SearchStore(SearchBackgroundUpdateStore): """ clauses = [] - args: List[Any] = [] + args: list[Any] = [] # Make sure we don't explode because the person is in too many rooms. # We filter the results below regardless. @@ -471,7 +468,7 @@ class SearchStore(SearchBackgroundUpdateStore): count_args = args count_clauses = clauses - sqlite_highlights: List[str] = [] + sqlite_highlights: list[str] = [] if isinstance(self.database_engine, PostgresEngine): search_query = search_term @@ -519,7 +516,7 @@ class SearchStore(SearchBackgroundUpdateStore): # List of tuples of (rank, room_id, event_id). results = cast( - List[Tuple[Union[int, float], str, str]], + list[tuple[Union[int, float], str, str]], await self.db_pool.execute("search_msgs", sql, *args), ) @@ -544,7 +541,7 @@ class SearchStore(SearchBackgroundUpdateStore): # List of tuples of (room_id, count). count_results = cast( - List[Tuple[str, int]], + list[tuple[str, int]], await self.db_pool.execute("search_rooms_count", count_sql, *count_args), ) @@ -580,7 +577,7 @@ class SearchStore(SearchBackgroundUpdateStore): Each match as a dictionary. """ clauses = [] - args: List[Any] = [] + args: list[Any] = [] # Make sure we don't explode because the person is in too many rooms. # We filter the results below regardless. @@ -602,7 +599,7 @@ class SearchStore(SearchBackgroundUpdateStore): count_args = list(args) count_clauses = list(clauses) - sqlite_highlights: List[str] = [] + sqlite_highlights: list[str] = [] if pagination_token: try: @@ -686,7 +683,7 @@ class SearchStore(SearchBackgroundUpdateStore): # List of tuples of (rank, room_id, event_id, origin_server_ts, stream_ordering). results = cast( - List[Tuple[Union[int, float], str, str, int, int]], + list[tuple[Union[int, float], str, str, int, int]], await self.db_pool.execute("search_rooms", sql, *args), ) @@ -711,7 +708,7 @@ class SearchStore(SearchBackgroundUpdateStore): # List of tuples of (room_id, count). 
count_results = cast( - List[Tuple[str, int]], + list[tuple[str, int]], await self.db_pool.execute("search_rooms_count", count_sql, *count_args), ) @@ -732,8 +729,8 @@ class SearchStore(SearchBackgroundUpdateStore): } async def _find_highlights_in_postgres( - self, search_query: str, events: List[EventBase] - ) -> Set[str]: + self, search_query: str, events: list[EventBase] + ) -> set[str]: """Given a list of events and a search term, return a list of words that match from the content of the event. @@ -748,7 +745,7 @@ class SearchStore(SearchBackgroundUpdateStore): A set of strings. """ - def f(txn: LoggingTransaction) -> Set[str]: + def f(txn: LoggingTransaction) -> set[str]: highlight_words = set() for event in events: # As a hack we simply join values of all possible keys. This is @@ -811,7 +808,7 @@ def _to_postgres_options(options_dict: JsonDict) -> str: @dataclass class Phrase: - phrase: List[str] + phrase: list[str] class SearchToken(enum.Enum): @@ -821,7 +818,7 @@ class SearchToken(enum.Enum): Token = Union[str, Phrase, SearchToken] -TokenList = List[Token] +TokenList = list[Token] def _is_stop_word(word: str) -> bool: @@ -901,7 +898,7 @@ def _tokenize_query(query: str) -> TokenList: return tokens -def _tokens_to_sqlite_match_query(tokens: TokenList) -> Tuple[str, List[str]]: +def _tokens_to_sqlite_match_query(tokens: TokenList) -> tuple[str, list[str]]: """ Convert the list of tokens to a string suitable for passing to sqlite's MATCH. Assume sqlite was compiled with enhanced query syntax. @@ -934,7 +931,7 @@ def _tokens_to_sqlite_match_query(tokens: TokenList) -> Tuple[str, List[str]]: return "".join(match_query), highlights -def _parse_query_for_sqlite(search_term: str) -> Tuple[str, List[str]]: +def _parse_query_for_sqlite(search_term: str) -> tuple[str, list[str]]: """Takes a plain unicode string from the user and converts it into a form that can be passed to sqllite's matchinfo(). 
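(Editorial note: every hunk above and below applies the same mechanical change, so a brief illustration is added here, between the search.py and signatures.py diffs. This is a minimal sketch of the pattern, assuming Python 3.9+ where PEP 585 allows the builtin container types to be used as generics; the function names below are illustrative only and are not lifted verbatim from the patch.)

# Sketch of the typing modernisation applied throughout this commit:
# the typing.List/Dict/Tuple/Set/FrozenSet aliases are replaced by the
# builtin generics, while Optional/Any/etc. still come from typing.

from typing import Any, Optional

# Before (typing aliases):
#     from typing import Dict, List, Optional, Tuple
#     def tokens(valid: Optional[bool]) -> List[Tuple[str, Optional[int]]]: ...
#     def token_info(token: str) -> Optional[Dict[str, Any]]: ...

# After (builtin generics, PEP 585):
def tokens(valid: Optional[bool]) -> list[tuple[str, Optional[int]]]:
    """Return (token, expiry) pairs; only the container annotations changed."""
    return []

def token_info(token: str) -> Optional[dict[str, Any]]:
    """Optional is still imported from typing; dict is the builtin generic."""
    return None

(End of editorial note; the patch resumes below.)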
diff --git a/synapse/storage/databases/main/signatures.py b/synapse/storage/databases/main/signatures.py index ef86151e31..8072a8c741 100644 --- a/synapse/storage/databases/main/signatures.py +++ b/synapse/storage/databases/main/signatures.py @@ -19,7 +19,7 @@ # # -from typing import Collection, Dict, List, Mapping, Tuple +from typing import Collection, Mapping from unpaddedbase64 import encode_base64 @@ -59,7 +59,7 @@ class SignatureWorkerStore(EventsWorkerStore): allow_rejected=True, ) - hashes: Dict[str, Dict[str, bytes]] = {} + hashes: dict[str, dict[str, bytes]] = {} for event_id in event_ids: event = events.get(event_id) if event is None: @@ -72,7 +72,7 @@ class SignatureWorkerStore(EventsWorkerStore): async def add_event_hashes( self, event_ids: Collection[str] - ) -> List[Tuple[str, Dict[str, str]]]: + ) -> list[tuple[str, dict[str, str]]]: """ Args: diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index c0c5087b13..62463c0259 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -14,7 +14,7 @@ import logging -from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, cast +from typing import TYPE_CHECKING, Mapping, Optional, cast import attr @@ -222,7 +222,7 @@ class SlidingSyncStore(SQLBaseStore): # with the updates to `required_state` # Dict from required state json -> required state ID - required_state_to_id: Dict[str, int] = {} + required_state_to_id: dict[str, int] = {} if previous_connection_position is not None: rows = self.db_pool.simple_select_list_txn( txn, @@ -233,8 +233,8 @@ class SlidingSyncStore(SQLBaseStore): for required_state_id, required_state in rows: required_state_to_id[required_state] = required_state_id - room_to_state_ids: Dict[str, int] = {} - unique_required_state: Dict[str, List[str]] = {} + room_to_state_ids: dict[str, int] = {} + unique_required_state: dict[str, list[str]] = {} for room_id, room_state in per_connection_state.room_configs.items(): serialized_state = json_encoder.encode( # We store the required state as a sorted list of event type / @@ -418,7 +418,7 @@ class SlidingSyncStore(SQLBaseStore): ), ) - required_state_map: Dict[int, Dict[str, Set[str]]] = {} + required_state_map: dict[int, dict[str, set[str]]] = {} for row in rows: state = required_state_map[row[0]] = {} for event_type, state_key in db_to_json(row[1]): @@ -437,7 +437,7 @@ class SlidingSyncStore(SQLBaseStore): ), ) - room_configs: Dict[str, RoomSyncConfig] = {} + room_configs: dict[str, RoomSyncConfig] = {} for ( room_id, timeline_limit, @@ -449,9 +449,9 @@ class SlidingSyncStore(SQLBaseStore): ) # Now look up the per-room stream data. 
- rooms: Dict[str, HaveSentRoom[str]] = {} - receipts: Dict[str, HaveSentRoom[str]] = {} - account_data: Dict[str, HaveSentRoom[str]] = {} + rooms: dict[str, HaveSentRoom[str]] = {} + receipts: dict[str, HaveSentRoom[str]] = {} + account_data: dict[str, HaveSentRoom[str]] = {} receipt_rows = self.db_pool.simple_select_list_txn( txn, diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index cfcc731f86..c2c1b62d7e 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -25,15 +25,10 @@ from typing import ( TYPE_CHECKING, Any, Collection, - Dict, - FrozenSet, Iterable, - List, Mapping, MutableMapping, Optional, - Set, - Tuple, TypeVar, Union, cast, @@ -199,7 +194,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): @trace async def get_metadata_for_events( self, event_ids: Collection[str] - ) -> Dict[str, EventMetadata]: + ) -> dict[str, EventMetadata]: """Get some metadata (room_id, type, state_key) for the given events. This method is a faster alternative than fetching the full events from @@ -212,7 +207,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def get_metadata_for_events_txn( txn: LoggingTransaction, batch_ids: Collection[str], - ) -> Dict[str, EventMetadata]: + ) -> dict[str, EventMetadata]: clause, args = make_in_list_sql_clause( self.database_engine, "e.event_id", batch_ids ) @@ -236,7 +231,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): for event_id, room_id, event_type, state_key, rejection_reason in txn } - result_map: Dict[str, EventMetadata] = {} + result_map: dict[str, EventMetadata] = {} for batch_ids in batch_iter(event_ids, 1000): result_map.update( await self.db_pool.runInteraction( @@ -329,7 +324,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): @cachedList(cached_method_name="get_room_type", list_name="room_ids") async def bulk_get_room_type( - self, room_ids: Set[str] + self, room_ids: set[str] ) -> Mapping[str, Union[Optional[str], Sentinel]]: """ Bulk fetch room types for the given rooms (via current state). @@ -408,7 +403,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): @cachedList(cached_method_name="get_room_encryption", list_name="room_ids") async def bulk_get_room_encryption( - self, room_ids: Set[str] + self, room_ids: set[str] ) -> Mapping[str, Union[Optional[str], Sentinel]]: """ Bulk fetch room encryption for the given rooms (via current state). @@ -469,7 +464,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # If we haven't updated `room_stats_state` with the room yet, query the state # directly. This should happen only rarely so we don't mind if we do this in a # loop. 
- encryption_event_ids: List[str] = [] + encryption_event_ids: list[str] = [] for room_id in room_ids - results.keys(): state_map = await self.get_partial_filtered_current_state_ids( room_id, @@ -541,7 +536,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): async def check_if_events_in_current_state( self, event_ids: StrCollection - ) -> FrozenSet[str]: + ) -> frozenset[str]: """Checks and returns which of the given events is part of the current state.""" rows = await self.db_pool.simple_select_many_batch( table="current_state_events", @@ -632,7 +627,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): RuntimeError if the state is unknown at any of the given events """ rows = cast( - List[Tuple[str, int]], + list[tuple[str, int]], await self.db_pool.simple_select_many_batch( table="event_to_state_groups", column="event_id", @@ -651,7 +646,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): async def get_referenced_state_groups( self, state_groups: Iterable[int] - ) -> Set[int]: + ) -> set[int]: """Check if the state groups are referenced by events. Args: @@ -662,7 +657,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """ rows = cast( - List[Tuple[int]], + list[tuple[int]], await self.db_pool.simple_select_many_batch( table="event_to_state_groups", column="state_group", @@ -803,7 +798,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): def _background_remove_left_rooms_txn( txn: LoggingTransaction, - ) -> Tuple[bool, Set[str]]: + ) -> tuple[bool, set[str]]: # get a batch of room ids to consider sql = """ SELECT DISTINCT room_id FROM current_state_events @@ -884,7 +879,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): # server didn't share a room with the remote user and therefore may # have missed any device updates. rows = cast( - List[Tuple[str]], + list[tuple[str]], self.db_pool.simple_select_many_txn( txn, table="current_state_events", @@ -975,7 +970,7 @@ class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore): @attr.s(auto_attribs=True, slots=True) -class StateMapWrapper(Dict[StateKey, str]): +class StateMapWrapper(dict[StateKey, str]): """A wrapper around a StateMap[str] to ensure that we only query for items that were not filtered out. diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index 303b232d7b..3df5c8b6f4 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, Optional import attr @@ -79,7 +79,7 @@ class StateDeltasStore(SQLBaseStore): async def get_partial_current_state_deltas( self, prev_stream_id: int, max_stream_id: int - ) -> Tuple[int, List[StateDelta]]: + ) -> tuple[int, list[StateDelta]]: """Fetch a list of room state changes since the given stream id This may be the partial state if we're lazy joining the room. @@ -114,7 +114,7 @@ class StateDeltasStore(SQLBaseStore): def get_current_state_deltas_txn( txn: LoggingTransaction, - ) -> Tuple[int, List[StateDelta]]: + ) -> tuple[int, list[StateDelta]]: # First we calculate the max stream id that will give us less than # N results. 
# We arbitrarily limit to 100 stream_id entries to ensure we don't @@ -193,7 +193,7 @@ class StateDeltasStore(SQLBaseStore): *, from_token: Optional[RoomStreamToken], to_token: Optional[RoomStreamToken], - ) -> List[StateDelta]: + ) -> list[StateDelta]: """ Get the state deltas between two tokens. @@ -239,7 +239,7 @@ class StateDeltasStore(SQLBaseStore): *, from_token: Optional[RoomStreamToken], to_token: Optional[RoomStreamToken], - ) -> List[StateDelta]: + ) -> list[StateDelta]: """ Get the state deltas between two tokens. @@ -275,7 +275,7 @@ class StateDeltasStore(SQLBaseStore): room_ids: StrCollection, from_token: RoomStreamToken, to_token: RoomStreamToken, - ) -> List[StateDelta]: + ) -> list[StateDelta]: """Get the state deltas between two tokens for the set of rooms.""" room_ids = self._curr_state_delta_stream_cache.get_entities_changed( @@ -287,7 +287,7 @@ class StateDeltasStore(SQLBaseStore): def get_current_state_deltas_for_rooms_txn( txn: LoggingTransaction, room_ids: StrCollection, - ) -> List[StateDelta]: + ) -> list[StateDelta]: clause, args = make_in_list_sql_clause( self.database_engine, "room_id", room_ids ) diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 74830b7129..19e525a3cd 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -26,11 +26,8 @@ from typing import ( TYPE_CHECKING, Any, Counter, - Dict, Iterable, - List, Optional, - Tuple, Union, cast, ) @@ -154,7 +151,7 @@ class StatsStore(StateDeltasStore): last_user_id = progress.get("last_user_id", "") - def _get_next_batch(txn: LoggingTransaction) -> List[str]: + def _get_next_batch(txn: LoggingTransaction) -> list[str]: sql = """ SELECT DISTINCT name FROM users WHERE name > ? @@ -200,7 +197,7 @@ class StatsStore(StateDeltasStore): last_room_id = progress.get("last_room_id", "") - def _get_next_batch(txn: LoggingTransaction) -> List[str]: + def _get_next_batch(txn: LoggingTransaction) -> list[str]: sql = """ SELECT DISTINCT room_id FROM current_state_events WHERE room_id > ? @@ -245,7 +242,7 @@ class StatsStore(StateDeltasStore): desc="stats_incremental_position", ) - async def update_room_state(self, room_id: str, fields: Dict[str, Any]) -> None: + async def update_room_state(self, room_id: str, fields: dict[str, Any]) -> None: """Update the state of a room. fields can contain the following keys with string values: @@ -320,7 +317,7 @@ class StatsStore(StateDeltasStore): ) async def bulk_update_stats_delta( - self, ts: int, updates: Dict[str, Dict[str, Counter[str]]], stream_id: int + self, ts: int, updates: dict[str, dict[str, Counter[str]]], stream_id: int ) -> None: """Bulk update stats tables for a given stream_id and updates the stats incremental position. 
@@ -363,9 +360,9 @@ class StatsStore(StateDeltasStore): ts: int, stats_type: str, stats_id: str, - fields: Dict[str, int], + fields: dict[str, int], complete_with_stream_id: int, - absolute_field_overrides: Optional[Dict[str, int]] = None, + absolute_field_overrides: Optional[dict[str, int]] = None, ) -> None: """ Updates the statistics for a subject, with a delta (difference/relative @@ -401,9 +398,9 @@ class StatsStore(StateDeltasStore): ts: int, stats_type: str, stats_id: str, - fields: Dict[str, int], + fields: dict[str, int], complete_with_stream_id: int, - absolute_field_overrides: Optional[Dict[str, int]] = None, + absolute_field_overrides: Optional[dict[str, int]] = None, ) -> None: if absolute_field_overrides is None: absolute_field_overrides = {} @@ -450,9 +447,9 @@ class StatsStore(StateDeltasStore): self, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - absolutes: Dict[str, Any], - additive_relatives: Dict[str, int], + keyvalues: dict[str, Any], + absolutes: dict[str, Any], + additive_relatives: dict[str, int], ) -> None: """Used to update values in the stats tables. @@ -510,11 +507,11 @@ class StatsStore(StateDeltasStore): def _fetch_current_state_stats( txn: LoggingTransaction, - ) -> Tuple[List[str], Dict[str, int], int, List[str], int]: + ) -> tuple[list[str], dict[str, int], int, list[str], int]: pos = self.get_room_max_stream_ordering() # type: ignore[attr-defined] rows = cast( - List[Tuple[str]], + list[tuple[str]], self.db_pool.simple_select_many_txn( txn, table="current_state_events", @@ -544,7 +541,7 @@ class StatsStore(StateDeltasStore): """, (room_id,), ) - membership_counts = dict(cast(Iterable[Tuple[str, int]], txn)) + membership_counts = dict(cast(Iterable[tuple[str, int]], txn)) txn.execute( """ @@ -554,7 +551,7 @@ class StatsStore(StateDeltasStore): (room_id,), ) - current_state_events_count = cast(Tuple[int], txn.fetchone())[0] + current_state_events_count = cast(tuple[int], txn.fetchone())[0] users_in_room = self.get_users_in_room_txn(txn, room_id) # type: ignore[attr-defined] @@ -588,7 +585,7 @@ class StatsStore(StateDeltasStore): ) return - room_state: Dict[str, Union[None, bool, str]] = { + room_state: dict[str, Union[None, bool, str]] = { "join_rules": None, "history_visibility": None, "encryption": None, @@ -651,7 +648,7 @@ class StatsStore(StateDeltasStore): async def _calculate_and_set_initial_state_for_user(self, user_id: str) -> None: def _calculate_and_set_initial_state_for_user_txn( txn: LoggingTransaction, - ) -> Tuple[int, int]: + ) -> tuple[int, int]: pos = self._get_max_stream_id_in_current_state_deltas_txn(txn) txn.execute( @@ -662,7 +659,7 @@ class StatsStore(StateDeltasStore): """, (user_id,), ) - count = cast(Tuple[int], txn.fetchone())[0] + count = cast(tuple[int], txn.fetchone())[0] return count, pos joined_rooms, pos = await self.db_pool.runInteraction( @@ -688,7 +685,7 @@ class StatsStore(StateDeltasStore): order_by: Optional[str] = UserSortOrder.USER_ID.value, direction: Direction = Direction.FORWARDS, search_term: Optional[str] = None, - ) -> Tuple[List[Tuple[str, Optional[str], int, int]], int]: + ) -> tuple[list[tuple[str, Optional[str], int, int]], int]: """Function to retrieve a paginated list of users and their uploaded local media (size and number). This will return a json list of users and the total number of users matching the filter criteria. 
@@ -713,7 +710,7 @@ class StatsStore(StateDeltasStore): def get_users_media_usage_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[str, Optional[str], int, int]], int]: + ) -> tuple[list[tuple[str, Optional[str], int, int]], int]: filters = [] args: list = [] @@ -766,7 +763,7 @@ class StatsStore(StateDeltasStore): sql_base=sql_base, ) txn.execute(sql, args) - count = cast(Tuple[int], txn.fetchone())[0] + count = cast(tuple[int], txn.fetchone())[0] sql = """ SELECT @@ -785,7 +782,7 @@ class StatsStore(StateDeltasStore): args += [limit, start] txn.execute(sql, args) - users = cast(List[Tuple[str, Optional[str], int, int]], txn.fetchall()) + users = cast(list[tuple[str, Optional[str], int, int]], txn.fetchall()) return users, count diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 66280f2f9a..e8ea1e5480 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -47,15 +47,11 @@ from typing import ( AbstractSet, Any, Collection, - Dict, Iterable, - List, Literal, Mapping, Optional, Protocol, - Set, - Tuple, cast, overload, ) @@ -109,7 +105,7 @@ class PaginateFunction(Protocol): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Tuple[List[EventBase], RoomStreamToken, bool]: ... + ) -> tuple[list[EventBase], RoomStreamToken, bool]: ... # Used as return values for pagination APIs @@ -122,8 +118,8 @@ class _EventDictReturn: @attr.s(slots=True, frozen=True, auto_attribs=True) class _EventsAround: - events_before: List[EventBase] - events_after: List[EventBase] + events_before: list[EventBase] + events_after: list[EventBase] start: RoomStreamToken end: RoomStreamToken @@ -156,9 +152,9 @@ class CurrentStateDeltaMembership: def generate_pagination_where_clause( direction: Direction, - column_names: Tuple[str, str], - from_token: Optional[Tuple[Optional[int], int]], - to_token: Optional[Tuple[Optional[int], int]], + column_names: tuple[str, str], + from_token: Optional[tuple[Optional[int], int]], + to_token: Optional[tuple[Optional[int], int]], engine: BaseDatabaseEngine, ) -> str: """Creates an SQL expression to bound the columns by the pagination @@ -224,8 +220,8 @@ def generate_pagination_bounds( direction: Direction, from_token: Optional[RoomStreamToken], to_token: Optional[RoomStreamToken], -) -> Tuple[ - str, Optional[Tuple[Optional[int], int]], Optional[Tuple[Optional[int], int]] +) -> tuple[ + str, Optional[tuple[Optional[int], int]], Optional[tuple[Optional[int], int]] ]: """ Generate a start and end point for this page of events. @@ -261,7 +257,7 @@ def generate_pagination_bounds( # by fetching all events between the min stream token and the maximum # stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and # then filtering the results. 
- from_bound: Optional[Tuple[Optional[int], int]] = None + from_bound: Optional[tuple[Optional[int], int]] = None if from_token: if from_token.topological is not None: from_bound = from_token.as_historical_tuple() @@ -276,7 +272,7 @@ def generate_pagination_bounds( from_token.stream, ) - to_bound: Optional[Tuple[Optional[int], int]] = None + to_bound: Optional[tuple[Optional[int], int]] = None if to_token: if to_token.topological is not None: to_bound = to_token.as_historical_tuple() @@ -320,8 +316,8 @@ def generate_next_token( def _make_generic_sql_bound( bound: str, - column_names: Tuple[str, str], - values: Tuple[Optional[int], int], + column_names: tuple[str, str], + values: tuple[Optional[int], int], engine: BaseDatabaseEngine, ) -> str: """Create an SQL expression that bounds the given column names by the @@ -484,7 +480,7 @@ def _filter_results_by_stream( return True -def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]: +def filter_to_clause(event_filter: Optional[Filter]) -> tuple[str, list[str]]: # NB: This may create SQL clauses that don't optimise well (and we don't # have indices on all possible clauses). E.g. it may create # "room_id == X AND room_id != X", which postgres doesn't optimise. @@ -669,7 +665,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken, bool]]: + ) -> dict[str, tuple[list[EventBase], RoomStreamToken, bool]]: """Get new room events in stream ordering since `from_key`. Args: @@ -730,7 +726,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def get_rooms_that_changed( self, room_ids: Collection[str], from_key: RoomStreamToken - ) -> Set[str]: + ) -> set[str]: """Given a list of rooms and a token, return rooms where there may have been changes. """ @@ -765,7 +761,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): AND event_stream_ordering > ? """ - results: Set[str] = set() + results: set[str] = set() for batch in batch_iter(room_ids, 1000): clause, args = make_in_list_sql_clause( self.database_engine, "room_id", batch @@ -791,7 +787,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Tuple[List[EventBase], RoomStreamToken, bool]: + ) -> tuple[list[EventBase], RoomStreamToken, bool]: """ Paginate events by `stream_ordering` in the room from the `from_key` in the given `direction` to the `to_key` or `limit`. @@ -876,7 +872,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): engine=self.database_engine, ) - def f(txn: LoggingTransaction) -> Tuple[List[_EventDictReturn], bool]: + def f(txn: LoggingTransaction) -> tuple[list[_EventDictReturn], bool]: sql = f""" SELECT event_id, instance_name, stream_ordering FROM events @@ -940,8 +936,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): user_id: str, from_key: RoomStreamToken, to_key: RoomStreamToken, - excluded_room_ids: Optional[List[str]] = None, - ) -> List[CurrentStateDeltaMembership]: + excluded_room_ids: Optional[list[str]] = None, + ) -> list[CurrentStateDeltaMembership]: """ Fetch membership events (and the previous event that was replaced by that one) for a given user. 
@@ -995,13 +991,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if not has_changed: return [] - def f(txn: LoggingTransaction) -> List[CurrentStateDeltaMembership]: + def f(txn: LoggingTransaction) -> list[CurrentStateDeltaMembership]: # To handle tokens with a non-empty instance_map we fetch more # results than necessary and then filter down min_from_id = from_key.stream max_to_id = to_key.get_max_stream_pos() - args: List[Any] = [min_from_id, max_to_id, EventTypes.Member, user_id] + args: list[Any] = [min_from_id, max_to_id, EventTypes.Member, user_id] # TODO: It would be good to assert that the `from_token`/`to_token` is >= # the first row in `current_state_delta_stream` for the rooms we're @@ -1044,7 +1040,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): txn.execute(sql, args) - membership_changes: List[CurrentStateDeltaMembership] = [] + membership_changes: list[CurrentStateDeltaMembership] = [] for ( room_id, event_id, @@ -1136,7 +1132,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): from_key: RoomStreamToken, to_key: RoomStreamToken, excluded_room_ids: Optional[AbstractSet[str]] = None, - ) -> Dict[str, RoomsForUserStateReset]: + ) -> dict[str, RoomsForUserStateReset]: """ Fetch membership events that result in a meaningful membership change for a given user. @@ -1185,7 +1181,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if excluded_room_ids is not None: room_ids_to_exclude = excluded_room_ids - def f(txn: LoggingTransaction) -> Dict[str, RoomsForUserStateReset]: + def f(txn: LoggingTransaction) -> dict[str, RoomsForUserStateReset]: # To handle tokens with a non-empty instance_map we fetch more # results than necessary and then filter down min_from_id = from_key.stream @@ -1248,7 +1244,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): (user_id, EventTypes.Member, user_id, min_from_id, max_to_id), ) - membership_changes: Dict[str, RoomsForUserStateReset] = {} + membership_changes: dict[str, RoomsForUserStateReset] = {} for ( room_id, membership_event_id, @@ -1332,8 +1328,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): user_id: str, from_key: RoomStreamToken, to_key: RoomStreamToken, - excluded_rooms: Optional[List[str]] = None, - ) -> List[EventBase]: + excluded_rooms: Optional[list[str]] = None, + ) -> list[EventBase]: """Fetch membership events for a given user. All such events whose stream ordering `s` lies in the range @@ -1351,13 +1347,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if not has_changed: return [] - def f(txn: LoggingTransaction) -> List[_EventDictReturn]: + def f(txn: LoggingTransaction) -> list[_EventDictReturn]: # To handle tokens with a non-empty instance_map we fetch more # results than necessary and then filter down min_from_id = from_key.stream max_to_id = to_key.get_max_stream_pos() - args: List[Any] = [user_id, min_from_id, max_to_id] + args: list[Any] = [user_id, min_from_id, max_to_id] ignore_room_clause = "" if excluded_rooms is not None and len(excluded_rooms) > 0: @@ -1403,7 +1399,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): async def get_recent_events_for_room( self, room_id: str, limit: int, end_token: RoomStreamToken - ) -> Tuple[List[EventBase], RoomStreamToken]: + ) -> tuple[list[EventBase], RoomStreamToken]: """Get the most recent events in the room in topological ordering. 
Args: @@ -1428,7 +1424,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): async def get_recent_event_ids_for_room( self, room_id: str, limit: int, end_token: RoomStreamToken - ) -> Tuple[List[_EventDictReturn], RoomStreamToken]: + ) -> tuple[list[_EventDictReturn], RoomStreamToken]: """Get the most recent events in the room in topological ordering. Args: @@ -1459,7 +1455,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): async def get_room_event_before_stream_ordering( self, room_id: str, stream_ordering: int - ) -> Optional[Tuple[int, int, str]]: + ) -> Optional[tuple[int, int, str]]: """Gets details of the first event in a room at or before a stream ordering Args: @@ -1470,7 +1466,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): A tuple of (stream ordering, topological ordering, event_id) """ - def _f(txn: LoggingTransaction) -> Optional[Tuple[int, int, str]]: + def _f(txn: LoggingTransaction) -> Optional[tuple[int, int, str]]: sql = """ SELECT stream_ordering, topological_ordering, event_id FROM events @@ -1483,7 +1479,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): LIMIT 1 """ txn.execute(sql, (room_id, stream_ordering)) - return cast(Optional[Tuple[int, int, str]], txn.fetchone()) + return cast(Optional[tuple[int, int, str]], txn.fetchone()) return await self.db_pool.runInteraction( "get_room_event_before_stream_ordering", _f @@ -1519,7 +1515,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self, room_id: str, event_types: Optional[StrCollection] = None, - ) -> Optional[Tuple[str, PersistedEventPosition]]: + ) -> Optional[tuple[str, PersistedEventPosition]]: """ Returns the ID and event position of the last event in a room. @@ -1536,9 +1532,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def _get_last_event_pos_in_room_txn( txn: LoggingTransaction, - ) -> Optional[Tuple[str, PersistedEventPosition]]: + ) -> Optional[tuple[str, PersistedEventPosition]]: event_type_clause = "" - event_type_args: List[str] = [] + event_type_args: list[str] = [] if event_types is not None and len(event_types) > 0: event_type_clause, event_type_args = make_in_list_sql_clause( txn.database_engine, "type", event_types @@ -1562,7 +1558,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): [room_id] + event_type_args, ) - row = cast(Optional[Tuple[str, int, str]], txn.fetchone()) + row = cast(Optional[tuple[str, int, str]], txn.fetchone()) if row is not None: event_id, stream_ordering, instance_name = row @@ -1585,7 +1581,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): room_id: str, end_token: RoomStreamToken, event_types: Optional[StrCollection] = None, - ) -> Optional[Tuple[str, PersistedEventPosition]]: + ) -> Optional[tuple[str, PersistedEventPosition]]: """ Returns the ID and event position of the last event in a room at or before a stream ordering. @@ -1602,7 +1598,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def get_last_event_pos_in_room_before_stream_ordering_txn( txn: LoggingTransaction, - ) -> Optional[Tuple[str, PersistedEventPosition]]: + ) -> Optional[tuple[str, PersistedEventPosition]]: # We're looking for the closest event at or before the token. 
We need to # handle the fact that the stream token can be a vector clock (with an # `instance_map`) and events can be persisted on different instances @@ -1616,7 +1612,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): max_stream = end_token.get_max_stream_pos() event_type_clause = "" - event_type_args: List[str] = [] + event_type_args: list[str] = [] if event_types is not None and len(event_types) > 0: event_type_clause, event_type_args = make_in_list_sql_clause( txn.database_engine, "type", event_types @@ -1692,7 +1688,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self, room_ids: StrCollection, end_token: RoomStreamToken, - ) -> Dict[str, int]: + ) -> dict[str, int]: """Bulk fetch the stream position of the latest events in the given rooms """ @@ -1705,8 +1701,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # Check that the stream position for the rooms are from before the # minimum position of the token. If not then we need to fetch more # rows. - results: Dict[str, int] = {} - recheck_rooms: Set[str] = set() + results: dict[str, int] = {} + recheck_rooms: set[str] = set() min_token = end_token.stream for room_id, stream in uncapped_results.items(): if stream is None: @@ -1747,11 +1743,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): now_token = self.get_room_max_token() max_pos = now_token.get_max_stream_pos() - results: Dict[str, int] = {} + results: dict[str, int] = {} # First, we check for the rooms in the stream change cache to see if we # can just use the latest position from it. - missing_room_ids: Set[str] = set() + missing_room_ids: set[str] = set() for room_id in room_ids: stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id) if stream_pos is not None: @@ -1770,7 +1766,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def bulk_get_max_event_pos_fallback_txn( txn: LoggingTransaction, batched_room_ids: StrCollection - ) -> Dict[str, int]: + ) -> dict[str, int]: clause, args = make_in_list_sql_clause( self.database_engine, "room_id", batched_room_ids ) @@ -1795,7 +1791,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # the joins and sub-queries. def bulk_get_max_event_pos_from_sliding_sync_tables_txn( txn: LoggingTransaction, batched_room_ids: StrCollection - ) -> Dict[str, int]: + ) -> dict[str, int]: clause, args = make_in_list_sql_clause( self.database_engine, "room_id", batched_room_ids ) @@ -1808,7 +1804,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): txn.execute(sql, args) return {row[0]: row[1] for row in txn} - recheck_rooms: Set[str] = set() + recheck_rooms: set[str] = set() for batched in batch_iter(room_ids, 1000): if await self.have_finished_sliding_sync_background_jobs(): batch_results = await self.db_pool.runInteraction( @@ -2077,7 +2073,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): from_id: int, current_id: int, limit: int, - ) -> Tuple[int, Dict[str, Optional[int]]]: + ) -> tuple[int, dict[str, Optional[int]]]: """Get all new events Returns all event ids with from_id < stream_ordering <= current_id. 
@@ -2098,7 +2094,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def get_all_new_event_ids_stream_txn( txn: LoggingTransaction, - ) -> Tuple[int, Dict[str, Optional[int]]]: + ) -> tuple[int, dict[str, Optional[int]]]: sql = ( "SELECT e.stream_ordering, e.event_id, e.received_ts" " FROM events AS e" @@ -2115,7 +2111,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if len(rows) == limit: upper_bound = rows[-1][0] - event_to_received_ts: Dict[str, Optional[int]] = { + event_to_received_ts: dict[str, Optional[int]] = { row[1]: row[2] for row in rows } return upper_bound, event_to_received_ts @@ -2194,7 +2190,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ txn.execute(sql) min_positions = dict( - cast(Iterable[Tuple[str, int]], txn) + cast(Iterable[tuple[str, int]], txn) ) # Map from type -> min position # Ensure we do actually have some values here @@ -2229,7 +2225,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): direction: Direction = Direction.BACKWARDS, limit: int = 0, event_filter: Optional[Filter] = None, - ) -> Tuple[List[_EventDictReturn], RoomStreamToken, bool]: + ) -> tuple[list[_EventDictReturn], RoomStreamToken, bool]: """Returns list of events before or after a given token. Args: @@ -2269,7 +2265,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # Token selection matches what we do below if there are no rows return [], to_token if to_token else from_token, False - args: List[Any] = [room_id] + args: list[Any] = [room_id] order, from_bound, to_bound = generate_pagination_bounds( direction, from_token, to_token @@ -2403,7 +2399,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): direction: Direction = Direction.BACKWARDS, limit: int = 0, event_filter: Optional[Filter] = None, - ) -> Tuple[List[EventBase], RoomStreamToken, bool]: + ) -> tuple[list[EventBase], RoomStreamToken, bool]: """ Paginate events by `topological_ordering` (tie-break with `stream_ordering`) in the room from the `from_key` in the given `direction` to the `to_key` or diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index 94cf7f4052..0768dd78c0 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -21,7 +21,7 @@ # import logging -from typing import Any, Dict, Iterable, List, Mapping, Tuple, cast +from typing import Any, Iterable, Mapping, cast from synapse.api.constants import AccountDataTypes from synapse.replication.tcp.streams import AccountDataStream @@ -52,13 +52,13 @@ class TagsWorkerStore(AccountDataWorkerStore): """ rows = cast( - List[Tuple[str, str, str]], + list[tuple[str, str, str]], await self.db_pool.simple_select_list( "room_tags", {"user_id": user_id}, ["room_id", "tag", "content"] ), ) - tags_by_room: Dict[str, Dict[str, JsonDict]] = {} + tags_by_room: dict[str, dict[str, JsonDict]] = {} for room_id, tag, content in rows: room_tags = tags_by_room.setdefault(room_id, {}) room_tags[tag] = db_to_json(content) @@ -66,7 +66,7 @@ class TagsWorkerStore(AccountDataWorkerStore): async def get_all_updated_tags( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, str, str]], int, bool]: + ) -> tuple[list[tuple[int, str, str]], int, bool]: """Get updates for tags replication stream. 
Args: @@ -93,7 +93,7 @@ class TagsWorkerStore(AccountDataWorkerStore): def get_all_updated_tags_txn( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str]]: + ) -> list[tuple[int, str, str]]: sql = ( "SELECT stream_id, user_id, room_id" " FROM room_tags_revisions as r" @@ -102,7 +102,7 @@ class TagsWorkerStore(AccountDataWorkerStore): ) txn.execute(sql, (last_id, current_id, limit)) # mypy doesn't understand what the query is selecting. - return cast(List[Tuple[int, str, str]], txn.fetchall()) + return cast(list[tuple[int, str, str]], txn.fetchall()) tag_ids = await self.db_pool.runInteraction( "get_all_updated_tags", get_all_updated_tags_txn @@ -131,7 +131,7 @@ class TagsWorkerStore(AccountDataWorkerStore): rooms that changed since the stream_id token. """ - def get_updated_tags_txn(txn: LoggingTransaction) -> List[str]: + def get_updated_tags_txn(txn: LoggingTransaction) -> list[str]: sql = ( "SELECT room_id from room_tags_revisions" " WHERE user_id = ? AND stream_id > ?" @@ -218,7 +218,7 @@ class TagsWorkerStore(AccountDataWorkerStore): A mapping of tags to tag content. """ rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="room_tags", keyvalues={"user_id": user_id, "room_id": room_id}, @@ -338,8 +338,8 @@ class TagsWorkerStore(AccountDataWorkerStore): if stream_name == AccountDataStream.NAME: # Cast is safe because the `AccountDataStream` should only be giving us # `AccountDataStreamRow` - account_data_stream_rows: List[AccountDataStream.AccountDataStreamRow] = ( - cast(List[AccountDataStream.AccountDataStreamRow], rows) + account_data_stream_rows: list[AccountDataStream.AccountDataStreamRow] = ( + cast(list[AccountDataStream.AccountDataStreamRow], rows) ) for row in account_data_stream_rows: diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py index 2d4804fef6..7410507255 100644 --- a/synapse/storage/databases/main/task_scheduler.py +++ b/synapse/storage/databases/main/task_scheduler.py @@ -19,7 +19,7 @@ # # -from typing import TYPE_CHECKING, Any, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, Any, Optional, cast from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -34,7 +34,7 @@ from synapse.util.json import json_encoder if TYPE_CHECKING: from synapse.server import HomeServer -ScheduledTaskRow = Tuple[str, str, str, int, str, str, str, str] +ScheduledTaskRow = tuple[str, str, str, int, str, str, str, str] class TaskSchedulerWorkerStore(SQLBaseStore): @@ -63,12 +63,12 @@ class TaskSchedulerWorkerStore(SQLBaseStore): async def get_scheduled_tasks( self, *, - actions: Optional[List[str]] = None, + actions: Optional[list[str]] = None, resource_id: Optional[str] = None, - statuses: Optional[List[TaskStatus]] = None, + statuses: Optional[list[TaskStatus]] = None, max_timestamp: Optional[int] = None, limit: Optional[int] = None, - ) -> List[ScheduledTask]: + ) -> list[ScheduledTask]: """Get a list of scheduled tasks from the DB. 
Args: @@ -82,9 +82,9 @@ class TaskSchedulerWorkerStore(SQLBaseStore): Returns: a list of `ScheduledTask`, ordered by increasing timestamps """ - def get_scheduled_tasks_txn(txn: LoggingTransaction) -> List[ScheduledTaskRow]: - clauses: List[str] = [] - args: List[Any] = [] + def get_scheduled_tasks_txn(txn: LoggingTransaction) -> list[ScheduledTaskRow]: + clauses: list[str] = [] + args: list[Any] = [] if resource_id: clauses.append("resource_id = ?") args.append(resource_id) @@ -115,7 +115,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): args.append(limit) txn.execute(sql, args) - return cast(List[ScheduledTaskRow], txn.fetchall()) + return cast(list[ScheduledTaskRow], txn.fetchall()) rows = await self.db_pool.runInteraction( "get_scheduled_tasks", get_scheduled_tasks_txn diff --git a/synapse/storage/databases/main/thread_subscriptions.py b/synapse/storage/databases/main/thread_subscriptions.py index 50084887a4..1c02ab1611 100644 --- a/synapse/storage/databases/main/thread_subscriptions.py +++ b/synapse/storage/databases/main/thread_subscriptions.py @@ -14,11 +14,8 @@ import logging from typing import ( TYPE_CHECKING, Any, - FrozenSet, Iterable, - List, Optional, - Tuple, Union, cast, ) @@ -479,7 +476,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): @cached(max_entries=100) async def get_subscribers_to_thread( self, room_id: str, thread_root_event_id: str - ) -> FrozenSet[str]: + ) -> frozenset[str]: """ Returns: the set of user_ids for local users who are subscribed to the given thread. @@ -510,7 +507,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): async def get_updated_thread_subscriptions( self, *, from_id: int, to_id: int, limit: int - ) -> List[Tuple[int, str, str, str]]: + ) -> list[tuple[int, str, str, str]]: """Get updates to thread subscriptions between two stream IDs. Args: @@ -524,7 +521,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): def get_updated_thread_subscriptions_txn( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, str]]: + ) -> list[tuple[int, str, str, str]]: sql = """ SELECT stream_id, user_id, room_id, event_id FROM thread_subscriptions @@ -534,7 +531,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): """ txn.execute(sql, (from_id, to_id, limit)) - return cast(List[Tuple[int, str, str, str]], txn.fetchall()) + return cast(list[tuple[int, str, str, str]], txn.fetchall()) return await self.db_pool.runInteraction( "get_updated_thread_subscriptions", @@ -543,7 +540,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): async def get_latest_updated_thread_subscriptions_for_user( self, user_id: str, *, from_id: int, to_id: int, limit: int - ) -> List[Tuple[int, str, str, bool, Optional[bool]]]: + ) -> list[tuple[int, str, str, bool, Optional[bool]]]: """Get the latest updates to thread subscriptions for a specific user. 
Args: @@ -561,7 +558,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): def get_updated_thread_subscriptions_for_user_txn( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, bool, Optional[bool]]]: + ) -> list[tuple[int, str, str, bool, Optional[bool]]]: sql = """ WITH the_updates AS ( SELECT stream_id, room_id, event_id, subscribed, automatic diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 41c9483927..e0422f7459 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -21,7 +21,7 @@ import logging from enum import Enum -from typing import TYPE_CHECKING, Iterable, List, Mapping, Optional, Tuple, cast +from typing import TYPE_CHECKING, Iterable, Mapping, Optional, cast import attr from canonicaljson import encode_canonical_json @@ -97,7 +97,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): async def get_received_txn_response( self, transaction_id: str, origin: str - ) -> Optional[Tuple[int, JsonDict]]: + ) -> Optional[tuple[int, JsonDict]]: """For an incoming transaction from a given origin, check if we have already responded to it. If so, return the response code and response body (as a dict). @@ -120,7 +120,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): def _get_received_txn_response( self, txn: LoggingTransaction, transaction_id: str, origin: str - ) -> Optional[Tuple[int, JsonDict]]: + ) -> Optional[tuple[int, JsonDict]]: result = self.db_pool.simple_select_one_txn( txn, table="received_transactions", @@ -215,7 +215,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): self, destinations: StrCollection ) -> Mapping[str, Optional[DestinationRetryTimings]]: rows = cast( - List[Tuple[str, Optional[int], Optional[int], Optional[int]]], + list[tuple[str, Optional[int], Optional[int], Optional[int]]], await self.db_pool.simple_select_many_batch( table="destinations", iterable=destinations, @@ -377,7 +377,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): self, destination: str, last_successful_stream_ordering: int, - ) -> List[str]: + ) -> list[str]: """ Returns at most 50 event IDs and their corresponding stream_orderings that correspond to the oldest events that have not yet been sent to @@ -403,7 +403,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): txn: LoggingTransaction, destination: str, last_successful_stream_ordering: int, - ) -> List[str]: + ) -> list[str]: q = """ SELECT event_id FROM destination_rooms JOIN events USING (stream_ordering) @@ -421,7 +421,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): async def get_catch_up_outstanding_destinations( self, after_destination: Optional[str] - ) -> List[str]: + ) -> list[str]: """ Get a list of destinations we should retry transaction sending to. 
@@ -450,7 +450,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): @staticmethod def _get_catch_up_outstanding_destinations_txn( txn: LoggingTransaction, now_time_ms: int, after_destination: Optional[str] - ) -> List[str]: + ) -> list[str]: # We're looking for destinations which satisfy either of the following # conditions: # @@ -540,8 +540,8 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): destination: Optional[str] = None, order_by: str = DestinationSortOrder.DESTINATION.value, direction: Direction = Direction.FORWARDS, - ) -> Tuple[ - List[Tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]]], + ) -> tuple[ + list[tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]]], int, ]: """Function to retrieve a paginated list of destinations. @@ -566,9 +566,9 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): def get_destinations_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[ - List[ - Tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]] + ) -> tuple[ + list[ + tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]] ], int, ]: @@ -579,7 +579,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): else: order = "ASC" - args: List[object] = [] + args: list[object] = [] where_statement = "" if destination: args.extend(["%" + destination.lower() + "%"]) @@ -588,7 +588,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): sql_base = f"FROM destinations {where_statement} " sql = f"SELECT COUNT(*) as total_destinations {sql_base}" txn.execute(sql, args) - count = cast(Tuple[int], txn.fetchone())[0] + count = cast(tuple[int], txn.fetchone())[0] sql = f""" SELECT destination, retry_last_ts, retry_interval, failure_ts, @@ -599,8 +599,8 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): """ txn.execute(sql, args + [limit, start]) destinations = cast( - List[ - Tuple[ + list[ + tuple[ str, Optional[int], Optional[int], Optional[int], Optional[int] ] ], @@ -618,7 +618,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): start: int, limit: int, direction: Direction = Direction.FORWARDS, - ) -> Tuple[List[Tuple[str, int]], int]: + ) -> tuple[list[tuple[str, int]], int]: """Function to retrieve a paginated list of destination's rooms. This will return a json list of rooms and the total number of rooms. @@ -636,7 +636,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): def get_destination_rooms_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[str, int]], int]: + ) -> tuple[list[tuple[str, int]], int]: if direction == Direction.BACKWARDS: order = "DESC" else: @@ -648,10 +648,10 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): WHERE destination = ? 
""" txn.execute(sql, [destination]) - count = cast(Tuple[int], txn.fetchone())[0] + count = cast(tuple[int], txn.fetchone())[0] rooms = cast( - List[Tuple[str, int]], + list[tuple[str, int]], self.db_pool.simple_select_list_paginate_txn( txn=txn, table="destination_rooms", diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 569925e39f..69a4431f29 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Optional, Union, cast import attr @@ -170,7 +170,7 @@ class UIAuthWorkerStore(SQLBaseStore): async def get_completed_ui_auth_stages( self, session_id: str - ) -> Dict[str, Union[str, bool, JsonDict]]: + ) -> dict[str, Union[str, bool, JsonDict]]: """ Retrieve the completed stages of a UI authentication session. @@ -182,7 +182,7 @@ class UIAuthWorkerStore(SQLBaseStore): """ results = {} rows = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="ui_auth_sessions_credentials", keyvalues={"session_id": session_id}, @@ -302,14 +302,14 @@ class UIAuthWorkerStore(SQLBaseStore): async def get_user_agents_ips_to_ui_auth_session( self, session_id: str, - ) -> List[Tuple[str, str]]: + ) -> list[tuple[str, str]]: """Get the given user agents / IPs used during the ui auth process Returns: List of user_agent/ip pairs """ return cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.db_pool.simple_select_list( table="ui_auth_sessions_ips", keyvalues={"session_id": session_id}, @@ -353,7 +353,7 @@ class UIAuthWorkerStore(SQLBaseStore): # If a registration token was used, decrement the pending counter # before deleting the session. rows = cast( - List[Tuple[str]], + list[tuple[str]], self.db_pool.simple_select_many_txn( txn, table="ui_auth_sessions_credentials", @@ -365,7 +365,7 @@ class UIAuthWorkerStore(SQLBaseStore): ) # Get the tokens used and how much pending needs to be decremented by. - token_counts: Dict[str, int] = {} + token_counts: dict[str, int] = {} for r in rows: # If registration was successfully completed, the result of the # registration token stage for that session will be True. @@ -378,7 +378,7 @@ class UIAuthWorkerStore(SQLBaseStore): # Update the `pending` counters. if len(token_counts) > 0: token_rows = cast( - List[Tuple[str, int]], + list[tuple[str, int]], self.db_pool.simple_select_many_txn( txn, table="registration_tokens", diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 9deb9ab73c..895d7e6148 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -26,11 +26,8 @@ from typing import ( TYPE_CHECKING, Collection, Iterable, - List, Optional, Sequence, - Set, - Tuple, TypedDict, cast, ) @@ -209,7 +206,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): def _get_next_batch( txn: LoggingTransaction, - ) -> Optional[Sequence[Tuple[str, int]]]: + ) -> Optional[Sequence[tuple[str, int]]]: # Only fetch 250 rooms, so we don't fetch too many at once, even # if those 250 rooms have less than batch_size state events. 
sql = """ @@ -218,7 +215,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): LIMIT 250 """ % (TEMP_TABLE + "_rooms",) txn.execute(sql) - rooms_to_work_on = cast(List[Tuple[str, int]], txn.fetchall()) + rooms_to_work_on = cast(list[tuple[str, int]], txn.fetchall()) if not rooms_to_work_on: return None @@ -369,14 +366,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): RETURNING user_id """ txn.execute(sql, (batch_size,)) - user_result = cast(List[Tuple[str]], txn.fetchall()) + user_result = cast(list[tuple[str]], txn.fetchall()) else: sql = "SELECT user_id FROM %s ORDER BY user_id LIMIT %s" % ( TEMP_TABLE + "_users", str(batch_size), ) txn.execute(sql) - user_result = cast(List[Tuple[str]], txn.fetchall()) + user_result = cast(list[tuple[str]], txn.fetchall()) if not user_result: return None @@ -408,7 +405,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): # Next fetch their profiles. Note that not all users have profiles. profile_rows = cast( - List[Tuple[str, Optional[str], Optional[str]]], + list[tuple[str, Optional[str], Optional[str]]], self.db_pool.simple_select_many_txn( txn, table="profiles", @@ -514,7 +511,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ] rows = cast( - List[Tuple[str, Optional[str]]], + list[tuple[str, Optional[str]]], self.db_pool.simple_select_many_txn( txn, table="users", @@ -608,7 +605,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): async def get_remote_servers_with_profiles_to_refresh( self, now_ts: int, limit: int - ) -> List[str]: + ) -> list[str]: """ Get a list of up to `limit` server names which have users whose locally-cached profiles we believe to be stale @@ -617,7 +614,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): def _get_remote_servers_with_refreshable_profiles_txn( txn: LoggingTransaction, - ) -> List[str]: + ) -> list[str]: sql = """ SELECT user_server_name FROM user_directory_stale_remote_users @@ -636,7 +633,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): async def get_remote_users_to_refresh_on_server( self, server_name: str, now_ts: int, limit: int - ) -> List[Tuple[str, int, int]]: + ) -> list[tuple[str, int, int]]: """ Get a list of up to `limit` user IDs from the server `server_name` whose locally-cached profiles we believe to be stale @@ -651,7 +648,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): def _get_remote_users_to_refresh_on_server_txn( txn: LoggingTransaction, - ) -> List[Tuple[str, int, int]]: + ) -> list[tuple[str, int, int]]: sql = """ SELECT user_id, retry_counter, next_try_at_ts FROM user_directory_stale_remote_users @@ -660,7 +657,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): LIMIT ? """ txn.execute(sql, (server_name, now_ts, limit)) - return cast(List[Tuple[str, int, int]], txn.fetchall()) + return cast(list[tuple[str, int, int]], txn.fetchall()) return await self.db_pool.runInteraction( "get_remote_users_to_refresh_on_server", @@ -771,7 +768,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): raise Exception("Unrecognized database engine") async def add_users_who_share_private_room( - self, room_id: str, user_id_tuples: Iterable[Tuple[str, str]] + self, room_id: str, user_id_tuples: Iterable[tuple[str, str]] ) -> None: """Insert entries into the users_who_share_private_rooms table. The first user should be a local user. 
@@ -834,7 +831,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): async def _get_user_in_directory( self, user_id: str - ) -> Optional[Tuple[Optional[str], Optional[str]]]: + ) -> Optional[tuple[Optional[str], Optional[str]]]: """ Fetch the user information in the user directory. @@ -843,7 +840,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): avatar URL (both of which may be None). """ return cast( - Optional[Tuple[Optional[str], Optional[str]]], + Optional[tuple[Optional[str], Optional[str]]], await self.db_pool.simple_select_one( table="user_directory", keyvalues={"user_id": user_id}, @@ -864,7 +861,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): class SearchResult(TypedDict): limited: bool - results: List[UserProfile] + results: list[UserProfile] class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): @@ -911,7 +908,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): "remove_from_user_dir", _remove_from_user_dir_txn ) - async def get_users_in_dir_due_to_room(self, room_id: str) -> Set[str]: + async def get_users_in_dir_due_to_room(self, room_id: str) -> set[str]: """Get all user_ids that are in the room directory because they're in the given room_id """ @@ -965,7 +962,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): "remove_user_who_share_room", _remove_user_who_share_room_txn ) - async def get_user_dir_rooms_user_is_in(self, user_id: str) -> List[str]: + async def get_user_dir_rooms_user_is_in(self, user_id: str) -> list[str]: """ Returns the rooms that a user is in. @@ -1031,7 +1028,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): } """ - join_args: Tuple[str, ...] = (user_id,) + join_args: tuple[str, ...] = (user_id,) if self.hs.config.userdirectory.user_directory_search_all_users: where_clause = "user_id != ?" @@ -1060,7 +1057,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): # We allow manipulating the ranking algorithm by injecting statements # based on config options. additional_ordering_statements = [] - ordering_arguments: Tuple[str, ...] = () + ordering_arguments: tuple[str, ...] = () if isinstance(self.database_engine, PostgresEngine): full_query, exact_query, prefix_query = _parse_query_postgres(search_term) @@ -1166,7 +1163,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): raise Exception("Unrecognized database engine") results = cast( - List[Tuple[str, Optional[str], Optional[str]]], + list[tuple[str, Optional[str], Optional[str]]], await self.db_pool.execute("search_user_dir", sql, *args), ) @@ -1232,7 +1229,7 @@ def _parse_query_sqlite(search_term: str) -> str: return " & ".join("(%s* OR %s)" % (result, result) for result in results) -def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]: +def _parse_query_postgres(search_term: str) -> tuple[str, str, str]: """Takes a plain unicode string from the user and converts it into a form that can be passed to the database. We use this so that we can add prefix matching, which isn't something @@ -1263,7 +1260,7 @@ def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]: return both, exact, prefix -def _parse_words(search_term: str) -> List[str]: +def _parse_words(search_term: str) -> list[str]: """Split the provided search string into a list of its words using ICU. 
Args: @@ -1275,7 +1272,7 @@ def _parse_words(search_term: str) -> List[str]: return _parse_words_with_icu(search_term) -def _parse_words_with_icu(search_term: str) -> List[str]: +def _parse_words_with_icu(search_term: str) -> list[str]: """Break down the provided search string into its individual words using ICU (International Components for Unicode). @@ -1298,7 +1295,7 @@ def _parse_words_with_icu(search_term: str) -> List[str]: # # In particular, user-71 in postgres gets tokenised to "user, -71", and this # will not match a query for "user, 71". - new_results: List[str] = [] + new_results: list[str] = [] i = 0 while i < len(results): curr = results[i] diff --git a/synapse/storage/databases/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py index cceed484c3..f89f11e149 100644 --- a/synapse/storage/databases/main/user_erasure_store.py +++ b/synapse/storage/databases/main/user_erasure_store.py @@ -18,7 +18,7 @@ # # -from typing import Iterable, List, Mapping, Tuple, cast +from typing import Iterable, Mapping, cast from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main import CacheInvalidationWorkerStore @@ -57,7 +57,7 @@ class UserErasureWorkerStore(CacheInvalidationWorkerStore): for each user, whether the user has requested erasure. """ rows = cast( - List[Tuple[str]], + list[tuple[str]], await self.db_pool.simple_select_many_batch( table="erased_users", column="user_id", diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index ac38b2ab19..a0d8667b07 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -22,11 +22,8 @@ import logging from typing import ( TYPE_CHECKING, - Dict, - List, Mapping, Optional, - Tuple, Union, ) @@ -106,7 +103,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): def _get_state_groups_from_groups_txn( self, txn: LoggingTransaction, - groups: List[int], + groups: list[int], state_filter: Optional[StateFilter] = None, ) -> Mapping[int, StateMap[str]]: """ @@ -123,7 +120,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): if state_filter is None: state_filter = StateFilter.all() - results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups} + results: dict[int, MutableStateMap[str]] = {group: {} for group in groups} if isinstance(self.database_engine, PostgresEngine): # Temporarily disable sequential scans in this transaction. This is @@ -147,7 +144,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): %s """ - overall_select_query_args: List[Union[int, str]] = [] + overall_select_query_args: list[Union[int, str]] = [] # This is an optimization to create a select clause per-condition. This # makes the query planner a lot smarter on what rows should pull out in the @@ -156,7 +153,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): use_condition_optimization = ( not state_filter.include_others and not state_filter.is_full() ) - state_filter_condition_combos: List[Tuple[str, Optional[str]]] = [] + state_filter_condition_combos: list[tuple[str, Optional[str]]] = [] # We don't need to caclculate this list if we're not using the condition # optimization if use_condition_optimization: @@ -173,7 +170,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): # `filter_events_for_client` which just uses 2 conditions # (`EventTypes.RoomHistoryVisibility` and `EventTypes.Member`). 
if use_condition_optimization and len(state_filter_condition_combos) < 10: - select_clause_list: List[str] = [] + select_clause_list: list[str] = [] for etype, skey in state_filter_condition_combos: if skey is None: where_clause = "(type = ?)" @@ -216,7 +213,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): """ for group in groups: - args: List[Union[int, str]] = [group] + args: list[Union[int, str]] = [group] args.extend(overall_select_query_args) txn.execute(sql % (overall_select_clause,), args) @@ -347,7 +344,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): ) max_group = rows[0][0] - def reindex_txn(txn: LoggingTransaction) -> Tuple[bool, int]: + def reindex_txn(txn: LoggingTransaction) -> tuple[bool, int]: new_last_state_group = last_state_group for count in range(batch_size): txn.execute( diff --git a/synapse/storage/databases/state/deletion.py b/synapse/storage/databases/state/deletion.py index 9b62c1d814..6975690c51 100644 --- a/synapse/storage/databases/state/deletion.py +++ b/synapse/storage/databases/state/deletion.py @@ -21,8 +21,6 @@ from typing import ( Collection, Mapping, Optional, - Set, - Tuple, ) from synapse.events.snapshot import EventPersistencePair @@ -233,7 +231,7 @@ class StateDeletionDataStore: any state groups referenced still exist and that they don't get deleted during this.""" - referenced_state_groups: Set[int] = set() + referenced_state_groups: set[int] = set() for event, ctx in event_and_contexts: if ctx.rejected or event.internal_metadata.is_outlier(): continue @@ -269,7 +267,7 @@ class StateDeletionDataStore: ) def _mark_state_groups_as_persisting_txn( - self, txn: LoggingTransaction, state_groups: Set[int] + self, txn: LoggingTransaction, state_groups: set[int] ) -> None: """Marks the given state groups as being persisted.""" @@ -508,7 +506,7 @@ class StateDeletionDataStore: async def get_next_state_group_collection_to_delete( self, - ) -> Optional[Tuple[str, Mapping[int, int]]]: + ) -> Optional[tuple[str, Mapping[int, int]]]: """Get the next set of state groups to try and delete Returns: @@ -522,7 +520,7 @@ class StateDeletionDataStore: def _get_next_state_group_collection_to_delete_txn( self, txn: LoggingTransaction, - ) -> Optional[Tuple[str, Mapping[int, int]]]: + ) -> Optional[tuple[str, Mapping[int, int]]]: """Implementation of `get_next_state_group_collection_to_delete`""" # We want to return chunks of state groups that were marked for deletion diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index b62f3e6f5b..6f25e7f0bc 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -22,13 +22,9 @@ import logging from typing import ( TYPE_CHECKING, - Dict, Iterable, - List, Mapping, Optional, - Set, - Tuple, cast, ) @@ -174,7 +170,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return _GetStateGroupDelta(None, None) delta_ids = cast( - List[Tuple[str, str, str]], + list[tuple[str, str, str]], self.db_pool.simple_select_list_txn( txn, table="state_groups_state", @@ -199,8 +195,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): @tag_args @cancellable async def _get_state_groups_from_groups( - self, groups: List[int], state_filter: StateFilter - ) -> Dict[int, StateMap[str]]: + self, groups: list[int], state_filter: StateFilter + ) -> dict[int, StateMap[str]]: """Returns the state groups for a given set of groups from the database, filtering on types of state events. 
@@ -211,7 +207,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): Returns: Dict of state group to state map. """ - results: Dict[int, StateMap[str]] = {} + results: dict[int, StateMap[str]] = {} chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)] for chunk in chunks: @@ -232,7 +228,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): cache: DictionaryCache[int, StateKey, str], group: int, state_filter: StateFilter, - ) -> Tuple[MutableStateMap[str], bool]: + ) -> tuple[MutableStateMap[str], bool]: """Checks if group is in cache. See `get_state_for_groups` Args: @@ -284,7 +280,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): @cancellable async def _get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None - ) -> Dict[int, MutableStateMap[str]]: + ) -> dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -355,7 +351,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): groups: Iterable[int], cache: DictionaryCache[int, StateKey, str], state_filter: StateFilter, - ) -> Tuple[Dict[int, MutableStateMap[str]], Set[int]]: + ) -> tuple[dict[int, MutableStateMap[str]], set[int]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key, querying from a specific cache. @@ -387,7 +383,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): def _insert_into_cache( self, - group_to_state_dict: Dict[int, StateMap[str]], + group_to_state_dict: dict[int, StateMap[str]], state_filter: StateFilter, cache_seq_num_members: int, cache_seq_num_non_members: int, @@ -452,10 +448,10 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): @tag_args async def store_state_deltas_for_batched( self, - events_and_context: List[Tuple[EventBase, UnpersistedEventContextBase]], + events_and_context: list[tuple[EventBase, UnpersistedEventContextBase]], room_id: str, prev_group: int, - ) -> List[Tuple[EventBase, UnpersistedEventContext]]: + ) -> list[tuple[EventBase, UnpersistedEventContext]]: """Generate and store state deltas for a group of events and contexts created to be batch persisted. Note that all the events must be in a linear chain (ie a <- b <- c). @@ -469,9 +465,9 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): def insert_deltas_group_txn( txn: LoggingTransaction, - events_and_context: List[Tuple[EventBase, UnpersistedEventContext]], + events_and_context: list[tuple[EventBase, UnpersistedEventContext]], prev_group: int, - ) -> List[Tuple[EventBase, UnpersistedEventContext]]: + ) -> list[tuple[EventBase, UnpersistedEventContext]]: """Generate and store state groups for the provided events and contexts. Requires that we have the state as a delta from the last persisted state group. @@ -782,7 +778,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): ) rows = cast( - List[Tuple[int]], + list[tuple[int]], self.db_pool.simple_select_many_txn( txn, table="state_group_edges", @@ -853,7 +849,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): @tag_args async def get_previous_state_groups( self, state_groups: Iterable[int] - ) -> Dict[int, int]: + ) -> dict[int, int]: """Fetch the previous groups of the given state groups. 
Args: @@ -864,7 +860,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): """ rows = cast( - List[Tuple[int, int]], + list[tuple[int, int]], await self.db_pool.simple_select_many_batch( table="state_group_edges", column="state_group", @@ -881,7 +877,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): @tag_args async def get_next_state_groups( self, state_groups: Iterable[int] - ) -> Dict[int, int]: + ) -> dict[int, int]: """Fetch the groups that have the given state groups as their previous state groups. @@ -893,7 +889,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): """ rows = cast( - List[Tuple[int, int]], + list[tuple[int, int]], await self.db_pool.simple_select_many_batch( table="state_group_edges", column="prev_state_group", diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index e4cd359201..8a1bbfa0f5 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast +from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, cast import psycopg2.extensions @@ -79,11 +79,11 @@ class PostgresEngine( def single_threaded(self) -> bool: return False - def get_db_locale(self, txn: Cursor) -> Tuple[str, str]: + def get_db_locale(self, txn: Cursor) -> tuple[str, str]: txn.execute( "SELECT datcollate, datctype FROM pg_database WHERE datname = current_database()" ) - collation, ctype = cast(Tuple[str, str], txn.fetchone()) + collation, ctype = cast(tuple[str, str], txn.fetchone()) return collation, ctype def check_database( diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 9d1795ebe5..ac3dc25bb5 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -22,7 +22,7 @@ import platform import sqlite3 import struct import threading -from typing import TYPE_CHECKING, Any, List, Mapping, Optional +from typing import TYPE_CHECKING, Any, Mapping, Optional from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines._base import AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER @@ -182,7 +182,7 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): # Following functions taken from: https://github.com/coleifer/peewee -def _parse_match_info(buf: bytes) -> List[int]: +def _parse_match_info(buf: bytes) -> list[int]: bufsize = len(buf) return [struct.unpack("@I", buf[i : i + 4])[0] for i in range(0, bufsize, 4)] diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index bf087702ea..d4bd8020e1 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -28,10 +28,8 @@ from typing import ( Counter as CounterType, Generator, Iterable, - List, Optional, TextIO, - Tuple, ) import attr @@ -270,7 +268,7 @@ def _setup_new_database( for database in databases ) - directory_entries: List[_DirectoryListing] = [] + directory_entries: list[_DirectoryListing] = [] for directory in directories: directory_entries.extend( _DirectoryListing(file_name, os.path.join(directory, file_name)) @@ -453,7 +451,7 @@ def _upgrade_existing_database( file_name_counter: CounterType[str] = Counter() # Now find which directories have anything of interest. 
- directory_entries: List[_DirectoryListing] = [] + directory_entries: list[_DirectoryListing] = [] for directory in directories: logger.debug("Looking for schema deltas in %s", directory) try: @@ -593,7 +591,7 @@ def _apply_module_schema_files( cur: Cursor, database_engine: BaseDatabaseEngine, modname: str, - names_and_streams: Iterable[Tuple[str, TextIO]], + names_and_streams: Iterable[tuple[str, TextIO]], ) -> None: """Apply the module schemas for a single module diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 9dc6c395e8..35da5351f8 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -20,7 +20,7 @@ # import logging -from typing import List, Optional, Tuple +from typing import Optional import attr @@ -84,6 +84,6 @@ class ProfileInfo: class MemberSummary: # A truncated list of (user_id, event_id) tuples for users of a given # membership type, suitable for use in calculating heroes for a room. - members: List[Tuple[str, str]] + members: list[tuple[str, str]] # The total number of users of a given membership type. count: int diff --git a/synapse/storage/schema/main/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py index 060217575b..b7e9a11c2f 100644 --- a/synapse/storage/schema/main/delta/30/as_users.py +++ b/synapse/storage/schema/main/delta/30/as_users.py @@ -19,7 +19,7 @@ # # import logging -from typing import Dict, Iterable, List, Tuple, cast +from typing import Iterable, cast from synapse.config.appservice import load_appservices from synapse.config.homeserver import HomeServerConfig @@ -44,7 +44,7 @@ def run_upgrade( config: HomeServerConfig, ) -> None: cur.execute("SELECT name FROM users") - rows = cast(Iterable[Tuple[str]], cur.fetchall()) + rows = cast(Iterable[tuple[str]], cur.fetchall()) config_files = [] try: @@ -54,7 +54,7 @@ def run_upgrade( appservices = load_appservices(config.server.server_name, config_files) - owned: Dict[str, List[str]] = {} + owned: dict[str, list[str]] = {} for row in rows: user_id = row[0] diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 4329d88c9a..fedf10dfc0 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -23,13 +23,10 @@ from typing import ( Any, Callable, Iterator, - List, Mapping, Optional, Protocol, Sequence, - Tuple, - Type, Union, ) @@ -47,11 +44,11 @@ class Cursor(Protocol): self, sql: str, parameters: Sequence[SQLQueryParameters] ) -> Any: ... - def fetchone(self) -> Optional[Tuple]: ... + def fetchone(self) -> Optional[tuple]: ... - def fetchmany(self, size: Optional[int] = ...) -> List[Tuple]: ... + def fetchmany(self, size: Optional[int] = ...) -> list[tuple]: ... - def fetchall(self) -> List[Tuple]: ... + def fetchall(self) -> list[tuple]: ... @property def description( @@ -66,7 +63,7 @@ class Cursor(Protocol): def rowcount(self) -> int: return 0 - def __iter__(self) -> Iterator[Tuple]: ... + def __iter__(self) -> Iterator[tuple]: ... def close(self) -> None: ... @@ -84,7 +81,7 @@ class Connection(Protocol): def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> Optional[bool]: ... @@ -117,20 +114,20 @@ class DBAPI2Module(Protocol): # explain why this is necessary for safety. TL;DR: we shouldn't be able to write # to `x`, only read from it. See also https://github.com/python/mypy/issues/6002 . @property - def Warning(self) -> Type[Exception]: ... 
+ def Warning(self) -> type[Exception]: ... @property - def Error(self) -> Type[Exception]: ... + def Error(self) -> type[Exception]: ... # Errors are divided into `InterfaceError`s (something went wrong in the database # driver) and `DatabaseError`s (something went wrong in the database). These are # both subclasses of `Error`, but we can't currently express this in type # annotations due to https://github.com/python/mypy/issues/8397 @property - def InterfaceError(self) -> Type[Exception]: ... + def InterfaceError(self) -> type[Exception]: ... @property - def DatabaseError(self) -> Type[Exception]: ... + def DatabaseError(self) -> type[Exception]: ... # Everything below is a subclass of `DatabaseError`. @@ -139,7 +136,7 @@ class DBAPI2Module(Protocol): # - An invalid date time was provided. # - A string contained a null code point. @property - def DataError(self) -> Type[Exception]: ... + def DataError(self) -> type[Exception]: ... # Roughly: something went wrong in the database, but it's not within the application # programmer's control. Examples: @@ -150,18 +147,18 @@ class DBAPI2Module(Protocol): # - The database ran out of resources, such as storage, memory, connections, etc. # - The database encountered an error from the operating system. @property - def OperationalError(self) -> Type[Exception]: ... + def OperationalError(self) -> type[Exception]: ... # Roughly: we've given the database data which breaks a rule we asked it to enforce. # Examples: # - Stop, criminal scum! You violated the foreign key constraint # - Also check constraints, non-null constraints, etc. @property - def IntegrityError(self) -> Type[Exception]: ... + def IntegrityError(self) -> type[Exception]: ... # Roughly: something went wrong within the database server itself. @property - def InternalError(self) -> Type[Exception]: ... + def InternalError(self) -> type[Exception]: ... # Roughly: the application did something silly that needs to be fixed. Examples: # - We don't have permissions to do something. @@ -169,11 +166,11 @@ class DBAPI2Module(Protocol): # - We tried to use a reserved name. # - We referred to a column that doesn't exist. @property - def ProgrammingError(self) -> Type[Exception]: ... + def ProgrammingError(self) -> type[Exception]: ... # Roughly: we've tried to do something that this database doesn't support. @property - def NotSupportedError(self) -> Type[Exception]: ... + def NotSupportedError(self) -> type[Exception]: ... # We originally wrote # def connect(self, *args, **kwargs) -> Connection: ... diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 1b7c5dac7a..5bf5c2b4bf 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -28,15 +28,10 @@ from typing import ( TYPE_CHECKING, AsyncContextManager, ContextManager, - Dict, Generic, Iterable, - List, Optional, Sequence, - Set, - Tuple, - Type, TypeVar, Union, cast, @@ -223,9 +218,9 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): stream_name: str, server_name: str, instance_name: str, - tables: List[Tuple[str, str, str]], + tables: list[tuple[str, str, str]], sequence_name: str, - writers: List[str], + writers: list[str], positive: bool = True, ) -> None: self._db = db @@ -243,7 +238,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): # Note: If we are a negative stream then we still store all the IDs as # positive to make life easier for us, and simply negate the IDs when we # return them. 
- self._current_positions: Dict[str, int] = {} + self._current_positions: dict[str, int] = {} # Set of local IDs that we're still processing. The current position # should be less than the minimum of this set (if not empty). @@ -260,7 +255,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): # Set of local IDs that we've processed that are larger than the current # position, due to there being smaller unpersisted IDs. - self._finished_ids: Set[int] = set() + self._finished_ids: set[int] = set() # We track the max position where we know everything before has been # persisted. This is done by a) looking at the min across all instances @@ -281,7 +276,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): self._persisted_upto_position = ( min(self._current_positions.values()) if self._current_positions else 1 ) - self._known_persisted_positions: List[int] = [] + self._known_persisted_positions: list[int] = [] # The maximum stream ID that we have seen been allocated across any writer. # Since this defaults to 1, this means that ID 1 is assumed to have already @@ -348,7 +343,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): def _load_current_ids( self, db_conn: LoggingDatabaseConnection, - tables: List[Tuple[str, str, str]], + tables: list[tuple[str, str, str]], sequence_name: str, ) -> None: cur = db_conn.cursor(txn_name="_load_current_ids") @@ -439,7 +434,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): self._persisted_upto_position = min_stream_id - rows: List[Tuple[str, int]] = [] + rows: list[tuple[str, int]] = [] for table, instance_column, id_column in tables: sql = """ SELECT %(instance)s, %(id)s FROM %(table)s @@ -453,13 +448,13 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): cur.execute(sql, (min_stream_id * self._return_factor,)) # Cast safety: this corresponds to the types returned by the query above. - rows.extend(cast(Iterable[Tuple[str, int]], cur)) + rows.extend(cast(Iterable[tuple[str, int]], cur)) # Sort by stream_id (ascending, lowest -> highest) so that we handle # rows in order for each instance because we don't want to overwrite # the current_position of an instance to a lower stream ID than # we're actually at. - def sort_by_stream_id_key_func(row: Tuple[str, int]) -> int: + def sort_by_stream_id_key_func(row: tuple[str, int]) -> int: (instance, stream_id) = row # If `stream_id` is ever `None`, we will see a `TypeError: '<' # not supported between instances of 'NoneType' and 'X'` error. @@ -492,7 +487,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): stream_ids = self._load_next_mult_id_txn(txn, 1) return stream_ids[0] - def _load_next_mult_id_txn(self, txn: Cursor, n: int) -> List[int]: + def _load_next_mult_id_txn(self, txn: Cursor, n: int) -> list[int]: # We need to track that we've requested some more stream IDs, and what # the current max allocated stream ID is. This is to prevent a race # where we've been allocated stream IDs but they have not yet been added @@ -529,7 +524,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): AsyncContextManager[int], _MultiWriterCtxManager(self, self._notifier) ) - def get_next_mult(self, n: int) -> AsyncContextManager[List[int]]: + def get_next_mult(self, n: int) -> AsyncContextManager[list[int]]: # If we have a list of instances that are allowed to write to this # stream, make sure we're in it. 
if self._writers and self._instance_name not in self._writers: @@ -537,7 +532,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): # Cast safety: see get_next. return cast( - AsyncContextManager[List[int]], + AsyncContextManager[list[int]], _MultiWriterCtxManager(self, self._notifier, n), ) @@ -578,7 +573,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): return self._return_factor * next_id - def get_next_mult_txn(self, txn: LoggingTransaction, n: int) -> List[int]: + def get_next_mult_txn(self, txn: LoggingTransaction, n: int) -> list[int]: """ Usage: @@ -615,7 +610,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): return [self._return_factor * next_id for next_id in next_ids] - def _mark_ids_as_finished(self, next_ids: List[int]) -> None: + def _mark_ids_as_finished(self, next_ids: list[int]) -> None: """These IDs have finished being processed so we should advance the current position if possible. """ @@ -707,7 +702,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): self._instance_name, self._persisted_upto_position ) - def get_positions(self) -> Dict[str, int]: + def get_positions(self) -> dict[str, int]: """Get a copy of the current positon map. Note that this won't necessarily include all configured writers if some @@ -849,7 +844,7 @@ class _AsyncCtxManagerWrapper(Generic[T]): async def __aexit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> Optional[bool]: @@ -863,9 +858,9 @@ class _MultiWriterCtxManager: id_gen: MultiWriterIdGenerator notifier: "ReplicationNotifier" multiple_ids: Optional[int] = None - stream_ids: List[int] = attr.Factory(list) + stream_ids: list[int] = attr.Factory(list) - async def __aenter__(self) -> Union[int, List[int]]: + async def __aenter__(self) -> Union[int, list[int]]: # It's safe to run this in autocommit mode as fetching values from a # sequence ignores transaction semantics anyway. self.stream_ids = await self.id_gen._db.runInteraction( @@ -882,7 +877,7 @@ class _MultiWriterCtxManager: async def __aexit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ) -> bool: diff --git a/synapse/storage/util/partial_state_events_tracker.py b/synapse/storage/util/partial_state_events_tracker.py index f8addf38f6..5078f6367b 100644 --- a/synapse/storage/util/partial_state_events_tracker.py +++ b/synapse/storage/util/partial_state_events_tracker.py @@ -21,7 +21,7 @@ import logging from collections import defaultdict -from typing import Collection, Dict, Set +from typing import Collection from twisted.internet import defer from twisted.internet.defer import Deferred @@ -43,7 +43,7 @@ class PartialStateEventsTracker: self._store = store # a map from event id to a set of Deferreds which are waiting for that event to be # un-partial-stated. 
- self._observers: Dict[str, Set[Deferred[None]]] = defaultdict(set) + self._observers: dict[str, set[Deferred[None]]] = defaultdict(set) def notify_un_partial_stated(self, event_id: str) -> None: """Notify that we now have full state for a given event @@ -93,7 +93,7 @@ class PartialStateEventsTracker: ) # create an observer for each lazy-joined event - observers: Dict[str, Deferred[None]] = { + observers: dict[str, Deferred[None]] = { event_id: Deferred() for event_id in partial_state_event_ids } for event_id, observer in observers.items(): @@ -140,7 +140,7 @@ class PartialCurrentStateTracker: # a map from room id to a set of Deferreds which are waiting for that room to be # un-partial-stated. - self._observers: Dict[str, Set[Deferred[None]]] = defaultdict(set) + self._observers: dict[str, set[Deferred[None]]] = defaultdict(set) def notify_un_partial_stated(self, room_id: str) -> None: """Notify that we now have full current state for a given room diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index cac3eba1a5..e2256aa109 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -21,7 +21,7 @@ import abc import logging import threading -from typing import TYPE_CHECKING, Callable, List, Optional +from typing import TYPE_CHECKING, Callable, Optional from synapse.storage.engines import ( BaseDatabaseEngine, @@ -61,7 +61,7 @@ class SequenceGenerator(metaclass=abc.ABCMeta): ... @abc.abstractmethod - def get_next_mult_txn(self, txn: Cursor, n: int) -> List[int]: + def get_next_mult_txn(self, txn: Cursor, n: int) -> list[int]: """Get the next `n` IDs in the sequence""" ... @@ -105,7 +105,7 @@ class PostgresSequenceGenerator(SequenceGenerator): assert fetch_res is not None return fetch_res[0] - def get_next_mult_txn(self, txn: Cursor, n: int) -> List[int]: + def get_next_mult_txn(self, txn: Cursor, n: int) -> list[int]: txn.execute( "SELECT nextval(?) 
FROM generate_series(1, ?)", (self._sequence_name, n) ) @@ -241,7 +241,7 @@ class LocalSequenceGenerator(SequenceGenerator): self._current_max_id += 1 return self._current_max_id - def get_next_mult_txn(self, txn: Cursor, n: int) -> List[int]: + def get_next_mult_txn(self, txn: Cursor, n: int) -> list[int]: with self._lock: if self._current_max_id is None: assert self._callback is not None diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py index 67635d7ebe..faf453b8a1 100644 --- a/synapse/streams/__init__.py +++ b/synapse/streams/__init__.py @@ -19,7 +19,7 @@ # # from abc import ABC, abstractmethod -from typing import Generic, List, Optional, Tuple, TypeVar +from typing import Generic, Optional, TypeVar from synapse.types import StrCollection, UserID @@ -39,5 +39,5 @@ class EventSource(ABC, Generic[K, R]): room_ids: StrCollection, is_guest: bool, explicit_room_id: Optional[str] = None, - ) -> Tuple[List[R], K]: + ) -> tuple[list[R], K]: raise NotImplementedError() diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 1e4bebe46d..143f659499 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Sequence, Tuple +from typing import TYPE_CHECKING, Sequence import attr @@ -52,7 +52,7 @@ class _EventSourcesInner: receipt: ReceiptEventSource account_data: AccountDataEventSource - def get_sources(self) -> Sequence[Tuple[StreamKeyType, EventSource]]: + def get_sources(self) -> Sequence[tuple[StreamKeyType, EventSource]]: return [ (StreamKeyType.ROOM, self.room), (StreamKeyType.PRESENCE, self.presence), diff --git a/synapse/synapse_rust/acl.pyi b/synapse/synapse_rust/acl.pyi index 985994d313..934d0de80a 100644 --- a/synapse/synapse_rust/acl.pyi +++ b/synapse/synapse_rust/acl.pyi @@ -13,10 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List - class ServerAclEvaluator: def __init__( - self, allow_ip_literals: bool, allow: List[str], deny: List[str] + self, allow_ip_literals: bool, allow: list[str], deny: list[str] ) -> None: ... def server_matches_acl_event(self, server_name: str) -> bool: ... diff --git a/synapse/synapse_rust/events.pyi b/synapse/synapse_rust/events.pyi index a82211283b..08c976121a 100644 --- a/synapse/synapse_rust/events.pyi +++ b/synapse/synapse_rust/events.pyi @@ -10,7 +10,7 @@ # See the GNU Affero General Public License for more details: # . -from typing import List, Mapping, Optional, Tuple +from typing import Mapping, Optional from synapse.types import JsonDict @@ -115,7 +115,7 @@ def event_visible_to_server( history_visibility: str, erased_senders: Mapping[str, bool], partial_state_invisible: bool, - memberships: List[Tuple[str, str]], + memberships: list[tuple[str, str]], ) -> bool: """Determine whether the server is allowed to see the unredacted event. diff --git a/synapse/synapse_rust/push.pyi b/synapse/synapse_rust/push.pyi index a3e12ad648..1e135b8c69 100644 --- a/synapse/synapse_rust/push.pyi +++ b/synapse/synapse_rust/push.pyi @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union +from typing import Any, Collection, Mapping, Optional, Sequence, Union from synapse.types import JsonDict, JsonValue @@ -43,7 +43,7 @@ class FilteredPushRules: def __init__( self, push_rules: PushRules, - enabled_map: Dict[str, bool], + enabled_map: dict[str, bool], msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, @@ -51,7 +51,7 @@ class FilteredPushRules: msc4210_enabled: bool, msc4306_enabled: bool, ): ... - def rules(self) -> Collection[Tuple[PushRule, bool]]: ... + def rules(self) -> Collection[tuple[PushRule, bool]]: ... def get_base_rule_ids() -> Collection[str]: ... @@ -65,7 +65,7 @@ class PushRuleEvaluator: notification_power_levels: Mapping[str, int], related_events_flattened: Mapping[str, Mapping[str, JsonValue]], related_event_match_enabled: bool, - room_version_feature_flags: Tuple[str, ...], + room_version_feature_flags: tuple[str, ...], msc3931_enabled: bool, msc4210_enabled: bool, msc4306_enabled: bool, diff --git a/synapse/synapse_rust/segmenter.pyi b/synapse/synapse_rust/segmenter.pyi index 5f36765947..19a0a4d83c 100644 --- a/synapse/synapse_rust/segmenter.pyi +++ b/synapse/synapse_rust/segmenter.pyi @@ -1,3 +1 @@ -from typing import List - -def parse_words(text: str) -> List[str]: ... +def parse_words(text: str) -> list[str]: ... diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 0386cb77d6..87436459ac 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -29,17 +29,12 @@ from typing import ( AbstractSet, Any, ClassVar, - Dict, - List, Literal, Mapping, Match, MutableMapping, NoReturn, Optional, - Set, - Tuple, - Type, TypedDict, TypeVar, Union, @@ -84,16 +79,16 @@ logger = logging.getLogger(__name__) # Define a state map type from type/state_key to T (usually an event ID or # event) T = TypeVar("T") -StateKey = Tuple[str, str] +StateKey = tuple[str, str] StateMap = Mapping[StateKey, T] MutableStateMap = MutableMapping[StateKey, T] # JSON types. These could be made stronger, but will do for now. # A "simple" (canonical) JSON value. SimpleJsonValue = Optional[Union[str, int, bool]] -JsonValue = Union[List[SimpleJsonValue], Tuple[SimpleJsonValue, ...], SimpleJsonValue] +JsonValue = Union[list[SimpleJsonValue], tuple[SimpleJsonValue, ...], SimpleJsonValue] # A JSON-serialisable dict. -JsonDict = Dict[str, Any] +JsonDict = dict[str, Any] # A JSON-serialisable mapping; roughly speaking an immutable JSONDict. # Useful when you have a TypedDict which isn't going to be mutated and you don't want # to cast to JsonDict everywhere. @@ -106,12 +101,12 @@ JsonSerializable = object # # StrCollection is an unordered collection of strings. If ordering is important, # StrSequence can be used instead. -StrCollection = Union[Tuple[str, ...], List[str], AbstractSet[str]] +StrCollection = Union[tuple[str, ...], list[str], AbstractSet[str]] # Sequence[str] that does not include str itself; str being a Sequence[str] # is very misleading and results in bugs. # # Unlike StrCollection, StrSequence is an ordered collection of strings. 
-StrSequence = Union[Tuple[str, ...], List[str]] +StrSequence = Union[tuple[str, ...], list[str]] # Note that this seems to require inheriting *directly* from Interface in order @@ -165,13 +160,13 @@ class Requester: user: "UserID" access_token_id: Optional[int] is_guest: bool - scope: Set[str] + scope: set[str] shadow_banned: bool device_id: Optional[str] app_service: Optional["ApplicationService"] authenticated_entity: str - def serialize(self) -> Dict[str, Any]: + def serialize(self) -> dict[str, Any]: """Converts self to a type that can be serialized as JSON, and then deserialized by `deserialize` @@ -191,7 +186,7 @@ class Requester: @staticmethod def deserialize( - store: "ApplicationServiceWorkerStore", input: Dict[str, Any] + store: "ApplicationServiceWorkerStore", input: dict[str, Any] ) -> "Requester": """Converts a dict that was produced by `serialize` back into a Requester. @@ -305,11 +300,11 @@ class DomainSpecificString(metaclass=abc.ABCMeta): def __copy__(self: DS) -> DS: return self - def __deepcopy__(self: DS, memo: Dict[str, object]) -> DS: + def __deepcopy__(self: DS, memo: dict[str, object]) -> DS: return self @classmethod - def from_string(cls: Type[DS], s: str) -> DS: + def from_string(cls: type[DS], s: str) -> DS: """Parse the string given by 's' into a structure object.""" if len(s) < 1 or s[0:1] != cls.SIGIL: raise SynapseError( @@ -337,7 +332,7 @@ class DomainSpecificString(metaclass=abc.ABCMeta): return "%s%s:%s" % (self.SIGIL, self.localpart, self.domain) @classmethod - def is_valid(cls: Type[DS], s: str) -> bool: + def is_valid(cls: type[DS], s: str) -> bool: """Parses the input string and attempts to ensure it is valid.""" # TODO: this does not reject an empty localpart or an overly-long string. # See https://spec.matrix.org/v1.2/appendices/#identifier-grammar @@ -393,7 +388,7 @@ class RoomID: room_id_with_domain: Optional[RoomIdWithDomain] @classmethod - def is_valid(cls: Type["RoomID"], s: str) -> bool: + def is_valid(cls: type["RoomID"], s: str) -> bool: if ":" in s: return RoomIdWithDomain.is_valid(s) try: @@ -415,7 +410,7 @@ class RoomID: __repr__ = to_string @classmethod - def from_string(cls: Type["RoomID"], s: str) -> "RoomID": + def from_string(cls: type["RoomID"], s: str) -> "RoomID": # sigil check if len(s) < 1 or s[0] != cls.SIGIL: raise SynapseError( @@ -829,7 +824,7 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): return super().copy_and_advance(other) - def as_historical_tuple(self) -> Tuple[int, int]: + def as_historical_tuple(self) -> tuple[int, int]: """Returns a tuple of `(topological, stream)` for historical tokens. Raises if not an historical token (i.e. doesn't have a topological part). @@ -1412,7 +1407,7 @@ class ThirdPartyInstanceID: def __copy__(self) -> "ThirdPartyInstanceID": return self - def __deepcopy__(self, memo: Dict[str, object]) -> "ThirdPartyInstanceID": + def __deepcopy__(self, memo: dict[str, object]) -> "ThirdPartyInstanceID": return self @classmethod @@ -1436,7 +1431,7 @@ class ReadReceipt: room_id: str receipt_type: str user_id: str - event_ids: List[str] + event_ids: list[str] thread_id: Optional[str] data: JsonDict @@ -1459,8 +1454,8 @@ class DeviceListUpdates: # The latter happening only once, thus always giving you the same sets # across multiple DeviceListUpdates instances. # Also see: don't define mutable default arguments. 
- changed: Set[str] = attr.ib(factory=set) - left: Set[str] = attr.ib(factory=set) + changed: set[str] = attr.ib(factory=set) + left: set[str] = attr.ib(factory=set) def __bool__(self) -> bool: return bool(self.changed or self.left) @@ -1468,7 +1463,7 @@ class DeviceListUpdates: def get_verify_key_from_cross_signing_key( key_info: Mapping[str, Any], -) -> Tuple[str, VerifyKey]: +) -> tuple[str, VerifyKey]: """Get the key ID and signedjson verify key from a cross-signing key dict Args: diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index f2fbc1dddf..80651bb685 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -19,7 +19,7 @@ # -from typing import List, Optional, TypedDict +from typing import Optional, TypedDict from synapse.api.constants import EventTypes @@ -87,7 +87,7 @@ class ShutdownRoomResponse(TypedDict): new_room_id: A string representing the room ID of the new room. """ - kicked_users: List[str] - failed_to_kick_users: List[str] - local_aliases: List[str] + kicked_users: list[str] + failed_to_kick_users: list[str] + local_aliases: list[str] new_room_id: Optional[str] diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index b7bc565464..aef7db8e98 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -21,16 +21,12 @@ from typing import ( AbstractSet, Any, Callable, - Dict, Final, Generic, - List, Mapping, MutableMapping, Optional, Sequence, - Set, - Tuple, TypeVar, cast, ) @@ -178,17 +174,17 @@ class SlidingSyncResult: name: Optional[str] avatar: Optional[str] - heroes: Optional[List[StrippedHero]] + heroes: Optional[list[StrippedHero]] is_dm: bool initial: bool unstable_expanded_timeline: bool # Should be empty for invite/knock rooms with `stripped_state` - required_state: List[EventBase] + required_state: list[EventBase] # Should be empty for invite/knock rooms with `stripped_state` - timeline_events: List[EventBase] - bundled_aggregations: Optional[Dict[str, "BundledAggregations"]] + timeline_events: list[EventBase] + bundled_aggregations: Optional[dict[str, "BundledAggregations"]] # Optional because it's only relevant to invite/knock rooms - stripped_state: List[JsonDict] + stripped_state: list[JsonDict] # Only optional because it won't be included for invite/knock rooms with `stripped_state` prev_batch: Optional[StreamToken] # Only optional because it won't be included for invite/knock rooms with `stripped_state` @@ -240,11 +236,11 @@ class SlidingSyncResult: """ op: OperationType - range: Tuple[int, int] - room_ids: List[str] + range: tuple[int, int] + room_ids: list[str] count: int - ops: List[Operation] + ops: list[Operation] @attr.s(slots=True, frozen=True, auto_attribs=True) class Extensions: @@ -415,7 +411,7 @@ class SlidingSyncResult: next_pos: SlidingSyncStreamToken lists: Mapping[str, SlidingWindowList] - rooms: Dict[str, RoomResult] + rooms: dict[str, RoomResult] extensions: Extensions def __bool__(self) -> bool: @@ -485,7 +481,7 @@ class RoomSyncConfig: Args: room_params: `SlidingSyncConfig.SlidingSyncList` or `SlidingSyncConfig.RoomSubscription` """ - required_state_map: Dict[str, Set[str]] = {} + required_state_map: dict[str, set[str]] = {} for ( state_type, state_key, diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 11d7e59b43..4940fabd12 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -18,7 +18,7 @@ # 
[This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Optional, Union from synapse._pydantic_compat import ( Extra, @@ -72,7 +72,7 @@ class ThreepidRequestTokenBody(RequestBodyModel): @validator("id_access_token", always=True) def token_required_for_identity_server( - cls, token: Optional[str], values: Dict[str, object] + cls, token: Optional[str], values: dict[str, object] ) -> Optional[str]: if values.get("id_server") is not None and token is None: raise ValueError("id_access_token is required if an id_server is supplied.") @@ -144,7 +144,7 @@ class SlidingSyncBody(RequestBodyModel): (Max 1000 messages) """ - required_state: List[Tuple[StrictStr, StrictStr]] + required_state: list[tuple[StrictStr, StrictStr]] # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: timeline_limit: int @@ -242,21 +242,21 @@ class SlidingSyncBody(RequestBodyModel): """ is_dm: Optional[StrictBool] = None - spaces: Optional[List[StrictStr]] = None + spaces: Optional[list[StrictStr]] = None is_encrypted: Optional[StrictBool] = None is_invite: Optional[StrictBool] = None - room_types: Optional[List[Union[StrictStr, None]]] = None - not_room_types: Optional[List[Union[StrictStr, None]]] = None + room_types: Optional[list[Union[StrictStr, None]]] = None + not_room_types: Optional[list[Union[StrictStr, None]]] = None room_name_like: Optional[StrictStr] = None - tags: Optional[List[StrictStr]] = None - not_tags: Optional[List[StrictStr]] = None + tags: Optional[list[StrictStr]] = None + not_tags: Optional[list[StrictStr]] = None # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: - ranges: Optional[List[Tuple[int, int]]] = None + ranges: Optional[list[tuple[int, int]]] = None else: ranges: Optional[ - List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]] + list[tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]] ] = None # type: ignore[valid-type] slow_get_all_rooms: Optional[StrictBool] = False filters: Optional[Filters] = None @@ -327,9 +327,9 @@ class SlidingSyncBody(RequestBodyModel): enabled: Optional[StrictBool] = False # Process all lists defined in the Sliding Window API. (This is the default.) - lists: Optional[List[StrictStr]] = ["*"] + lists: Optional[list[StrictStr]] = ["*"] # Process all room subscriptions defined in the Room Subscription API. (This is the default.) - rooms: Optional[List[StrictStr]] = ["*"] + rooms: Optional[list[StrictStr]] = ["*"] class ReceiptsExtension(RequestBodyModel): """The Receipts extension (MSC3960) @@ -344,9 +344,9 @@ class SlidingSyncBody(RequestBodyModel): enabled: Optional[StrictBool] = False # Process all lists defined in the Sliding Window API. (This is the default.) - lists: Optional[List[StrictStr]] = ["*"] + lists: Optional[list[StrictStr]] = ["*"] # Process all room subscriptions defined in the Room Subscription API. (This is the default.) - rooms: Optional[List[StrictStr]] = ["*"] + rooms: Optional[list[StrictStr]] = ["*"] class TypingExtension(RequestBodyModel): """The Typing Notification extension (MSC3961) @@ -361,9 +361,9 @@ class SlidingSyncBody(RequestBodyModel): enabled: Optional[StrictBool] = False # Process all lists defined in the Sliding Window API. (This is the default.) 
- lists: Optional[List[StrictStr]] = ["*"] + lists: Optional[list[StrictStr]] = ["*"] # Process all room subscriptions defined in the Room Subscription API. (This is the default.) - rooms: Optional[List[StrictStr]] = ["*"] + rooms: Optional[list[StrictStr]] = ["*"] class ThreadSubscriptionsExtension(RequestBodyModel): """The Thread Subscriptions extension (MSC4308) @@ -389,18 +389,18 @@ class SlidingSyncBody(RequestBodyModel): # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: - lists: Optional[Dict[str, SlidingSyncList]] = None + lists: Optional[dict[str, SlidingSyncList]] = None else: - lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = ( + lists: Optional[dict[constr(max_length=64, strict=True), SlidingSyncList]] = ( None # type: ignore[valid-type] ) - room_subscriptions: Optional[Dict[StrictStr, RoomSubscription]] = None + room_subscriptions: Optional[dict[StrictStr, RoomSubscription]] = None extensions: Optional[Extensions] = None @validator("lists") def lists_length_check( - cls, value: Optional[Dict[str, SlidingSyncList]] - ) -> Optional[Dict[str, SlidingSyncList]]: + cls, value: Optional[dict[str, SlidingSyncList]] + ) -> Optional[dict[str, SlidingSyncList]]: if value is not None: assert len(value) <= 100, f"Max lists: 100 but saw {len(value)}" return value diff --git a/synapse/types/state.py b/synapse/types/state.py index 6420e050a5..1b4de61d3e 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py @@ -25,13 +25,9 @@ from typing import ( Any, Callable, Collection, - Dict, Iterable, - List, Mapping, Optional, - Set, - Tuple, TypeVar, ) @@ -42,7 +38,7 @@ from synapse.api.constants import EventTypes from synapse.types import MutableStateMap, StateKey, StateMap if TYPE_CHECKING: - from typing import FrozenSet # noqa: used within quoted type hint; flake8 sad + pass # noqa: used within quoted type hint; flake8 sad logger = logging.getLogger(__name__) @@ -64,7 +60,7 @@ class StateFilter: appear in `types`. """ - types: "immutabledict[str, Optional[FrozenSet[str]]]" + types: "immutabledict[str, Optional[frozenset[str]]]" include_others: bool = False def __attrs_post_init__(self) -> None: @@ -105,7 +101,7 @@ class StateFilter: return _NONE_STATE_FILTER @staticmethod - def from_types(types: Iterable[Tuple[str, Optional[str]]]) -> "StateFilter": + def from_types(types: Iterable[tuple[str, Optional[str]]]) -> "StateFilter": """Creates a filter that only fetches the given types Args: @@ -115,7 +111,7 @@ class StateFilter: Returns: The new state filter. """ - type_dict: Dict[str, Optional[Set[str]]] = {} + type_dict: dict[str, Optional[set[str]]] = {} for typ, s in types: if typ in type_dict: if type_dict[typ] is None: @@ -134,7 +130,7 @@ class StateFilter: ) ) - def to_types(self) -> Iterable[Tuple[str, Optional[str]]]: + def to_types(self) -> Iterable[tuple[str, Optional[str]]]: """The inverse to `from_types`.""" for event_type, state_keys in self.types.items(): if state_keys is None: @@ -167,7 +163,7 @@ class StateFilter: Returns a (frozen) StateFilter with the same contents as the parameters specified here, which can be made of mutable types. 
""" - types_with_frozen_values: Dict[str, Optional[FrozenSet[str]]] = {} + types_with_frozen_values: dict[str, Optional[frozenset[str]]] = {} for state_types, state_keys in types.items(): if state_keys is not None: types_with_frozen_values[state_types] = frozenset(state_keys) @@ -240,7 +236,7 @@ class StateFilter: # We want to return all non-members return _ALL_NON_MEMBER_STATE_FILTER - def make_sql_filter_clause(self) -> Tuple[str, List[str]]: + def make_sql_filter_clause(self) -> tuple[str, list[str]]: """Converts the filter to an SQL clause. For example: @@ -257,7 +253,7 @@ class StateFilter: """ where_clause = "" - where_args: List[str] = [] + where_args: list[str] = [] if self.is_full(): return where_clause, where_args @@ -353,7 +349,7 @@ class StateFilter: state_keys is None for state_keys in self.types.values() ) - def concrete_types(self) -> List[Tuple[str, str]]: + def concrete_types(self) -> list[tuple[str, str]]: """Returns a list of concrete type/state_keys (i.e. not None) that will be fetched. This will be a complete list if `has_wildcards` returns False, but otherwise will be a subset (or even empty). @@ -368,7 +364,7 @@ class StateFilter: for s in state_keys ] - def wildcard_types(self) -> List[str]: + def wildcard_types(self) -> list[str]: """Returns a list of event types which require us to fetch all state keys. This will be empty unless `has_wildcards` returns True. @@ -377,7 +373,7 @@ class StateFilter: """ return [t for t, state_keys in self.types.items() if state_keys is None] - def get_member_split(self) -> Tuple["StateFilter", "StateFilter"]: + def get_member_split(self) -> tuple["StateFilter", "StateFilter"]: """Return the filter split into two: one which assumes it's exclusively matching against member state, and one which assumes it's matching against non member state. @@ -416,7 +412,7 @@ class StateFilter: def _decompose_into_four_parts( self, - ) -> Tuple[Tuple[bool, Set[str]], Tuple[Set[str], Set[StateKey]]]: + ) -> tuple[tuple[bool, set[str]], tuple[set[str], set[StateKey]]]: """ Decomposes this state filter into 4 constituent parts, which can be thought of as this: @@ -432,18 +428,18 @@ class StateFilter: correspondence. """ is_all = self.include_others - excluded_types: Set[str] = {t for t in self.types if is_all} - wildcard_types: Set[str] = {t for t, s in self.types.items() if s is None} - concrete_keys: Set[StateKey] = set(self.concrete_types()) + excluded_types: set[str] = {t for t in self.types if is_all} + wildcard_types: set[str] = {t for t, s in self.types.items() if s is None} + concrete_keys: set[StateKey] = set(self.concrete_types()) return (is_all, excluded_types), (wildcard_types, concrete_keys) @staticmethod def _recompose_from_four_parts( all_part: bool, - minus_wildcards: Set[str], - plus_wildcards: Set[str], - plus_state_keys: Set[StateKey], + minus_wildcards: set[str], + plus_wildcards: set[str], + plus_state_keys: set[StateKey], ) -> "StateFilter": """ Recomposes a state filter from 4 parts. @@ -454,7 +450,7 @@ class StateFilter: # {state type -> set of state keys OR None for wildcard} # (The same structure as that of a StateFilter.) 
- new_types: Dict[str, Optional[Set[str]]] = {} + new_types: dict[str, Optional[set[str]]] = {} # if we start with all, insert the excluded statetypes as empty sets # to prevent them from being included diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 2ae2e245a9..0d3b7ca740 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -23,12 +23,10 @@ import collections.abc import logging import typing from typing import ( - Dict, Iterator, Mapping, Optional, Sequence, - Set, TypeVar, ) @@ -119,8 +117,8 @@ class MutableOverlayMapping(collections.abc.MutableMapping[K, V]): """ _underlying_map: Mapping[K, V] - _mutable_map: Dict[K, V] = attr.ib(factory=dict) - _deletions: Set[K] = attr.ib(factory=set) + _mutable_map: dict[K, V] = attr.ib(factory=dict) + _deletions: set[K] = attr.ib(factory=set) def __getitem__(self, key: K) -> V: if key in self._deletions: diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 2a167f209c..c568b377d2 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -35,16 +35,12 @@ from typing import ( Callable, Collection, Coroutine, - Dict, Generator, Generic, Hashable, Iterable, - List, Literal, Optional, - Set, - Tuple, TypeVar, Union, overload, @@ -108,8 +104,8 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): __slots__ = ["_deferred", "_observers", "_result"] _deferred: "defer.Deferred[_T]" - _observers: Union[List["defer.Deferred[_T]"], Tuple[()]] - _result: Union[None, Tuple[Literal[True], _T], Tuple[Literal[False], Failure]] + _observers: Union[list["defer.Deferred[_T]"], tuple[()]] + _result: Union[None, tuple[Literal[True], _T], tuple[Literal[False], Failure]] def __init__(self, deferred: "defer.Deferred[_T]", consumeErrors: bool = False): object.__setattr__(self, "_deferred", deferred) @@ -268,7 +264,7 @@ async def yieldable_gather_results( iter: Iterable[T], *args: P.args, **kwargs: P.kwargs, -) -> List[R]: +) -> list[R]: """Executes the function with each argument concurrently. Args: @@ -310,7 +306,7 @@ async def yieldable_gather_results_delaying_cancellation( iter: Iterable[T], *args: P.args, **kwargs: P.kwargs, -) -> List[R]: +) -> list[R]: """Executes the function with each argument concurrently. Cancellation is delayed until after all the results have been gathered. @@ -350,49 +346,49 @@ T6 = TypeVar("T6") @overload def gather_results( - deferredList: Tuple[()], consumeErrors: bool = ... -) -> "defer.Deferred[Tuple[()]]": ... + deferredList: tuple[()], consumeErrors: bool = ... +) -> "defer.Deferred[tuple[()]]": ... @overload def gather_results( - deferredList: Tuple["defer.Deferred[T1]"], + deferredList: tuple["defer.Deferred[T1]"], consumeErrors: bool = ..., -) -> "defer.Deferred[Tuple[T1]]": ... +) -> "defer.Deferred[tuple[T1]]": ... @overload def gather_results( - deferredList: Tuple["defer.Deferred[T1]", "defer.Deferred[T2]"], + deferredList: tuple["defer.Deferred[T1]", "defer.Deferred[T2]"], consumeErrors: bool = ..., -) -> "defer.Deferred[Tuple[T1, T2]]": ... +) -> "defer.Deferred[tuple[T1, T2]]": ... @overload def gather_results( - deferredList: Tuple[ + deferredList: tuple[ "defer.Deferred[T1]", "defer.Deferred[T2]", "defer.Deferred[T3]" ], consumeErrors: bool = ..., -) -> "defer.Deferred[Tuple[T1, T2, T3]]": ... +) -> "defer.Deferred[tuple[T1, T2, T3]]": ... 
@overload def gather_results( - deferredList: Tuple[ + deferredList: tuple[ "defer.Deferred[T1]", "defer.Deferred[T2]", "defer.Deferred[T3]", "defer.Deferred[T4]", ], consumeErrors: bool = ..., -) -> "defer.Deferred[Tuple[T1, T2, T3, T4]]": ... +) -> "defer.Deferred[tuple[T1, T2, T3, T4]]": ... def gather_results( # type: ignore[misc] - deferredList: Tuple["defer.Deferred[T1]", ...], + deferredList: tuple["defer.Deferred[T1]", ...], consumeErrors: bool = False, -) -> "defer.Deferred[Tuple[T1, ...]]": +) -> "defer.Deferred[tuple[T1, ...]]": """Combines a tuple of `Deferred`s into a single `Deferred`. Wraps `defer.gatherResults` to provide type annotations that support heterogenous @@ -406,50 +402,50 @@ def gather_results( # type: ignore[misc] @overload async def gather_optional_coroutines( - *coroutines: Unpack[Tuple[Optional[Coroutine[Any, Any, T1]]]], -) -> Tuple[Optional[T1]]: ... + *coroutines: Unpack[tuple[Optional[Coroutine[Any, Any, T1]]]], +) -> tuple[Optional[T1]]: ... @overload async def gather_optional_coroutines( *coroutines: Unpack[ - Tuple[ + tuple[ Optional[Coroutine[Any, Any, T1]], Optional[Coroutine[Any, Any, T2]], ] ], -) -> Tuple[Optional[T1], Optional[T2]]: ... +) -> tuple[Optional[T1], Optional[T2]]: ... @overload async def gather_optional_coroutines( *coroutines: Unpack[ - Tuple[ + tuple[ Optional[Coroutine[Any, Any, T1]], Optional[Coroutine[Any, Any, T2]], Optional[Coroutine[Any, Any, T3]], ] ], -) -> Tuple[Optional[T1], Optional[T2], Optional[T3]]: ... +) -> tuple[Optional[T1], Optional[T2], Optional[T3]]: ... @overload async def gather_optional_coroutines( *coroutines: Unpack[ - Tuple[ + tuple[ Optional[Coroutine[Any, Any, T1]], Optional[Coroutine[Any, Any, T2]], Optional[Coroutine[Any, Any, T3]], Optional[Coroutine[Any, Any, T4]], ] ], -) -> Tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4]]: ... +) -> tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4]]: ... @overload async def gather_optional_coroutines( *coroutines: Unpack[ - Tuple[ + tuple[ Optional[Coroutine[Any, Any, T1]], Optional[Coroutine[Any, Any, T2]], Optional[Coroutine[Any, Any, T3]], @@ -457,13 +453,13 @@ async def gather_optional_coroutines( Optional[Coroutine[Any, Any, T5]], ] ], -) -> Tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4], Optional[T5]]: ... +) -> tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4], Optional[T5]]: ... @overload async def gather_optional_coroutines( *coroutines: Unpack[ - Tuple[ + tuple[ Optional[Coroutine[Any, Any, T1]], Optional[Coroutine[Any, Any, T2]], Optional[Coroutine[Any, Any, T3]], @@ -472,14 +468,14 @@ async def gather_optional_coroutines( Optional[Coroutine[Any, Any, T6]], ] ], -) -> Tuple[ +) -> tuple[ Optional[T1], Optional[T2], Optional[T3], Optional[T4], Optional[T5], Optional[T6] ]: ... async def gather_optional_coroutines( - *coroutines: Unpack[Tuple[Optional[Coroutine[Any, Any, T1]], ...]], -) -> Tuple[Optional[T1], ...]: + *coroutines: Unpack[tuple[Optional[Coroutine[Any, Any, T1]], ...]], +) -> tuple[Optional[T1], ...]: """Helper function that allows waiting on multiple coroutines at once. The return value is a tuple of the return values of the coroutines in order. @@ -563,7 +559,7 @@ class Linearizer: self._clock = clock # key_to_defer is a map from the key to a _LinearizerEntry. 
- self.key_to_defer: Dict[Hashable, _LinearizerEntry] = {} + self.key_to_defer: dict[Hashable, _LinearizerEntry] = {} def is_queued(self, key: Hashable) -> bool: """Checks whether there is a process queued up waiting""" @@ -698,10 +694,10 @@ class ReadWriteLock: def __init__(self) -> None: # Latest readers queued - self.key_to_current_readers: Dict[str, Set[defer.Deferred]] = {} + self.key_to_current_readers: dict[str, set[defer.Deferred]] = {} # Latest writer queued - self.key_to_current_writer: Dict[str, defer.Deferred] = {} + self.key_to_current_writer: dict[str, defer.Deferred] = {} def read(self, key: str) -> AsyncContextManager: @asynccontextmanager @@ -968,7 +964,7 @@ class AwakenableSleeper: """ def __init__(self, clock: Clock) -> None: - self._streams: Dict[str, Set[defer.Deferred[None]]] = {} + self._streams: dict[str, set[defer.Deferred[None]]] = {} self._clock = clock def wake(self, name: str) -> None: diff --git a/synapse/util/batching_queue.py b/synapse/util/batching_queue.py index f77301afd8..514abcbec1 100644 --- a/synapse/util/batching_queue.py +++ b/synapse/util/batching_queue.py @@ -24,12 +24,8 @@ from typing import ( TYPE_CHECKING, Awaitable, Callable, - Dict, Generic, Hashable, - List, - Set, - Tuple, TypeVar, ) @@ -102,7 +98,7 @@ class BatchingQueue(Generic[V, R]): name: str, hs: "HomeServer", clock: Clock, - process_batch_callback: Callable[[List[V]], Awaitable[R]], + process_batch_callback: Callable[[list[V]], Awaitable[R]], ): self._name = name self.hs = hs @@ -110,11 +106,11 @@ class BatchingQueue(Generic[V, R]): self._clock = clock # The set of keys currently being processed. - self._processing_keys: Set[Hashable] = set() + self._processing_keys: set[Hashable] = set() # The currently pending batch of values by key, with a Deferred to call # with the result of the corresponding `_process_batch_callback` call. - self._next_values: Dict[Hashable, List[Tuple[V, defer.Deferred]]] = {} + self._next_values: dict[Hashable, list[tuple[V, defer.Deferred]]] = {} # The function to call with batches of values. 
self._process_batch_callback = process_batch_callback diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 08ff842af0..c799fca550 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -24,7 +24,7 @@ import logging import typing from enum import Enum, auto from sys import intern -from typing import Any, Callable, Dict, List, Optional, Sized, TypeVar +from typing import Any, Callable, Optional, Sized, TypeVar import attr from prometheus_client import REGISTRY @@ -162,7 +162,7 @@ class CacheMetric: if self.memory_usage is not None: self.memory_usage = 0 - def describe(self) -> List[str]: + def describe(self) -> list[str]: return [] def collect(self) -> None: @@ -283,7 +283,7 @@ def intern_string(string: T) -> T: return string -def intern_dict(dictionary: Dict[str, Any]) -> Dict[str, Any]: +def intern_dict(dictionary: dict[str, Any]) -> dict[str, Any]: """Takes a dictionary and interns well known keys and their values""" return { KNOWN_KEYS.get(key, key): _intern_known_values(key, value) diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 016acbac71..380f2a78ca 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -26,13 +26,10 @@ import threading from typing import ( Callable, Collection, - Dict, Generic, MutableMapping, Optional, - Set, Sized, - Tuple, TypeVar, Union, cast, @@ -203,7 +200,7 @@ class DeferredCache(Generic[KT, VT]): self, keys: Collection[KT], callback: Optional[Callable[[], None]] = None, - ) -> Tuple[Dict[KT, VT], Optional["defer.Deferred[Dict[KT, VT]]"], Collection[KT]]: + ) -> tuple[dict[KT, VT], Optional["defer.Deferred[dict[KT, VT]]"], Collection[KT]]: """Bulk lookup of items in the cache. 
Returns: @@ -458,7 +455,7 @@ class CacheEntrySingle(CacheEntry[KT, VT]): def __init__(self, deferred: "defer.Deferred[VT]") -> None: self._deferred = ObservableDeferred(deferred, consumeErrors=True) - self._callbacks: Set[Callable[[], None]] = set() + self._callbacks: set[Callable[[], None]] = set() def deferred(self, key: KT) -> "defer.Deferred[VT]": return self._deferred.observe() @@ -481,9 +478,9 @@ class CacheMultipleEntries(CacheEntry[KT, VT]): __slots__ = ["_deferred", "_callbacks", "_global_callbacks"] def __init__(self) -> None: - self._deferred: Optional[ObservableDeferred[Dict[KT, VT]]] = None - self._callbacks: Dict[KT, Set[Callable[[], None]]] = {} - self._global_callbacks: Set[Callable[[], None]] = set() + self._deferred: Optional[ObservableDeferred[dict[KT, VT]]] = None + self._callbacks: dict[KT, set[Callable[[], None]]] = {} + self._global_callbacks: set[Callable[[], None]] = set() def deferred(self, key: KT) -> "defer.Deferred[VT]": if not self._deferred: @@ -513,7 +510,7 @@ class CacheMultipleEntries(CacheEntry[KT, VT]): def complete_bulk( self, cache: DeferredCache[KT, VT], - result: Dict[KT, VT], + result: dict[KT, VT], ) -> None: """Called when there is a result""" for key, value in result.items(): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 6e3c8eada9..7cc83bad37 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -26,17 +26,13 @@ from typing import ( Awaitable, Callable, Collection, - Dict, Generic, Hashable, Iterable, - List, Mapping, Optional, Protocol, Sequence, - Tuple, - Type, TypeVar, Union, cast, @@ -57,15 +53,15 @@ from synapse.util.clock import Clock logger = logging.getLogger(__name__) -CacheKey = Union[Tuple, Any] +CacheKey = Union[tuple, Any] F = TypeVar("F", bound=Callable[..., Any]) class CachedFunction(Generic[F]): - invalidate: Callable[[Tuple[Any, ...]], None] + invalidate: Callable[[tuple[Any, ...]], None] invalidate_all: Callable[[], None] - prefill: Callable[[Tuple[Any, ...], Any], None] + prefill: Callable[[tuple[Any, ...], Any], None] cache: Any = None num_args: Any = None @@ -247,7 +243,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): self.prune_unread_entries = prune_unread_entries def __get__( - self, obj: Optional[HasServerNameAndClock], owner: Optional[Type] + self, obj: Optional[HasServerNameAndClock], owner: Optional[type] ) -> Callable[..., "defer.Deferred[Any]"]: # We need access to instance-level `obj.server_name` attribute assert obj is not None, ( @@ -332,7 +328,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): def __init__( self, - orig: Callable[..., Awaitable[Dict]], + orig: Callable[..., Awaitable[dict]], cached_method_name: str, list_name: str, num_args: Optional[int] = None, @@ -363,8 +359,8 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): ) def __get__( - self, obj: Optional[Any], objtype: Optional[Type] = None - ) -> Callable[..., "defer.Deferred[Dict[Hashable, Any]]"]: + self, obj: Optional[Any], objtype: Optional[type] = None + ) -> Callable[..., "defer.Deferred[dict[Hashable, Any]]"]: cached_method = getattr(obj, self.cached_method_name) cache: DeferredCache[CacheKey, Any] = cached_method.cache num_args = cached_method.num_args @@ -376,7 +372,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): ) @functools.wraps(self.orig) - def wrapped(*args: Any, **kwargs: Any) -> "defer.Deferred[Dict]": + def wrapped(*args: Any, **kwargs: Any) -> "defer.Deferred[dict]": # If we're passed a cache_context then 
we'll want to call its # invalidate() whenever we are invalidated invalidate_callback = kwargs.pop("on_invalidate", None) @@ -412,10 +408,10 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): results = {cache_key_to_arg(key): v for key, v in immediate_results.items()} - cached_defers: List["defer.Deferred[Any]"] = [] + cached_defers: list["defer.Deferred[Any]"] = [] if pending_deferred: - def update_results(r: Dict) -> None: + def update_results(r: dict) -> None: for k, v in r.items(): results[cache_key_to_arg(k)] = v @@ -425,7 +421,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): if missing: cache_entry = cache.start_bulk_input(missing, invalidate_callback) - def complete_all(res: Dict[Hashable, Any]) -> None: + def complete_all(res: dict[Hashable, Any]) -> None: missing_results = {} for key in missing: arg = cache_key_to_arg(key) @@ -478,7 +474,7 @@ class _CacheContext: Cache = Union[DeferredCache, LruCache] _cache_context_objects: """WeakValueDictionary[ - Tuple["_CacheContext.Cache", CacheKey], "_CacheContext" + tuple["_CacheContext.Cache", CacheKey], "_CacheContext" ]""" = WeakValueDictionary() def __init__(self, cache: "_CacheContext.Cache", cache_key: CacheKey) -> None: diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index eb5493d322..dd6f413e79 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -22,13 +22,10 @@ import enum import logging import threading from typing import ( - Dict, Generic, Iterable, Literal, Optional, - Set, - Tuple, TypeVar, Union, ) @@ -65,8 +62,8 @@ class DictionaryEntry(Generic[DKT, DV]): """ full: bool - known_absent: Set[DKT] - value: Dict[DKT, DV] + known_absent: set[DKT] + value: dict[DKT, DV] def __len__(self) -> int: return len(self.value) @@ -160,8 +157,8 @@ class DictionaryCache(Generic[KT, DKT, DV]): # * A key of `(KT, DKT)` has a value of `_PerKeyValue` # * A key of `(KT, _FullCacheKey.KEY)` has a value of `Dict[DKT, DV]` self.cache: LruCache[ - Tuple[KT, Union[DKT, Literal[_FullCacheKey.KEY]]], - Union[_PerKeyValue, Dict[DKT, DV]], + tuple[KT, Union[DKT, Literal[_FullCacheKey.KEY]]], + Union[_PerKeyValue, dict[DKT, DV]], ] = LruCache( max_size=max_entries, clock=clock, @@ -297,7 +294,7 @@ class DictionaryCache(Generic[KT, DKT, DV]): self, sequence: int, key: KT, - value: Dict[DKT, DV], + value: dict[DKT, DV], fetched_keys: Optional[Iterable[DKT]] = None, ) -> None: """Updates the entry in the cache. @@ -332,7 +329,7 @@ class DictionaryCache(Generic[KT, DKT, DV]): self._update_subset(key, value, fetched_keys) def _update_subset( - self, key: KT, value: Dict[DKT, DV], fetched_keys: Iterable[DKT] + self, key: KT, value: dict[DKT, DV], fetched_keys: Iterable[DKT] ) -> None: """Add the given dictionary values as explicit keys in the cache. diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 324acb728a..04549ab65f 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -30,15 +30,10 @@ from typing import ( Any, Callable, Collection, - Dict, Generic, Iterable, - List, Literal, Optional, - Set, - Tuple, - Type, TypeVar, Union, cast, @@ -308,7 +303,7 @@ class _Node(Generic[KT, VT]): # footprint down. Storing `None` is free as its a singleton, while empty # lists are 56 bytes (and empty sets are 216 bytes, if we did the naive # thing and used sets). 
- self.callbacks: Optional[List[Callable[[], None]]] = None + self.callbacks: Optional[list[Callable[[], None]]] = None self.add_callbacks(callbacks) @@ -404,7 +399,7 @@ class LruCache(Generic[KT, VT]): clock: Clock, server_name: str, cache_name: str, - cache_type: Type[Union[dict, TreeCache]] = dict, + cache_type: type[Union[dict, TreeCache]] = dict, size_callback: Optional[Callable[[VT], int]] = None, metrics_collection_callback: Optional[Callable[[], None]] = None, apply_cache_factor_from_config: bool = True, @@ -420,7 +415,7 @@ class LruCache(Generic[KT, VT]): clock: Clock, server_name: str, cache_name: Literal[None] = None, - cache_type: Type[Union[dict, TreeCache]] = dict, + cache_type: type[Union[dict, TreeCache]] = dict, size_callback: Optional[Callable[[VT], int]] = None, metrics_collection_callback: Optional[Callable[[], None]] = None, apply_cache_factor_from_config: bool = True, @@ -435,7 +430,7 @@ class LruCache(Generic[KT, VT]): clock: Clock, server_name: str, cache_name: Optional[str] = None, - cache_type: Type[Union[dict, TreeCache]] = dict, + cache_type: type[Union[dict, TreeCache]] = dict, size_callback: Optional[Callable[[VT], int]] = None, metrics_collection_callback: Optional[Callable[[], None]] = None, apply_cache_factor_from_config: bool = True, @@ -489,7 +484,7 @@ class LruCache(Generic[KT, VT]): Note: The new key does not have to be unique. """ - cache: Union[Dict[KT, _Node[KT, VT]], TreeCache] = cache_type() + cache: Union[dict[KT, _Node[KT, VT]], TreeCache] = cache_type() self.cache = cache # Used for introspection. self.apply_cache_factor_from_config = apply_cache_factor_from_config @@ -529,7 +524,7 @@ class LruCache(Generic[KT, VT]): lock = threading.Lock() - extra_index: Dict[KT, Set[KT]] = {} + extra_index: dict[KT, set[KT]] = {} def evict() -> None: while cache_len() > self.max_size: @@ -682,21 +677,21 @@ class LruCache(Generic[KT, VT]): key: tuple, default: Literal[None] = None, update_metrics: bool = True, - ) -> Union[None, Iterable[Tuple[KT, VT]]]: ... + ) -> Union[None, Iterable[tuple[KT, VT]]]: ... @overload def cache_get_multi( key: tuple, default: T, update_metrics: bool = True, - ) -> Union[T, Iterable[Tuple[KT, VT]]]: ... + ) -> Union[T, Iterable[tuple[KT, VT]]]: ... @synchronized def cache_get_multi( key: tuple, default: Optional[T] = None, update_metrics: bool = True, - ) -> Union[None, T, Iterable[Tuple[KT, VT]]]: + ) -> Union[None, T, Iterable[tuple[KT, VT]]]: """Returns a generator yielding all entries under the given key. Can only be used if backed by a tree cache. 
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 3d39357236..e82036d7e0 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -24,7 +24,6 @@ from typing import ( Any, Awaitable, Callable, - Dict, Generic, Iterable, Optional, @@ -119,7 +118,7 @@ class ResponseCache(Generic[KV]): timeout_ms enable_logging """ - self._result_cache: Dict[KV, ResponseCacheEntry] = {} + self._result_cache: dict[KV, ResponseCacheEntry] = {} self.clock = clock self.timeout_sec = timeout_ms / 1000.0 diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 2cffd352d8..552570fbb9 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -21,7 +21,7 @@ import logging import math -from typing import Collection, Dict, FrozenSet, List, Mapping, Optional, Set, Union +from typing import Collection, Mapping, Optional, Union import attr from sortedcontainers import SortedDict @@ -45,14 +45,14 @@ class AllEntitiesChangedResult: that callers do the correct checks. """ - _entities: Optional[List[EntityType]] + _entities: Optional[list[EntityType]] @property def hit(self) -> bool: return self._entities is not None @property - def entities(self) -> List[EntityType]: + def entities(self) -> list[EntityType]: assert self._entities is not None return self._entities @@ -94,11 +94,11 @@ class StreamChangeCache: self._max_size = math.floor(max_size) # map from stream id to the set of entities which changed at that stream id. - self._cache: SortedDict[int, Set[EntityType]] = SortedDict() + self._cache: SortedDict[int, set[EntityType]] = SortedDict() # map from entity to the stream ID of the latest change for that entity. # # Must be kept in sync with _cache. - self._entity_to_key: Dict[EntityType, int] = {} + self._entity_to_key: dict[EntityType, int] = {} # the earliest stream_pos for which we can reliably answer # get_all_entities_changed. In other words, one less than the earliest @@ -182,7 +182,7 @@ class StreamChangeCache: def get_entities_changed( self, entities: Collection[EntityType], stream_pos: int, _perf_factor: int = 1 - ) -> Union[Set[EntityType], FrozenSet[EntityType]]: + ) -> Union[set[EntityType], frozenset[EntityType]]: """ Returns the subset of the given entities that have had changes after the given position. 
@@ -291,7 +291,7 @@ class StreamChangeCache: if stream_pos < self._earliest_known_stream_pos: return AllEntitiesChangedResult(None) - changed_entities: List[EntityType] = [] + changed_entities: list[EntityType] = [] for k in self._cache.islice(start=self._cache.bisect_right(stream_pos)): changed_entities.extend(self._cache[k]) diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index 18c3a1e51c..2be9463d6a 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -21,7 +21,7 @@ import logging import time -from typing import Any, Callable, Dict, Generic, Tuple, TypeVar, Union +from typing import Any, Callable, Generic, TypeVar, Union import attr from sortedcontainers import SortedList @@ -56,7 +56,7 @@ class TTLCache(Generic[KT, VT]): """ # map from key to _CacheEntry - self._data: Dict[KT, _CacheEntry[KT, VT]] = {} + self._data: dict[KT, _CacheEntry[KT, VT]] = {} # the _CacheEntries, sorted by expiry time self._expiry_list: SortedList[_CacheEntry[KT, VT]] = SortedList() @@ -113,7 +113,7 @@ class TTLCache(Generic[KT, VT]): self._metrics.inc_hits() return e.value - def get_with_expiry(self, key: KT) -> Tuple[VT, float, float]: + def get_with_expiry(self, key: KT) -> tuple[VT, float, float]: """Get a value, and its expiry time, from the cache Args: diff --git a/synapse/util/clock.py b/synapse/util/clock.py index 5e65cf32a4..6557582629 100644 --- a/synapse/util/clock.py +++ b/synapse/util/clock.py @@ -17,8 +17,6 @@ from typing import ( Any, Callable, - Dict, - List, ) from typing_extensions import ParamSpec @@ -62,10 +60,10 @@ class Clock: self._delayed_call_id: int = 0 """Unique ID used to track delayed calls""" - self._looping_calls: List[LoopingCall] = [] + self._looping_calls: list[LoopingCall] = [] """List of active looping calls""" - self._call_id_to_delayed_call: Dict[int, IDelayedCall] = {} + self._call_id_to_delayed_call: dict[int, IDelayedCall] = {} """Mapping from unique call ID to delayed call""" self._is_shutdown = False diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py index dba815040d..411b47f939 100644 --- a/synapse/util/daemonize.py +++ b/synapse/util/daemonize.py @@ -27,7 +27,7 @@ import os import signal import sys from types import FrameType, TracebackType -from typing import NoReturn, Optional, Type +from typing import NoReturn, Optional from synapse.logging.context import ( LoggingContext, @@ -119,7 +119,7 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") - # also catch any other uncaught exceptions before we get that far.) def excepthook( - type_: Type[BaseException], + type_: type[BaseException], value: BaseException, traceback: Optional[TracebackType], ) -> None: diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index dec6536e4e..e8df5399cd 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -24,9 +24,7 @@ from typing import ( Any, Awaitable, Callable, - Dict, Generic, - List, Optional, TypeVar, Union, @@ -69,8 +67,8 @@ class Distributor: (this should be `hs.hostname`). 
""" self.hs = hs - self.signals: Dict[str, Signal] = {} - self.pre_registration: Dict[str, List[Callable]] = {} + self.signals: dict[str, Signal] = {} + self.pre_registration: dict[str, list[Callable]] = {} def declare(self, name: str) -> None: if name in self.signals: @@ -122,7 +120,7 @@ class Signal(Generic[P]): def __init__(self, name: str): self.name: str = name - self.observers: List[Callable[P, Any]] = [] + self.observers: list[Callable[P, Any]] = [] def observe(self, observer: Callable[P, Any]) -> None: """Adds a new callable to the observer list which will be invoked by @@ -131,7 +129,7 @@ class Signal(Generic[P]): Each observer callable may return a Deferred.""" self.observers.append(observer) - def fire(self, *args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[List[Any]]": + def fire(self, *args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[list[Any]]": """Invokes every callable in the observer list, passing in the args and kwargs. Exceptions thrown by observers are logged but ignored. It is not an error to fire a signal with no observers. diff --git a/synapse/util/events.py b/synapse/util/events.py index 4808268702..e41799b1f7 100644 --- a/synapse/util/events.py +++ b/synapse/util/events.py @@ -13,7 +13,7 @@ # # -from typing import Any, List, Optional +from typing import Any, Optional from synapse._pydantic_compat import Field, StrictStr, ValidationError, validator from synapse.types import JsonDict @@ -52,7 +52,7 @@ class MTopic(ParseModel): See `TopicContentBlock` in the Matrix specification. """ - m_text: Optional[List[MTextRepresentation]] = Field(alias="m.text") + m_text: Optional[list[MTextRepresentation]] = Field(alias="m.text") """ An ordered array of textual representations in different mimetypes. """ @@ -63,7 +63,7 @@ class MTopic(ParseModel): @validator("m_text", pre=True) def ignore_invalid_representations( cls, m_text: Any - ) -> Optional[List[MTextRepresentation]]: + ) -> Optional[list[MTextRepresentation]]: if not isinstance(m_text, list): raise ValueError("m.text must be a list") representations = [] diff --git a/synapse/util/gai_resolver.py b/synapse/util/gai_resolver.py index 3c7a966e87..e07003f1af 100644 --- a/synapse/util/gai_resolver.py +++ b/synapse/util/gai_resolver.py @@ -17,12 +17,9 @@ from socket import ( from typing import ( TYPE_CHECKING, Callable, - List, NoReturn, Optional, Sequence, - Tuple, - Type, Union, ) @@ -91,13 +88,13 @@ _socktypeToType = { } -_GETADDRINFO_RESULT = List[ - Tuple[ +_GETADDRINFO_RESULT = list[ + tuple[ AddressFamily, SocketKind, int, str, - Union[Tuple[str, int], Tuple[str, int, int, int], Tuple[int, bytes]], + Union[tuple[str, int], tuple[str, int, int, int], tuple[int, bytes]], ] ] @@ -141,7 +138,7 @@ class GAIResolver: resolutionReceiver: IResolutionReceiver, hostName: str, portNumber: int = 0, - addressTypes: Optional[Sequence[Type[IAddress]]] = None, + addressTypes: Optional[Sequence[type[IAddress]]] = None, transportSemantics: str = "TCP", ) -> IHostResolution: """ diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py index 6471b31c94..46fa92a4c5 100644 --- a/synapse/util/httpresourcetree.py +++ b/synapse/util/httpresourcetree.py @@ -20,7 +20,6 @@ # import logging -from typing import Dict from twisted.web.resource import Resource @@ -30,7 +29,7 @@ logger = logging.getLogger(__name__) def create_resource_tree( - desired_tree: Dict[str, Resource], root_resource: Resource + desired_tree: dict[str, Resource], root_resource: Resource ) -> Resource: """Create the resource tree for this 
homeserver. @@ -48,7 +47,7 @@ def create_resource_tree( # unless you give it a Request object IN ADDITION to the name :/ So # instead, we'll store a copy of this mapping so we can actually add # extra resources to existing nodes. See self._resource_id for the key. - resource_mappings: Dict[str, Resource] = {} + resource_mappings: dict[str, Resource] = {} for full_path_str, res in desired_tree.items(): # twisted requires all resources to be bytes full_path = full_path_str.encode("utf-8") diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index 0a6a30aab2..19789a4666 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -24,16 +24,12 @@ from itertools import islice from typing import ( Callable, Collection, - Dict, Generator, Iterable, Iterator, - List, Mapping, Protocol, - Set, Sized, - Tuple, TypeVar, ) @@ -52,7 +48,7 @@ class _SelfSlice(Sized, Protocol): def __getitem__(self: S, i: slice) -> S: ... -def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]: +def batch_iter(iterable: Iterable[T], size: int) -> Iterator[tuple[T, ...]]: """batch an iterable up into tuples with a maximum size Args: @@ -80,7 +76,7 @@ def chunk_seq(iseq: S, maxlen: int) -> Iterator[S]: def partition( iterable: Iterable[T], predicate: Callable[[T], bool] -) -> Tuple[List[T], List[T]]: +) -> tuple[list[T], list[T]]: """ Separate a given iterable into two lists based on the result of a predicate function. @@ -115,7 +111,7 @@ def sorted_topologically( # This is implemented by Kahn's algorithm. degree_map = dict.fromkeys(nodes, 0) - reverse_graph: Dict[T, Set[T]] = {} + reverse_graph: dict[T, set[T]] = {} for node, edges in graph.items(): if node not in degree_map: @@ -165,7 +161,7 @@ def sorted_topologically_batched( """ degree_map = dict.fromkeys(nodes, 0) - reverse_graph: Dict[T, Set[T]] = {} + reverse_graph: dict[T, set[T]] = {} for node, edges in graph.items(): if node not in degree_map: diff --git a/synapse/util/json.py b/synapse/util/json.py index e6db55f8e4..b1091704a8 100644 --- a/synapse/util/json.py +++ b/synapse/util/json.py @@ -16,7 +16,6 @@ import json from typing import ( Any, - Dict, ) from immutabledict import immutabledict @@ -27,7 +26,7 @@ def _reject_invalid_json(val: Any) -> None: raise ValueError("Invalid JSON value: '%s'" % val) -def _handle_immutabledict(obj: Any) -> Dict[Any, Any]: +def _handle_immutabledict(obj: Any) -> dict[Any, Any]: """Helper for json_encoder. Makes immutabledicts serializable by returning the underlying dict """ diff --git a/synapse/util/linked_list.py b/synapse/util/linked_list.py index 87f801c0cf..052863fdd6 100644 --- a/synapse/util/linked_list.py +++ b/synapse/util/linked_list.py @@ -22,7 +22,7 @@ """A circular doubly linked list implementation.""" import threading -from typing import Generic, Optional, Type, TypeVar +from typing import Generic, Optional, TypeVar P = TypeVar("P") LN = TypeVar("LN", bound="ListNode") @@ -53,7 +53,7 @@ class ListNode(Generic[P]): self.next_node: Optional[ListNode[P]] = None @classmethod - def create_root_node(cls: Type["ListNode[P]"]) -> "ListNode[P]": + def create_root_node(cls: type["ListNode[P]"]) -> "ListNode[P]": """Create a new linked list by creating a "root" node, which is a node that has prev_node/next_node pointing to itself and no associated cache entry. 
@@ -65,7 +65,7 @@ class ListNode(Generic[P]): @classmethod def insert_after( - cls: Type[LN], + cls: type[LN], cache_entry: P, node: "ListNode[P]", ) -> LN: diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 63ec3e7e1b..dbf444e015 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -21,7 +21,7 @@ import inspect import sys import traceback -from typing import Any, Dict, Optional +from typing import Any, Optional from twisted.conch import manhole_ssh from twisted.conch.insults import insults @@ -71,7 +71,7 @@ EddTrx3TNpr1D5m/f+6mnXWrc8u9y1+GNx9yz889xMjIBTBI9KqaaOs= -----END RSA PRIVATE KEY-----""" -def manhole(settings: ManholeConfig, globals: Dict[str, Any]) -> ServerFactory: +def manhole(settings: ManholeConfig, globals: dict[str, Any]) -> ServerFactory: """Starts a ssh listener with password authentication using the given username and password. Clients connecting to the ssh listener will find themselves in a colored python shell with diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index f71380d689..6d1adf1131 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -25,11 +25,9 @@ from types import TracebackType from typing import ( Awaitable, Callable, - Dict, Generator, Optional, Protocol, - Type, TypeVar, ) @@ -238,7 +236,7 @@ class Measure: def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: @@ -293,8 +291,8 @@ class DynamicCollectorRegistry(CollectorRegistry): def __init__(self) -> None: super().__init__() - self._server_name_to_pre_update_hooks: Dict[ - str, Dict[str, Callable[[], None]] + self._server_name_to_pre_update_hooks: dict[ + str, dict[str, Callable[[], None]] ] = {} """ Mapping of server name to a mapping of metric name to metric pre-update diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index a33c75d54e..ae50e302ed 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -21,7 +21,7 @@ import importlib import importlib.util from types import ModuleType -from typing import Any, Tuple, Type +from typing import Any import jsonschema @@ -30,7 +30,7 @@ from synapse.config._util import json_error_to_config_error from synapse.types import StrSequence -def load_module(provider: dict, config_path: StrSequence) -> Tuple[Type, Any]: +def load_module(provider: dict, config_path: StrSequence) -> tuple[type, Any]: """Loads a synapse module with its config Args: diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index c776ad65b3..fca166a5b8 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -21,7 +21,7 @@ import functools import sys from types import GeneratorType -from typing import Any, Callable, Generator, List, TypeVar, cast +from typing import Any, Callable, Generator, TypeVar, cast from typing_extensions import ParamSpec @@ -56,7 +56,7 @@ def do_patch() -> None: @functools.wraps(f) def wrapped(*args: P.args, **kwargs: P.kwargs) -> "Deferred[T]": start_context = current_context() - changes: List[str] = [] + changes: list[str] = [] orig: Callable[P, "Deferred[T]"] = orig_inline_callbacks( _check_yield_points(f, changes) ) @@ -126,7 +126,7 @@ def do_patch() -> None: def _check_yield_points( f: Callable[P, Generator["Deferred[object]", object, T]], - changes: List[str], + changes: list[str], ) -> Callable: """Wraps a generator that is about to be 
passed to defer.inlineCallbacks checking that after every yield the log contexts are correct. diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 756677fe6c..37d2e4505d 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -28,15 +28,10 @@ from typing import ( Any, Callable, ContextManager, - DefaultDict, - Dict, Iterator, - List, Mapping, MutableSet, Optional, - Set, - Tuple, ) from weakref import WeakSet @@ -104,7 +99,7 @@ _rate_limiter_instances_lock = threading.Lock() def _get_counts_from_rate_limiter_instance( count_func: Callable[["FederationRateLimiter"], int], -) -> Mapping[Tuple[str, ...], int]: +) -> Mapping[tuple[str, ...], int]: """Returns a count of something (slept/rejected hosts) by (metrics_name)""" # Cast to a list to prevent it changing while the Prometheus # thread is collecting metrics @@ -114,7 +109,7 @@ def _get_counts_from_rate_limiter_instance( # Map from (metrics_name,) -> int, the number of something like slept hosts # or rejected hosts. The key type is Tuple[str], but we leave the length # unspecified for compatability with LaterGauge's annotations. - counts: Dict[Tuple[str, ...], int] = {} + counts: dict[tuple[str, ...], int] = {} for rate_limiter_instance in rate_limiter_instances: # Only track metrics if they provided a `metrics_name` to # differentiate this instance of the rate limiter. @@ -191,7 +186,7 @@ class FederationRateLimiter: metrics_name=metrics_name, ) - self.ratelimiters: DefaultDict[str, "_PerHostRatelimiter"] = ( + self.ratelimiters: collections.defaultdict[str, "_PerHostRatelimiter"] = ( collections.defaultdict(new_limiter) ) @@ -244,7 +239,7 @@ class _PerHostRatelimiter: self.concurrent_requests = config.concurrent # request_id objects for requests which have been slept - self.sleeping_requests: Set[object] = set() + self.sleeping_requests: set[object] = set() # map from request_id object to Deferred for requests which are ready # for processing but have been queued @@ -253,11 +248,11 @@ class _PerHostRatelimiter: ] = collections.OrderedDict() # request id objects for requests which are in progress - self.current_processing: Set[object] = set() + self.current_processing: set[object] = set() # times at which we have recently (within the last window_size ms) # received requests. 
- self.request_times: List[int] = [] + self.request_times: list[int] = [] @contextlib.contextmanager def ratelimit(self, host: str) -> "Iterator[defer.Deferred[None]]": diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 96fe2bd566..ce747c3f19 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -21,7 +21,7 @@ import logging import random from types import TracebackType -from typing import TYPE_CHECKING, Any, Optional, Type +from typing import TYPE_CHECKING, Any, Optional from synapse.api.errors import CodeMessageException from synapse.storage import DataStore @@ -230,7 +230,7 @@ class RetryDestinationLimiter: def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 32b5bc00c9..6b0d3677da 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -23,7 +23,7 @@ import itertools import re import secrets import string -from typing import Any, Iterable, Optional, Tuple +from typing import Any, Iterable, Optional from netaddr import valid_ipv6 @@ -92,7 +92,7 @@ def assert_valid_client_secret(client_secret: str) -> None: ) -def parse_server_name(server_name: str) -> Tuple[str, Optional[int]]: +def parse_server_name(server_name: str) -> tuple[str, Optional[int]]: """Split a server name into host/port parts. Args: @@ -123,7 +123,7 @@ def parse_server_name(server_name: str) -> Tuple[str, Optional[int]]: VALID_HOST_REGEX = re.compile("\\A[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*\\Z") -def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int]]: +def parse_and_validate_server_name(server_name: str) -> tuple[str, Optional[int]]: """Split a server name into host/port parts and do some basic validation. Args: @@ -190,7 +190,7 @@ def valid_id_server_location(id_server: str) -> bool: return "#" not in path and "?" 
not in path -def parse_and_validate_mxc_uri(mxc: str) -> Tuple[str, Optional[int], str]: +def parse_and_validate_mxc_uri(mxc: str) -> tuple[str, Optional[int], str]: """Parse the given string as an MXC URI Checks that the "server name" part is a valid server name diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 8dd6f12feb..f033d37579 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Optional from twisted.python.failure import Failure @@ -110,13 +110,13 @@ class TaskScheduler: self.server_name = hs.hostname self._store = hs.get_datastores().main self._clock = hs.get_clock() - self._running_tasks: Set[str] = set() + self._running_tasks: set[str] = set() # A map between action names and their registered function - self._actions: Dict[ + self._actions: dict[ str, Callable[ [ScheduledTask], - Awaitable[Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]], + Awaitable[tuple[TaskStatus, Optional[JsonMapping], Optional[str]]], ], ] = {} self._run_background_tasks = hs.config.worker.run_background_tasks @@ -143,7 +143,7 @@ class TaskScheduler: self, function: Callable[ [ScheduledTask], - Awaitable[Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]], + Awaitable[tuple[TaskStatus, Optional[JsonMapping], Optional[str]]], ], action_name: str, ) -> None: @@ -278,12 +278,12 @@ class TaskScheduler: async def get_tasks( self, *, - actions: Optional[List[str]] = None, + actions: Optional[list[str]] = None, resource_id: Optional[str] = None, - statuses: Optional[List[TaskStatus]] = None, + statuses: Optional[list[TaskStatus]] = None, max_timestamp: Optional[int] = None, limit: Optional[int] = None, - ) -> List[ScheduledTask]: + ) -> list[ScheduledTask]: """Get a list of tasks. Returns all the tasks if no args are provided. If an arg is `None`, all tasks matching the other args will be selected. diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index 95eb1d7185..c63faa96df 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -19,7 +19,7 @@ # # import logging -from typing import Generic, Hashable, List, Set, TypeVar +from typing import Generic, Hashable, TypeVar import attr @@ -31,7 +31,7 @@ T = TypeVar("T", bound=Hashable) @attr.s(slots=True, frozen=True, auto_attribs=True) class _Entry(Generic[T]): end_key: int - elements: Set[T] = attr.Factory(set) + elements: set[T] = attr.Factory(set) class WheelTimer(Generic[T]): @@ -46,7 +46,7 @@ class WheelTimer(Generic[T]): accuracy of the timer. """ self.bucket_size: int = bucket_size - self.entries: List[_Entry[T]] = [] + self.entries: list[_Entry[T]] = [] def insert(self, now: int, obj: T, then: int) -> None: """Inserts object into timer. 
@@ -91,7 +91,7 @@ class WheelTimer(Generic[T]): self.entries[-1].elements.add(obj) - def fetch(self, now: int) -> List[T]: + def fetch(self, now: int) -> list[T]: """Fetch any objects that have timed out Args: @@ -102,7 +102,7 @@ class WheelTimer(Generic[T]): """ now_key = int(now / self.bucket_size) - ret: List[T] = [] + ret: list[T] = [] while self.entries and self.entries[0].end_key <= now_key: ret.extend(self.entries.pop(0).elements) diff --git a/synapse/visibility.py b/synapse/visibility.py index 662f2636d0..41b6198af0 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -23,14 +23,9 @@ import logging from enum import Enum, auto from typing import ( Collection, - Dict, Final, - FrozenSet, - List, Optional, Sequence, - Set, - Tuple, ) import attr @@ -76,18 +71,18 @@ MEMBERSHIP_PRIORITY = ( Membership.BAN, ) -_HISTORY_VIS_KEY: Final[Tuple[str, str]] = (EventTypes.RoomHistoryVisibility, "") +_HISTORY_VIS_KEY: Final[tuple[str, str]] = (EventTypes.RoomHistoryVisibility, "") @trace async def filter_events_for_client( storage: StorageControllers, user_id: str, - events: List[EventBase], + events: list[EventBase], is_peeking: bool = False, - always_include_ids: FrozenSet[str] = frozenset(), + always_include_ids: frozenset[str] = frozenset(), filter_send_to_client: bool = True, -) -> List[EventBase]: +) -> list[EventBase]: """ Check which events a user is allowed to see. If the user can see the event but its sender asked for their data to be erased, prune the content of the event. @@ -160,7 +155,7 @@ async def filter_events_for_client( if filter_send_to_client: room_ids = {e.room_id for e in events} - retention_policies: Dict[str, RetentionPolicy] = {} + retention_policies: dict[str, RetentionPolicy] = {} for room_id in room_ids: retention_policies[ @@ -351,7 +346,7 @@ def _check_client_allowed_to_see_event( clock: Clock, filter_send_to_client: bool, is_peeking: bool, - always_include_ids: FrozenSet[str], + always_include_ids: frozenset[str], sender_ignored: bool, retention_policy: RetentionPolicy, state: Optional[StateMap[EventBase]], @@ -652,7 +647,7 @@ async def filter_events_for_server( redact: bool, filter_out_erased_senders: bool, filter_out_remote_partial_state_events: bool, -) -> List[EventBase]: +) -> list[EventBase]: """Filter a list of events based on whether the target server is allowed to see them. @@ -687,7 +682,7 @@ async def filter_events_for_server( # otherwise a room could be fully joined after we retrieve those, which would then bypass # this check but would base the filtering on an outdated view of the membership events. 
- partial_state_invisible_event_ids: Set[str] = set() + partial_state_invisible_event_ids: set[str] = set() if filter_out_remote_partial_state_events: for e in events: sender_domain = get_domain_from_id(e.sender) @@ -733,7 +728,7 @@ async def filter_events_for_server( async def _event_to_history_vis( storage: StorageControllers, events: Collection[EventBase] -) -> Dict[str, str]: +) -> dict[str, str]: """Get the history visibility at each of the given events Returns a map from event id to history_visibility setting @@ -758,7 +753,7 @@ async def _event_to_history_vis( } vis_events = await storage.main.get_events(visibility_ids) - result: Dict[str, str] = {} + result: dict[str, str] = {} for event in events: vis = HistoryVisibility.SHARED state_ids = event_to_state_ids.get(event.event_id) @@ -780,7 +775,7 @@ async def _event_to_history_vis( async def _event_to_memberships( storage: StorageControllers, events: Collection[EventBase], server_name: str -) -> Dict[str, StateMap[Tuple[str, str]]]: +) -> dict[str, StateMap[tuple[str, str]]]: """Get the remote membership list at each of the given events Returns a map from event id to state map, which will contain only membership events diff --git a/synmark/__main__.py b/synmark/__main__.py index 82717c4fc7..5308c96012 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -22,7 +22,7 @@ import sys from argparse import REMAINDER, Namespace from contextlib import redirect_stderr from io import StringIO -from typing import Any, Callable, Coroutine, List, TypeVar +from typing import Any, Callable, Coroutine, TypeVar import pyperf @@ -76,7 +76,7 @@ def make_test( if __name__ == "__main__": - def add_cmdline_args(cmd: List[str], args: Namespace) -> None: + def add_cmdline_args(cmd: list[str], args: Namespace) -> None: if args.log: cmd.extend(["--log"]) cmd.extend(args.tests) diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index d74878a4e1..7742a06b4c 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -20,7 +20,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List from unittest.mock import patch import jsonschema @@ -50,7 +49,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): def test_errors_on_invalid_filters(self) -> None: # See USER_FILTER_SCHEMA for the filter schema. - invalid_filters: List[JsonDict] = [ + invalid_filters: list[JsonDict] = [ # `account_data` must be a dictionary {"account_data": "Hello World"}, # `event_format` must be "client" or "federation" @@ -67,7 +66,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): def test_ignores_unknown_filter_fields(self) -> None: # For forward compatibility, we must ignore unknown filter fields. # See USER_FILTER_SCHEMA for the filter schema. - filters: List[JsonDict] = [ + filters: list[JsonDict] = [ {"org.matrix.msc9999.future_option": True}, {"presence": {"org.matrix.msc9999.future_option": True}}, {"room": {"org.matrix.msc9999.future_option": True}}, @@ -78,7 +77,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): # Must not raise. def test_valid_filters(self) -> None: - valid_filters: List[JsonDict] = [ + valid_filters: list[JsonDict] = [ { "room": { "timeline": {"limit": 20}, @@ -557,7 +556,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): room_id="!foo:bar", ), ] - jsondicts: List[JsonDict] = [{}] + jsondicts: list[JsonDict] = [{}] # For the following tests we patch the datastore method (intead of injecting # events). 
This is a bit cheeky, but tests the logic of _check_event_relations. @@ -565,7 +564,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): # Filter for a particular sender. definition = {"related_by_senders": ["@foo:bar"]} - async def events_have_relations(*args: object, **kwargs: object) -> List[str]: + async def events_have_relations(*args: object, **kwargs: object) -> list[str]: return ["$with_relation"] with patch.object( diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 6ca514d557..6a1a630fe8 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -17,7 +17,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List from unittest.mock import Mock, patch from parameterized import parameterized @@ -58,7 +57,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): (["openid"], "auth_fail"), ] ) - def test_openid_listener(self, names: List[str], expectation: str) -> None: + def test_openid_listener(self, names: list[str], expectation: str) -> None: """ Test different openid listener configurations. @@ -106,7 +105,7 @@ class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): (["openid"], "auth_fail"), ] ) - def test_openid_listener(self, names: List[str], expectation: str) -> None: + def test_openid_listener(self, names: list[str], expectation: str) -> None: """ Test different openid listener configurations. diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 085dfd2d1d..1943292a8f 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, List, Mapping, Optional, Sequence, Union +from typing import Any, Mapping, Optional, Sequence, Union from unittest.mock import Mock from twisted.internet.testing import MemoryReactor @@ -81,7 +81,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): url: str, args: Mapping[Any, Any], headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], - ) -> List[JsonDict]: + ) -> list[JsonDict]: # Ensure the access token is passed as a header. if not headers or not headers.get(b"Authorization"): raise RuntimeError("Access token not provided") @@ -157,7 +157,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): headers: Optional[ Mapping[Union[str, bytes], Sequence[Union[str, bytes]]] ] = None, - ) -> List[JsonDict]: + ) -> list[JsonDict]: # Ensure the access token is passed as a both a query param and in the headers. 
if not args.get(b"access_token"): raise RuntimeError("Access token should be provided in query params.") diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index f4490a1a79..f17957c206 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Optional, Sequence, Tuple +from typing import Optional, Sequence from unittest.mock import AsyncMock, Mock from typing_extensions import TypeAlias @@ -288,11 +288,11 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.HomeserverTestCase): # Corresponds to synapse.appservice.scheduler._TransactionController.send TxnCtrlArgs: TypeAlias = """ defer.Deferred[ - Tuple[ + tuple[ ApplicationService, Sequence[EventBase], - Optional[List[JsonDict]], - Optional[List[JsonDict]], + Optional[list[JsonDict]], + Optional[list[JsonDict]], Optional[TransactionOneTimeKeysCount], Optional[TransactionUnusedFallbackKeys], Optional[DeviceListUpdates], diff --git a/tests/config/utils.py b/tests/config/utils.py index 3cba4ac588..efc63558db 100644 --- a/tests/config/utils.py +++ b/tests/config/utils.py @@ -24,7 +24,6 @@ import tempfile import unittest from contextlib import redirect_stdout from io import StringIO -from typing import List from synapse.config.homeserver import HomeServerConfig @@ -61,7 +60,7 @@ class ConfigFileTestCase(unittest.TestCase): with open(self.config_file, "w") as f: f.write("".join(contents)) - def add_lines_to_config(self, lines: List[str]) -> None: + def add_lines_to_config(self, lines: list[str]) -> None: with open(self.config_file, "a") as f: for line in lines: f.write(line + "\n") diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 93ae24628a..2eaf77e9dc 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -19,7 +19,7 @@ # # import time -from typing import Any, Dict, List, Optional, cast +from typing import Any, Optional, cast from unittest.mock import Mock import attr @@ -60,7 +60,7 @@ class MockPerspectiveServer: self.server_name = "mock_server" self.key = signedjson.key.generate_signing_key("0") - def get_verify_keys(self) -> Dict[str, str]: + def get_verify_keys(self) -> dict[str, str]: vk = signedjson.key.get_verify_key(self.key) return {"%s:%s" % (vk.alg, vk.version): encode_verify_key_base64(vk)} @@ -107,8 +107,8 @@ class KeyringTestCase(unittest.HomeserverTestCase): first_lookup_deferred: "Deferred[None]" = Deferred() async def first_lookup_fetch( - server_name: str, key_ids: List[str], minimum_valid_until_ts: int - ) -> Dict[str, FetchKeyResult]: + server_name: str, key_ids: list[str], minimum_valid_until_ts: int + ) -> dict[str, FetchKeyResult]: # self.assertEqual(current_context().request.id, "context_11") self.assertEqual(server_name, "server10") self.assertEqual(key_ids, [get_key_id(key1)]) @@ -152,8 +152,8 @@ class KeyringTestCase(unittest.HomeserverTestCase): # should block rather than start a second call async def second_lookup_fetch( - server_name: str, key_ids: List[str], minimum_valid_until_ts: int - ) -> Dict[str, FetchKeyResult]: + server_name: str, key_ids: list[str], minimum_valid_until_ts: int + ) -> dict[str, FetchKeyResult]: # self.assertEqual(current_context().request.id, "context_12") return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)} @@ -276,8 +276,8 @@ class KeyringTestCase(unittest.HomeserverTestCase): # set up a mock fetcher which will return the key async 
def get_keys( - server_name: str, key_ids: List[str], minimum_valid_until_ts: int - ) -> Dict[str, FetchKeyResult]: + server_name: str, key_ids: list[str], minimum_valid_until_ts: int + ) -> dict[str, FetchKeyResult]: self.assertEqual(server_name, self.hs.hostname) self.assertEqual(key_ids, [get_key_id(key2)]) @@ -302,8 +302,8 @@ class KeyringTestCase(unittest.HomeserverTestCase): key1 = signedjson.key.generate_signing_key("1") async def get_keys( - server_name: str, key_ids: List[str], minimum_valid_until_ts: int - ) -> Dict[str, FetchKeyResult]: + server_name: str, key_ids: list[str], minimum_valid_until_ts: int + ) -> dict[str, FetchKeyResult]: # there should only be one request object (with the max validity) self.assertEqual(server_name, "server1") self.assertEqual(key_ids, [get_key_id(key1)]) @@ -344,16 +344,16 @@ class KeyringTestCase(unittest.HomeserverTestCase): key1 = signedjson.key.generate_signing_key("1") async def get_keys1( - server_name: str, key_ids: List[str], minimum_valid_until_ts: int - ) -> Dict[str, FetchKeyResult]: + server_name: str, key_ids: list[str], minimum_valid_until_ts: int + ) -> dict[str, FetchKeyResult]: self.assertEqual(server_name, "server1") self.assertEqual(key_ids, [get_key_id(key1)]) self.assertEqual(minimum_valid_until_ts, 1500) return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800)} async def get_keys2( - server_name: str, key_ids: List[str], minimum_valid_until_ts: int - ) -> Dict[str, FetchKeyResult]: + server_name: str, key_ids: list[str], minimum_valid_until_ts: int + ) -> dict[str, FetchKeyResult]: self.assertEqual(server_name, "server1") self.assertEqual(key_ids, [get_key_id(key1)]) self.assertEqual(minimum_valid_until_ts, 1500) @@ -701,7 +701,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): SERVER_NAME, testkey, VALID_UNTIL_TS ) - def get_key_from_perspectives(response: JsonDict) -> Dict[str, FetchKeyResult]: + def get_key_from_perspectives(response: JsonDict) -> dict[str, FetchKeyResult]: fetcher = PerspectivesKeyFetcher(self.hs) self.expect_outgoing_key_query(SERVER_NAME, "key1", response) return self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) diff --git a/tests/events/test_auto_accept_invites.py b/tests/events/test_auto_accept_invites.py index fa7ea64105..d3842e72d7 100644 --- a/tests/events/test_auto_accept_invites.py +++ b/tests/events/test_auto_accept_invites.py @@ -21,7 +21,7 @@ import asyncio from asyncio import Future from http import HTTPStatus -from typing import Any, Awaitable, Dict, List, Optional, Tuple, TypeVar, cast +from typing import Any, Awaitable, Optional, TypeVar, cast from unittest.mock import Mock import attr @@ -527,7 +527,7 @@ def sync_join( testcase: HomeserverTestCase, user_id: str, since_token: Optional[StreamToken] = None, -) -> Tuple[List[JoinedSyncResult], StreamToken]: +) -> tuple[list[JoinedSyncResult], StreamToken]: """Perform a sync request for the given user and return the user join updates they've received, as well as the next_batch token. 
@@ -765,7 +765,7 @@ class MockEvent: sender: str type: str - content: Dict[str, Any] + content: dict[str, Any] room_id: str = "!someroom" state_key: Optional[str] = None @@ -802,7 +802,7 @@ def make_multiple_awaitable(result: TV) -> Awaitable[TV]: def create_module( - config_override: Optional[Dict[str, Any]] = None, worker_name: Optional[str] = None + config_override: Optional[dict[str, Any]] = None, worker_name: Optional[str] = None ) -> InviteAutoAccepter: # Create a mock based on the ModuleApi spec, but override some mocked functions # because some capabilities are needed for running the tests. diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index 696d9dd6e2..aa8d7454c0 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import Iterable, Optional, Union from unittest.mock import AsyncMock, Mock import attr @@ -46,7 +46,7 @@ from tests.unittest import ( @attr.s class PresenceRouterTestConfig: - users_who_should_receive_all_presence = attr.ib(type=List[str], default=[]) + users_who_should_receive_all_presence = attr.ib(type=list[str], default=[]) class LegacyPresenceRouterTestModule: @@ -56,14 +56,14 @@ class LegacyPresenceRouterTestModule: async def get_users_for_states( self, state_updates: Iterable[UserPresenceState] - ) -> Dict[str, Set[UserPresenceState]]: + ) -> dict[str, set[UserPresenceState]]: users_to_state = { user_id: set(state_updates) for user_id in self._config.users_who_should_receive_all_presence } return users_to_state - async def get_interested_users(self, user_id: str) -> Union[Set[str], str]: + async def get_interested_users(self, user_id: str) -> Union[set[str], str]: if user_id in self._config.users_who_should_receive_all_presence: return PresenceRouter.ALL_USERS @@ -106,14 +106,14 @@ class PresenceRouterTestModule: async def get_users_for_states( self, state_updates: Iterable[UserPresenceState] - ) -> Dict[str, Set[UserPresenceState]]: + ) -> dict[str, set[UserPresenceState]]: users_to_state = { user_id: set(state_updates) for user_id in self._config.users_who_should_receive_all_presence } return users_to_state - async def get_interested_users(self, user_id: str) -> Union[Set[str], str]: + async def get_interested_users(self, user_id: str) -> Union[set[str], str]: if user_id in self._config.users_who_should_receive_all_presence: return PresenceRouter.ALL_USERS @@ -511,7 +511,7 @@ def sync_presence( testcase: HomeserverTestCase, user_id: str, since_token: Optional[StreamToken] = None, -) -> Tuple[List[UserPresenceState], StreamToken]: +) -> tuple[list[UserPresenceState], StreamToken]: """Perform a sync request for the given user and return the user presence updates they've received, as well as the next_batch token. 
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index c6ebefbf38..9d41067844 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -20,7 +20,7 @@ # import unittest as stdlib_unittest -from typing import Any, List, Mapping, Optional +from typing import Any, Mapping, Optional import attr from parameterized import parameterized @@ -648,7 +648,7 @@ class SerializeEventTestCase(stdlib_unittest.TestCase): def serialize( self, ev: EventBase, - fields: Optional[List[str]], + fields: Optional[list[str]], include_admin_metadata: bool = False, ) -> JsonDict: return serialize_event( diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 5edb651767..34b552b9ed 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -1,4 +1,4 @@ -from typing import Callable, Collection, List, Optional, Tuple +from typing import Callable, Collection, Optional from unittest import mock from unittest.mock import AsyncMock, Mock @@ -55,8 +55,8 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): ) # whenever send_transaction is called, record the pdu data - self.pdus: List[JsonDict] = [] - self.failed_pdus: List[JsonDict] = [] + self.pdus: list[JsonDict] = [] + self.failed_pdus: list[JsonDict] = [] self.is_online = True self.federation_transport_client.send_transaction.side_effect = ( self.record_transaction @@ -269,7 +269,7 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): def make_fake_destination_queue( self, destination: str = "host2" - ) -> Tuple[PerDestinationQueue, List[EventBase]]: + ) -> tuple[PerDestinationQueue, list[EventBase]]: """ Makes a fake per-destination queue. """ @@ -279,8 +279,8 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): async def fake_send( destination_tm: str, - pending_pdus: List[EventBase], - _pending_edus: List[Edu], + pending_pdus: list[EventBase], + _pending_edus: list[Edu], ) -> None: assert destination == destination_tm results_list.extend(pending_pdus) diff --git a/tests/federation/test_federation_out_of_band_membership.py b/tests/federation/test_federation_out_of_band_membership.py index fa4e7c63ba..905f9e6580 100644 --- a/tests/federation/test_federation_out_of_band_membership.py +++ b/tests/federation/test_federation_out_of_band_membership.py @@ -23,7 +23,7 @@ import logging import time import urllib.parse from http import HTTPStatus -from typing import Any, Callable, Optional, Set, Tuple, TypeVar, Union +from typing import Any, Callable, Optional, TypeVar, Union from unittest.mock import Mock import attr @@ -147,7 +147,7 @@ class OutOfBandMembershipTests(unittest.FederatingHomeserverTestCase): def do_sync( self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str - ) -> Tuple[JsonDict, str]: + ) -> tuple[JsonDict, str]: """Do a sliding sync request with given body. Asserts the request was successful. 
@@ -350,7 +350,7 @@ class OutOfBandMembershipTests(unittest.FederatingHomeserverTestCase): self.federation_http_client.get_json.side_effect = get_json # PDU's that hs1 sent to hs2 - collected_pdus_from_hs1_federation_send: Set[str] = set() + collected_pdus_from_hs1_federation_send: set[str] = set() async def put_json( destination: str, @@ -503,7 +503,7 @@ class OutOfBandMembershipTests(unittest.FederatingHomeserverTestCase): T = TypeVar("T") # PDU's that hs1 sent to hs2 - collected_pdus_from_hs1_federation_send: Set[str] = set() + collected_pdus_from_hs1_federation_send: set[str] = set() async def put_json( destination: str, diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 27b69a9180..20b67e3a73 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -17,7 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Callable, FrozenSet, List, Optional, Set +from typing import Callable, Optional from unittest.mock import AsyncMock, Mock from signedjson import key, sign @@ -435,7 +435,7 @@ class FederationSenderPresenceTestCases(HomeserverTestCase): # A set of all user presence we see, this should end up matching the # number we sent out above. - seen_users: Set[str] = set() + seen_users: set[str] = set() for edu in presence_edus: presence_states = edu["content"]["push"] @@ -483,12 +483,12 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # stub out `get_rooms_for_user` and `get_current_hosts_in_room` so that the # server thinks the user shares a room with `@user2:host2` - def get_rooms_for_user(user_id: str) -> "defer.Deferred[FrozenSet[str]]": + def get_rooms_for_user(user_id: str) -> "defer.Deferred[frozenset[str]]": return defer.succeed(frozenset({test_room_id})) hs.get_datastores().main.get_rooms_for_user = get_rooms_for_user # type: ignore[assignment] - async def get_current_hosts_in_room(room_id: str) -> Set[str]: + async def get_current_hosts_in_room(room_id: str) -> set[str]: if room_id == test_room_id: return {"host2"} else: @@ -504,7 +504,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.device_handler = device_handler # whenever send_transaction is called, record the edu data - self.edus: List[JsonDict] = [] + self.edus: list[JsonDict] = [] self.federation_transport_client.send_transaction.side_effect = ( self.record_transaction ) diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py index 0e3b41ec4d..3c553e6e40 100644 --- a/tests/federation/transport/server/test__base.py +++ b/tests/federation/transport/server/test__base.py @@ -20,7 +20,6 @@ # from http import HTTPStatus -from typing import Dict, List, Tuple from twisted.web.resource import Resource @@ -52,14 +51,14 @@ class CancellableFederationServlet(BaseFederationServlet): @cancellable async def on_GET( - self, origin: str, content: None, query: Dict[bytes, List[bytes]] - ) -> Tuple[int, JsonDict]: + self, origin: str, content: None, query: dict[bytes, list[bytes]] + ) -> tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} async def on_POST( - self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] - ) -> Tuple[int, JsonDict]: + self, origin: str, content: JsonDict, query: dict[bytes, list[bytes]] + ) -> tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} diff --git 
a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py index 3d882f99f2..f538b67e41 100644 --- a/tests/federation/transport/test_client.py +++ b/tests/federation/transport/test_client.py @@ -20,7 +20,7 @@ # import json -from typing import List, Optional +from typing import Optional from unittest.mock import Mock import ijson.common @@ -98,7 +98,7 @@ class SendJoinParserTestCase(TestCase): def test_servers_in_room(self) -> None: """Check that the servers_in_room field is correctly parsed""" - def parse(response: JsonDict) -> Optional[List[str]]: + def parse(response: JsonDict) -> Optional[list[str]]: parser = SendJoinParser(RoomVersions.V1, False) serialised_response = json.dumps(response).encode() diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index a243938255..9e92b06d91 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -19,7 +19,7 @@ # # from collections import OrderedDict -from typing import Any, Dict, List, Optional +from typing import Any, Optional from twisted.internet.testing import MemoryReactor @@ -161,8 +161,8 @@ class KnockingStrippedStateEventHelperMixin(HomeserverTestCase): def check_knock_room_state_against_room_state( self, - knock_room_state: List[Dict], - expected_room_state: Dict, + knock_room_state: list[dict], + expected_room_state: dict, ) -> None: """Test a list of stripped room state events received over federation against a dict of expected state events. diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index df36185b99..7d6bd35a9a 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -24,9 +24,7 @@ from typing import ( Any, Awaitable, Callable, - Dict, Iterable, - List, Optional, TypeVar, ) @@ -450,7 +448,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[method-assign] # Mock out application services, and allow defining our own in tests - self._services: List[ApplicationService] = [] + self._services: list[ApplicationService] = [] self.hs.get_datastores().main.get_app_services = Mock( # type: ignore[method-assign] return_value=self._services ) @@ -884,7 +882,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): # Count the total number of to-device messages that were sent out per-service. # Ensure that we only sent to-device messages to interested services, and that # each interested service received the full count of to-device messages. - service_id_to_message_count: Dict[str, int] = {} + service_id_to_message_count: dict[str, int] = {} for call in self.send_mock.call_args_list: ( @@ -1023,7 +1021,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): def _register_application_service( self, - namespaces: Optional[Dict[str, Iterable[Dict]]] = None, + namespaces: Optional[dict[str, Iterable[dict]]] = None, ) -> ApplicationService: """ Register a new application service, with the given namespaces of interest. 
@@ -1073,7 +1071,7 @@ class ApplicationServicesHandlerDeviceListsTestCase(unittest.HomeserverTestCase) hs.get_application_service_api().put_json = self.put_json # type: ignore[method-assign] # Mock out application services, and allow defining our own in tests - self._services: List[ApplicationService] = [] + self._services: list[ApplicationService] = [] self.hs.get_datastores().main.get_app_services = Mock( # type: ignore[method-assign] return_value=self._services ) diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index f677f3be2a..02671fc264 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Dict +from typing import Any from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -35,7 +35,7 @@ SERVER_URL = "https://issuer/" class CasHandlerTestCase(HomeserverTestCase): - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL cas_config = { diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 45b8f2353a..76b145b92b 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -19,7 +19,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Awaitable, Callable, Dict +from typing import Any, Awaitable, Callable from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -43,7 +43,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.mock_federation = AsyncMock() self.mock_registry = Mock() - self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} + self.query_handlers: dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} def register_query_handler( query_type: str, handler: Callable[[dict], Awaitable[JsonDict]] @@ -410,7 +410,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): servlets = [directory.register_servlets, room.register_servlets] - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() # Add custom alias creation rules to the config. @@ -476,7 +476,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): data = {"room_alias_name": "unofficial_test"} allowed_localpart = "allowed" - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() # Add custom room list publication rules to the config. diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index fca1f2cc44..a4f9d55a13 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -20,7 +20,7 @@ # # import time -from typing import Dict, Iterable +from typing import Iterable from unittest import mock from parameterized import parameterized @@ -291,7 +291,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): (chris, "chris_dev_2", "alg2"): 1, } # Convert to the format the handler wants. - query: Dict[str, Dict[str, Dict[str, int]]] = {} + query: dict[str, dict[str, dict[str, int]]] = {} for (user_id, device_id, algorithm), count in claims_to_make.items(): query.setdefault(user_id, {}).setdefault(device_id, {})[algorithm] = count claim_res = self.get_success( @@ -1510,7 +1510,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): ) # Setup a response. 
- response: Dict[str, Dict[str, Dict[str, JsonDict]]] = { + response: dict[str, dict[str, dict[str, JsonDict]]] = { local_user: {device_id_1: {**as_otk, **as_fallback_key}} } self.appservice_api.claim_client_keys.return_value = (response, []) diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index 4262e805e7..6450a90444 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -19,7 +19,6 @@ # # import logging -from typing import Tuple from twisted.internet.testing import MemoryReactor @@ -64,7 +63,7 @@ class EventCreationTestCase(unittest.HomeserverTestCase): self.requester = create_requester(self.user_id, device_id=device_id) - def _create_and_persist_member_event(self) -> Tuple[EventBase, EventContext]: + def _create_and_persist_member_event(self) -> tuple[EventBase, EventContext]: # Create a member event we can use as an auth_event memberEvent, memberEventContext = self.get_success( create_event( @@ -86,7 +85,7 @@ class EventCreationTestCase(unittest.HomeserverTestCase): def _create_duplicate_event( self, txn_id: str - ) -> Tuple[EventBase, UnpersistedEventContextBase]: + ) -> tuple[EventBase, UnpersistedEventContextBase]: """Create a new event with the given transaction ID. All events produced by this method will be considered duplicates. """ diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 4640f35a1e..43004bfc69 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -25,7 +25,7 @@ import time from http import HTTPStatus from http.server import BaseHTTPRequestHandler, HTTPServer from io import BytesIO -from typing import Any, ClassVar, Coroutine, Dict, Generator, Optional, TypeVar, Union +from typing import Any, ClassVar, Coroutine, Generator, Optional, TypeVar, Union from unittest.mock import ANY, AsyncMock, Mock from urllib.parse import parse_qs @@ -130,7 +130,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): keys.register_servlets, ] - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL config["disable_registration"] = True @@ -834,7 +834,7 @@ class MasAuthDelegation(HomeserverTestCase): return deferred - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL config["disable_registration"] = True @@ -1100,9 +1100,9 @@ class DisabledEndpointsTestCase(HomeserverTestCase): admin.register_servlets, ] - config: Dict[str, Any] + config: dict[str, Any] - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL config["disable_registration"] = True diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 5207382f00..3180969e7b 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -19,7 +19,7 @@ # # import os -from typing import Any, Awaitable, ContextManager, Dict, Optional, Tuple +from typing import Any, Awaitable, ContextManager, Optional from unittest.mock import ANY, AsyncMock, Mock, patch from urllib.parse import parse_qs, urlparse @@ -152,7 +152,7 @@ class OidcHandlerTestCase(HomeserverTestCase): if not HAS_OIDC: skip = "requires OIDC" - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() 
config["public_baseurl"] = BASE_URL return config @@ -204,7 +204,7 @@ class OidcHandlerTestCase(HomeserverTestCase): client_redirect_url: str = "http://client/redirect", scope: str = "openid", with_sid: bool = False, - ) -> Tuple[SynapseRequest, FakeAuthorizationGrant]: + ) -> tuple[SynapseRequest, FakeAuthorizationGrant]: """Start an authorization request, and get the callback request back.""" nonce = random_string(10) state = random_string(10) @@ -222,7 +222,7 @@ class OidcHandlerTestCase(HomeserverTestCase): def assertRenderedError( self, error: str, error_description: Optional[str] = None - ) -> Tuple[Any, ...]: + ) -> tuple[Any, ...]: self.render_error.assert_called_once() args = self.render_error.call_args[0] self.assertEqual(args[1], error) diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index aa41875063..faa269bd35 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -22,7 +22,7 @@ """Tests for the password_auth_provider interface""" from http import HTTPStatus -from typing import Any, Dict, List, Optional, Type, Union +from typing import Any, Optional, Union from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -75,7 +75,7 @@ class LegacyCustomAuthProvider: def __init__(self, config: None, account_handler: AccountHandler): pass - def get_supported_login_types(self) -> Dict[str, List[str]]: + def get_supported_login_types(self) -> dict[str, list[str]]: return {"test.login_type": ["test_field"]} def check_auth(self, *args: str) -> Mock: @@ -109,7 +109,7 @@ class LegacyPasswordCustomAuthProvider: def __init__(self, config: None, account_handler: AccountHandler): pass - def get_supported_login_types(self) -> Dict[str, List[str]]: + def get_supported_login_types(self) -> dict[str, list[str]]: return {"m.login.password": ["password"], "test.login_type": ["test_field"]} def check_auth(self, *args: str) -> Mock: @@ -139,7 +139,7 @@ class PasswordCustomAuthProvider: return mock_password_provider.check_password(*args) -def legacy_providers_config(*providers: Type[Any]) -> dict: +def legacy_providers_config(*providers: type[Any]) -> dict: """Returns a config dict that will enable the given legacy password auth providers""" return { "password_providers": [ @@ -149,7 +149,7 @@ def legacy_providers_config(*providers: Type[Any]) -> dict: } -def providers_config(*providers: Type[Any]) -> dict: +def providers_config(*providers: type[Any]) -> dict: """Returns a config dict that will enable the given modules""" return { "modules": [ diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 73426c7b04..7a7f803ebd 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Awaitable, Callable, Dict +from typing import Any, Awaitable, Callable from unittest.mock import AsyncMock, Mock from parameterized import parameterized @@ -44,7 +44,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.mock_federation = AsyncMock() self.mock_registry = Mock() - self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} + self.query_handlers: dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} def register_query_handler( query_type: str, handler: Callable[[dict], Awaitable[JsonDict]] @@ -377,7 +377,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): 
self.get_success(self.handler.check_avatar_size_and_mime_type(remote_mxc)) ) - def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]) -> None: + def _setup_local_files(self, names_and_props: dict[str, dict[str, Any]]) -> None: """Stores metadata about files in the database. Args: diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py index 4febccbfcf..bb9e84d644 100644 --- a/tests/handlers/test_receipts.py +++ b/tests/handlers/test_receipts.py @@ -20,7 +20,6 @@ # from copy import deepcopy -from typing import List from twisted.internet.testing import MemoryReactor @@ -334,7 +333,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): self.assertEqual(events, original_events) def _test_filters_private( - self, events: List[JsonDict], expected_output: List[JsonDict] + self, events: list[JsonDict], expected_output: list[JsonDict] ) -> None: """Tests that the _filter_out_private returns the expected output""" filtered_events = self.event_source.filter_out_private_receipts( diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 5e2eb8dee7..20c2554e25 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -19,7 +19,7 @@ # # -from typing import Any, Collection, List, Optional, Tuple +from typing import Any, Collection, Optional from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -65,7 +65,7 @@ class TestSpamChecker: self, email_threepid: Optional[dict], username: Optional[str], - request_info: Collection[Tuple[str, str]], + request_info: Collection[tuple[str, str]], auth_provider_id: Optional[str], ) -> RegistrationBehaviour: return RegistrationBehaviour.ALLOW @@ -76,7 +76,7 @@ class DenyAll(TestSpamChecker): self, email_threepid: Optional[dict], username: Optional[str], - request_info: Collection[Tuple[str, str]], + request_info: Collection[tuple[str, str]], auth_provider_id: Optional[str], ) -> RegistrationBehaviour: return RegistrationBehaviour.DENY @@ -87,7 +87,7 @@ class BanAll(TestSpamChecker): self, email_threepid: Optional[dict], username: Optional[str], - request_info: Collection[Tuple[str, str]], + request_info: Collection[tuple[str, str]], auth_provider_id: Optional[str], ) -> RegistrationBehaviour: return RegistrationBehaviour.SHADOW_BAN @@ -98,7 +98,7 @@ class BanBadIdPUser(TestSpamChecker): self, email_threepid: Optional[dict], username: Optional[str], - request_info: Collection[Tuple[str, str]], + request_info: Collection[tuple[str, str]], auth_provider_id: Optional[str] = None, ) -> RegistrationBehaviour: # Reject any user coming from CAS and whose username contains profanity @@ -115,7 +115,7 @@ class TestLegacyRegistrationSpamChecker: self, email_threepid: Optional[dict], username: Optional[str], - request_info: Collection[Tuple[str, str]], + request_info: Collection[tuple[str, str]], ) -> RegistrationBehaviour: return RegistrationBehaviour.ALLOW @@ -125,7 +125,7 @@ class LegacyAllowAll(TestLegacyRegistrationSpamChecker): self, email_threepid: Optional[dict], username: Optional[str], - request_info: Collection[Tuple[str, str]], + request_info: Collection[tuple[str, str]], ) -> RegistrationBehaviour: return RegistrationBehaviour.ALLOW @@ -135,7 +135,7 @@ class LegacyDenyAll(TestLegacyRegistrationSpamChecker): self, email_threepid: Optional[dict], username: Optional[str], - request_info: Collection[Tuple[str, str]], + request_info: Collection[tuple[str, str]], ) -> RegistrationBehaviour: return RegistrationBehaviour.DENY @@ -779,7 
+779,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): localpart: str, displayname: Optional[str], password_hash: Optional[str] = None, - ) -> Tuple[str, str]: + ) -> tuple[str, str]: """Creates a new user if the user does not exist, else revokes all previous access tokens and generates a new one. @@ -842,7 +842,7 @@ class RemoteAutoJoinTestCase(unittest.HomeserverTestCase): async def lookup_room_alias( *args: Any, **kwargs: Any - ) -> Tuple[RoomID, List[str]]: + ) -> tuple[RoomID, list[str]]: return RoomID.from_string(self.room_id), ["remotetest"] self.room_member_handler = Mock(spec=["update_membership", "lookup_room_alias"]) diff --git a/tests/handlers/test_room_list.py b/tests/handlers/test_room_list.py index 45cef09b22..f6e9309f1f 100644 --- a/tests/handlers/test_room_list.py +++ b/tests/handlers/test_room_list.py @@ -1,5 +1,5 @@ from http import HTTPStatus -from typing import Optional, Set +from typing import Optional from synapse.rest import admin from synapse.rest.client import directory, login, room @@ -69,7 +69,7 @@ class RoomListHandlerTestCase(unittest.HomeserverTestCase): limit=50, from_federation_origin="test2" ) ) - room_ids_in_test2_list: Set[str] = { + room_ids_in_test2_list: set[str] = { entry["room_id"] for entry in room_list["chunk"] } @@ -78,7 +78,7 @@ class RoomListHandlerTestCase(unittest.HomeserverTestCase): limit=50, from_federation_origin="test3" ) ) - room_ids_in_test3_list: Set[str] = { + room_ids_in_test3_list: set[str] = { entry["room_id"] for entry in room_list["chunk"] } diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index 00592b9871..3c8c483921 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple +from typing import Any, Iterable, Optional from unittest import mock from twisted.internet.defer import ensureDeferred @@ -60,7 +60,7 @@ def _create_event( return result -def _order(*events: mock.Mock) -> List[mock.Mock]: +def _order(*events: mock.Mock) -> list[mock.Mock]: return sorted(events, key=_child_events_comparison_key) @@ -152,7 +152,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): room_id: str, token: str, order: Optional[str] = None, - via: Optional[List[str]] = None, + via: Optional[list[str]] = None, ) -> None: """Add a child room to a space.""" if via is None: @@ -170,7 +170,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): ) def _assert_hierarchy( - self, result: JsonDict, rooms_and_children: Iterable[Tuple[str, Iterable[str]]] + self, result: JsonDict, rooms_and_children: Iterable[tuple[str, Iterable[str]]] ) -> None: """ Assert that the expected room IDs are in the response. @@ -547,7 +547,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): ) # The result should have the space and all of the links, plus some of the # rooms and a pagination token. 
- expected: List[Tuple[str, Iterable[str]]] = [(self.space, room_ids)] + expected: list[tuple[str, Iterable[str]]] = [(self.space, room_ids)] expected += [(room_id, ()) for room_id in room_ids[:6]] self._assert_hierarchy(result, expected) self.assertIn("next_batch", result) @@ -646,7 +646,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): create_requester(self.user), self.space, max_depth=0 ) ) - expected: List[Tuple[str, Iterable[str]]] = [(spaces[0], [rooms[0], spaces[1]])] + expected: list[tuple[str, Iterable[str]]] = [(spaces[0], [rooms[0], spaces[1]])] self._assert_hierarchy(result, expected) # A single additional layer. @@ -740,7 +740,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]: + ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: return requested_room_entry, {subroom: child_room}, set() # Add a room to the space which is on another server. @@ -793,7 +793,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]: + ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: return requested_room_entry, {fed_subroom: child_room}, set() expected = [ @@ -921,7 +921,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]: + ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: return subspace_room_entry, dict(children_rooms), set() # Add a room to the space which is on another server. @@ -985,7 +985,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]: + ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: return fed_room_entry, {}, set() # Add a room to the space which is on another server. 
@@ -1120,7 +1120,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]: + ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: return requested_room_entry, {fed_subroom: child_room}, set() expected = [ @@ -1233,7 +1233,7 @@ class RoomSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]: + ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: return requested_room_entry, {}, set() with mock.patch( diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index f7cbf91113..28159abbcb 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -19,7 +19,7 @@ # # -from typing import Any, Dict, Optional, Set, Tuple +from typing import Any, Optional from unittest.mock import AsyncMock, Mock import attr @@ -73,7 +73,7 @@ class TestMappingProvider: return None @staticmethod - def get_saml_attributes(config: None) -> Tuple[Set[str], Set[str]]: + def get_saml_attributes(config: None) -> tuple[set[str], set[str]]: return {"uid"}, {"displayName"} def get_remote_user_id( @@ -102,10 +102,10 @@ class TestRedirectMappingProvider(TestMappingProvider): class SamlHandlerTestCase(HomeserverTestCase): - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL - saml_config: Dict[str, Any] = { + saml_config: dict[str, Any] = { "sp_config": {"metadata": {}}, # Disable grandfathering. "grandfathered_mxid_source_attribute": None, diff --git a/tests/handlers/test_send_email.py b/tests/handlers/test_send_email.py index 5f7839c82c..d033ed3a1c 100644 --- a/tests/handlers/test_send_email.py +++ b/tests/handlers/test_send_email.py @@ -20,7 +20,7 @@ # -from typing import Callable, List, Tuple, Type, Union +from typing import Callable, Union from unittest.mock import patch from zope.interface import implementer @@ -58,18 +58,18 @@ def TestingESMTPTLSClientFactory( class _DummyMessageDelivery: def __init__(self) -> None: # (recipient, message) tuples - self.messages: List[Tuple[smtp.Address, bytes]] = [] + self.messages: list[tuple[smtp.Address, bytes]] = [] def receivedHeader( self, - helo: Tuple[bytes, bytes], + helo: tuple[bytes, bytes], origin: smtp.Address, - recipients: List[smtp.User], + recipients: list[smtp.User], ) -> None: return None def validateFrom( - self, helo: Tuple[bytes, bytes], origin: smtp.Address + self, helo: tuple[bytes, bytes], origin: smtp.Address ) -> smtp.Address: return origin @@ -89,7 +89,7 @@ class _DummyMessage: def __init__(self, delivery: _DummyMessageDelivery, user: smtp.User): self._delivery = delivery self._user = user - self._buffer: List[bytes] = [] + self._buffer: list[bytes] = [] def lineReceived(self, line: bytes) -> None: self._buffer.append(line) @@ -104,7 +104,7 @@ class _DummyMessage: class SendEmailHandlerTestCaseIPv4(HomeserverTestCase): - ip_class: Union[Type[IPv4Address], Type[IPv6Address]] = IPv4Address + ip_class: Union[type[IPv4Address], type[IPv6Address]] = IPv4Address def setUp(self) -> None: super().setUp() diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index 1ffd15cadb..a35910e4dd 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ 
-18,7 +18,7 @@ # # import logging -from typing import AbstractSet, Dict, Mapping, Optional, Set, Tuple +from typing import AbstractSet, Mapping, Optional from unittest.mock import patch import attr @@ -3278,7 +3278,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): user: UserID, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: + ) -> tuple[dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Get the rooms the user should be syncing with """ @@ -3615,7 +3615,7 @@ class SortRoomsTestCase(HomeserverTestCase): user: UserID, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: + ) -> tuple[dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Get the rooms the user should be syncing with """ @@ -3824,13 +3824,13 @@ class SortRoomsTestCase(HomeserverTestCase): @attr.s(slots=True, auto_attribs=True, frozen=True) class RequiredStateChangesTestParameters: - previous_required_state_map: Dict[str, Set[str]] - request_required_state_map: Dict[str, Set[str]] + previous_required_state_map: dict[str, set[str]] + request_required_state_map: dict[str, set[str]] state_deltas: StateMap[str] - expected_with_state_deltas: Tuple[ + expected_with_state_deltas: tuple[ Optional[Mapping[str, AbstractSet[str]]], StateFilter ] - expected_without_state_deltas: Tuple[ + expected_without_state_deltas: tuple[ Optional[Mapping[str, AbstractSet[str]]], StateFilter ] @@ -4785,7 +4785,7 @@ class RequiredStateChangesTestCase(unittest.TestCase): self, _test_label: str, event_type: str, - extra_state_keys: Set[str], + extra_state_keys: set[str], ) -> None: """ Test that we limit the number of state_keys that we remember but always include diff --git a/tests/handlers/test_sso.py b/tests/handlers/test_sso.py index b09d0a42f5..5ac088f601 100644 --- a/tests/handlers/test_sso.py +++ b/tests/handlers/test_sso.py @@ -18,7 +18,7 @@ # # from http import HTTPStatus -from typing import BinaryIO, Callable, Dict, List, Optional, Tuple +from typing import BinaryIO, Callable, Optional from unittest.mock import Mock from twisted.internet.testing import MemoryReactor @@ -120,7 +120,7 @@ async def mock_get_file( max_size: Optional[int] = None, headers: Optional[RawHeaders] = None, is_allowed_content_type: Optional[Callable[[str], bool]] = None, -) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: +) -> tuple[int, dict[bytes, list[bytes]], str, int]: fake_response = FakeResponse(code=404) if url == "http://my.server/me.png": fake_response = FakeResponse( diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index abec5c2e39..94f5e472ca 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -18,7 +18,7 @@ # # -from typing import Any, Dict, List, Optional, Tuple, cast +from typing import Any, Optional, cast from twisted.internet.testing import MemoryReactor @@ -74,9 +74,9 @@ class StatsRoomTests(unittest.HomeserverTestCase): ) ) - async def get_all_room_state(self) -> List[Optional[str]]: + async def get_all_room_state(self) -> list[Optional[str]]: rows = cast( - List[Tuple[Optional[str]]], + list[tuple[Optional[str]]], await self.store.db_pool.simple_select_list( "room_stats_state", None, retcols=("topic",) ), @@ -85,7 +85,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): def _get_current_stats( self, stats_type: str, stat_id: str - ) -> Optional[Dict[str, Any]]: + ) -> 
Optional[dict[str, Any]]: table, id_col = stats.TYPE_TO_TABLE[stats_type] cols = list(stats.ABSOLUTE_STATS_FIELDS[stats_type]) diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index c61788fe90..140dd4a0ba 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -18,7 +18,7 @@ # # from http import HTTPStatus -from typing import Collection, ContextManager, List, Optional +from typing import Collection, ContextManager, Optional from unittest.mock import AsyncMock, Mock, patch from parameterized import parameterized, parameterized_class @@ -872,7 +872,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # ... And the state should be empty self.assertEqual(sync_room_result.state, {}) - def _patch_get_latest_events(self, latest_events: List[str]) -> ContextManager: + def _patch_get_latest_events(self, latest_events: list[str]) -> ContextManager: """Monkey-patch `get_prev_events_for_room` Returns a context manager which will replace the implementation of @@ -902,7 +902,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): async def _check_sigs_and_hash_for_pulled_events_and_fetch( dest: str, pdus: Collection[EventBase], room_version: RoomVersion - ) -> List[EventBase]: + ) -> list[EventBase]: return list(pdus) self.client._check_sigs_and_hash_for_pulled_events_and_fetch = ( # type: ignore[method-assign] diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 90c185bc3d..70557a4a5f 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -21,7 +21,6 @@ import json -from typing import Dict, List, Set from unittest.mock import ANY, AsyncMock, Mock, call from netaddr import IPSet @@ -110,7 +109,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): return hs - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: d = super().create_resource_dict() d["/_matrix/federation"] = TransportLayerServer(self.hs) return d @@ -143,7 +142,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): return_value=None ) - self.room_members: List[UserID] = [] + self.room_members: list[UserID] = [] async def check_user_in_room(room_id: str, requester: Requester) -> None: if requester.user.to_string() not in [ @@ -163,7 +162,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): side_effect=check_host_in_room ) - async def get_current_hosts_in_room(room_id: str) -> Set[str]: + async def get_current_hosts_in_room(room_id: str) -> set[str]: return {member.domain for member in self.room_members} hs.get_storage_controllers().state.get_current_hosts_in_room = Mock( # type: ignore[method-assign] @@ -174,7 +173,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): side_effect=get_current_hosts_in_room ) - async def get_users_in_room(room_id: str) -> Set[str]: + async def get_users_in_room(room_id: str) -> set[str]: return {str(u) for u in self.room_members} self.datastore.get_users_in_room = Mock(side_effect=get_users_in_room) diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 1ba0be51a2..f50fa1f4a0 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -17,7 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Tuple +from typing import Any from unittest.mock import AsyncMock, Mock, patch from urllib.parse import quote @@ -313,7 +313,7 @@ class 
UserDirectoryTestCase(unittest.HomeserverTestCase): def _create_rooms_and_inject_memberships( self, creator: str, token: str, joiner: str - ) -> Tuple[str, str]: + ) -> tuple[str, str]: """Create a public and private room as a normal user. Then get the `joiner` into those rooms. """ diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 3c20e5e442..b19a484004 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -19,7 +19,6 @@ # import os.path import subprocess -from typing import List from incremental import Version from zope.interface import implementer @@ -85,7 +84,7 @@ subjectAltName = %(sanentries)s """ -def create_test_cert_file(sanlist: List[bytes]) -> str: +def create_test_cert_file(sanlist: list[bytes]) -> str: """build an x509 certificate file Args: @@ -151,7 +150,7 @@ class TestServerTLSConnectionFactory: """An SSL connection creator which returns connections which present a certificate signed by our test CA.""" - def __init__(self, sanlist: List[bytes]): + def __init__(self, sanlist: list[bytes]): """ Args: sanlist: a list of subjectAltName values for the cert @@ -166,7 +165,7 @@ class TestServerTLSConnectionFactory: def wrap_server_factory_for_tls( - factory: IProtocolFactory, clock: IReactorTime, sanlist: List[bytes] + factory: IProtocolFactory, clock: IReactorTime, sanlist: list[bytes] ) -> TLSMemoryBIOFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index c66ca489a4..949564fcc7 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -20,7 +20,7 @@ import base64 import logging import os -from typing import Generator, List, Optional, cast +from typing import Generator, Optional, cast from unittest.mock import AsyncMock, call, patch import treq @@ -110,7 +110,7 @@ class MatrixFederationAgentTests(unittest.TestCase): client_factory: IProtocolFactory, ssl: bool = True, expected_sni: Optional[bytes] = None, - tls_sanlist: Optional[List[bytes]] = None, + tls_sanlist: Optional[list[bytes]] = None, ) -> HTTPChannel: """Builds a test server, and completes the outgoing client connection Args: diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index a359b0a141..54f3168a01 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Dict, Generator, List, Tuple, cast +from typing import Generator, cast from unittest.mock import Mock from twisted.internet import defer @@ -44,20 +44,20 @@ class SrvResolverTestCase(unittest.TestCase): type=dns.SRV, payload=dns.Record_SRV(target=host_name) ) - result_deferred: "Deferred[Tuple[List[dns.RRHeader], None, None]]" = Deferred() + result_deferred: "Deferred[tuple[list[dns.RRHeader], None, None]]" = Deferred() dns_client_mock.lookupService.return_value = result_deferred - cache: Dict[bytes, List[Server]] = {} + cache: dict[bytes, list[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) @defer.inlineCallbacks - def do_lookup() -> Generator["Deferred[object]", object, List[Server]]: + def do_lookup() -> Generator["Deferred[object]", object, list[Server]]: with LoggingContext( name="one", server_name="test_server", ) as ctx: resolve_d = resolver.resolve_service(service_name) - 
result: List[Server] + result: list[Server] result = yield defer.ensureDeferred(resolve_d) # type: ignore[assignment] # should have restored our context @@ -95,7 +95,7 @@ class SrvResolverTestCase(unittest.TestCase): cache = {service_name: [cast(Server, entry)]} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) - servers: List[Server] + servers: list[Server] servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) # type: ignore[assignment] dns_client_mock.lookupService.assert_called_once_with(service_name) @@ -122,7 +122,7 @@ class SrvResolverTestCase(unittest.TestCase): dns_client=dns_client_mock, cache=cache, get_time=clock.time ) - servers: List[Server] + servers: list[Server] servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) # type: ignore[assignment] self.assertFalse(dns_client_mock.lookupService.called) @@ -138,7 +138,7 @@ class SrvResolverTestCase(unittest.TestCase): service_name = b"test_service.example.com" - cache: Dict[bytes, List[Server]] = {} + cache: dict[bytes, list[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) with self.assertRaises(error.DNSServerError): @@ -152,10 +152,10 @@ class SrvResolverTestCase(unittest.TestCase): service_name = b"test_service.example.com" - cache: Dict[bytes, List[Server]] = {} + cache: dict[bytes, list[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) - servers: List[Server] + servers: list[Server] servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) # type: ignore[assignment] self.assertEqual(len(servers), 0) @@ -167,10 +167,10 @@ class SrvResolverTestCase(unittest.TestCase): """ service_name = b"test_service.example.com" - lookup_deferred: "Deferred[Tuple[List[dns.RRHeader], None, None]]" = Deferred() + lookup_deferred: "Deferred[tuple[list[dns.RRHeader], None, None]]" = Deferred() dns_client_mock = Mock() dns_client_mock.lookupService.return_value = lookup_deferred - cache: Dict[bytes, List[Server]] = {} + cache: dict[bytes, list[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) # Old versions of Twisted don't have an ensureDeferred in failureResultOf. @@ -193,10 +193,10 @@ class SrvResolverTestCase(unittest.TestCase): """ service_name = b"test_service.example.com" - lookup_deferred: "Deferred[Tuple[List[dns.RRHeader], None, None]]" = Deferred() + lookup_deferred: "Deferred[tuple[list[dns.RRHeader], None, None]]" = Deferred() dns_client_mock = Mock() dns_client_mock.lookupService.return_value = lookup_deferred - cache: Dict[bytes, List[Server]] = {} + cache: dict[bytes, list[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) # Old versions of Twisted don't have an ensureDeferred in successResultOf. diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index 8eec4329fe..cc9b5fd6e1 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -26,12 +26,8 @@ from typing import ( Any, Callable, ContextManager, - Dict, Generator, - List, Optional, - Set, - Tuple, TypeVar, Union, ) @@ -208,7 +204,7 @@ def make_request_with_cancellation_test( # The set of previously seen `await`s. # Each element is a stringified stack trace. - seen_awaits: Set[Tuple[str, ...]] = set() + seen_awaits: set[tuple[str, ...]] = set() _log_for_request( 0, f"Running make_request_with_cancellation_test for {test_name}..." 
@@ -337,7 +333,7 @@ class Deferred__await__Patch: deferred_patch.unblock_awaits() """ - def __init__(self, seen_awaits: Set[Tuple[str, ...]], request_number: int): + def __init__(self, seen_awaits: set[tuple[str, ...]], request_number: int): """ Args: seen_awaits: The set of stack traces of `await`s that have been previously @@ -365,10 +361,10 @@ class Deferred__await__Patch: # unresolved `Deferred` and return it out of `Deferred.__await__` / # `coroutine.send()`. We have to resolve it later, in case the `await`ing # coroutine is part of some shared processing, such as `@cached`. - self._to_unblock: Dict[Deferred, Union[object, Failure]] = {} + self._to_unblock: dict[Deferred, Union[object, Failure]] = {} # The last stack we logged. - self._previous_stack: List[inspect.FrameInfo] = [] + self._previous_stack: list[inspect.FrameInfo] = [] def patch(self) -> ContextManager[Mock]: """Returns a context manager which patches `Deferred.__await__`.""" @@ -507,8 +503,8 @@ def _log_for_request(request_number: int, message: str) -> None: def _log_await_stack( - stack: List[inspect.FrameInfo], - previous_stack: List[inspect.FrameInfo], + stack: list[inspect.FrameInfo], + previous_stack: list[inspect.FrameInfo], request_number: int, note: str, ) -> None: @@ -566,7 +562,7 @@ def _format_stack_frame(frame_info: inspect.FrameInfo) -> str: ) -def _get_stack(skip_frames: int) -> List[inspect.FrameInfo]: +def _get_stack(skip_frames: int) -> list[inspect.FrameInfo]: """Captures the stack for a request. Skips any twisted frames and stops at `JsonResource.wrapped_async_request_handler`. @@ -622,6 +618,6 @@ def _get_stack_frame_method_name(frame_info: inspect.FrameInfo) -> str: return method_name -def _hash_stack(stack: List[inspect.FrameInfo]) -> Tuple[str, ...]: +def _hash_stack(stack: list[inspect.FrameInfo]) -> tuple[str, ...]: """Turns a stack into a hashable value that can be put into a set.""" return tuple(_format_stack_frame(frame) for frame in stack) diff --git a/tests/http/test_client.py b/tests/http/test_client.py index a02f6fc728..d9eaa78a39 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -20,7 +20,7 @@ # from io import BytesIO -from typing import Tuple, Union +from typing import Union from unittest.mock import Mock from netaddr import IPSet @@ -59,7 +59,7 @@ class ReadMultipartResponseTests(TestCase): def _build_multipart_response( self, response_length: Union[int, str], max_length: int - ) -> Tuple[ + ) -> tuple[ BytesIO, "Deferred[MultipartResponse]", _MultipartParserProtocol, @@ -209,7 +209,7 @@ class ReadMultipartResponseTests(TestCase): class ReadBodyWithMaxSizeTests(TestCase): def _build_response( self, length: Union[int, str] = UNKNOWN_LENGTH - ) -> Tuple[ + ) -> tuple[ BytesIO, "Deferred[int]", _DiscardBodyWithMaxSizeProtocol, diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index 6d87541888..6accb03b9f 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -18,7 +18,7 @@ # # import io -from typing import Any, Dict, Generator +from typing import Any, Generator from unittest.mock import ANY, Mock, create_autospec from netaddr import IPSet @@ -745,7 +745,7 @@ class FederationClientTests(HomeserverTestCase): class FederationClientProxyTests(BaseMultiWorkerStreamTestCase): - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: conf = super().default_config() conf["instance_map"] = { "main": {"host": "testserv", "port": 8765}, diff 
--git a/tests/http/test_proxy.py b/tests/http/test_proxy.py index 7110dcf9f9..59a9b073bc 100644 --- a/tests/http/test_proxy.py +++ b/tests/http/test_proxy.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Set from parameterized import parameterized @@ -64,7 +63,7 @@ class ProxyTests(TestCase): def test_parse_connection_header_value( self, connection_header_value: bytes, - expected_extra_headers_to_remove: Set[str], + expected_extra_headers_to_remove: set[str], ) -> None: """ Tests that the connection header value is parsed correctly diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 5bc5d18d81..a9b4f3d956 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -21,7 +21,7 @@ import base64 import logging import os -from typing import List, Optional +from typing import Optional from unittest.mock import patch import treq @@ -252,7 +252,7 @@ class ProxyAgentTests(TestCase): server_factory: IProtocolFactory, ssl: bool = False, expected_sni: Optional[bytes] = None, - tls_sanlist: Optional[List[bytes]] = None, + tls_sanlist: Optional[list[bytes]] = None, ) -> IProtocol: """Builds a test server, and completes the outgoing client connection diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index db39ecf244..087191b220 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -21,7 +21,7 @@ import json from http import HTTPStatus from io import BytesIO -from typing import Tuple, Union +from typing import Union from unittest.mock import Mock from synapse.api.errors import Codes, SynapseError @@ -108,11 +108,11 @@ class CancellableRestServlet(RestServlet): self.clock = hs.get_clock() @cancellable - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py index e0fd12ccf7..534a1fc4ee 100644 --- a/tests/logging/test_remote_handler.py +++ b/tests/logging/test_remote_handler.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Tuple from twisted.internet.protocol import Protocol from twisted.internet.testing import AccumulatingProtocol, MemoryReactorClock @@ -33,7 +32,7 @@ from tests.utils import checked_cast def connect_logging_client( reactor: MemoryReactorClock, client_id: int -) -> Tuple[Protocol, AccumulatingProtocol]: +) -> tuple[Protocol, AccumulatingProtocol]: # This is essentially tests.server.connect_client, but disabling autoflush on # the client transport. 
This is necessary to avoid an infinite loop due to # sending of data via the logging transport causing additional logs to be diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 28c4ce676a..d584ea951c 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -23,7 +23,7 @@ import shutil import tempfile from binascii import unhexlify from io import BytesIO -from typing import Any, BinaryIO, ClassVar, Dict, List, Literal, Optional, Tuple, Union +from typing import Any, BinaryIO, ClassVar, Literal, Optional, Union from unittest.mock import MagicMock, Mock, patch from urllib import parse @@ -297,9 +297,9 @@ class MediaRepoTests(unittest.HomeserverTestCase): user_id = "@test:user" def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.fetches: List[ - Tuple[ - "Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]]", + self.fetches: list[ + tuple[ + "Deferred[tuple[bytes, tuple[int, dict[bytes, list[bytes]]]]]", str, str, Optional[QueryParams], @@ -317,12 +317,12 @@ class MediaRepoTests(unittest.HomeserverTestCase): retry_on_dns_fail: bool = True, ignore_backoff: bool = False, follow_redirects: bool = False, - ) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]]]]": + ) -> "Deferred[tuple[int, dict[bytes, list[bytes]]]]": """A mock for MatrixFederationHttpClient.get_file.""" def write_to( - r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]], - ) -> Tuple[int, Dict[bytes, List[bytes]]]: + r: tuple[bytes, tuple[int, dict[bytes, list[bytes]]]], + ) -> tuple[int, dict[bytes, list[bytes]]]: data, response = r output_stream.write(data) return response @@ -332,7 +332,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): output_stream.write(f.value.response) return f - d: Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]] = Deferred() + d: Deferred[tuple[bytes, tuple[int, dict[bytes, list[bytes]]]]] = Deferred() self.fetches.append((d, destination, path, args)) # Note that this callback changes the value held by d. d_after_callback = d.addCallbacks(write_to, write_err) @@ -370,7 +370,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): self.media_id = "example.com/12345" - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources @@ -860,12 +860,12 @@ class TestSpamCheckerLegacy: Uses the legacy Spam-Checker API. 
""" - def __init__(self, config: Dict[str, Any], api: ModuleApi) -> None: + def __init__(self, config: dict[str, Any], api: ModuleApi) -> None: self.config = config self.api = api @staticmethod - def parse_config(config: Dict[str, Any]) -> Dict[str, Any]: + def parse_config(config: dict[str, Any]) -> dict[str, Any]: return config async def check_event_for_spam(self, event: EventBase) -> Union[bool, str]: @@ -911,12 +911,12 @@ class SpamCheckerTestCaseLegacy(unittest.HomeserverTestCase): load_legacy_spam_checkers(hs) - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = default_config("test") config.update( @@ -965,14 +965,14 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): check_media_file_for_spam=self.check_media_file_for_spam ) - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo - ) -> Union[Codes, Literal["NOT_SPAM"], Tuple[Codes, JsonDict]]: + ) -> Union[Codes, Literal["NOT_SPAM"], tuple[Codes, JsonDict]]: buf = BytesIO() await file_wrapper.write_chunks_to(buf.write) @@ -1028,7 +1028,7 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): self.client = hs.get_federation_http_client() self.store = hs.get_datastores().main - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: # We need to manually set the resource tree to include media, the # default only does `/_matrix/client` APIs. 
return {"/_matrix/media": self.hs.get_media_repository_resource()} @@ -1280,7 +1280,7 @@ class MediaHashesTestCase(unittest.HomeserverTestCase): self.store = hs.get_datastores().main self.client = hs.get_federation_http_client() - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources @@ -1377,7 +1377,7 @@ class MediaRepoSizeModuleCallbackTestCase(unittest.HomeserverTestCase): is_user_allowed_to_upload_media_of_size=self.is_user_allowed_to_upload_media_of_size, ) - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py index b3f42c76f1..084eba3a5a 100644 --- a/tests/metrics/test_metrics.py +++ b/tests/metrics/test_metrics.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Dict, NoReturn, Protocol, Tuple +from typing import NoReturn, Protocol from prometheus_client.core import Sample @@ -35,7 +35,7 @@ from synapse.util.caches.deferred_cache import DeferredCache from tests import unittest -def get_sample_labels_value(sample: Sample) -> Tuple[Dict[str, str], float]: +def get_sample_labels_value(sample: Sample) -> tuple[dict[str, str], float]: """Extract the labels and values of a sample. prometheus_client 0.5 changed the sample type to a named tuple with more @@ -54,7 +54,7 @@ def get_sample_labels_value(sample: Sample) -> Tuple[Dict[str, str], float]: # Otherwise fall back to treating it as a plain 3 tuple. else: # In older versions of prometheus_client Sample was a 3-tuple. - labels: Dict[str, str] + labels: dict[str, str] value: float _, labels, value = sample # type: ignore[misc] return labels, value @@ -127,7 +127,7 @@ class TestMauLimit(unittest.TestCase): def get_metrics_from_gauge( self, gauge: InFlightGauge - ) -> Dict[str, Dict[Tuple[str, ...], float]]: + ) -> dict[str, dict[tuple[str, ...], float]]: results = {} for r in gauge.collect(): @@ -384,7 +384,7 @@ class LaterGaugeTests(unittest.HomeserverTestCase): self.assertEqual(hs2_metric_value, "2.0") -def get_latest_metrics() -> Dict[str, str]: +def get_latest_metrics() -> dict[str, str]: """ Collect the latest metrics from the registry and parse them into an easy to use map. The key includes the metric name and labels. 
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 86f987f292..b768a913d7 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Dict, Optional +from typing import Any, Optional from unittest.mock import AsyncMock, Mock from twisted.internet import defer @@ -839,7 +839,7 @@ class ModuleApiWorkerTestCase(BaseModuleApiTestCase, BaseMultiWorkerStreamTestCa presence.register_servlets, ] - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: conf = super().default_config() conf["stream_writers"] = {"presence": ["presence_writer"]} conf["instance_map"] = { diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 26819e2d3c..d3822b8643 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -21,7 +21,7 @@ import email.message import importlib.resources as importlib_resources import os from http import HTTPStatus -from typing import Any, Dict, List, Sequence, Tuple +from typing import Any, Sequence import attr from parameterized import parameterized @@ -83,8 +83,8 @@ class EmailPusherTests(HomeserverTestCase): hs = self.setup_test_homeserver(config=config) - # List[Tuple[Deferred, args, kwargs]] - self.email_attempts: List[Tuple[Deferred, Sequence, Dict]] = [] + # list[tuple[Deferred, args, kwargs]] + self.email_attempts: list[tuple[Deferred, Sequence, dict]] = [] def sendmail(*args: Any, **kwargs: Any) -> Deferred: # This mocks out synapse.reactor.send_email._sendmail. @@ -510,7 +510,7 @@ class EmailPusherTests(HomeserverTestCase): ) self.assertEqual(len(pushers), 0) - def _check_for_mail(self) -> Tuple[Sequence, Dict]: + def _check_for_mail(self) -> tuple[Sequence, dict]: """ Assert that synapse sent off exactly one email notification. diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 4c8aae5782..ca2ced01ed 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -17,7 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Dict, List, Tuple +from typing import Any from unittest.mock import Mock from parameterized import parameterized @@ -51,7 +51,7 @@ class HTTPPusherTests(HomeserverTestCase): hijack_auth = False def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.push_attempts: List[Tuple[Deferred, str, dict]] = [] + self.push_attempts: list[tuple[Deferred, str, dict]] = [] m = Mock() @@ -747,7 +747,7 @@ class HTTPPusherTests(HomeserverTestCase): def _make_user_with_pusher( self, username: str, enabled: bool = True - ) -> Tuple[str, str]: + ) -> tuple[str, str]: """Registers a user and creates a pusher for them. Args: @@ -925,7 +925,7 @@ class HTTPPusherTests(HomeserverTestCase): ret = self.get_success( self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) ) - pushers: List[PusherConfig] = list(ret) + pushers: list[PusherConfig] = list(ret) # Check that we still have one pusher, and that the device ID associated with # it didn't change. 
@@ -1118,7 +1118,7 @@ class HTTPPusherTests(HomeserverTestCase): device_id = user_tuple.device_id # Set the push data dict based on test input parameters - push_data: Dict[str, Any] = { + push_data: dict[str, Any] = { "url": "http://example.com/_matrix/push/v1/notify", } if disable_badge_count: diff --git a/tests/push/test_presentable_names.py b/tests/push/test_presentable_names.py index bd42fc0580..4982a80cce 100644 --- a/tests/push/test_presentable_names.py +++ b/tests/push/test_presentable_names.py @@ -19,7 +19,7 @@ # # -from typing import Iterable, List, Optional, Tuple, cast +from typing import Iterable, Optional, cast from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions @@ -36,7 +36,7 @@ class MockDataStore: (I.e. the state key is used as the event ID.) """ - def __init__(self, events: Iterable[Tuple[StateKey, dict]]): + def __init__(self, events: Iterable[tuple[StateKey, dict]]): """ Args: events: A state map to event contents. @@ -63,7 +63,7 @@ class MockDataStore: assert allow_none, "Mock not configured for allow_none = False" # Decode the state key from the event ID. - state_key = cast(Tuple[str, str], tuple(event_id.split("|", 1))) + state_key = cast(tuple[str, str], tuple(event_id.split("|", 1))) return self._events.get(state_key) async def get_events(self, event_ids: Iterable[StateKey]) -> StateMap[EventBase]: @@ -77,7 +77,7 @@ class PresentableNamesTestCase(unittest.HomeserverTestCase): def _calculate_room_name( self, - events: Iterable[Tuple[Tuple[str, str], dict]], + events: Iterable[tuple[tuple[str, str], dict]], user_id: str = "", fallback_to_members: bool = True, fallback_to_single_member: bool = True, @@ -97,7 +97,7 @@ class PresentableNamesTestCase(unittest.HomeserverTestCase): def test_name(self) -> None: """A room name event should be used.""" - events: List[Tuple[Tuple[str, str], dict]] = [ + events: list[tuple[tuple[str, str], dict]] = [ ((EventTypes.Name, ""), {"name": "test-name"}), ] self.assertEqual("test-name", self._calculate_room_name(events)) @@ -111,7 +111,7 @@ class PresentableNamesTestCase(unittest.HomeserverTestCase): def test_canonical_alias(self) -> None: """An canonical alias should be used.""" - events: List[Tuple[Tuple[str, str], dict]] = [ + events: list[tuple[tuple[str, str], dict]] = [ ((EventTypes.CanonicalAlias, ""), {"alias": "#test-name:test"}), ] self.assertEqual("#test-name:test", self._calculate_room_name(events)) @@ -125,7 +125,7 @@ class PresentableNamesTestCase(unittest.HomeserverTestCase): def test_invite(self) -> None: """An invite has special behaviour.""" - events: List[Tuple[Tuple[str, str], dict]] = [ + events: list[tuple[tuple[str, str], dict]] = [ ((EventTypes.Member, self.USER_ID), {"membership": Membership.INVITE}), ((EventTypes.Member, self.OTHER_USER_ID), {"displayname": "Other User"}), ] @@ -151,7 +151,7 @@ class PresentableNamesTestCase(unittest.HomeserverTestCase): def test_no_members(self) -> None: """Behaviour of an empty room.""" - events: List[Tuple[Tuple[str, str], dict]] = [] + events: list[tuple[tuple[str, str], dict]] = [] self.assertEqual("Empty Room", self._calculate_room_name(events)) # Note that events with invalid (or missing) membership are ignored. 
diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 718c9614e5..b1f7ba6973 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -19,7 +19,7 @@ # # -from typing import Any, Dict, List, Optional, Union, cast +from typing import Any, Optional, Union, cast from twisted.internet.testing import MemoryReactor @@ -60,7 +60,7 @@ class FlattenDictTestCase(unittest.TestCase): def test_non_string(self) -> None: """String, booleans, ints, nulls and list of those should be kept while other items are dropped.""" - input: Dict[str, Any] = { + input: dict[str, Any] = { "woo": "woo", "foo": True, "bar": 1, @@ -165,13 +165,13 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): ) room_member_count = 0 sender_power_level = 0 - power_levels: Dict[str, Union[int, Dict[str, int]]] = {} + power_levels: dict[str, Union[int, dict[str, int]]] = {} return PushRuleEvaluator( _flatten_dict(event), False, room_member_count, sender_power_level, - cast(Dict[str, int], power_levels.get("notifications", {})), + cast(dict[str, int], power_levels.get("notifications", {})), {} if related_events is None else related_events, related_event_match_enabled=True, room_version_feature_flags=event.room_version.msc3931_push_features, @@ -588,7 +588,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): This tests the behaviour of tweaks_for_actions. """ - actions: List[Union[Dict[str, str], str]] = [ + actions: list[Union[dict[str, str], str]] = [ {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}, "notify", diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 8a6394e9ef..84bdc84ce9 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -19,7 +19,7 @@ # import logging from collections import defaultdict -from typing import Any, Dict, List, Optional, Set, Tuple +from typing import Any, Optional from twisted.internet.address import IPv4Address from twisted.internet.protocol import Protocol, connectionDone @@ -108,7 +108,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): self._client_transport: Optional[FakeTransport] = None self._server_transport: Optional[FakeTransport] = None - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: d = super().create_resource_dict() d["/_synapse/replication"] = ReplicationRestResource(self.hs) return d @@ -183,7 +183,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): # hook into the channel's request factory so that we can keep a record # of the requests - requests: List[SynapseRequest] = [] + requests: list[SynapseRequest] = [] real_request_factory = channel.requestFactory def request_factory(*args: Any, **kwargs: Any) -> SynapseRequest: @@ -256,7 +256,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): # Redis replication only takes place on Postgres skip = "Requires Postgres" - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: """ Overrides the default config to enable Redis. 
Even if the test only uses make_worker_hs, the main process needs Redis @@ -491,7 +491,7 @@ class TestReplicationDataHandler(ReplicationDataHandler): super().__init__(hs) # list of received (stream_name, token, row) tuples - self.received_rdata_rows: List[Tuple[str, int, Any]] = [] + self.received_rdata_rows: list[tuple[str, int, Any]] = [] async def on_rdata( self, stream_name: str, instance_name: str, token: int, rows: list @@ -505,7 +505,7 @@ class FakeRedisPubSubServer: """A fake Redis server for pub/sub.""" def __init__(self) -> None: - self._subscribers_by_channel: Dict[bytes, Set["FakeRedisPubSubProtocol"]] = ( + self._subscribers_by_channel: dict[bytes, set["FakeRedisPubSubProtocol"]] = ( defaultdict(set) ) diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py index 31d3163c01..b757c6428a 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py @@ -20,7 +20,6 @@ # from http import HTTPStatus -from typing import Tuple from twisted.web.server import Request @@ -52,7 +51,7 @@ class CancellableReplicationEndpoint(ReplicationEndpoint): @cancellable async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} @@ -73,7 +72,7 @@ class UncancellableReplicationEndpoint(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict - ) -> Tuple[int, JsonDict]: + ) -> tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py index fce3269005..1398689c2d 100644 --- a/tests/replication/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -19,7 +19,7 @@ # # import logging -from typing import Any, Iterable, List, Optional, Tuple +from typing import Any, Iterable, Optional from canonicaljson import encode_canonical_json from parameterized import parameterized @@ -244,13 +244,13 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): key: Optional[str] = None, internal: Optional[dict] = None, depth: Optional[int] = None, - prev_events: Optional[List[Tuple[str, dict]]] = None, - auth_events: Optional[List[str]] = None, - prev_state: Optional[List[str]] = None, + prev_events: Optional[list[tuple[str, dict]]] = None, + auth_events: Optional[list[str]] = None, + prev_state: Optional[list[str]] = None, redacts: Optional[str] = None, push_actions: Iterable = frozenset(), **content: object, - ) -> Tuple[EventBase, EventContext]: + ) -> tuple[EventBase, EventContext]: prev_events = prev_events or [] auth_events = auth_events or [] prev_state = prev_state or [] diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 452032205f..9607c03224 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -18,7 +18,7 @@ # # -from typing import Any, List, Optional +from typing import Any, Optional from parameterized import parameterized @@ -299,7 +299,7 @@ class EventsStreamTestCase(BaseStreamTestCase): self.assertEqual(row.data.event_id, pl_event.event_id) # the state rows are unsorted - state_rows: List[EventsStreamCurrentStateRow] = [] + state_rows: list[EventsStreamCurrentStateRow] = [] for stream_name, _, row in received_event_rows: self.assertEqual("events", stream_name) 
self.assertIsInstance(row, EventsStreamRow) @@ -355,7 +355,7 @@ class EventsStreamTestCase(BaseStreamTestCase): self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) ) - events: List[EventBase] = [] + events: list[EventBase] = [] for user in user_ids: events.extend( self._inject_state_event(sender=user) for _ in range(STATES_PER_USER) @@ -426,7 +426,7 @@ class EventsStreamTestCase(BaseStreamTestCase): self.assertEqual(row.data.event_id, pl_events[i].event_id) # the state rows are unsorted - state_rows: List[EventsStreamCurrentStateRow] = [] + state_rows: list[EventsStreamCurrentStateRow] = [] for _ in range(STATES_PER_USER + 1): stream_name, token, row = received_event_rows.pop(0) self.assertEqual("events", stream_name) diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index f712ad1fe3..193c6c0198 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -20,7 +20,7 @@ # import logging import os -from typing import Any, Optional, Tuple +from typing import Any, Optional from twisted.internet.protocol import Factory from twisted.internet.testing import MemoryReactor @@ -78,7 +78,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): def _get_media_req( self, hs: HomeServer, target: str, media_id: str - ) -> Tuple[FakeChannel, Request]: + ) -> tuple[FakeChannel, Request]: """Request some remote media from the given HS by calling the download API. @@ -293,7 +293,7 @@ class AuthenticatedMediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): def _get_media_req( self, hs: HomeServer, target: str, media_id: str - ) -> Tuple[FakeChannel, Request]: + ) -> tuple[FakeChannel, Request]: """Request some remote media from the given HS by calling the download API. 
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 2a17389feb..f3740a8e35 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -20,7 +20,7 @@ # import urllib.parse -from typing import Dict, cast +from typing import cast from parameterized import parameterized @@ -65,7 +65,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 28be7fcd97..19d945bb42 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List from twisted.internet.testing import MemoryReactor @@ -441,7 +440,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): ) self.assertEqual(200, channel.code, msg=channel.json_body) - def _check_fields(self, content: List[JsonDict]) -> None: + def _check_fields(self, content: list[JsonDict]) -> None: """Checks that all attributes are present in an event report""" for c in content: self.assertIn("id", c) diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index d0b57d1faa..5586bb47e1 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Optional +from typing import Optional from parameterized import parameterized @@ -272,7 +272,7 @@ class FederationTestCase(unittest.HomeserverTestCase): """Testing order list with parameter `order_by`""" def _order_test( - expected_destination_list: List[str], + expected_destination_list: list[str], order_by: Optional[str], dir: Optional[str] = None, ) -> None: @@ -521,7 +521,7 @@ class FederationTestCase(unittest.HomeserverTestCase): dest = f"sub{i}.example.com" self._create_destination(dest, 50, 50, 50, 100) - def _check_fields(self, content: List[JsonDict]) -> None: + def _check_fields(self, content: list[JsonDict]) -> None: """Checks that the expected destination attributes are present in content Args: @@ -820,7 +820,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): self, number_rooms: int, destination: Optional[str] = None, - ) -> List[str]: + ) -> list[str]: """ Create the given number of rooms. The given `destination` homeserver will be recorded as a participant. 
@@ -853,7 +853,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): return room_ids - def _check_fields(self, content: List[JsonDict]) -> None: + def _check_fields(self, content: list[JsonDict]) -> None: """Checks that the expected room attributes are present in content Args: diff --git a/tests/rest/admin/test_jwks.py b/tests/rest/admin/test_jwks.py index 55b822c4d0..ee5588951b 100644 --- a/tests/rest/admin/test_jwks.py +++ b/tests/rest/admin/test_jwks.py @@ -19,7 +19,6 @@ # # -from typing import Dict from twisted.web.resource import Resource @@ -33,7 +32,7 @@ from tests.utils import HAS_AUTHLIB class JWKSTestCase(HomeserverTestCase): """Test /_synapse/jwks JWKS data.""" - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: d = super().create_resource_dict() d.update(build_synapse_client_resource_tree(self.hs)) return d diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 86c09634cc..8cc54cc80c 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -20,7 +20,6 @@ # # import os -from typing import Dict from parameterized import parameterized @@ -51,7 +50,7 @@ class _AdminMediaTests(unittest.HomeserverTestCase): media.register_servlets, ] - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 30b2de26e4..6bd21630db 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -22,7 +22,7 @@ import json import time import urllib.parse from http import HTTPStatus -from typing import List, Optional +from typing import Optional from unittest.mock import AsyncMock, Mock from parameterized import parameterized @@ -1609,7 +1609,7 @@ class RoomTestCase(unittest.HomeserverTestCase): def _order_test( order_type: str, - expected_room_list: List[str], + expected_room_list: list[str], reverse: bool = False, ) -> None: """Request the list of rooms in a certain order. 
Assert that order is what diff --git a/tests/rest/admin/test_scheduled_tasks.py b/tests/rest/admin/test_scheduled_tasks.py index 16b80e214b..264c62e2de 100644 --- a/tests/rest/admin/test_scheduled_tasks.py +++ b/tests/rest/admin/test_scheduled_tasks.py @@ -13,7 +13,7 @@ # # # -from typing import Mapping, Optional, Tuple +from typing import Mapping, Optional from twisted.internet.testing import MemoryReactor @@ -42,17 +42,17 @@ class ScheduledTasksAdminApiTestCase(unittest.HomeserverTestCase): # create and schedule a few tasks async def _test_task( task: ScheduledTask, - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: return TaskStatus.ACTIVE, None, None async def _finished_test_task( task: ScheduledTask, - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: return TaskStatus.COMPLETE, None, None async def _failed_test_task( task: ScheduledTask, - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: return TaskStatus.FAILED, None, "Everything failed" self._task_scheduler.register_action(_test_task, "test_task") diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py index ebb6867d7c..5053fea9c9 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Sequence +from typing import Sequence from twisted.internet.testing import MemoryReactor @@ -729,7 +729,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): return invited_rooms - def _sync_and_get_messages(self, room_id: str, token: str) -> List[JsonDict]: + def _sync_and_get_messages(self, room_id: str, token: str) -> list[JsonDict]: """ Do a sync and get messages of a room. 
diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index 4026c47a23..a18952983e 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -19,7 +19,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Dict, List, Optional +from typing import Optional from twisted.internet.testing import MemoryReactor from twisted.web.resource import Resource @@ -50,7 +50,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): self.url = "/_synapse/admin/v1/statistics/users/media" - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources @@ -485,7 +485,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): # Upload some media into the room self.helper.upload_media(SMALL_PNG, tok=user_token, expect_code=200) - def _check_fields(self, content: List[JsonDict]) -> None: + def _check_fields(self, content: list[JsonDict]) -> None: """Checks that all attributes are present in content Args: content: List that is checked for content @@ -497,7 +497,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): self.assertIn("media_length", c) def _order_test( - self, order_type: str, expected_user_list: List[str], dir: Optional[str] = None + self, order_type: str, expected_user_list: list[str], dir: Optional[str] = None ) -> None: """Request the list of users in a certain order. Assert that order is what we expect diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index ca41cd6c31..040b21d471 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -27,7 +27,7 @@ import time import urllib.parse from binascii import unhexlify from http import HTTPStatus -from typing import Dict, List, Optional +from typing import Optional from unittest.mock import AsyncMock, Mock, patch from parameterized import parameterized, parameterized_class @@ -1185,7 +1185,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): ) def test_user_type( - expected_user_ids: List[str], not_user_types: Optional[List[str]] = None + expected_user_ids: list[str], not_user_types: Optional[list[str]] = None ) -> None: """Runs a test for the not_user_types param Args: @@ -1262,7 +1262,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): ) def test_user_type( - expected_user_ids: List[str], not_user_types: Optional[List[str]] = None + expected_user_ids: list[str], not_user_types: Optional[list[str]] = None ) -> None: """Runs a test for the not_user_types param Args: @@ -1373,7 +1373,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): def _order_test( self, - expected_user_list: List[str], + expected_user_list: list[str], order_by: Optional[str], dir: Optional[str] = None, ) -> None: @@ -1403,7 +1403,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): self.assertEqual(expected_user_list, returned_order) self._check_fields(channel.json_body["users"]) - def _check_fields(self, content: List[JsonDict]) -> None: + def _check_fields(self, content: list[JsonDict]) -> None: """Checks that the expected user attributes are present in content Args: content: List that is checked for content @@ -3690,7 +3690,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): self.other_user ) - def create_resource_dict(self) -> Dict[str, Resource]: + def 
create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources @@ -4138,7 +4138,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): [media2] + sorted([media1, media3]), "safe_from_quarantine", "b" ) - def _create_media_for_user(self, user_token: str, number_media: int) -> List[str]: + def _create_media_for_user(self, user_token: str, number_media: int) -> list[str]: """ Create a number of media for a specific user Args: @@ -4195,7 +4195,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): return media_id - def _check_fields(self, content: List[JsonDict]) -> None: + def _check_fields(self, content: list[JsonDict]) -> None: """Checks that the expected user attributes are present in content Args: content: List that is checked for content @@ -4212,7 +4212,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): def _order_test( self, - expected_media_list: List[str], + expected_media_list: list[str], order_by: Optional[str], dir: Optional[str] = None, ) -> None: diff --git a/tests/rest/client/sliding_sync/test_extension_thread_subscriptions.py b/tests/rest/client/sliding_sync/test_extension_thread_subscriptions.py index 4e151b9aae..de76334f64 100644 --- a/tests/rest/client/sliding_sync/test_extension_thread_subscriptions.py +++ b/tests/rest/client/sliding_sync/test_extension_thread_subscriptions.py @@ -13,7 +13,7 @@ # import logging from http import HTTPStatus -from typing import List, Optional, Tuple, cast +from typing import Optional, cast from twisted.test.proto_helpers import MemoryReactor @@ -358,7 +358,7 @@ class SlidingSyncThreadSubscriptionsExtensionTestCase(SlidingSyncBase): using the companion /thread_subscriptions endpoint. """ - thread_root_ids: List[str] = [] + thread_root_ids: list[str] = [] def make_subscription() -> None: thread_root_resp = self.helper.send( @@ -455,7 +455,7 @@ class SlidingSyncThreadSubscriptionsExtensionTestCase(SlidingSyncBase): def _do_backpaginate( self, *, from_tok: str, to_tok: str, limit: int, access_token: str - ) -> Tuple[JsonDict, Optional[str]]: + ) -> tuple[JsonDict, Optional[str]]: channel = self.make_request( "GET", "/_matrix/client/unstable/io.element.msc4308/thread_subscriptions" diff --git a/tests/rest/client/sliding_sync/test_extension_to_device.py b/tests/rest/client/sliding_sync/test_extension_to_device.py index a77b0a2e9f..0b0a65babf 100644 --- a/tests/rest/client/sliding_sync/test_extension_to_device.py +++ b/tests/rest/client/sliding_sync/test_extension_to_device.py @@ -12,7 +12,6 @@ # . # import logging -from typing import List from parameterized import parameterized_class @@ -59,7 +58,7 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): super().prepare(reactor, clock, hs) def _assert_to_device_response( - self, response_body: JsonDict, expected_messages: List[JsonDict] + self, response_body: JsonDict, expected_messages: list[JsonDict] ) -> str: """Assert the sliding sync response was successful and has the expected to-device messages. diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py index 44a6068c11..04a9cd5382 100644 --- a/tests/rest/client/sliding_sync/test_rooms_timeline.py +++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py @@ -12,7 +12,7 @@ # . 
# import logging -from typing import List, Optional +from typing import Optional from parameterized import parameterized_class @@ -75,14 +75,14 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): if actual_items == expected_items: return - expected_lines: List[str] = [] + expected_lines: list[str] = [] for expected_item in expected_items: is_expected_in_actual = expected_item in actual_items expected_lines.append( "{} {}".format(" " if is_expected_in_actual else "?", expected_item) ) - actual_lines: List[str] = [] + actual_lines: list[str] = [] for actual_item in actual_items: is_actual_in_expected = actual_item in expected_items actual_lines.append( @@ -101,8 +101,8 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): self, *, room_id: str, - actual_event_ids: List[str], - expected_event_ids: List[str], + actual_event_ids: list[str], + expected_event_ids: list[str], message: Optional[str] = None, ) -> None: """ diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index 8da5863b3a..9f4c6bad05 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -12,7 +12,7 @@ # . # import logging -from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple +from typing import Any, Iterable, Literal, Optional from unittest.mock import AsyncMock from parameterized import parameterized, parameterized_class @@ -82,7 +82,7 @@ class SlidingSyncBase(unittest.HomeserverTestCase): def do_sync( self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str - ) -> Tuple[JsonDict, str]: + ) -> tuple[JsonDict, str]: """Do a sliding sync request with given body. Asserts the request was successful. @@ -170,7 +170,7 @@ class SlidingSyncBase(unittest.HomeserverTestCase): # Scrutinize the account data since it has no concrete type. We're just copying # everything into a known type. It should be a mapping from user ID to a list of # room IDs. Ignore anything else. 
- new_dm_map: Dict[str, List[str]] = {} + new_dm_map: dict[str, list[str]] = {} if isinstance(existing_dm_map, dict): for user_id, room_ids in existing_dm_map.items(): if isinstance(user_id, str) and isinstance(room_ids, list): @@ -239,7 +239,7 @@ class SlidingSyncBase(unittest.HomeserverTestCase): def _create_remote_invite_room_for_user( self, invitee_user_id: str, - unsigned_invite_room_state: Optional[List[StrippedStateEvent]], + unsigned_invite_room_state: Optional[list[StrippedStateEvent]], invite_room_id: Optional[str] = None, ) -> str: """ diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 773f49dfc9..c4c62c7800 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -23,7 +23,7 @@ import os import re from email.parser import Parser from http import HTTPStatus -from typing import Any, Dict, List, Optional, Union +from typing import Any, Optional, Union from unittest.mock import Mock from twisted.internet.interfaces import IReactorTCP @@ -87,7 +87,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): ) -> None: self.email_attempts.append(msg_bytes) - self.email_attempts: List[bytes] = [] + self.email_attempts: list[bytes] = [] hs.get_send_email_handler()._sendmail = sendmail return hs @@ -721,7 +721,7 @@ class WhoamiTestCase(unittest.HomeserverTestCase): register.register_servlets, ] - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["allow_guest_access"] = True return config @@ -827,7 +827,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): ) -> None: self.email_attempts.append(msg_bytes) - self.email_attempts: List[bytes] = [] + self.email_attempts: list[bytes] = [] self.hs.get_send_email_handler()._sendmail = sendmail return self.hs @@ -1501,10 +1501,10 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): def _test_status( self, - users: Optional[List[str]], + users: Optional[list[str]], expected_status_code: int = HTTPStatus.OK, - expected_statuses: Optional[Dict[str, Dict[str, bool]]] = None, - expected_failures: Optional[List[str]] = None, + expected_statuses: Optional[dict[str, dict[str, bool]]] = None, + expected_failures: Optional[list[str]] = None, expected_errcode: Optional[str] = None, ) -> None: """Send a request to the account status endpoint and check that the response diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index f5b7f95721..5955d4b7a2 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -20,7 +20,7 @@ # import re from http import HTTPStatus -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Optional, Union from twisted.internet.defer import succeed from twisted.internet.testing import MemoryReactor @@ -47,7 +47,7 @@ from tests.unittest import override_config, skip_unless class DummyRecaptchaChecker(UserInteractiveAuthChecker): def __init__(self, hs: HomeServer) -> None: super().__init__(hs) - self.recaptcha_attempts: List[Tuple[dict, str]] = [] + self.recaptcha_attempts: list[tuple[dict, str]] = [] def is_enabled(self) -> bool: return True @@ -178,7 +178,7 @@ class UIAuthTests(unittest.HomeserverTestCase): register.register_servlets, ] - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() # public_baseurl uses an http:// scheme because FakeChannel.isSecure() returns @@ -195,7 +195,7 @@ class 
UIAuthTests(unittest.HomeserverTestCase): return config - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resource_dict = super().create_resource_dict() resource_dict.update(build_synapse_client_resource_tree(self.hs)) return resource_dict @@ -1091,7 +1091,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): was very slow if a lot of refreshes had been performed for the session. """ - def _refresh(refresh_token: str) -> Tuple[str, str]: + def _refresh(refresh_token: str) -> tuple[str, str]: """ Performs one refresh, returning the next refresh token and access token. """ @@ -1172,7 +1172,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): def oidc_config( id: str, with_localpart_template: bool, **kwargs: Any -) -> Dict[str, Any]: +) -> dict[str, Any]: """Sample OIDC provider config used in backchannel logout tests. Args: @@ -1185,7 +1185,7 @@ def oidc_config( A dict suitable for the `oidc_config` or the `oidc_providers[]` parts of the HS config """ - config: Dict[str, Any] = { + config: dict[str, Any] = { "idp_id": id, "idp_name": id, "issuer": TEST_OIDC_ISSUER, @@ -1213,7 +1213,7 @@ class OidcBackchannelLogoutTests(unittest.HomeserverTestCase): login.register_servlets, ] - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() # public_baseurl uses an http:// scheme because FakeChannel.isSecure() returns @@ -1223,7 +1223,7 @@ class OidcBackchannelLogoutTests(unittest.HomeserverTestCase): return config - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resource_dict = super().create_resource_dict() resource_dict.update(build_synapse_client_resource_tree(self.hs)) return resource_dict @@ -1363,7 +1363,7 @@ class OidcBackchannelLogoutTests(unittest.HomeserverTestCase): # We should have a user_mapping_session cookie cookie_headers = channel.headers.getRawHeaders("Set-Cookie") assert cookie_headers - cookies: Dict[str, str] = {} + cookies: dict[str, str] = {} for h in cookie_headers: key, value = h.split(";")[0].split("=", maxsplit=1) cookies[key] = value diff --git a/tests/rest/client/test_delayed_events.py b/tests/rest/client/test_delayed_events.py index 221a4902f2..c67ffc7668 100644 --- a/tests/rest/client/test_delayed_events.py +++ b/tests/rest/client/test_delayed_events.py @@ -15,7 +15,6 @@ """Tests REST events for /delayed_events paths.""" from http import HTTPStatus -from typing import List from parameterized import parameterized @@ -574,7 +573,7 @@ class DelayedEventsTestCase(HomeserverTestCase): ) self.assertEqual(setter_expected, content.get(setter_key), content) - def _get_delayed_events(self) -> List[JsonDict]: + def _get_delayed_events(self) -> list[JsonDict]: channel = self.make_request( "GET", PATH_PREFIX, diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index c54e409a6c..1ebd59b42a 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -25,11 +25,8 @@ from typing import ( BinaryIO, Callable, Collection, - Dict, - List, Literal, Optional, - Tuple, Union, ) from unittest.mock import Mock @@ -146,11 +143,11 @@ class TestSpamChecker: user_id: str, device_id: Optional[str], initial_display_name: Optional[str], - request_info: Collection[Tuple[Optional[str], str]], + request_info: Collection[tuple[Optional[str], str]], auth_provider_id: Optional[str] = None, ) -> Union[ Literal["NOT_SPAM"], - 
Tuple["synapse.module_api.errors.Codes", JsonDict], + tuple["synapse.module_api.errors.Codes", JsonDict], ]: return "NOT_SPAM" @@ -170,11 +167,11 @@ class DenyAllSpamChecker: user_id: str, device_id: Optional[str], initial_display_name: Optional[str], - request_info: Collection[Tuple[Optional[str], str]], + request_info: Collection[tuple[Optional[str], str]], auth_provider_id: Optional[str] = None, ) -> Union[ Literal["NOT_SPAM"], - Tuple["synapse.module_api.errors.Codes", JsonDict], + tuple["synapse.module_api.errors.Codes", JsonDict], ]: # Return an odd set of values to ensure that they get correctly passed # to the client. @@ -633,7 +630,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["public_baseurl"] = PUBLIC_BASEURL @@ -678,7 +675,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.login_sso_redirect_url_builder = LoginSSORedirectURIBuilder(hs.config) - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: d = super().create_resource_dict() d.update(build_synapse_client_resource_tree(self.hs)) return d @@ -730,7 +727,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): p.close() # there should be a link for each href - returned_idps: List[str] = [] + returned_idps: list[str] = [] for link in p.links: path, query = link.split("?", 1) self.assertEqual(path, "pick_idp") @@ -891,7 +888,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): # ... and should have set a cookie including the redirect url cookie_headers = channel.headers.getRawHeaders("Set-Cookie") assert cookie_headers - cookies: Dict[str, str] = {} + cookies: dict[str, str] = {} for h in cookie_headers: key, value = h.split(";")[0].split("=", maxsplit=1) cookies[key] = value @@ -1179,7 +1176,7 @@ class JWTTestCase(unittest.HomeserverTestCase): "algorithm": jwt_algorithm, } - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() # If jwt_config has been defined (eg via @override_config), don't replace it. 
@@ -1188,7 +1185,7 @@ class JWTTestCase(unittest.HomeserverTestCase): return config - def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_secret) -> str: + def jwt_encode(self, payload: dict[str, Any], secret: str = jwt_secret) -> str: header = {"alg": self.jwt_algorithm} result: bytes = jwt.encode(header, payload, secret) return result.decode("ascii") @@ -1426,7 +1423,7 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase): ] ) - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["jwt_config"] = { "enabled": True, @@ -1435,7 +1432,7 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase): } return config - def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str: + def jwt_encode(self, payload: dict[str, Any], secret: str = jwt_privatekey) -> str: header = {"alg": "RS256"} if secret.startswith("-----BEGIN RSA PRIVATE KEY-----"): secret = JsonWebKey.import_key(secret, {"kty": "RSA"}) @@ -1630,7 +1627,7 @@ class UsernamePickerTestCase(HomeserverTestCase): ) return hs - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["public_baseurl"] = PUBLIC_BASEURL @@ -1649,7 +1646,7 @@ class UsernamePickerTestCase(HomeserverTestCase): config["sso"] = {"client_whitelist": ["https://x"]} return config - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: d = super().create_resource_dict() d.update(build_synapse_client_resource_tree(self.hs)) return d @@ -1660,7 +1657,7 @@ class UsernamePickerTestCase(HomeserverTestCase): displayname: str, email: str, picture: str, - ) -> Tuple[str, str]: + ) -> tuple[str, str]: # do the start of the login flow channel, _ = self.helper.auth_via_oidc( fake_oidc_server, @@ -1681,7 +1678,7 @@ class UsernamePickerTestCase(HomeserverTestCase): self.assertEqual(picker_url, "/_synapse/client/pick_username/account_details") # ... 
with a username_mapping_session cookie - cookies: Dict[str, str] = {} + cookies: dict[str, str] = {} channel.extract_cookies(cookies) self.assertIn("username_mapping_session", cookies) session_id = cookies["username_mapping_session"] @@ -1894,5 +1891,5 @@ async def mock_get_file( max_size: Optional[int] = None, headers: Optional[RawHeaders] = None, is_allowed_content_type: Optional[Callable[[str], bool]] = None, -) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: +) -> tuple[int, dict[bytes, list[bytes]], str, int]: return 0, {b"Content-Type": [b"image/png"]}, "", 200 diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py index 91bf94b672..79f70db8a3 100644 --- a/tests/rest/client/test_media.py +++ b/tests/rest/client/test_media.py @@ -24,7 +24,7 @@ import json import os import re import shutil -from typing import Any, BinaryIO, ClassVar, Dict, List, Optional, Sequence, Tuple, Type +from typing import Any, BinaryIO, ClassVar, Optional, Sequence from unittest.mock import MagicMock, Mock, patch from urllib import parse from urllib.parse import quote, urlencode @@ -265,7 +265,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): assert self.media_repo.url_previewer is not None self.url_previewer = self.media_repo.url_previewer - self.lookups: Dict[str, Any] = {} + self.lookups: dict[str, Any] = {} class Resolver: def resolveHostName( @@ -273,7 +273,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): resolutionReceiver: IResolutionReceiver, hostName: str, portNumber: int = 0, - addressTypes: Optional[Sequence[Type[IAddress]]] = None, + addressTypes: Optional[Sequence[type[IAddress]]] = None, transportSemantics: str = "TCP", ) -> IResolutionReceiver: resolution = HostResolution(hostName) @@ -1357,7 +1357,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(body["og:title"], "Test") self.assertNotIn("og:image", body) - def _download_image(self) -> Tuple[str, str]: + def _download_image(self) -> tuple[str, str]: """Downloads an image into the URL cache. Returns: A (host, media_id) tuple representing the MXC URI of the image. 
@@ -1994,8 +1994,8 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.fetches: List[ - Tuple[ + self.fetches: list[ + tuple[ "Deferred[Any]", str, str, @@ -2014,12 +2014,12 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): retry_on_dns_fail: bool = True, ignore_backoff: bool = False, follow_redirects: bool = False, - ) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]], bytes]]": + ) -> "Deferred[tuple[int, dict[bytes, list[bytes]], bytes]]": """A mock for MatrixFederationHttpClient.federation_get_file.""" def write_to( - r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]], - ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]: + r: tuple[bytes, tuple[int, dict[bytes, list[bytes]], bytes]], + ) -> tuple[int, dict[bytes, list[bytes]], bytes]: data, response = r output_stream.write(data) return response @@ -2029,7 +2029,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): output_stream.write(f.value.response) return f - d: Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]]] = ( + d: Deferred[tuple[bytes, tuple[int, dict[bytes, list[bytes]], bytes]]] = ( Deferred() ) self.fetches.append((d, destination, path, args)) @@ -2048,12 +2048,12 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): retry_on_dns_fail: bool = True, ignore_backoff: bool = False, follow_redirects: bool = False, - ) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]]]]": + ) -> "Deferred[tuple[int, dict[bytes, list[bytes]]]]": """A mock for MatrixFederationHttpClient.get_file.""" def write_to( - r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]], - ) -> Tuple[int, Dict[bytes, List[bytes]]]: + r: tuple[bytes, tuple[int, dict[bytes, list[bytes]]]], + ) -> tuple[int, dict[bytes, list[bytes]]]: data, response = r output_stream.write(data) return response @@ -2063,7 +2063,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): output_stream.write(f.value.response) return f - d: Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]] = Deferred() + d: Deferred[tuple[bytes, tuple[int, dict[bytes, list[bytes]]]]] = Deferred() self.fetches.append((d, destination, path, args)) # Note that this callback changes the value held by d. 
d_after_callback = d.addCallbacks(write_to, write_err) @@ -2538,7 +2538,7 @@ configs = [ @parameterized_class(configs) class AuthenticatedMediaTestCase(unittest.HomeserverTestCase): - extra_config: Dict[str, Any] + extra_config: dict[str, Any] servlets = [ media.register_servlets, login.register_servlets, @@ -2576,7 +2576,7 @@ class AuthenticatedMediaTestCase(unittest.HomeserverTestCase): self.user = self.register_user("user", "pass") self.tok = self.login("user", "pass") - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources @@ -2895,7 +2895,7 @@ class MediaUploadLimits(unittest.HomeserverTestCase): self.user = self.register_user("user", "pass") self.tok = self.login("user", "pass") - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources @@ -3012,7 +3012,7 @@ class MediaUploadLimitsModuleOverrides(unittest.HomeserverTestCase): async def _get_media_upload_limits_for_user( self, user_id: str, - ) -> Optional[List[MediaUploadLimit]]: + ) -> Optional[list[MediaUploadLimit]]: # user1 has custom limits if user_id == self.user1: # n.b. we return these in increasing duration order and Synapse will need to sort them correctly @@ -3060,7 +3060,7 @@ class MediaUploadLimitsModuleOverrides(unittest.HomeserverTestCase): on_media_upload_limit_exceeded=self._on_media_upload_limit_exceeded, ) - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: resources = super().create_resource_dict() resources["/_matrix/media"] = self.hs.get_media_repository_resource() return resources diff --git a/tests/rest/client/test_notifications.py b/tests/rest/client/test_notifications.py index e00152389b..7e2a63955c 100644 --- a/tests/rest/client/test_notifications.py +++ b/tests/rest/client/test_notifications.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Optional, Tuple +from typing import Optional from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -156,7 +156,7 @@ class HTTPPusherTests(HomeserverTestCase): def _request_notifications( self, from_token: Optional[str], limit: int, expected_count: int - ) -> Tuple[List[str], str]: + ) -> tuple[list[str], str]: """ Make a request to /notifications to get the latest events to be notified about. diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index 18b3d3a089..aa9b72c65e 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -24,7 +24,7 @@ import logging import urllib.parse from http import HTTPStatus -from typing import Any, Dict, Optional +from typing import Any, Optional from canonicaljson import encode_canonical_json @@ -778,7 +778,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 403, channel.result) self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) - def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]) -> None: + def _setup_local_files(self, names_and_props: dict[str, dict[str, Any]]) -> None: """Stores metadata about files in the database. 
Args: diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index e3ca108d03..88be8748ee 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Optional +from typing import Optional from parameterized import parameterized @@ -85,7 +85,7 @@ class RedactionsTestCase(HomeserverTestCase): room_id: str, event_id: str, expect_code: int = 200, - with_relations: Optional[List[str]] = None, + with_relations: Optional[list[str]] = None, content: Optional[JsonDict] = None, ) -> JsonDict: """Helper function to send a redaction event. @@ -104,7 +104,7 @@ class RedactionsTestCase(HomeserverTestCase): self.assertEqual(channel.code, expect_code) return channel.json_body - def _sync_room_timeline(self, access_token: str, room_id: str) -> List[JsonDict]: + def _sync_room_timeline(self, access_token: str, room_id: str) -> list[JsonDict]: channel = self.make_request("GET", "sync", access_token=access_token) self.assertEqual(channel.code, 200) room_sync = channel.json_body["rooms"]["join"][room_id] diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index c7c81aa81c..2c0396a3de 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -22,7 +22,7 @@ import datetime import importlib.resources as importlib_resources import os -from typing import Any, Dict, List, Tuple +from typing import Any from unittest.mock import AsyncMock from twisted.internet.testing import MemoryReactor @@ -54,7 +54,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ] url = b"/_matrix/client/r0/register" - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["allow_guest_access"] = True return config @@ -1032,7 +1032,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): async def sendmail(*args: Any, **kwargs: Any) -> None: self.email_attempts.append((args, kwargs)) - self.email_attempts: List[Tuple[Any, Any]] = [] + self.email_attempts: list[tuple[Any, Any]] = [] self.hs.get_send_email_handler()._sendmail = sendmail self.store = self.hs.get_datastores().main @@ -1146,7 +1146,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): self.assertEqual(len(self.email_attempts), 0) - def create_user(self) -> Tuple[str, str]: + def create_user(self) -> tuple[str, str]: user_id = self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") # We need to manually add an email address otherwise the handler will do @@ -1250,7 +1250,7 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase): servlets = [register.register_servlets] url = "/_matrix/client/v1/register/m.login.registration_token/validity" - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() config["registration_requires_token"] = True return config diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 21fb86367a..3912a3c772 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -20,7 +20,7 @@ # import urllib.parse -from typing import Any, Callable, Dict, List, Optional, Tuple +from typing import Any, Callable, Optional from unittest.mock import AsyncMock, patch from twisted.internet.testing import MemoryReactor 
@@ -48,7 +48,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): ] hijack_auth = False - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: # We need to enable msc1849 support for aggregations config = super().default_config() @@ -69,7 +69,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): res = self.helper.send(self.room, body="Hi!", tok=self.user_token) self.parent_id = res["event_id"] - def _create_user(self, localpart: str) -> Tuple[str, str]: + def _create_user(self, localpart: str) -> tuple[str, str]: user_id = self.register_user(localpart, "abc123") access_token = self.login(localpart, "abc123") @@ -123,7 +123,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): self.assertEqual(expected_response_code, channel.code, channel.json_body) return channel - def _get_related_events(self) -> List[str]: + def _get_related_events(self) -> list[str]: """ Requests /relations on the parent ID and returns a list of event IDs. """ @@ -149,7 +149,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, channel.json_body) return channel.json_body["unsigned"].get("m.relations", {}) - def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: + def _find_event_in_chunk(self, events: list[JsonDict]) -> JsonDict: """ Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. """ @@ -846,7 +846,7 @@ class RelationPaginationTestCase(BaseRelationsTestCase): expected_event_ids.append(channel.json_body["event_id"]) prev_token: Optional[str] = "" - found_event_ids: List[str] = [] + found_event_ids: list[str] = [] for _ in range(20): from_token = "" if prev_token: @@ -1484,9 +1484,9 @@ class RelationIgnoredUserTestCase(BaseRelationsTestCase): def _test_ignored_user( self, relation_type: str, - allowed_event_ids: List[str], - ignored_event_ids: List[str], - ) -> Tuple[JsonDict, JsonDict]: + allowed_event_ids: list[str], + ignored_event_ids: list[str], + ) -> tuple[JsonDict, JsonDict]: """ Fetch the relations and ensure they're all there, then ignore user2, and repeat. @@ -1600,7 +1600,7 @@ class RelationRedactionTestCase(BaseRelationsTestCase): ) self.assertEqual(200, channel.code, channel.json_body) - def _get_threads(self) -> List[Tuple[str, str]]: + def _get_threads(self) -> list[tuple[str, str]]: """Request the threads in the room and returns a list of thread ID and latest event ID.""" # Request the threads in the room. 
channel = self.make_request( @@ -1793,7 +1793,7 @@ class RelationRedactionTestCase(BaseRelationsTestCase): class ThreadsTestCase(BaseRelationsTestCase): - def _get_threads(self, body: JsonDict) -> List[Tuple[str, str]]: + def _get_threads(self, body: JsonDict) -> list[tuple[str, str]]: return [ ( ev["event_id"], diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py index 160f852705..dc4f833fa2 100644 --- a/tests/rest/client/test_rendezvous.py +++ b/tests/rest/client/test_rendezvous.py @@ -19,7 +19,6 @@ # # -from typing import Dict from urllib.parse import urlparse from twisted.internet.testing import MemoryReactor @@ -46,7 +45,7 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase): self.hs = self.setup_test_homeserver() return self.hs - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: return { **super().create_resource_dict(), "/_synapse/client/rendezvous": MSC4108RendezvousSessionResource(self.hs), diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index 7a816a66e0..758d62e63b 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -17,7 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Dict +from typing import Any from unittest.mock import Mock from twisted.internet.testing import MemoryReactor @@ -265,7 +265,7 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() retention_config = { diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index feae5f77cd..4142aed363 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -25,7 +25,7 @@ import json from http import HTTPStatus -from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple, Union +from typing import Any, Iterable, Literal, Optional, Union from unittest.mock import AsyncMock, Mock, call, patch from urllib import parse as urlparse @@ -989,7 +989,7 @@ class RoomsCreateTestCase(RoomBase): mxid: str, room_id: str, is_invite: bool, - ) -> Tuple[Codes, dict]: + ) -> tuple[Codes, dict]: return Codes.INCOMPATIBLE_ROOM_VERSION, {} join_mock.side_effect = user_may_join_room_tuple @@ -1002,7 +1002,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) self.assertEqual(join_mock.call_count, 0) - def _create_basic_room(self) -> Tuple[int, object]: + def _create_basic_room(self) -> tuple[int, object]: """ Tries to create a basic room and returns the response code. """ @@ -1351,7 +1351,7 @@ class RoomJoinTestCase(RoomBase): """ # Register a dummy callback. Make it allow all room joins for now. 
- return_value: Union[Literal["NOT_SPAM"], Tuple[Codes, dict], Codes] = ( + return_value: Union[Literal["NOT_SPAM"], tuple[Codes, dict], Codes] = ( synapse.module_api.NOT_SPAM ) @@ -1359,7 +1359,7 @@ class RoomJoinTestCase(RoomBase): userid: str, room_id: str, is_invited: bool, - ) -> Union[Literal["NOT_SPAM"], Tuple[Codes, dict], Codes]: + ) -> Union[Literal["NOT_SPAM"], tuple[Codes, dict], Codes]: return return_value # `spec` argument is needed for this function mock to have `__qualname__`, which @@ -1848,12 +1848,12 @@ class RoomMessagesTestCase(RoomBase): def test_spam_checker_check_event_for_spam( self, name: str, - value: Union[str, bool, Codes, Tuple[Codes, JsonDict]], + value: Union[str, bool, Codes, tuple[Codes, JsonDict]], expected_code: int, expected_fields: dict, ) -> None: class SpamCheck: - mock_return_value: Union[str, bool, Codes, Tuple[Codes, JsonDict], bool] = ( + mock_return_value: Union[str, bool, Codes, tuple[Codes, JsonDict], bool] = ( "NOT_SPAM" ) mock_content: Optional[JsonDict] = None @@ -1861,7 +1861,7 @@ class RoomMessagesTestCase(RoomBase): async def check_event_for_spam( self, event: synapse.events.EventBase, - ) -> Union[str, Codes, Tuple[Codes, JsonDict], bool]: + ) -> Union[str, Codes, tuple[Codes, JsonDict], bool]: self.mock_content = event.content return self.mock_return_value @@ -1915,7 +1915,7 @@ class RoomPowerLevelOverridesTestCase(RoomBase): self.admin_user_id = self.register_user("admin", "pass") self.admin_access_token = self.login("admin", "pass") - def power_levels(self, room_id: str) -> Dict[str, Any]: + def power_levels(self, room_id: str) -> dict[str, Any]: return self.helper.get_state( room_id, "m.room.power_levels", self.admin_access_token ) @@ -2076,7 +2076,7 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase): # Given the server has config allowing normal users to post my event type # And I am a normal member of a room # But the room was created with special permissions - extra_content: Dict[str, Any] = { + extra_content: dict[str, Any] = { "power_level_content_override": {"events": {}}, } room_id = self.helper.create_room_as( @@ -2707,9 +2707,9 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): def make_public_rooms_request( self, - room_types: Optional[List[Union[str, None]]], + room_types: Optional[list[Union[str, None]]], instance_id: Optional[str] = None, - ) -> Tuple[List[Dict[str, Any]], int]: + ) -> tuple[list[dict[str, Any]], int]: body: JsonDict = {"filter": {PublicRoomsFilterFields.ROOM_TYPES: room_types}} if instance_id: body["third_party_instance_id"] = "test|test" @@ -3470,7 +3470,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): class RelationsTestCase(PaginationTestCase): - def _filter_messages(self, filter: JsonDict) -> List[str]: + def _filter_messages(self, filter: JsonDict) -> list[str]: """Make a request to /messages with a filter, returns the chunk of events.""" from_token = self.get_success( self.from_token.to_string(self.hs.get_datastores().main) @@ -4529,8 +4529,8 @@ class MSC4293RedactOnBanKickTestCase(unittest.FederatingHomeserverTestCase): def _check_redactions( self, - original_events: List[EventBase], - pulled_events: List[JsonDict], + original_events: list[EventBase], + pulled_events: list[JsonDict], expect_redaction: bool, reason: Optional[str] = None, ) -> None: diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index e949bb69e6..fcbf3fd53c 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -20,7 +20,6 @@ # import 
json import logging -from typing import List from parameterized import parameterized @@ -131,7 +130,7 @@ class SyncFilterTestCase(unittest.HomeserverTestCase): self.assertEqual(len(events), 1, [event["content"] for event in events]) self.assertEqual(events[0]["content"]["body"], "with wrong label", events[0]) - def _test_sync_filter_labels(self, sync_filter: str) -> List[JsonDict]: + def _test_sync_filter_labels(self, sync_filter: str) -> list[JsonDict]: user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 4161faa11f..78fa8f4e1c 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -19,7 +19,7 @@ # # import threading -from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Optional, Union from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -48,7 +48,7 @@ thread_local = threading.local() class LegacyThirdPartyRulesTestModule: - def __init__(self, config: Dict, module_api: "ModuleApi") -> None: + def __init__(self, config: dict, module_api: "ModuleApi") -> None: # keep a record of the "current" rules module, so that the test can patch # it if desired. thread_local.rules_module = self @@ -65,12 +65,12 @@ class LegacyThirdPartyRulesTestModule: return True @staticmethod - def parse_config(config: Dict[str, Any]) -> Dict[str, Any]: + def parse_config(config: dict[str, Any]) -> dict[str, Any]: return config class LegacyDenyNewRooms(LegacyThirdPartyRulesTestModule): - def __init__(self, config: Dict, module_api: "ModuleApi") -> None: + def __init__(self, config: dict, module_api: "ModuleApi") -> None: super().__init__(config, module_api) async def on_create_room( @@ -80,7 +80,7 @@ class LegacyDenyNewRooms(LegacyThirdPartyRulesTestModule): class LegacyChangeEvents(LegacyThirdPartyRulesTestModule): - def __init__(self, config: Dict, module_api: "ModuleApi") -> None: + def __init__(self, config: dict, module_api: "ModuleApi") -> None: super().__init__(config, module_api) async def check_event_allowed( @@ -150,7 +150,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # types async def check( ev: EventBase, state: StateMap[EventBase] - ) -> Tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, Optional[JsonDict]]: return ev.type != "foo.bar.forbidden", None callback = Mock(spec=[], side_effect=check) @@ -207,7 +207,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # add a callback that will raise our hacky exception async def check( ev: EventBase, state: StateMap[EventBase] - ) -> Tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, Optional[JsonDict]]: raise NastyHackException(429, "message") self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ @@ -235,7 +235,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # first patch the event checker so that it will try to modify the event async def check( ev: EventBase, state: StateMap[EventBase] - ) -> Tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, Optional[JsonDict]]: ev.content = {"x": "y"} return True, None @@ -260,7 +260,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # first patch the event checker so that it will modify the event async def check( ev: EventBase, state: StateMap[EventBase] - ) -> Tuple[bool, Optional[JsonDict]]: + 
) -> tuple[bool, Optional[JsonDict]]: d = ev.get_dict() d["content"] = {"x": "y"} return True, d @@ -295,7 +295,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # first patch the event checker so that it will modify the event async def check( ev: EventBase, state: StateMap[EventBase] - ) -> Tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, Optional[JsonDict]]: d = ev.get_dict() d["content"] = { "msgtype": "m.text", @@ -443,7 +443,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Define a callback that sends a custom event on power levels update. async def test_fn( event: EventBase, state_events: StateMap[EventBase] - ) -> Tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, Optional[JsonDict]]: if event.is_state() and event.type == EventTypes.PowerLevels: await api.create_and_send_event_into_room( { diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index bb83988d76..64d22d485a 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -20,7 +20,7 @@ # from http import HTTPStatus -from typing import Any, Generator, Tuple, cast +from typing import Any, Generator, cast from unittest.mock import AsyncMock, Mock, call from twisted.internet import defer, reactor as _reactor @@ -92,7 +92,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): self, ) -> Generator["defer.Deferred[Any]", object, None]: @defer.inlineCallbacks - def cb() -> Generator["defer.Deferred[object]", object, Tuple[int, JsonDict]]: + def cb() -> Generator["defer.Deferred[object]", object, tuple[int, JsonDict]]: # Ignore `multiple-internal-clocks` linter error here since we are creating a `Clock` # for testing purposes. yield defer.ensureDeferred( @@ -124,7 +124,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): """ called = [False] - def cb() -> "defer.Deferred[Tuple[int, JsonDict]]": + def cb() -> "defer.Deferred[tuple[int, JsonDict]]": if called[0]: # return a valid result the second time return defer.succeed(self.mock_http_response) @@ -156,7 +156,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): """ called = [False] - def cb() -> "defer.Deferred[Tuple[int, JsonDict]]": + def cb() -> "defer.Deferred[tuple[int, JsonDict]]": if called[0]: # return a valid result the second time return defer.succeed(self.mock_http_response) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index bb214759d9..d5c824b291 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -30,14 +30,12 @@ from typing import ( Any, AnyStr, Callable, - Dict, Iterable, Literal, Mapping, MutableMapping, Optional, Sequence, - Tuple, overload, ) from urllib.parse import urlencode @@ -87,8 +85,8 @@ class RestHelper: room_version: Optional[str] = ..., tok: Optional[str] = ..., expect_code: Literal[200] = ..., - extra_content: Optional[Dict] = ..., - custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = ..., + extra_content: Optional[dict] = ..., + custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = ..., ) -> str: ... @overload @@ -99,8 +97,8 @@ class RestHelper: room_version: Optional[str] = ..., tok: Optional[str] = ..., expect_code: int = ..., - extra_content: Optional[Dict] = ..., - custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = ..., + extra_content: Optional[dict] = ..., + custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = ..., ) -> Optional[str]: ... 
def create_room_as( @@ -110,8 +108,8 @@ class RestHelper: room_version: Optional[str] = None, tok: Optional[str] = None, expect_code: int = HTTPStatus.OK, - extra_content: Optional[Dict] = None, - custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None, + extra_content: Optional[dict] = None, + custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = None, ) -> Optional[str]: """ Create a room. @@ -310,7 +308,7 @@ class RestHelper: self.auth_user_id = src path = f"/_matrix/client/r0/rooms/{room}/state/m.room.member/{targ}" - url_params: Dict[str, str] = {} + url_params: dict[str, str] = {} if tok: url_params["access_token"] = tok @@ -378,7 +376,7 @@ class RestHelper: txn_id: Optional[str] = None, tok: Optional[str] = None, expect_code: int = HTTPStatus.OK, - custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None, + custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = None, type: str = "m.room.message", ) -> JsonDict: if body is None: @@ -430,7 +428,7 @@ class RestHelper: txn_id: Optional[str] = None, tok: Optional[str] = None, expect_code: int = HTTPStatus.OK, - custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None, + custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = None, ) -> JsonDict: if txn_id is None: txn_id = "m%s" % (str(time.time())) @@ -497,7 +495,7 @@ class RestHelper: self, room_id: str, event_type: str, - body: Optional[Dict[str, Any]], + body: Optional[dict[str, Any]], tok: Optional[str], expect_code: int = HTTPStatus.OK, state_key: str = "", @@ -575,7 +573,7 @@ class RestHelper: self, room_id: str, event_type: str, - body: Dict[str, Any], + body: dict[str, Any], tok: Optional[str] = None, expect_code: int = HTTPStatus.OK, state_key: str = "", @@ -684,7 +682,7 @@ class RestHelper: with_sid: bool = False, idp_id: Optional[str] = None, expected_status: int = 200, - ) -> Tuple[JsonDict, FakeAuthorizationGrant]: + ) -> tuple[JsonDict, FakeAuthorizationGrant]: """Log in (as a new user) via OIDC Returns the result of the final token login and the fake authorization grant. @@ -757,7 +755,7 @@ class RestHelper: ui_auth_session_id: Optional[str] = None, with_sid: bool = False, idp_id: Optional[str] = None, - ) -> Tuple[FakeChannel, FakeAuthorizationGrant]: + ) -> tuple[FakeChannel, FakeAuthorizationGrant]: """Perform an OIDC authentication flow via a mock OIDC provider. This can be used for either login or user-interactive auth. @@ -790,7 +788,7 @@ class RestHelper: went. 
""" - cookies: Dict[str, str] = {} + cookies: dict[str, str] = {} with fake_server.patch_homeserver(hs=self.hs): # if we're doing a ui auth, hit the ui auth redirect endpoint @@ -824,7 +822,7 @@ class RestHelper: cookies: Mapping[str, str], user_info_dict: JsonDict, with_sid: bool = False, - ) -> Tuple[FakeChannel, FakeAuthorizationGrant]: + ) -> tuple[FakeChannel, FakeAuthorizationGrant]: """Mock out an OIDC authentication flow Assumes that an OIDC auth has been initiated by one of initiate_sso_login or diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index 8d2489f718..c412a19f9b 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -19,7 +19,7 @@ # # from io import BytesIO, StringIO -from typing import Any, Dict, Optional, Union +from typing import Any, Optional, Union from unittest.mock import Mock import signedjson.key @@ -156,7 +156,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): endpoint, to check that the two implementations are compatible. """ - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = super().default_config() # replace the signing key with our own diff --git a/tests/rest/media/test_domain_blocking.py b/tests/rest/media/test_domain_blocking.py index 9eb0222102..0bdbaa676d 100644 --- a/tests/rest/media/test_domain_blocking.py +++ b/tests/rest/media/test_domain_blocking.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Dict from twisted.internet.testing import MemoryReactor from twisted.web.resource import Resource @@ -65,7 +64,7 @@ class MediaDomainBlockingTests(unittest.HomeserverTestCase): ) ) - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: # We need to manually set the resource tree to include media, the # default only does `/_matrix/client` APIs. 
return {"/_matrix/media": self.hs.get_media_repository_resource()} diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index 7c8d2fc998..5af2e79f45 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -22,7 +22,7 @@ import base64 import json import os import re -from typing import Any, Dict, Optional, Sequence, Tuple, Type +from typing import Any, Optional, Sequence from urllib.parse import quote, urlencode from twisted.internet._resolver import HostResolution @@ -127,7 +127,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): assert self.media_repo.url_previewer is not None self.url_previewer = self.media_repo.url_previewer - self.lookups: Dict[str, Any] = {} + self.lookups: dict[str, Any] = {} class Resolver: def resolveHostName( @@ -135,7 +135,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): resolutionReceiver: IResolutionReceiver, hostName: str, portNumber: int = 0, - addressTypes: Optional[Sequence[Type[IAddress]]] = None, + addressTypes: Optional[Sequence[type[IAddress]]] = None, transportSemantics: str = "TCP", ) -> IResolutionReceiver: resolution = HostResolution(hostName) @@ -150,7 +150,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.reactor.nameResolver = Resolver() # type: ignore[assignment] - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: """Create a resource tree for the test server A resource tree is a mapping from path to twisted.web.resource. @@ -1227,7 +1227,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(body["og:title"], "Test") self.assertNotIn("og:image", body) - def _download_image(self) -> Tuple[str, str]: + def _download_image(self) -> tuple[str, str]: """Downloads an image into the URL cache. Returns: A (host, media_id) tuple representing the MXC URI of the image. diff --git a/tests/rest/synapse/client/test_federation_whitelist.py b/tests/rest/synapse/client/test_federation_whitelist.py index f0067a8f2b..c4a990e32c 100644 --- a/tests/rest/synapse/client/test_federation_whitelist.py +++ b/tests/rest/synapse/client/test_federation_whitelist.py @@ -11,7 +11,6 @@ # See the GNU Affero General Public License for more details: # . 
-from typing import Dict from twisted.web.resource import Resource @@ -28,7 +27,7 @@ class FederationWhitelistTests(unittest.HomeserverTestCase): login.register_servlets, ] - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: base = super().create_resource_dict() base.update(build_synapse_client_resource_tree(self.hs)) return base diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py index cae096e72b..2e71e2a797 100644 --- a/tests/scripts/test_new_matrix_user.py +++ b/tests/scripts/test_new_matrix_user.py @@ -18,7 +18,7 @@ # # -from typing import List, Optional +from typing import Optional from unittest.mock import Mock, patch from synapse._scripts.register_new_matrix_user import request_registration @@ -60,8 +60,8 @@ class RegisterTestCase(TestCase): requests.post = post # The fake stdout will be written here - out: List[str] = [] - err_code: List[int] = [] + out: list[str] = [] + err_code: list[int] = [] with patch("synapse._scripts.register_new_matrix_user.requests", requests): request_registration( @@ -96,8 +96,8 @@ class RegisterTestCase(TestCase): requests.get = get # The fake stdout will be written here - out: List[str] = [] - err_code: List[int] = [] + out: list[str] = [] + err_code: list[int] = [] with patch("synapse._scripts.register_new_matrix_user.requests", requests): request_registration( @@ -151,8 +151,8 @@ class RegisterTestCase(TestCase): requests.post = post # The fake stdout will be written here - out: List[str] = [] - err_code: List[int] = [] + out: list[str] = [] + err_code: list[int] = [] with patch("synapse._scripts.register_new_matrix_user.requests", requests): request_registration( diff --git a/tests/server.py b/tests/server.py index 208556abaf..ff5c606180 100644 --- a/tests/server.py +++ b/tests/server.py @@ -35,15 +35,10 @@ from typing import ( Any, Awaitable, Callable, - Deque, - Dict, Iterable, - List, MutableMapping, Optional, Sequence, - Tuple, - Type, TypeVar, Union, cast, @@ -124,7 +119,7 @@ R = TypeVar("R") P = ParamSpec("P") # the type of thing that can be passed into `make_request` in the headers list -CustomHeaderType = Tuple[Union[str, bytes], Union[str, bytes]] +CustomHeaderType = tuple[Union[str, bytes], Union[str, bytes]] # A pre-prepared SQLite DB that is used as a template when creating new SQLite # DB each test run. This dramatically speeds up test set up when using SQLite. 
@@ -172,7 +167,7 @@ class FakeChannel: return body @property - def json_list(self) -> List[JsonDict]: + def json_list(self) -> list[JsonDict]: body = json.loads(self.text_body) assert isinstance(body, list) return body @@ -211,7 +206,7 @@ class FakeChannel: version: bytes, code: bytes, reason: bytes, - headers: Union[Headers, List[Tuple[bytes, bytes]]], + headers: Union[Headers, list[tuple[bytes, bytes]]], ) -> None: self.result["version"] = version self.result["code"] = code @@ -367,7 +362,7 @@ def make_request( path: Union[bytes, str], content: Union[bytes, str, JsonDict] = b"", access_token: Optional[str] = None, - request: Type[Request] = SynapseRequest, + request: type[Request] = SynapseRequest, shorthand: bool = True, federation_auth_origin: Optional[bytes] = None, content_type: Optional[bytes] = None, @@ -492,10 +487,10 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): def __init__(self) -> None: self.threadpool = ThreadPool(self) - self._tcp_callbacks: Dict[Tuple[str, int], Callable] = {} - self._udp: List[udp.Port] = [] - self.lookups: Dict[str, str] = {} - self._thread_callbacks: Deque[Callable[..., R]] = deque() + self._tcp_callbacks: dict[tuple[str, int], Callable] = {} + self._udp: list[udp.Port] = [] + self.lookups: dict[str, str] = {} + self._thread_callbacks: deque[Callable[..., R]] = deque() lookups = self.lookups @@ -622,7 +617,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): port: int, factory: ClientFactory, timeout: float = 30, - bindAddress: Optional[Tuple[str, int]] = None, + bindAddress: Optional[tuple[str, int]] = None, ) -> IConnector: """Fake L{IReactorTCP.connectTCP}.""" @@ -814,7 +809,7 @@ class ThreadPool: return d -def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]: +def get_clock() -> tuple[ThreadedMemoryReactorClock, Clock]: # Ignore the linter error since this is an expected usage of creating a `Clock` for # testing purposes. reactor = ThreadedMemoryReactorClock() @@ -1041,7 +1036,7 @@ class FakeTransport: def connect_client( reactor: ThreadedMemoryReactorClock, client_id: int -) -> Tuple[IProtocol, AccumulatingProtocol]: +) -> tuple[IProtocol, AccumulatingProtocol]: """ Connect a client to a fake TCP transport. @@ -1068,7 +1063,7 @@ def setup_test_homeserver( server_name: str = "test", config: Optional[HomeServerConfig] = None, reactor: Optional[ISynapseReactor] = None, - homeserver_to_use: Type[HomeServer] = TestHomeServer, + homeserver_to_use: type[HomeServer] = TestHomeServer, db_txn_limit: Optional[int] = None, **extra_homeserver_attributes: Any, ) -> HomeServer: diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index dd38528a7d..a0c5582496 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -17,7 +17,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Tuple from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -363,7 +362,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): self.assertTrue(notice_in_room, "No server notice in room") - def _trigger_notice_and_join(self) -> Tuple[str, str, str]: + def _trigger_notice_and_join(self) -> tuple[str, str, str]: """Creates enough active users to hit the MAU limit and trigger a system notice about it, then joins the system notices room with one of the users created. 
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index b4f2b98cc4..2cf411e30b 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -21,13 +21,9 @@ import itertools from typing import ( Collection, - Dict, Iterable, - List, Mapping, Optional, - Set, - Tuple, TypeVar, ) @@ -94,7 +90,7 @@ class FakeEvent: self.content = content self.room_id = ROOM_ID - def to_event(self, auth_events: List[str], prev_events: List[str]) -> EventBase: + def to_event(self, auth_events: list[str], prev_events: list[str]) -> EventBase: """Given the auth_events and prev_events, convert to a Frozen Event Args: @@ -461,9 +457,9 @@ class StateTestCase(unittest.TestCase): def do_check( self, - events: List[FakeEvent], - edges: List[List[str]], - expected_state_ids: List[str], + events: list[FakeEvent], + edges: list[list[str]], + expected_state_ids: list[str], ) -> None: """Take a list of events and edges and calculate the state of the graph at END, and asserts it matches `expected_state_ids` @@ -476,9 +472,9 @@ class StateTestCase(unittest.TestCase): the keys that haven't changed since START). """ # We want to sort the events into topological order for processing. - graph: Dict[str, Set[str]] = {} + graph: dict[str, set[str]] = {} - fake_event_map: Dict[str, FakeEvent] = {} + fake_event_map: dict[str, FakeEvent] = {} for ev in itertools.chain(INITIAL_EVENTS, events): graph[ev.node_id] = set() @@ -491,8 +487,8 @@ class StateTestCase(unittest.TestCase): for a, b in pairwise(edge_list): graph[a].add(b) - event_map: Dict[str, EventBase] = {} - state_at_event: Dict[str, StateMap[str]] = {} + event_map: dict[str, EventBase] = {} + state_at_event: dict[str, StateMap[str]] = {} # We copy the map as the sort consumes the graph graph_copy = {k: set(v) for k, v in graph.items()} @@ -568,7 +564,7 @@ class StateTestCase(unittest.TestCase): class LexicographicalTestCase(unittest.TestCase): def test_simple(self) -> None: - graph: Dict[str, Set[str]] = { + graph: dict[str, set[str]] = { "l": {"o"}, "m": {"n", "o"}, "n": {"o"}, @@ -1020,7 +1016,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): T = TypeVar("T") -def pairwise(iterable: Iterable[T]) -> Iterable[Tuple[T, T]]: +def pairwise(iterable: Iterable[T]) -> Iterable[tuple[T, T]]: "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = itertools.tee(iterable) next(b, None) @@ -1029,11 +1025,11 @@ def pairwise(iterable: Iterable[T]) -> Iterable[Tuple[T, T]]: @attr.s class TestStateResolutionStore: - event_map: Dict[str, EventBase] = attr.ib() + event_map: dict[str, EventBase] = attr.ib() def get_events( self, event_ids: Collection[str], allow_rejected: bool = False - ) -> "defer.Deferred[Dict[str, EventBase]]": + ) -> "defer.Deferred[dict[str, EventBase]]": """Get events from the database Args: @@ -1048,7 +1044,7 @@ class TestStateResolutionStore: {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map} ) - def _get_auth_chain(self, event_ids: Iterable[str]) -> List[str]: + def _get_auth_chain(self, event_ids: Iterable[str]) -> list[str]: """Gets the full auth chain for a set of events (including rejected events). 
@@ -1085,9 +1081,9 @@ class TestStateResolutionStore: def get_auth_chain_difference( self, room_id: str, - auth_sets: List[Set[str]], - conflicted_state: Optional[Set[str]], - additional_backwards_reachable_conflicted_events: Optional[Set[str]], + auth_sets: list[set[str]], + conflicted_state: Optional[set[str]], + additional_backwards_reachable_conflicted_events: Optional[set[str]], ) -> "defer.Deferred[StateDifference]": chains = [frozenset(self._get_auth_chain(a)) for a in auth_sets] diff --git a/tests/state/test_v21.py b/tests/state/test_v21.py index ff1715d4f7..7bef3decf0 100644 --- a/tests/state/test_v21.py +++ b/tests/state/test_v21.py @@ -18,7 +18,7 @@ # # import itertools -from typing import Dict, List, Optional, Sequence, Set +from typing import Optional, Sequence from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor @@ -357,11 +357,11 @@ class StateResV21TestCase(unittest.HomeserverTestCase): self, room_id: str, state_maps: Sequence[StateMap[str]], - event_map: Optional[Dict[str, EventBase]], + event_map: Optional[dict[str, EventBase]], state_res_store: StateResolutionStoreInterface, - ) -> Set[str]: + ) -> set[str]: _, conflicted_state = _seperate(state_maps) - conflicted_set: Optional[Set[str]] = set( + conflicted_set: Optional[set[str]] = set( itertools.chain.from_iterable(conflicted_state.values()) ) if event_map is None: @@ -377,7 +377,7 @@ class StateResV21TestCase(unittest.HomeserverTestCase): def get_resolution_and_verify_expected( self, state_maps: Sequence[StateMap[str]], - events: List[EventBase], + events: list[EventBase], expected: StateMap[str], ) -> None: room_id = events[0].room_id @@ -475,9 +475,9 @@ class StateResV21TestCase(unittest.HomeserverTestCase): event_type: str, state_key: Optional[str], sender: str, - content: Dict, - auth_events: List[str], - prev_events: Optional[List[str]] = None, + content: dict, + auth_events: list[str], + prev_events: Optional[list[str]] = None, room_id: Optional[str] = None, ) -> EventBase: """Short-hand for event_from_pdu_json for fields we typically care about. 
diff --git a/tests/storage/databases/main/test_end_to_end_keys.py b/tests/storage/databases/main/test_end_to_end_keys.py index d0dd8f866b..35e1e15d66 100644 --- a/tests/storage/databases/main/test_end_to_end_keys.py +++ b/tests/storage/databases/main/test_end_to_end_keys.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Optional, Tuple +from typing import Optional from twisted.internet.testing import MemoryReactor @@ -99,7 +99,7 @@ class EndToEndKeyWorkerStoreTestCase(HomeserverTestCase): def check_timestamp_column( txn: LoggingTransaction, - ) -> List[Tuple[JsonDict, Optional[int]]]: + ) -> list[tuple[JsonDict, Optional[int]]]: """Fetch all rows for Alice's keys.""" txn.execute( """ diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index a7c6bdd9b4..c786271c09 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -20,7 +20,7 @@ # import json from contextlib import contextmanager -from typing import Generator, List, Set, Tuple +from typing import Generator from unittest import mock from twisted.enterprise.adbapi import ConnectionPool @@ -60,7 +60,7 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase): self.token = self.login(self.user, "pass") self.room_id = self.helper.create_room_as(self.user, tok=self.token) - self.event_ids: List[str] = [] + self.event_ids: list[str] = [] for i in range(3): event = self.get_success( inject_event( @@ -316,7 +316,7 @@ class GetEventsTestCase(unittest.HomeserverTestCase): room_id = self.helper.create_room_as(user_id, tok=user_tok) - event_ids: Set[str] = set() + event_ids: set[str] = set() for i in range(num_events): event = self.get_success( inject_event( @@ -371,7 +371,7 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase): ) ) - self.event_ids: List[str] = [] + self.event_ids: list[str] = [] for idx in range(1, 21): # Stream ordering starts at 1. event_json = { "type": f"test {idx}", @@ -504,7 +504,7 @@ class GetEventCancellationTestCase(unittest.HomeserverTestCase): def blocking_get_event_calls( self, ) -> Generator[ - Tuple["Deferred[None]", "Deferred[None]", "Deferred[None]"], None, None + tuple["Deferred[None]", "Deferred[None]", "Deferred[None]"], None, None ]: """Starts two concurrent `get_event` calls for the same event. diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py index d084f5c2ba..2d63b52aca 100644 --- a/tests/storage/databases/main/test_receipts.py +++ b/tests/storage/databases/main/test_receipts.py @@ -19,7 +19,7 @@ # # -from typing import Any, Dict, Optional, Sequence, Tuple +from typing import Any, Optional, Sequence from twisted.internet.testing import MemoryReactor @@ -51,8 +51,8 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase): update_name: str, index_name: str, table: str, - receipts: Dict[Tuple[str, str, str], Sequence[Dict[str, Any]]], - expected_unique_receipts: Dict[Tuple[str, str, str], Optional[Dict[str, Any]]], + receipts: dict[tuple[str, str, str], Sequence[dict[str, Any]]], + expected_unique_receipts: dict[tuple[str, str, str], Optional[dict[str, Any]]], ) -> None: """Test that the background update to uniqueify non-thread receipts in the given receipts table works properly. 
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 5e773a5545..5602531880 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -20,7 +20,7 @@ # import secrets -from typing import Generator, List, Tuple, cast +from typing import Generator, cast from twisted.internet.testing import MemoryReactor @@ -52,9 +52,9 @@ class UpdateUpsertManyTests(unittest.HomeserverTestCase): ) ) - def _dump_table_to_tuple(self) -> Generator[Tuple[int, str, str], None, None]: + def _dump_table_to_tuple(self) -> Generator[tuple[int, str, str], None, None]: yield from cast( - List[Tuple[int, str, str]], + list[tuple[int, str, str]], self.get_success( self.storage.db_pool.simple_select_list( self.table_name, None, ["id, username, value"] diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py index 13c4be988e..d9307154da 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py @@ -19,7 +19,7 @@ # # -from typing import Iterable, Optional, Set +from typing import Iterable, Optional from twisted.internet.testing import MemoryReactor @@ -52,7 +52,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): ) def assert_ignorers( - self, ignored_user_id: str, expected_ignorer_user_ids: Set[str] + self, ignored_user_id: str, expected_ignorer_user_ids: set[str] ) -> None: self.assertEqual( self.get_success(self.store.ignored_by(ignored_user_id)), @@ -60,7 +60,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): ) def assert_ignored( - self, ignorer_user_id: str, expected_ignored_user_ids: Set[str] + self, ignorer_user_id: str, expected_ignored_user_ids: set[str] ) -> None: self.assertEqual( self.get_success(self.store.ignored_users(ignorer_user_id)), diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index b4df92c7a1..4b9d069d6a 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -21,7 +21,7 @@ import json import os import tempfile -from typing import List, cast +from typing import cast from unittest.mock import AsyncMock, Mock import yaml @@ -48,7 +48,7 @@ class ApplicationServiceStoreTestCase(unittest.HomeserverTestCase): def setUp(self) -> None: super().setUp() - self.as_yaml_files: List[str] = [] + self.as_yaml_files: list[str] = [] self.hs.config.appservice.app_service_config_files = self.as_yaml_files self.hs.config.caches.event_cache_size = 1 @@ -123,7 +123,7 @@ class ApplicationServiceStoreTestCase(unittest.HomeserverTestCase): class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase): def setUp(self) -> None: super().setUp() - self.as_yaml_files: List[str] = [] + self.as_yaml_files: list[str] = [] self.hs.config.appservice.app_service_config_files = self.as_yaml_files self.hs.config.caches.event_cache_size = 1 @@ -180,7 +180,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase): ) def _insert_txn( - self, as_id: str, txn_id: int, events: List[Mock] + self, as_id: str, txn_id: int, events: list[Mock] ) -> "defer.Deferred[None]": return self.db_pool.runOperation( self.engine.convert_param_style( @@ -277,7 +277,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase): self, ) -> None: service = Mock(id=self.as_list[0]["id"]) - events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")]) + events = cast(list[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")]) txn = self.get_success( defer.ensureDeferred( self.store.create_appservice_txn( diff 
--git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index cf63b50c2f..3505423691 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -19,7 +19,7 @@ # # import logging -from typing import List, Tuple, cast +from typing import cast from unittest.mock import AsyncMock, Mock import yaml @@ -535,7 +535,7 @@ class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase): # Check the correct values are in the new table. rows = cast( - List[Tuple[int, int]], + list[tuple[int, int]], self.get_success( self.store.db_pool.simple_select_list( table="test_constraint", @@ -652,7 +652,7 @@ class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase): # Check the correct values are in the new table. rows = cast( - List[Tuple[int, int]], + list[tuple[int, int]], self.get_success( self.store.db_pool.simple_select_list( table="test_constraint", diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 1cd97a9dd7..2c1ba9d6c2 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -19,7 +19,7 @@ # # -from typing import Any, Dict, List, Optional, Tuple, cast +from typing import Any, Optional, cast from unittest.mock import AsyncMock from parameterized import parameterized @@ -104,7 +104,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.pump(0) result = cast( - List[Tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, Optional[str], int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -135,7 +135,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.pump(0) result = cast( - List[Tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, Optional[str], int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -184,7 +184,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): else: # Check that the new IP and user agent has not been stored yet db_result = cast( - List[Tuple[str, Optional[str], Optional[str], str, Optional[int]]], + list[tuple[str, Optional[str], Optional[str], str, Optional[int]]], self.get_success( self.store.db_pool.simple_select_list( table="devices", @@ -266,7 +266,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # Check that the new IP and user agent has not been stored yet db_result = cast( - List[Tuple[str, Optional[str], Optional[str], str, Optional[int]]], + list[tuple[str, Optional[str], Optional[str], str, Optional[int]]], self.get_success( self.store.db_pool.simple_select_list( table="devices", @@ -381,7 +381,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # Check that the new IP and user agent has not been stored yet db_result = cast( - List[Tuple[str, str, str, int]], + list[tuple[str, str, str, int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -589,7 +589,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # We should see that in the DB result = cast( - List[Tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, Optional[str], int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -616,7 +616,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # We should get no results. 
result = cast( - List[Tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, Optional[str], int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -695,7 +695,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # We should see that in the DB result = cast( - List[Tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, Optional[str], int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -745,9 +745,9 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): def _runtest( self, - headers: Dict[bytes, bytes], + headers: dict[bytes, bytes], expected_ip: str, - make_request_args: Dict[str, Any], + make_request_args: dict[str, Any], ) -> None: device_id = "bleb" diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index fd6963bb82..ffcff3363f 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -19,7 +19,7 @@ # # -from typing import Callable, Tuple +from typing import Callable from unittest.mock import Mock, call from twisted.internet import defer @@ -149,7 +149,7 @@ class CallbacksTestCase(unittest.HomeserverTestCase): def _run_interaction( self, func: Callable[[LoggingTransaction], object] - ) -> Tuple[Mock, Mock]: + ) -> tuple[Mock, Mock]: """Run the given function in a database transaction, with callbacks registered. Args: diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index bd6fcd8eeb..1d1979e19f 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -19,7 +19,7 @@ # # -from typing import Collection, List, Tuple +from typing import Collection from twisted.internet.testing import MemoryReactor @@ -44,7 +44,7 @@ class DeviceStoreTestCase(HomeserverTestCase): config["federation_sender_instances"] = ["master"] return config - def add_device_change(self, user_id: str, device_ids: List[str], host: str) -> None: + def add_device_change(self, user_id: str, device_ids: list[str], host: str) -> None: """Add a device list change for the given device to `device_lists_outbound_pokes` table. """ @@ -306,7 +306,7 @@ class DeviceStoreTestCase(HomeserverTestCase): def _check_devices_in_updates( self, expected_device_ids: Collection[str], - device_updates: List[Tuple[str, JsonDict]], + device_updates: list[tuple[str, JsonDict]], ) -> None: """Check that an specific device ids exist in a list of device update EDUs""" self.assertEqual(len(device_updates), len(expected_device_ids)) diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index fe9bb7bcca..175a5ffc78 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -19,7 +19,7 @@ # # -from typing import Dict, List, Set, Tuple, cast +from typing import cast from parameterized import parameterized @@ -420,7 +420,7 @@ class EventChainStoreTestCase(HomeserverTestCase): def persist( self, - events: List[EventBase], + events: list[EventBase], ) -> None: """Persist the given events and check that the links generated match those given. 
@@ -464,11 +464,11 @@ class EventChainStoreTestCase(HomeserverTestCase): ) def fetch_chains( - self, events: List[EventBase] - ) -> Tuple[Dict[str, Tuple[int, int]], _LinkMap]: + self, events: list[EventBase] + ) -> tuple[dict[str, tuple[int, int]], _LinkMap]: # Fetch the map from event ID -> (chain ID, sequence number) rows = cast( - List[Tuple[str, int, int]], + list[tuple[str, int, int]], self.get_success( self.store.db_pool.simple_select_many_batch( table="event_auth_chains", @@ -487,7 +487,7 @@ class EventChainStoreTestCase(HomeserverTestCase): # Fetch all the links and pass them to the _LinkMap. auth_chain_rows = cast( - List[Tuple[int, int, int, int]], + list[tuple[int, int, int, int]], self.get_success( self.store.db_pool.simple_select_many_batch( table="event_auth_chain_links", @@ -575,7 +575,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase): self.token = self.login("foo", "pass") self.requester = create_requester(self.user_id) - def _generate_room(self) -> Tuple[str, List[Set[str]]]: + def _generate_room(self) -> tuple[str, list[set[str]]]: """Insert a room without a chain cover index.""" room_id = self.helper.create_room_as(self.user_id, tok=self.token) diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index ee9cf3687f..d8c6a1cd04 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -21,14 +21,9 @@ import datetime from typing import ( Collection, - Dict, - FrozenSet, Iterable, - List, Mapping, NamedTuple, - Set, - Tuple, TypeVar, Union, cast, @@ -74,7 +69,7 @@ import tests.utils # | | # K J -AUTH_GRAPH: Dict[str, List[str]] = { +AUTH_GRAPH: dict[str, list[str]] = { "a": ["e"], "b": ["e"], "c": ["g", "i"], @@ -108,7 +103,7 @@ T = TypeVar("T") def get_all_topologically_sorted_orders( nodes: Iterable[T], graph: Mapping[T, Collection[T]], -) -> List[List[T]]: +) -> list[list[T]]: """Given a set of nodes and a graph, return all possible topological orderings. """ @@ -117,7 +112,7 @@ def get_all_topologically_sorted_orders( # we have a choice over which node to consider next. degree_map = dict.fromkeys(nodes, 0) - reverse_graph: Dict[T, Set[T]] = {} + reverse_graph: dict[T, set[T]] = {} for node, edges in graph.items(): if node not in degree_map: @@ -138,10 +133,10 @@ def get_all_topologically_sorted_orders( def _get_all_topologically_sorted_orders_inner( - reverse_graph: Dict[T, Set[T]], - zero_degree: List[T], - degree_map: Dict[T, int], -) -> List[List[T]]: + reverse_graph: dict[T, set[T]], + zero_degree: list[T], + degree_map: dict[T, int], +) -> list[list[T]]: new_paths = [] # Rather than only choosing *one* item from the list of nodes with zero @@ -175,7 +170,7 @@ def _get_all_topologically_sorted_orders_inner( def get_all_topologically_consistent_subsets( nodes: Iterable[T], graph: Mapping[T, Collection[T]], -) -> Set[FrozenSet[T]]: +) -> set[frozenset[T]]: """Get all subsets of the graph where if node N is in the subgraph, then all nodes that can reach that node (i.e. for all X there exists a path X -> N) are in the subgraph. 
@@ -195,7 +190,7 @@ def get_all_topologically_consistent_subsets( @attr.s(auto_attribs=True, frozen=True, slots=True) class _BackfillSetupInfo: room_id: str - depth_map: Dict[str, int] + depth_map: dict[str, int] class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): @@ -573,7 +568,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): # | | # K J - auth_graph: Dict[str, List[str]] = { + auth_graph: dict[str, list[str]] = { "a": ["e"], "b": ["e"], "c": ["g", "i"], @@ -756,11 +751,11 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): seq_num: int class TestLink(NamedTuple): - origin_chain_and_seq: Tuple[int, int] - target_chain_and_seq: Tuple[int, int] + origin_chain_and_seq: tuple[int, int] + target_chain_and_seq: tuple[int, int] # Map to chain IDs / seq nums - nodes: List[TestNode] = [ + nodes: list[TestNode] = [ TestNode("A1", 1, 1), TestNode("A2", 1, 2), TestNode("A3", 1, 3), @@ -779,7 +774,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): TestNode("G1", 7, 1), TestNode("G2", 7, 2), ] - links: List[TestLink] = [ + links: list[TestLink] = [ TestLink((2, 1), (1, 2)), # B1 -> A2 TestLink((3, 1), (2, 2)), # C1 -> B2 TestLink((4, 1), (3, 1)), # D1 -> C1 @@ -818,9 +813,9 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): # Define the test cases class TestCase(NamedTuple): name: str - conflicted: Set[str] - additional_backwards_reachable: Set[str] - want_conflicted_subgraph: Set[str] + conflicted: set[str] + additional_backwards_reachable: set[str] + want_conflicted_subgraph: set[str] # Reminder: # A1 <- A2 <- A3 @@ -936,7 +931,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): room_id = "some_room_id" - def prev_event_format(prev_event_id: str) -> Union[Tuple[str, dict], str]: + def prev_event_format(prev_event_id: str) -> Union[tuple[str, dict], str]: """Account for differences in prev_events format across room versions""" if room_version.event_format == EventFormatVersions.ROOM_V1_V2: return prev_event_id, {} @@ -1034,7 +1029,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): # | # 5 (newest) - event_graph: Dict[str, List[str]] = { + event_graph: dict[str, list[str]] = { "1": [], "2": ["1"], "3": ["2", "A"], @@ -1050,7 +1045,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): "b6": ["3"], } - depth_map: Dict[str, int] = { + depth_map: dict[str, int] = { "1": 1, "2": 2, "b1": 3, @@ -1070,7 +1065,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): # The rest are events in the room but not backfilled tet. 
our_server_events = {"5", "4", "B", "3", "A"} - complete_event_dict_map: Dict[str, JsonDict] = {} + complete_event_dict_map: dict[str, JsonDict] = {} stream_ordering = 0 for event_id, prev_event_ids in event_graph.items(): depth = depth_map[event_id] @@ -1425,14 +1420,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): class FakeEvent: event_id: str room_id: str - auth_events: List[str] + auth_events: list[str] type = "foo" state_key = "foo" internal_metadata = EventInternalMetadata({}) - def auth_event_ids(self) -> List[str]: + def auth_event_ids(self) -> list[str]: return self.auth_events def is_state(self) -> bool: diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 30ba1ad94a..ef6c0f2465 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -19,7 +19,7 @@ # # -from typing import Optional, Tuple +from typing import Optional from twisted.internet.testing import MemoryReactor @@ -47,7 +47,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): assert persist_events_store is not None self.persist_events_store = persist_events_store - def _create_users_and_room(self) -> Tuple[str, str, str, str, str]: + def _create_users_and_room(self) -> tuple[str, str, str, str, str]: """ Creates two users and a shared room. diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index 25a380e325..5c7f814078 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -20,7 +20,7 @@ # import logging -from typing import Dict, List, Optional +from typing import Optional from twisted.internet.testing import MemoryReactor @@ -54,7 +54,7 @@ class EventsTestCase(HomeserverTestCase): def test_get_senders_for_event_ids(self) -> None: """Tests the `get_senders_for_event_ids` storage function.""" - users_and_tokens: Dict[str, str] = {} + users_and_tokens: dict[str, str] = {} for localpart_suffix in range(10): localpart = f"user_{localpart_suffix}" user_id = self.register_user(localpart, "rabbit") @@ -70,7 +70,7 @@ class EventsTestCase(HomeserverTestCase): room_id = self.helper.create_room_as( room_creator_user_id, tok=room_creator_token ) - event_ids_to_senders: Dict[str, str] = {} + event_ids_to_senders: dict[str, str] = {} for user_id, token in users_and_tokens.items(): if user_id == room_creator_user_id: continue @@ -180,7 +180,7 @@ class ExtremPruneTestCase(HomeserverTestCase): ) self.get_success(self._persistence.persist_event(event, context)) - def assert_extremities(self, expected_extremities: List[str]) -> None: + def assert_extremities(self, expected_extremities: list[str]) -> None: """Assert the current extremities for the room""" extremities = self.get_success( self.store.get_prev_events_for_room(self.room_id) diff --git a/tests/storage/test_events_bg_updates.py b/tests/storage/test_events_bg_updates.py index a1375aa4ac..d1a794c5a1 100644 --- a/tests/storage/test_events_bg_updates.py +++ b/tests/storage/test_events_bg_updates.py @@ -13,7 +13,6 @@ # # -from typing import Dict from twisted.internet.testing import MemoryReactor @@ -48,7 +47,7 @@ class TestFixupMaxDepthCapBgUpdate(HomeserverTestCase): ) ) - def create_room(self, room_version: RoomVersion) -> Dict[str, int]: + def create_room(self, room_version: RoomVersion) -> dict[str, int]: """Create a room with a known room version and insert events. 
Returns the set of event IDs that exceed MAX_DEPTH and @@ -67,7 +66,7 @@ class TestFixupMaxDepthCapBgUpdate(HomeserverTestCase): ) # Insert events with some depths exceeding MAX_DEPTH - event_id_to_depth: Dict[str, int] = {} + event_id_to_depth: dict[str, int] = {} for depth in range(MAX_DEPTH - 5, MAX_DEPTH + 5): event_id = f"$event{depth}:example.com" event_id_to_depth[event_id] = depth diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 4c1311a00e..4846e8cac3 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Dict, List, Optional +from typing import Optional from twisted.internet.testing import MemoryReactor @@ -43,12 +43,12 @@ from tests.utils import USE_POSTGRES_FOR_TESTS class MultiWriterIdGeneratorBase(HomeserverTestCase): positive: bool = True - tables: List[str] = ["foobar"] + tables: list[str] = ["foobar"] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.db_pool: DatabasePool = self.store.db_pool - self.instances: Dict[str, MultiWriterIdGenerator] = {} + self.instances: dict[str, MultiWriterIdGenerator] = {} self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) @@ -76,7 +76,7 @@ class MultiWriterIdGeneratorBase(HomeserverTestCase): def _create_id_generator( self, instance_name: str = "master", - writers: Optional[List[str]] = None, + writers: Optional[list[str]] = None, ) -> MultiWriterIdGenerator: def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator: return MultiWriterIdGenerator( diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 9a3b44219d..9ea2fa5311 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -17,7 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Dict, List +from typing import Any from unittest.mock import AsyncMock from twisted.internet.testing import MemoryReactor @@ -32,7 +32,7 @@ from tests.unittest import default_config, override_config FORTY_DAYS = 40 * 24 * 60 * 60 -def gen_3pids(count: int) -> List[Dict[str, Any]]: +def gen_3pids(count: int) -> list[dict[str, Any]]: """Generate `count` threepids as a list.""" return [ {"medium": "email", "address": "user%i@matrix.org" % i} for i in range(count) @@ -40,7 +40,7 @@ def gen_3pids(count: int) -> List[Dict[str, Any]]: class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): - def default_config(self) -> Dict[str, Any]: + def default_config(self) -> dict[str, Any]: config = default_config("test") config.update({"limit_usage_by_mau": True, "max_mau_value": 50}) diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 7565376a59..2c188b8046 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Optional, cast +from typing import Optional, cast from canonicaljson import json @@ -247,8 +247,8 @@ class RedactionTestCase(unittest.HomeserverTestCase): async def build( self, - prev_event_ids: List[str], - auth_event_ids: Optional[List[str]], + prev_event_ids: list[str], + auth_event_ids: Optional[list[str]], depth: Optional[int] = None, ) -> EventBase: built_event = await 
self._base_builder.build( diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py index f61eb2e319..125c4499b0 100644 --- a/tests/storage/test_rollback_worker.py +++ b/tests/storage/test_rollback_worker.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List from unittest import mock from twisted.internet.testing import MemoryReactor @@ -34,7 +33,7 @@ from synapse.util.clock import Clock from tests.unittest import HomeserverTestCase -def fake_listdir(filepath: str) -> List[str]: +def fake_listdir(filepath: str) -> list[str]: """ A fake implementation of os.listdir which we can use to mock out the filesystem. diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index e530e59fa6..2c0ef19e9e 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -19,7 +19,6 @@ # # -from typing import List, Tuple from unittest.case import SkipTest from twisted.internet.testing import MemoryReactor @@ -317,7 +316,7 @@ class MessageSearchTest(HomeserverTestCase): ) def _check_test_cases( - self, store: DataStore, cases: List[Tuple[str, bool]] + self, store: DataStore, cases: list[tuple[str, bool]] ) -> None: # Run all the test cases versus search_msgs for query, expect_to_contain in cases: diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index b8933d957b..c5487d81e6 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -20,7 +20,7 @@ # # import logging -from typing import List, Optional, Tuple, cast +from typing import Optional, cast from twisted.internet.testing import MemoryReactor @@ -133,7 +133,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) res = cast( - List[Tuple[Optional[str], str]], + list[tuple[Optional[str], str]], self.get_success( self.store.db_pool.simple_select_list( "room_memberships", @@ -165,7 +165,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): ) res2 = cast( - List[Tuple[Optional[str], str]], + list[tuple[Optional[str], str]], self.get_success( self.store.db_pool.simple_select_list( "room_memberships", @@ -408,7 +408,7 @@ class RoomSummaryTestCase(unittest.HomeserverTestCase): def _assert_member_summary( self, actual_member_summary: MemberSummary, - expected_member_list: List[str], + expected_member_list: list[str], *, expected_member_count: Optional[int] = None, ) -> None: diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py index f0df166bab..5cfc1a9c29 100644 --- a/tests/storage/test_sliding_sync_tables.py +++ b/tests/storage/test_sliding_sync_tables.py @@ -18,7 +18,7 @@ # # import logging -from typing import Dict, List, Optional, Tuple, cast +from typing import Optional, cast import attr from parameterized import parameterized @@ -112,7 +112,7 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): self.state_handler = self.hs.get_state_handler() - def _get_sliding_sync_joined_rooms(self) -> Dict[str, _SlidingSyncJoinedRoomResult]: + def _get_sliding_sync_joined_rooms(self) -> dict[str, _SlidingSyncJoinedRoomResult]: """ Return the rows from the `sliding_sync_joined_rooms` table. @@ -120,7 +120,7 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): Mapping from room_id to _SlidingSyncJoinedRoomResult. 
""" rows = cast( - List[Tuple[str, int, int, str, str, bool, str]], + list[tuple[str, int, int, str, str, bool, str]], self.get_success( self.store.db_pool.simple_select_list( "sliding_sync_joined_rooms", @@ -153,7 +153,7 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): def _get_sliding_sync_membership_snapshots( self, - ) -> Dict[Tuple[str, str], _SlidingSyncMembershipSnapshotResult]: + ) -> dict[tuple[str, str], _SlidingSyncMembershipSnapshotResult]: """ Return the rows from the `sliding_sync_membership_snapshots` table. @@ -161,7 +161,7 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): Mapping from the (room_id, user_id) to _SlidingSyncMembershipSnapshotResult. """ rows = cast( - List[Tuple[str, str, str, str, str, int, int, bool, str, str, bool, str]], + list[tuple[str, str, str, str, str, int, int, bool, str, str, bool, str]], self.get_success( self.store.db_pool.simple_select_list( "sliding_sync_membership_snapshots", @@ -207,8 +207,8 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): def _create_remote_invite_room_for_user( self, invitee_user_id: str, - unsigned_invite_room_state: Optional[List[StrippedStateEvent]], - ) -> Tuple[str, EventBase]: + unsigned_invite_room_state: Optional[list[StrippedStateEvent]], + ) -> tuple[str, EventBase]: """ Create a fake invite for a remote room and persist it. @@ -2246,7 +2246,7 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase): ] ) def test_non_join_remote_invite_no_stripped_state( - self, _description: str, stripped_state: Optional[List[StrippedStateEvent]] + self, _description: str, stripped_state: Optional[list[StrippedStateEvent]] ) -> None: """ Test remote invite with no stripped state provided shows up in diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index bf6da71549..8e821c6d18 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -20,7 +20,7 @@ # import logging -from typing import List, Tuple, cast +from typing import cast from immutabledict import immutabledict @@ -593,7 +593,7 @@ class StateStoreTestCase(HomeserverTestCase): # check that only state events are in state_groups, and all state events are in state_groups res = cast( - List[Tuple[str]], + list[tuple[str]], self.get_success( self.store.db_pool.simple_select_list( table="state_groups", @@ -618,7 +618,7 @@ class StateStoreTestCase(HomeserverTestCase): for event, context in processed_events_and_context: if event.is_state(): state = cast( - List[Tuple[str, str]], + list[tuple[str, str]], self.get_success( self.store.db_pool.simple_select_list( table="state_groups_state", @@ -631,7 +631,7 @@ class StateStoreTestCase(HomeserverTestCase): self.assertEqual(event.state_key, state[0][1]) groups = cast( - List[Tuple[str]], + list[tuple[str]], self.get_success( self.store.db_pool.simple_select_list( table="state_group_edges", diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index 0777c254c0..d51fa1f8ba 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -20,7 +20,6 @@ # import logging -from typing import List, Tuple from unittest.mock import AsyncMock, patch from immutabledict import immutabledict @@ -150,7 +149,7 @@ class PaginationTestCase(HomeserverTestCase): ) self.event_id_none = res["event_id"] - def _filter_messages(self, filter: JsonDict) -> List[str]: + def _filter_messages(self, filter: JsonDict) -> list[str]: """Make a request to /messages with a filter, returns the chunk of events.""" events, next_key, _ = self.get_success( @@ 
-324,7 +323,7 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase): def _send_event_on_instance( self, instance_name: str, room_id: str, access_token: str - ) -> Tuple[JsonDict, PersistedEventPosition]: + ) -> tuple[JsonDict, PersistedEventPosition]: """ Send an event in a room and mimic that it was persisted by a specific instance/worker. diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 26e045135e..83d3357c65 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -19,7 +19,7 @@ # # import re -from typing import Any, Dict, List, Optional, Set, Tuple, cast +from typing import Any, Optional, cast from unittest import mock from unittest.mock import Mock, patch @@ -56,21 +56,21 @@ class GetUserDirectoryTables: def __init__(self, store: DataStore): self.store = store - async def get_users_in_public_rooms(self) -> Set[Tuple[str, str]]: + async def get_users_in_public_rooms(self) -> set[tuple[str, str]]: """Fetch the entire `users_in_public_rooms` table. Returns a list of tuples (user_id, room_id) where room_id is public and contains the user with the given id. """ r = cast( - List[Tuple[str, str]], + list[tuple[str, str]], await self.store.db_pool.simple_select_list( "users_in_public_rooms", None, ("user_id", "room_id") ), ) return set(r) - async def get_users_who_share_private_rooms(self) -> Set[Tuple[str, str, str]]: + async def get_users_who_share_private_rooms(self) -> set[tuple[str, str, str]]: """Fetch the entire `users_who_share_private_rooms` table. Returns a set of tuples (user_id, other_user_id, room_id) corresponding @@ -78,7 +78,7 @@ class GetUserDirectoryTables: """ rows = cast( - List[Tuple[str, str, str]], + list[tuple[str, str, str]], await self.store.db_pool.simple_select_list( "users_who_share_private_rooms", None, @@ -87,13 +87,13 @@ class GetUserDirectoryTables: ) return set(rows) - async def get_users_in_user_directory(self) -> Set[str]: + async def get_users_in_user_directory(self) -> set[str]: """Fetch the set of users in the `user_directory` table. This is useful when checking we've correctly excluded users from the directory. """ result = cast( - List[Tuple[str]], + list[tuple[str]], await self.store.db_pool.simple_select_list( "user_directory", None, @@ -102,7 +102,7 @@ class GetUserDirectoryTables: ) return {row[0] for row in result} - async def get_profiles_in_user_directory(self) -> Dict[str, ProfileInfo]: + async def get_profiles_in_user_directory(self) -> dict[str, ProfileInfo]: """Fetch users and their profiles from the `user_directory` table. This is useful when we want to inspect display names and avatars. @@ -110,7 +110,7 @@ class GetUserDirectoryTables: thing missing is an unused room_id column. 
""" rows = cast( - List[Tuple[str, Optional[str], Optional[str]]], + list[tuple[str, Optional[str], Optional[str]]], await self.store.db_pool.simple_select_list( "user_directory", None, @@ -124,7 +124,7 @@ class GetUserDirectoryTables: async def get_tables( self, - ) -> Tuple[Set[str], Set[Tuple[str, str]], Set[Tuple[str, str, str]]]: + ) -> tuple[set[str], set[tuple[str, str]], set[tuple[str, str, str]]]: """Multiple tests want to inspect these tables, so expose them together.""" return ( await self.get_users_in_user_directory(), @@ -277,7 +277,7 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase): def _create_rooms_and_inject_memberships( self, creator: str, token: str, joiner: str - ) -> Tuple[str, str]: + ) -> tuple[str, str]: """Create a public and private room as a normal user. Then get the `joiner` into those rooms. """ diff --git a/tests/storage/util/test_partial_state_events_tracker.py b/tests/storage/util/test_partial_state_events_tracker.py index 1e5663f137..026bc58180 100644 --- a/tests/storage/util/test_partial_state_events_tracker.py +++ b/tests/storage/util/test_partial_state_events_tracker.py @@ -19,7 +19,7 @@ # # -from typing import Collection, Dict +from typing import Collection from unittest import mock from twisted.internet.defer import CancelledError, ensureDeferred @@ -35,9 +35,9 @@ from tests.unittest import TestCase class PartialStateEventsTrackerTestCase(TestCase): def setUp(self) -> None: # the results to be returned by the mocked get_partial_state_events - self._events_dict: Dict[str, bool] = {} + self._events_dict: dict[str, bool] = {} - async def get_partial_state_events(events: Collection[str]) -> Dict[str, bool]: + async def get_partial_state_events(events: Collection[str]) -> dict[str, bool]: return {e: self._events_dict[e] for e in events} self.mock_store = mock.Mock(spec_set=["get_partial_state_events"]) @@ -73,7 +73,7 @@ class PartialStateEventsTrackerTestCase(TestCase): # registration of the listener, it should not block. 
self._events_dict = {"event1": True, "event2": False} - async def get_partial_state_events(events: Collection[str]) -> Dict[str, bool]: + async def get_partial_state_events(events: Collection[str]) -> dict[str, bool]: res = {e: self._events_dict[e] for e in events} # change the result for next time self._events_dict = {"event1": False, "event2": False} @@ -91,13 +91,13 @@ class PartialStateEventsTrackerTestCase(TestCase): self._events_dict = {"event1": True, "event2": False} - async def get_partial_state_events1(events: Collection[str]) -> Dict[str, bool]: + async def get_partial_state_events1(events: Collection[str]) -> dict[str, bool]: self.mock_store.get_partial_state_events.side_effect = ( get_partial_state_events2 ) return {e: self._events_dict[e] for e in events} - async def get_partial_state_events2(events: Collection[str]) -> Dict[str, bool]: + async def get_partial_state_events2(events: Collection[str]) -> dict[str, bool]: self.tracker.notify_un_partial_stated("event1") self._events_dict["event1"] = False return {e: self._events_dict[e] for e in events} diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index f12402f5f2..7737101967 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -20,7 +20,7 @@ # import unittest -from typing import Any, Collection, Dict, Iterable, List, Optional +from typing import Any, Collection, Iterable, Optional from parameterized import parameterized @@ -39,7 +39,7 @@ class _StubEventSourceStore: """A stub implementation of the EventSourceStore""" def __init__(self) -> None: - self._store: Dict[str, EventBase] = {} + self._store: dict[str, EventBase] = {} def add_event(self, event: EventBase) -> None: self._store[event.event_id] = event @@ -54,7 +54,7 @@ class _StubEventSourceStore: redact_behaviour: EventRedactBehaviour, get_prev_content: bool = False, allow_rejected: bool = False, - ) -> Dict[str, EventBase]: + ) -> dict[str, EventBase]: assert allow_rejected assert not get_prev_content assert redact_behaviour == EventRedactBehaviour.as_is @@ -745,7 +745,7 @@ class EventAuthTestCase(unittest.TestCase): test_room_v10_rejects_string_power_levels above handles the string case. 
""" - def create_event(pl_event_content: Dict[str, Any]) -> EventBase: + def create_event(pl_event_content: dict[str, Any]) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, @@ -759,7 +759,7 @@ class EventAuthTestCase(unittest.TestCase): room_version=RoomVersions.V10, ) - contents: Iterable[Dict[str, Any]] = [ + contents: Iterable[dict[str, Any]] = [ {"notifications": {"room": None}}, {"users": {"@alice:wonderland": []}}, {"users_default": {}}, @@ -861,7 +861,7 @@ def _alias_event(room_version: RoomVersion, sender: str, **kwargs: Any) -> Event def _build_auth_dict_for_room_version( room_version: RoomVersion, auth_events: Iterable[EventBase] -) -> List: +) -> list: if room_version.event_format == EventFormatVersions.ROOM_V1_V2: return [(e.event_id, "not_used") for e in auth_events] else: diff --git a/tests/test_mau.py b/tests/test_mau.py index fa98242bf7..e535e7dc2e 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -20,7 +20,7 @@ """Tests REST events for /rooms paths.""" -from typing import List, Optional +from typing import Optional from twisted.internet.testing import MemoryReactor @@ -249,7 +249,7 @@ class TestMauLimit(unittest.HomeserverTestCase): } ) def test_as_trial_days(self) -> None: - user_tokens: List[str] = [] + user_tokens: list[str] = [] def advance_time_and_sync() -> None: self.reactor.advance(24 * 60 * 61) diff --git a/tests/test_server.py b/tests/test_server.py index 1854a3c4d4..e7d3febe3f 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -20,7 +20,7 @@ import re from http import HTTPStatus -from typing import Awaitable, Callable, Dict, NoReturn, Optional, Tuple +from typing import Awaitable, Callable, NoReturn, Optional from twisted.internet.defer import Deferred from twisted.web.resource import Resource @@ -70,7 +70,7 @@ class JsonResourceTests(unittest.TestCase): def _callback( request: SynapseRequest, **kwargs: object - ) -> Tuple[int, Dict[str, object]]: + ) -> tuple[int, dict[str, object]]: got_kwargs.update(kwargs) return 200, kwargs @@ -192,7 +192,7 @@ class JsonResourceTests(unittest.TestCase): def _callback( request: SynapseRequest, **kwargs: object - ) -> Tuple[int, Dict[str, object]]: + ) -> tuple[int, dict[str, object]]: return 200, {"result": True} res = JsonResource(self.homeserver) @@ -405,11 +405,11 @@ class CancellableDirectServeJsonResource(DirectServeJsonResource): self.clock = clock @cancellable - async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} - async def _async_render_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def _async_render_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} @@ -422,11 +422,11 @@ class CancellableDirectServeHtmlResource(DirectServeHtmlResource): self.clock = clock @cancellable - async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, bytes]: + async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, bytes]: await self.clock.sleep(1.0) return HTTPStatus.OK, b"ok" - async def _async_render_POST(self, request: SynapseRequest) -> Tuple[int, bytes]: + async def _async_render_POST(self, request: SynapseRequest) -> tuple[int, bytes]: await self.clock.sleep(1.0) return HTTPStatus.OK, b"ok" diff --git a/tests/test_state.py b/tests/test_state.py index ab7b52e90c..6e5a6d845d 100644 
--- a/tests/test_state.py +++ b/tests/test_state.py @@ -21,14 +21,10 @@ from typing import ( Any, Collection, - Dict, Generator, Iterable, Iterator, - List, Optional, - Set, - Tuple, ) from unittest.mock import AsyncMock, Mock @@ -57,7 +53,7 @@ def create_event( state_key: Optional[str] = None, depth: int = 2, event_id: Optional[str] = None, - prev_events: Optional[List[Tuple[str, dict]]] = None, + prev_events: Optional[list[tuple[str, dict]]] = None, **kwargs: Any, ) -> EventBase: global _next_event_id @@ -91,16 +87,16 @@ def create_event( class _DummyStore: def __init__(self) -> None: - self._event_to_state_group: Dict[str, int] = {} - self._group_to_state: Dict[int, MutableStateMap[str]] = {} + self._event_to_state_group: dict[str, int] = {} + self._group_to_state: dict[int, MutableStateMap[str]] = {} - self._event_id_to_event: Dict[str, EventBase] = {} + self._event_id_to_event: dict[str, EventBase] = {} self._next_group = 1 async def get_state_groups_ids( self, room_id: str, event_ids: Collection[str] - ) -> Dict[int, MutableStateMap[str]]: + ) -> dict[int, MutableStateMap[str]]: groups = {} for event_id in event_ids: group = self._event_to_state_group.get(event_id) @@ -137,7 +133,7 @@ class _DummyStore: async def get_events( self, event_ids: Collection[str], **kwargs: Any - ) -> Dict[str, EventBase]: + ) -> dict[str, EventBase]: return { e_id: self._event_id_to_event[e_id] for e_id in event_ids @@ -146,12 +142,12 @@ class _DummyStore: async def get_partial_state_events( self, event_ids: Collection[str] - ) -> Dict[str, bool]: + ) -> dict[str, bool]: return dict.fromkeys(event_ids, False) async def get_state_group_delta( self, name: str - ) -> Tuple[Optional[int], Optional[StateMap[str]]]: + ) -> tuple[Optional[int], Optional[StateMap[str]]]: return None, None def register_events(self, events: Iterable[EventBase]) -> None: @@ -170,7 +166,7 @@ class _DummyStore: async def get_state_group_for_events( self, event_ids: Collection[str], await_full_state: bool = True - ) -> Dict[str, int]: + ) -> dict[str, int]: res = {} for event in event_ids: res[event] = self._event_to_state_group[event] @@ -178,7 +174,7 @@ class _DummyStore: async def get_state_for_groups( self, groups: Collection[int] - ) -> Dict[int, MutableStateMap[str]]: + ) -> dict[int, MutableStateMap[str]]: res = {} for group in groups: state = self._group_to_state[group] @@ -193,15 +189,15 @@ class DictObj(dict): class Graph: - def __init__(self, nodes: Dict[str, DictObj], edges: Dict[str, List[str]]): - events: Dict[str, EventBase] = {} - clobbered: Set[str] = set() + def __init__(self, nodes: dict[str, DictObj], edges: dict[str, list[str]]): + events: dict[str, EventBase] = {} + clobbered: set[str] = set() for event_id, fields in nodes.items(): refs = edges.get(event_id) if refs: clobbered.difference_update(refs) - prev_events: List[Tuple[str, dict]] = [(r, {}) for r in refs] + prev_events: list[tuple[str, dict]] = [(r, {}) for r in refs] else: prev_events = [] @@ -281,7 +277,7 @@ class StateTestCase(unittest.TestCase): self.dummy_store.register_events(graph.walk()) - context_store: Dict[str, EventContext] = {} + context_store: dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( @@ -328,7 +324,7 @@ class StateTestCase(unittest.TestCase): self.dummy_store.register_events(graph.walk()) - context_store: Dict[str, EventContext] = {} + context_store: dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( @@ -389,7 +385,7 @@ class 
StateTestCase(unittest.TestCase): self.dummy_store.register_events(graph.walk()) - context_store: Dict[str, EventContext] = {} + context_store: dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( @@ -467,7 +463,7 @@ class StateTestCase(unittest.TestCase): self.dummy_store.register_events(graph.walk()) - context_store: Dict[str, EventContext] = {} + context_store: dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( @@ -490,7 +486,7 @@ class StateTestCase(unittest.TestCase): self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group) def _add_depths( - self, nodes: Dict[str, DictObj], edges: Dict[str, List[str]] + self, nodes: dict[str, DictObj], edges: dict[str, list[str]] ) -> None: def _get_depth(ev: str) -> int: node = nodes[ev] diff --git a/tests/test_types.py b/tests/test_types.py index 0c08bc8ecc..1802f0fae3 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -19,7 +19,6 @@ # # -from typing import Type from unittest import skipUnless from immutabledict import immutabledict @@ -152,7 +151,7 @@ class MapUsernameTestCase(unittest.TestCase): class MultiWriterTokenTestCase(unittest.HomeserverTestCase): """Tests for the different types of multi writer tokens.""" - token_type: Type[AbstractMultiWriterStreamToken] + token_type: type[AbstractMultiWriterStreamToken] def test_basic_token(self) -> None: """Test that a simple stream token can be serialized and unserialized""" diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index 3e6fd03600..0df5a4e6c3 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -28,7 +28,7 @@ import json import sys import warnings from binascii import unhexlify -from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, TypeVar +from typing import TYPE_CHECKING, Awaitable, Callable, TypeVar import attr import zope.interface @@ -102,7 +102,7 @@ class FakeResponse: # type: ignore[misc] attribute, and didn't support deliverBody until recently. 
""" - version: Tuple[bytes, int, int] = (b"HTTP", 1, 1) + version: tuple[bytes, int, int] = (b"HTTP", 1, 1) # HTTP response code code: int = 200 diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index c1eaf9a575..9cdb456b1b 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, List, Optional, Tuple +from typing import Any, Optional import synapse.server from synapse.api.constants import EventTypes @@ -62,7 +62,7 @@ async def inject_member_event( async def inject_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, - prev_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, **kwargs: Any, ) -> EventBase: """Inject a generic event into a room @@ -87,9 +87,9 @@ async def inject_event( async def create_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, - prev_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, **kwargs: Any, -) -> Tuple[EventBase, EventContext]: +) -> tuple[EventBase, EventContext]: if room_version is None: room_version = await hs.get_datastores().main.get_room_version_id( kwargs["room_id"] diff --git a/tests/test_utils/html_parsers.py b/tests/test_utils/html_parsers.py index a0f39cb130..aff1626295 100644 --- a/tests/test_utils/html_parsers.py +++ b/tests/test_utils/html_parsers.py @@ -20,7 +20,7 @@ # from html.parser import HTMLParser -from typing import Dict, Iterable, List, NoReturn, Optional, Tuple +from typing import Iterable, NoReturn, Optional class TestHtmlParser(HTMLParser): @@ -30,16 +30,16 @@ class TestHtmlParser(HTMLParser): super().__init__() # a list of links found in the doc - self.links: List[str] = [] + self.links: list[str] = [] # the values of any hidden s: map from name to value - self.hiddens: Dict[str, Optional[str]] = {} + self.hiddens: dict[str, Optional[str]] = {} # the values of any radio buttons: map from name to list of values - self.radios: Dict[str, List[Optional[str]]] = {} + self.radios: dict[str, list[Optional[str]]] = {} def handle_starttag( - self, tag: str, attrs: Iterable[Tuple[str, Optional[str]]] + self, tag: str, attrs: Iterable[tuple[str, Optional[str]]] ) -> None: attr_dict = dict(attrs) if tag == "a": diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py index f2de8bded5..c2d6af029a 100644 --- a/tests/test_utils/oidc.py +++ b/tests/test_utils/oidc.py @@ -23,7 +23,7 @@ import base64 import json from hashlib import sha256 -from typing import Any, ContextManager, Dict, List, Optional, Tuple +from typing import Any, ContextManager, Optional from unittest.mock import Mock, patch from urllib.parse import parse_qs @@ -75,16 +75,16 @@ class FakeOidcServer: self.post_token_handler = Mock(side_effect=self._post_token_handler) # A code -> grant mapping - self._authorization_grants: Dict[str, FakeAuthorizationGrant] = {} + self._authorization_grants: dict[str, FakeAuthorizationGrant] = {} # An access token -> grant mapping - self._sessions: Dict[str, FakeAuthorizationGrant] = {} + self._sessions: dict[str, FakeAuthorizationGrant] = {} # We generate here an ECDSA key with the P-256 curve (ES256 algorithm) used for # signing JWTs. ECDSA keys are really quick to generate compared to RSA. 
self._key = ECKey.generate_key(crv="P-256", is_private=True) self._jwks = KeySet([ECKey.import_key(self._key.as_pem(is_private=False))]) - self._id_token_overrides: Dict[str, Any] = {} + self._id_token_overrides: dict[str, Any] = {} def reset_mocks(self) -> None: self.request.reset_mock() @@ -222,7 +222,7 @@ class FakeOidcServer: userinfo: dict, nonce: Optional[str] = None, with_sid: bool = False, - ) -> Tuple[str, FakeAuthorizationGrant]: + ) -> tuple[str, FakeAuthorizationGrant]: """Start an authorization request, and get back the code to use on the authorization endpoint.""" code = random_string(10) sid = None @@ -242,7 +242,7 @@ class FakeOidcServer: return code, grant - def exchange_code(self, code: str) -> Optional[Dict[str, Any]]: + def exchange_code(self, code: str) -> Optional[dict[str, Any]]: grant = self._authorization_grants.pop(code, None) if grant is None: return None @@ -269,7 +269,7 @@ class FakeOidcServer: metadata: bool = False, token: bool = False, userinfo: bool = False, - ) -> ContextManager[Dict[str, Mock]]: + ) -> ContextManager[dict[str, Mock]]: """A context which makes a set of endpoints return a 500 error. Args: @@ -356,7 +356,7 @@ class FakeOidcServer: return FakeResponse.json(payload=user_info) - def _post_token_handler(self, params: Dict[str, List[str]]) -> IResponse: + def _post_token_handler(self, params: dict[str, list[str]]) -> IResponse: """Handles requests to the token endpoint.""" code = params.get("code", []) diff --git a/tests/unittest.py b/tests/unittest.py index 9ab052e7c0..1007f40456 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -33,16 +33,12 @@ from typing import ( Awaitable, Callable, ClassVar, - Dict, Generic, Iterable, - List, Mapping, NoReturn, Optional, Protocol, - Tuple, - Type, TypeVar, Union, ) @@ -169,7 +165,7 @@ def _parse_config_dict(config: str) -> HomeServerConfig: return config_obj -def make_homeserver_config_obj(config: Dict[str, Any]) -> HomeServerConfig: +def make_homeserver_config_obj(config: dict[str, Any]) -> HomeServerConfig: """Creates a :class:`HomeServerConfig` instance with the given configuration dict. 
This is equivalent to:: @@ -250,7 +246,7 @@ class TestCase(unittest.TestCase): return ret - def assertObjectHasAttributes(self, attrs: Dict[str, object], obj: object) -> None: + def assertObjectHasAttributes(self, attrs: dict[str, object], obj: object) -> None: """Asserts that the given object has each of the attributes given, and that the value of each matches according to assertEqual.""" for key in attrs.keys(): @@ -299,14 +295,14 @@ class TestCase(unittest.TestCase): elif not exact and actual_items >= expected_items: return - expected_lines: List[str] = [] + expected_lines: list[str] = [] for expected_item in expected_items: is_expected_in_actual = expected_item in actual_items expected_lines.append( "{} {}".format(" " if is_expected_in_actual else "?", expected_item) ) - actual_lines: List[str] = [] + actual_lines: list[str] = [] for actual_item in actual_items: is_actual_in_expected = actual_item in expected_items actual_lines.append( @@ -379,7 +375,7 @@ class HomeserverTestCase(TestCase): hijack_auth: ClassVar[bool] = True needs_threadpool: ClassVar[bool] = False - servlets: ClassVar[List[RegisterServletsFunc]] = [] + servlets: ClassVar[list[RegisterServletsFunc]] = [] def __init__(self, methodName: str): super().__init__(methodName) @@ -527,7 +523,7 @@ class HomeserverTestCase(TestCase): create_resource_tree(self.create_resource_dict(), root_resource) return root_resource - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: """Create a resource tree for the test server A resource tree is a mapping from path to twisted.web.resource. @@ -578,7 +574,7 @@ class HomeserverTestCase(TestCase): path: Union[bytes, str], content: Union[bytes, str, JsonDict] = b"", access_token: Optional[str] = None, - request: Type[Request] = SynapseRequest, + request: type[Request] = SynapseRequest, shorthand: bool = True, federation_auth_origin: Optional[bytes] = None, content_type: Optional[bytes] = None, @@ -709,7 +705,7 @@ class HomeserverTestCase(TestCase): return self.successResultOf(deferred) def get_failure( - self, d: Awaitable[Any], exc: Type[_ExcType], by: float = 0.0 + self, d: Awaitable[Any], exc: type[_ExcType], by: float = 0.0 ) -> _TypedFailure[_ExcType]: """ Run a Deferred and get a Failure from it. The failure must be of the type `exc`. @@ -799,7 +795,7 @@ class HomeserverTestCase(TestCase): username: str, appservice_token: str, inhibit_login: bool = False, - ) -> Tuple[str, Optional[str]]: + ) -> tuple[str, Optional[str]]: """Register an appservice user as an application service. Requires the client-facing registration API be registered. @@ -831,7 +827,7 @@ class HomeserverTestCase(TestCase): username: str, password: str, device_id: Optional[str] = None, - additional_request_fields: Optional[Dict[str, str]] = None, + additional_request_fields: Optional[dict[str, str]] = None, custom_headers: Optional[Iterable[CustomHeaderType]] = None, ) -> str: """ @@ -871,7 +867,7 @@ class HomeserverTestCase(TestCase): room_id: str, user: UserID, soft_failed: bool = False, - prev_event_ids: Optional[List[str]] = None, + prev_event_ids: Optional[list[str]] = None, ) -> str: """ Create and send an event. 
@@ -963,7 +959,7 @@ class FederatingHomeserverTestCase(HomeserverTestCase): ) ) - def create_resource_dict(self) -> Dict[str, Resource]: + def create_resource_dict(self) -> dict[str, Resource]: d = super().create_resource_dict() d["/_matrix/federation"] = TransportLayerServer(self.hs) return d diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index f0deb1554e..fc01a2f5e9 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -20,7 +20,6 @@ # from functools import partial -from typing import List, Tuple from twisted.internet import defer @@ -169,7 +168,7 @@ class DeferredCacheTestCase(TestCase): self.assertEqual(v, 2) def test_invalidate(self) -> None: - cache: DeferredCache[Tuple[str], int] = DeferredCache( + cache: DeferredCache[tuple[str], int] = DeferredCache( name="test", clock=self.clock, server_name="test_server" ) cache.prefill(("foo",), 123) @@ -266,7 +265,7 @@ class DeferredCacheTestCase(TestCase): cache.get(3) def test_eviction_iterable(self) -> None: - cache: DeferredCache[int, List[str]] = DeferredCache( + cache: DeferredCache[int, list[str]] = DeferredCache( name="test", clock=self.clock, server_name="test_server", diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 0e3b6ae36b..e27f84fa6d 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -23,12 +23,9 @@ from typing import ( Any, Generator, Iterable, - List, Mapping, NoReturn, Optional, - Set, - Tuple, cast, ) from unittest import mock @@ -257,7 +254,7 @@ class DescriptorTestCase(unittest.TestCase): return self.result obj = Cls() - callbacks: Set[str] = set() + callbacks: set[str] = set() # set off an asynchronous request origin_d: Deferred = Deferred() @@ -435,7 +432,7 @@ class DescriptorTestCase(unittest.TestCase): _, self.clock = get_clock() # nb must be called this for @cached @descriptors.cached(iterable=True) - def fn(self, arg1: int, arg2: int) -> Tuple[str, ...]: + def fn(self, arg1: int, arg2: int) -> tuple[str, ...]: return self.mock(arg1, arg2) obj = Cls() @@ -925,7 +922,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - def list_fn(self, args1: List[int]) -> "Deferred[Mapping[int, str]]": + def list_fn(self, args1: list[int]) -> "Deferred[Mapping[int, str]]": return self.mock(args1) obj = Cls() @@ -970,7 +967,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - async def list_fn(self, args1: List[int], arg2: int) -> Mapping[int, str]: + async def list_fn(self, args1: list[int], arg2: int) -> Mapping[int, str]: # we want this to behave like an asynchronous function await run_on_reactor() return self.mock(args1, arg2) @@ -1012,7 +1009,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): pass @cachedList(cached_method_name="fn", list_name="args") - async def list_fn(self, args: List[int]) -> Mapping[int, str]: + async def list_fn(self, args: list[int]) -> Mapping[int, str]: await complete_lookup return {arg: str(arg) for arg in args} @@ -1049,7 +1046,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): pass @cachedList(cached_method_name="fn", list_name="args") - async def list_fn(self, args: List[int]) -> Mapping[int, str]: + async def list_fn(self, args: list[int]) -> Mapping[int, str]: await make_deferred_yieldable(complete_lookup) 
self.inner_context_was_finished = current_context().finished return {arg: str(arg) for arg in args} @@ -1097,7 +1094,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): # of arguments as the underlying cached function, just with one of # the arguments being an iterable @descriptors.cachedList(cached_method_name="fn", list_name="keys") - def list_fn(self, keys: Iterable[Tuple[str, str]]) -> None: + def list_fn(self, keys: Iterable[tuple[str, str]]) -> None: pass diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index fd8d576aea..a02a2f0cef 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -19,7 +19,7 @@ # import logging import traceback -from typing import Any, Coroutine, List, NoReturn, Optional, Tuple, TypeVar +from typing import Any, Coroutine, NoReturn, Optional, TypeVar from parameterized import parameterized_class @@ -71,7 +71,7 @@ class ObservableDeferredTest(TestCase): observer1.addBoth(check_called_first) # store the results - results: List[Optional[int]] = [None, None] + results: list[Optional[int]] = [None, None] def check_val(res: int, idx: int) -> int: results[idx] = res @@ -102,7 +102,7 @@ class ObservableDeferredTest(TestCase): observer1.addBoth(check_called_first) # store the results - results: List[Optional[Failure]] = [None, None] + results: list[Optional[Failure]] = [None, None] def check_failure(res: Failure, idx: int) -> None: results[idx] = res @@ -644,7 +644,7 @@ class AwakenableSleeperTests(TestCase): class GatherCoroutineTests(TestCase): """Tests for `gather_optional_coroutines`""" - def make_coroutine(self) -> Tuple[Coroutine[Any, Any, T], "defer.Deferred[T]"]: + def make_coroutine(self) -> tuple[Coroutine[Any, Any, T], "defer.Deferred[T]"]: """Returns a coroutine and a deferred that it is waiting on to resolve""" d: "defer.Deferred[T]" = defer.Deferred() diff --git a/tests/util/test_batching_queue.py b/tests/util/test_batching_queue.py index 60bfdf38aa..30b07dc6ad 100644 --- a/tests/util/test_batching_queue.py +++ b/tests/util/test_batching_queue.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Tuple from prometheus_client import Gauge @@ -47,7 +46,7 @@ class BatchingQueueTestCase(HomeserverTestCase): except KeyError: pass - self._pending_calls: List[Tuple[List[str], defer.Deferred]] = [] + self._pending_calls: list[tuple[list[str], defer.Deferred]] = [] self.queue: BatchingQueue[str, str] = BatchingQueue( name="test_queue", hs=self.hs, @@ -55,7 +54,7 @@ class BatchingQueueTestCase(HomeserverTestCase): process_batch_callback=self._process_queue, ) - async def _process_queue(self, values: List[str]) -> str: + async def _process_queue(self, values: list[str]) -> str: d: "defer.Deferred[str]" = defer.Deferred() self._pending_calls.append((values, d)) return await make_deferred_yieldable(d) diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py index 35c0f02e3f..8964359a6e 100644 --- a/tests/util/test_expiring_cache.py +++ b/tests/util/test_expiring_cache.py @@ -19,7 +19,6 @@ # # -from typing import List from synapse.util.caches.expiringcache import ExpiringCache @@ -65,7 +64,7 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase): def test_iterable_eviction(self) -> None: reactor, clock = get_clock() - cache: ExpiringCache[str, List[int]] = ExpiringCache( + cache: ExpiringCache[str, list[int]] = ExpiringCache( cache_name="test", server_name="testserver", hs=self.hs, diff
--git a/tests/util/test_itertools.py b/tests/util/test_itertools.py index 7a593cc683..016389d49b 100644 --- a/tests/util/test_itertools.py +++ b/tests/util/test_itertools.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Dict, Iterable, List, Sequence +from typing import Iterable, Sequence from synapse.util.iterutils import ( chunk_seq, @@ -67,13 +67,13 @@ class SortTopologically(TestCase): def test_empty(self) -> None: "Test that an empty graph works correctly" - graph: Dict[int, List[int]] = {} + graph: dict[int, list[int]] = {} self.assertEqual(list(sorted_topologically([], graph)), []) def test_handle_empty_graph(self) -> None: "Test that a graph where a node doesn't have an entry is treated as empty" - graph: Dict[int, List[int]] = {} + graph: dict[int, list[int]] = {} # For disconnected nodes the output is simply sorted. self.assertEqual(list(sorted_topologically([1, 2], graph)), [1, 2]) @@ -81,7 +81,7 @@ class SortTopologically(TestCase): def test_disconnected(self) -> None: "Test that a graph with no edges work" - graph: Dict[int, List[int]] = {1: [], 2: []} + graph: dict[int, list[int]] = {1: [], 2: []} # For disconnected nodes the output is simply sorted. self.assertEqual(list(sorted_topologically([1, 2], graph)), [1, 2]) @@ -89,19 +89,19 @@ class SortTopologically(TestCase): def test_linear(self) -> None: "Test that a simple `4 -> 3 -> 2 -> 1` graph works" - graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]} + graph: dict[int, list[int]] = {1: [], 2: [1], 3: [2], 4: [3]} self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4]) def test_subset(self) -> None: "Test that only sorting a subset of the graph works" - graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]} + graph: dict[int, list[int]] = {1: [], 2: [1], 3: [2], 4: [3]} self.assertEqual(list(sorted_topologically([4, 3], graph)), [3, 4]) def test_fork(self) -> None: "Test that a forked graph works" - graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [1], 4: [2, 3]} + graph: dict[int, list[int]] = {1: [], 2: [1], 3: [1], 4: [2, 3]} # Valid orderings are `[1, 3, 2, 4]` or `[1, 2, 3, 4]`, but we should # always get the same one. @@ -109,13 +109,13 @@ class SortTopologically(TestCase): def test_duplicates(self) -> None: "Test that a graph with duplicate edges work" - graph: Dict[int, List[int]] = {1: [], 2: [1, 1], 3: [2, 2], 4: [3]} + graph: dict[int, list[int]] = {1: [], 2: [1, 1], 3: [2, 2], 4: [3]} self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4]) def test_multiple_paths(self) -> None: "Test that a graph with multiple paths between two nodes work" - graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]} + graph: dict[int, list[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]} self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4]) @@ -126,13 +126,13 @@ class SortTopologicallyBatched(TestCase): def test_empty(self) -> None: "Test that an empty graph works correctly" - graph: Dict[int, List[int]] = {} + graph: dict[int, list[int]] = {} self.assertEqual(list(sorted_topologically_batched([], graph)), []) def test_handle_empty_graph(self) -> None: "Test that a graph where a node doesn't have an entry is treated as empty" - graph: Dict[int, List[int]] = {} + graph: dict[int, list[int]] = {} # For disconnected nodes the output is simply sorted. 
self.assertEqual(list(sorted_topologically_batched([1, 2], graph)), [[1, 2]]) @@ -140,7 +140,7 @@ class SortTopologicallyBatched(TestCase): def test_disconnected(self) -> None: "Test that a graph with no edges work" - graph: Dict[int, List[int]] = {1: [], 2: []} + graph: dict[int, list[int]] = {1: [], 2: []} # For disconnected nodes the output is simply sorted. self.assertEqual(list(sorted_topologically_batched([1, 2], graph)), [[1, 2]]) @@ -148,7 +148,7 @@ class SortTopologicallyBatched(TestCase): def test_linear(self) -> None: "Test that a simple `4 -> 3 -> 2 -> 1` graph works" - graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]} + graph: dict[int, list[int]] = {1: [], 2: [1], 3: [2], 4: [3]} self.assertEqual( list(sorted_topologically_batched([4, 3, 2, 1], graph)), @@ -157,13 +157,13 @@ class SortTopologicallyBatched(TestCase): def test_subset(self) -> None: "Test that only sorting a subset of the graph works" - graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]} + graph: dict[int, list[int]] = {1: [], 2: [1], 3: [2], 4: [3]} self.assertEqual(list(sorted_topologically_batched([4, 3], graph)), [[3], [4]]) def test_fork(self) -> None: "Test that a forked graph works" - graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [1], 4: [2, 3]} + graph: dict[int, list[int]] = {1: [], 2: [1], 3: [1], 4: [2, 3]} # Valid orderings are `[1, 3, 2, 4]` or `[1, 2, 3, 4]`, but we should # always get the same one. @@ -173,7 +173,7 @@ class SortTopologicallyBatched(TestCase): def test_duplicates(self) -> None: "Test that a graph with duplicate edges work" - graph: Dict[int, List[int]] = {1: [], 2: [1, 1], 3: [2, 2], 4: [3]} + graph: dict[int, list[int]] = {1: [], 2: [1, 1], 3: [2, 2], 4: [3]} self.assertEqual( list(sorted_topologically_batched([4, 3, 2, 1], graph)), @@ -182,7 +182,7 @@ class SortTopologicallyBatched(TestCase): def test_multiple_paths(self) -> None: "Test that a graph with multiple paths between two nodes work" - graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]} + graph: dict[int, list[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]} self.assertEqual( list(sorted_topologically_batched([4, 3, 2, 1], graph)), diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index 722ce79dcc..b2a631d747 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -19,7 +19,7 @@ # # -from typing import Hashable, Protocol, Tuple +from typing import Hashable, Protocol from twisted.internet import defer from twisted.internet.defer import CancelledError, Deferred @@ -43,7 +43,7 @@ class LinearizerTestCase(unittest.TestCase): def _start_task( self, linearizer: Linearizer, key: Hashable - ) -> Tuple["Deferred[None]", "Deferred[None]", UnblockFunction]: + ) -> tuple["Deferred[None]", "Deferred[None]", UnblockFunction]: """Starts a task which acquires the linearizer lock, blocks, then completes. Args: diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index 56e9996b00..dcbfcfa2e4 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -20,7 +20,6 @@ # -from typing import List, Tuple from unittest.mock import Mock, patch from synapse.metrics.jemalloc import JemallocStats @@ -84,7 +83,7 @@ class LruCacheTestCase(unittest.HomeserverTestCase): def test_del_multi(self) -> None: # The type here isn't quite correct as they don't handle TreeCache well. 
- cache: LruCache[Tuple[str, str], str] = LruCache( + cache: LruCache[tuple[str, str], str] = LruCache( max_size=4, clock=self.clock, cache_type=TreeCache, @@ -211,7 +210,7 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): m3 = Mock() m4 = Mock() # The type here isn't quite correct as they don't handle TreeCache well. - cache: LruCache[Tuple[str, str], str] = LruCache( + cache: LruCache[tuple[str, str], str] = LruCache( max_size=4, clock=self.clock, cache_type=TreeCache, @@ -295,7 +294,7 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): class LruCacheSizedTestCase(unittest.HomeserverTestCase): def test_evict(self) -> None: - cache: LruCache[str, List[int]] = LruCache( + cache: LruCache[str, list[int]] = LruCache( max_size=5, clock=self.clock, size_callback=len, server_name="test_server" ) cache["key1"] = [0] @@ -320,7 +319,7 @@ class LruCacheSizedTestCase(unittest.HomeserverTestCase): def test_zero_size_drop_from_cache(self) -> None: """Test that `drop_from_cache` works correctly with 0-sized entries.""" - cache: LruCache[str, List[int]] = LruCache( + cache: LruCache[str, list[int]] = LruCache( max_size=5, clock=self.clock, size_callback=lambda x: 0, diff --git a/tests/util/test_mutable_overlay_mapping.py b/tests/util/test_mutable_overlay_mapping.py index a7335fca73..ed738919e4 100644 --- a/tests/util/test_mutable_overlay_mapping.py +++ b/tests/util/test_mutable_overlay_mapping.py @@ -13,7 +13,6 @@ # import unittest -from typing import Dict from synapse.util import MutableOverlayMapping @@ -24,7 +23,7 @@ class TestMutableOverlayMapping(unittest.TestCase): def test_init(self) -> None: """Test initialization with different input types.""" # Test with empty dict - empty_dict: Dict[str, int] = {} + empty_dict: dict[str, int] = {} mapping = MutableOverlayMapping(empty_dict) self.assertEqual(len(mapping), 0) diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py index 12f821d684..36771188ae 100644 --- a/tests/util/test_rwlock.py +++ b/tests/util/test_rwlock.py @@ -19,7 +19,7 @@ # # -from typing import AsyncContextManager, Callable, Sequence, Tuple +from typing import AsyncContextManager, Callable, Sequence from twisted.internet import defer from twisted.internet.defer import CancelledError, Deferred @@ -35,7 +35,7 @@ class ReadWriteLockTestCase(unittest.TestCase): read_or_write: Callable[[str], AsyncContextManager], key: str, return_value: str, - ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + ) -> tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: """Starts a reader or writer which acquires the lock, blocks, then completes. Args: @@ -67,7 +67,7 @@ class ReadWriteLockTestCase(unittest.TestCase): def _start_blocking_reader( self, rwlock: ReadWriteLock, key: str, return_value: str - ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + ) -> tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: """Starts a reader which acquires the lock, blocks, then releases the lock. See the docstring for `_start_reader_or_writer` for details about the arguments @@ -77,7 +77,7 @@ class ReadWriteLockTestCase(unittest.TestCase): def _start_blocking_writer( self, rwlock: ReadWriteLock, key: str, return_value: str - ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + ) -> tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: """Starts a writer which acquires the lock, blocks, then releases the lock. 
See the docstring for `_start_reader_or_writer` for details about the arguments @@ -87,7 +87,7 @@ class ReadWriteLockTestCase(unittest.TestCase): def _start_nonblocking_reader( self, rwlock: ReadWriteLock, key: str, return_value: str - ) -> Tuple["Deferred[str]", "Deferred[None]"]: + ) -> tuple["Deferred[str]", "Deferred[None]"]: """Starts a reader which acquires the lock, then releases it immediately. See the docstring for `_start_reader_or_writer` for details about the arguments. @@ -106,7 +106,7 @@ class ReadWriteLockTestCase(unittest.TestCase): def _start_nonblocking_writer( self, rwlock: ReadWriteLock, key: str, return_value: str - ) -> Tuple["Deferred[str]", "Deferred[None]"]: + ) -> tuple["Deferred[str]", "Deferred[None]"]: """Starts a writer which acquires the lock, then releases it immediately. See the docstring for `_start_reader_or_writer` for details about the arguments. diff --git a/tests/util/test_task_scheduler.py b/tests/util/test_task_scheduler.py index e97f0ed611..43c3ce52ea 100644 --- a/tests/util/test_task_scheduler.py +++ b/tests/util/test_task_scheduler.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import List, Optional, Tuple +from typing import Optional from twisted.internet.task import deferLater from twisted.internet.testing import MemoryReactor @@ -42,7 +42,7 @@ class TestTaskScheduler(HomeserverTestCase): async def _test_task( self, task: ScheduledTask - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: # This test task will copy the parameters to the result result = None if task.params: @@ -85,7 +85,7 @@ class TestTaskScheduler(HomeserverTestCase): async def _sleeping_task( self, task: ScheduledTask - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: # Sleep for a second await deferLater(self.reactor, 1, lambda: None) return TaskStatus.COMPLETE, None, None @@ -103,7 +103,7 @@ class TestTaskScheduler(HomeserverTestCase): ) ) - def get_tasks_of_status(status: TaskStatus) -> List[ScheduledTask]: + def get_tasks_of_status(status: TaskStatus) -> list[ScheduledTask]: tasks = ( self.get_success(self.task_scheduler.get_task(task_id)) for task_id in task_ids @@ -151,7 +151,7 @@ class TestTaskScheduler(HomeserverTestCase): async def _raising_task( self, task: ScheduledTask - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: raise Exception("raising") def test_schedule_raising_task(self) -> None: @@ -165,7 +165,7 @@ class TestTaskScheduler(HomeserverTestCase): async def _resumable_task( self, task: ScheduledTask - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: if task.result and "in_progress" in task.result: return TaskStatus.COMPLETE, {"success": True}, None else: @@ -201,7 +201,7 @@ class TestTaskSchedulerWithBackgroundWorker(BaseMultiWorkerStreamTestCase): async def _test_task( self, task: ScheduledTask - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: return (TaskStatus.COMPLETE, None, None) @override_config({"run_background_tasks_on": "worker1"}) diff --git a/tests/utils.py b/tests/utils.py index 051388ee2e..b3d59a0ebe 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -24,10 +24,8 @@ import os import signal from types import 
FrameType, TracebackType from typing import ( - Dict, Literal, Optional, - Type, TypeVar, Union, overload, @@ -134,7 +132,7 @@ def setupdb() -> None: @overload def default_config( server_name: str, parse: Literal[False] = ... -) -> Dict[str, object]: ... +) -> dict[str, object]: ... @overload @@ -143,7 +141,7 @@ def default_config(server_name: str, parse: Literal[True]) -> HomeServerConfig: def default_config( server_name: str, parse: bool = False -) -> Union[Dict[str, object], HomeServerConfig]: +) -> Union[dict[str, object], HomeServerConfig]: """ Create a reasonable test config. @@ -283,7 +281,7 @@ async def create_room(hs: HomeServer, room_id: str, creator_id: str) -> None: T = TypeVar("T") -def checked_cast(type: Type[T], x: object) -> T: +def checked_cast(type: type[T], x: object) -> T: """A version of typing.cast that is checked at runtime. We have our own function for this for two reasons: @@ -337,7 +335,7 @@ class test_timeout: def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: From b835eb253ca02f3080012e886ee628c075142102 Mon Sep 17 00:00:00 2001 From: Bryce Servis Date: Thu, 23 Oct 2025 10:10:10 -0500 Subject: [PATCH 070/149] Make optional networking and security settings for Redis more apparent in workers.md (#19073) I couldn't really find any documentation regarding how to setup TLS communication between Synapse and Redis, so I looked through the source code and found it. I figured I should go ahead and document it here. --- changelog.d/19073.doc | 1 + docs/workers.md | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/19073.doc diff --git a/changelog.d/19073.doc b/changelog.d/19073.doc new file mode 100644 index 0000000000..6bbaaba99e --- /dev/null +++ b/changelog.d/19073.doc @@ -0,0 +1 @@ +Point out additional Redis configuration options available in the worker docs. Contributed by @servisbryce. diff --git a/docs/workers.md b/docs/workers.md index 18bb0b76f6..f766b40251 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -120,6 +120,9 @@ worker_replication_secret: "" redis: enabled: true + # For additional Redis configuration options (TLS, authentication, etc.), + # see the Synapse configuration documentation: + # https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#redis instance_map: main: From 5556b491c1a6775a6592316067d12e35a743d45c Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 24 Oct 2025 11:19:44 +0200 Subject: [PATCH 071/149] Spruce up generated announcement text in the release script (#19089) --- changelog.d/19089.misc | 1 + scripts-dev/release.py | 21 +++++++++++++++++---- 2 files changed, 18 insertions(+), 4 deletions(-) create mode 100644 changelog.d/19089.misc diff --git a/changelog.d/19089.misc b/changelog.d/19089.misc new file mode 100644 index 0000000000..81c8775fd0 --- /dev/null +++ b/changelog.d/19089.misc @@ -0,0 +1 @@ +Update the release script's generated announcement text to include a title and extra text for RC's. 
\ No newline at end of file diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 16f1fc5f2a..c20237eab8 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -715,18 +715,31 @@ def _announce() -> None: current_version = get_package_version() tag_name = f"v{current_version}" + is_rc = "rc" in tag_name + + release_text = f""" +### Synapse {current_version} {"🧪" if is_rc else "🚀"} - click.echo( - f""" Hi everyone. Synapse {current_version} has just been released. +""" + if "rc" in tag_name: + release_text += ( + "\nThis is a release candidate. Please help us test it out " + "before the final release by deploying it to non-production environments, " + "and reporting any issues you find to " + "[the issue tracker](https://github.com/element-hq/synapse/issues). Thanks!\n" + ) + + release_text += f""" [notes](https://github.com/element-hq/synapse/releases/tag/{tag_name}) | \ [docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \ [debs](https://packages.matrix.org/debian/) | \ [pypi](https://pypi.org/project/matrix-synapse/{current_version}/)""" - ) - if "rc" in tag_name: + click.echo(release_text) + + if is_rc: click.echo( """ Announce the RC in From 72d0de9f3037b1ebf3d88596d79a4fa9fd10db05 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 24 Oct 2025 11:39:06 +0200 Subject: [PATCH 072/149] Don't exit the release script if there are uncommitted changes (#19088) --- changelog.d/19088.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/19088.misc diff --git a/changelog.d/19088.misc b/changelog.d/19088.misc new file mode 100644 index 0000000000..3224b3697d --- /dev/null +++ b/changelog.d/19088.misc @@ -0,0 +1 @@ +Don't immediately exit the release script if the checkout is dirty. Instead, allow the user to clear the dirty changes and retry. \ No newline at end of file From 45a042ae888ee30d93186245de5973a4f7352649 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 24 Oct 2025 11:39:29 +0200 Subject: [PATCH 073/149] Remove cibuildwheel `pp38*` skip selector (#19085) --- changelog.d/19085.misc | 1 + pyproject.toml | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/19085.misc diff --git a/changelog.d/19085.misc b/changelog.d/19085.misc new file mode 100644 index 0000000000..d48fad9d5d --- /dev/null +++ b/changelog.d/19085.misc @@ -0,0 +1 @@ +Remove `pp38*` skip selector from cibuildwheel to silence warning. \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index b0cb355c52..2b43f182ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -387,10 +387,10 @@ build-backend = "poetry.core.masonry.api" # Skip unsupported platforms (by us or by Rust). # See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets. # We skip: -# - CPython and PyPy 3.8: EOLed +# - CPython 3.8: EOLed # - musllinux i686: excluded to reduce number of wheels we build. # c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677 -skip = "cp38* pp38* *-musllinux_i686" +skip = "cp38* *-musllinux_i686" # Enable non-default builds. # "pypy" used to be included by default up until cibuildwheel 3. 
enable = "pypy" From a092d2053ad073512c7ff407c2d8bf495c46777a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 24 Oct 2025 12:19:04 +0200 Subject: [PATCH 074/149] Fix deprecation warning in release script (#19080) --- changelog.d/19080.misc | 1 + scripts-dev/release.py | 16 +++++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) create mode 100644 changelog.d/19080.misc diff --git a/changelog.d/19080.misc b/changelog.d/19080.misc new file mode 100644 index 0000000000..c738be3fe9 --- /dev/null +++ b/changelog.d/19080.misc @@ -0,0 +1 @@ +Update deprecated code in the release script to prevent a warning message from being printed. \ No newline at end of file diff --git a/scripts-dev/release.py b/scripts-dev/release.py index c20237eab8..111c184ccb 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -38,6 +38,7 @@ import attr import click import git import github +import github.Auth from click.exceptions import ClickException from git import GitCommandError, Repo from github import BadCredentialsException, Github @@ -429,7 +430,7 @@ def _publish(gh_token: str) -> None: if gh_token: # Test that the GH Token is valid before continuing. - gh = Github(gh_token) + gh = Github(auth=github.Auth.Token(token=gh_token)) gh.get_user() # Make sure we're in a git repo. @@ -442,7 +443,7 @@ def _publish(gh_token: str) -> None: return # Publish the draft release - gh = Github(gh_token) + gh = Github(auth=github.Auth.Token(token=gh_token)) gh_repo = gh.get_repo("element-hq/synapse") for release in gh_repo.get_releases(): if release.title == tag_name: @@ -487,8 +488,13 @@ def _upload(gh_token: Optional[str]) -> None: click.echo(f"Tag {tag_name} ({tag.commit}) is not currently checked out!") click.get_current_context().abort() + if gh_token: + gh = Github(auth=github.Auth.Token(token=gh_token)) + else: + # Use github anonymously. + gh = Github() + # Query all the assets corresponding to this release. - gh = Github(gh_token) gh_repo = gh.get_repo("element-hq/synapse") gh_release = gh_repo.get_release(tag_name) @@ -764,7 +770,7 @@ Ask the designated people to do the blog and tweets.""" def full(gh_token: str) -> None: if gh_token: # Test that the GH Token is valid before continuing. - gh = Github(gh_token) + gh = Github(auth=github.Auth.Token(token=gh_token)) gh.get_user() click.echo("1. If this is a security release, read the security wiki page.") @@ -850,7 +856,7 @@ def check_valid_gh_token(gh_token: Optional[str]) -> None: return try: - gh = Github(gh_token) + gh = Github(auth=github.Auth.Token(token=gh_token)) # We need to lookup name to trigger a request. _name = gh.get_user().name From 123eff1bc0eff6efae68f2c2408d343e9ed8d3d4 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 24 Oct 2025 12:19:40 +0200 Subject: [PATCH 075/149] Update poetry dev dependencies name (#19081) --- changelog.d/19081.misc | 1 + mypy.ini | 2 +- poetry.lock | 2 +- pyproject.toml | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/19081.misc diff --git a/changelog.d/19081.misc b/changelog.d/19081.misc new file mode 100644 index 0000000000..8518840fb6 --- /dev/null +++ b/changelog.d/19081.misc @@ -0,0 +1 @@ +Update the deprecated poetry development dependencies group name in `pyproject.toml`. 
\ No newline at end of file diff --git a/mypy.ini b/mypy.ini index ae903f858a..eefe405fe5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -69,7 +69,7 @@ warn_unused_ignores = False ;; https://github.com/python/typeshed/tree/master/stubs ;; and for each package `foo` there's a corresponding `types-foo` package on PyPI, ;; which we can pull in as a dev dependency by adding to `pyproject.toml`'s -;; `[tool.poetry.dev-dependencies]` list. +;; `[tool.poetry.group.dev.dependencies]` list. # https://github.com/lepture/authlib/issues/460 [mypy-authlib.*] diff --git a/poetry.lock b/poetry.lock index 1a26e23fad..efbd856b6d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3293,4 +3293,4 @@ url-preview = ["lxml"] [metadata] lock-version = "2.1" python-versions = "^3.9.0" -content-hash = "0058b93ca13a3f2a0cfc28485ddd8202c42d0015dbaf3b9692e43f37fe2a0be6" +content-hash = "5d71c862b924bc2af936cb6fef264a023213153543f738af31357deaf6de19b8" diff --git a/pyproject.toml b/pyproject.toml index 2b43f182ac..27265357d7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -325,7 +325,7 @@ all = [ # - systemd: this is a system-based requirement ] -[tool.poetry.dev-dependencies] +[tool.poetry.group.dev.dependencies] # We pin development dependencies in poetry.lock so that our tests don't start # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. From a2fa61d1b5d90be7252c255fd89598e0e3f1c777 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Oct 2025 13:17:35 +0100 Subject: [PATCH 076/149] Bump msgpack from 1.1.1 to 1.1.2 (#19050) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 125 +++++++++++++++++++++++++++------------------------- 1 file changed, 64 insertions(+), 61 deletions(-) diff --git a/poetry.lock b/poetry.lock index efbd856b6d..2589390c06 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1342,71 +1342,74 @@ files = [ [[package]] name = "msgpack" -version = "1.1.1" +version = "1.1.2" description = "MessagePack serializer" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "msgpack-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:353b6fc0c36fde68b661a12949d7d49f8f51ff5fa019c1e47c87c4ff34b080ed"}, - {file = "msgpack-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:79c408fcf76a958491b4e3b103d1c417044544b68e96d06432a189b43d1215c8"}, - {file = "msgpack-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78426096939c2c7482bf31ef15ca219a9e24460289c00dd0b94411040bb73ad2"}, - {file = "msgpack-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b17ba27727a36cb73aabacaa44b13090feb88a01d012c0f4be70c00f75048b4"}, - {file = "msgpack-1.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a17ac1ea6ec3c7687d70201cfda3b1e8061466f28f686c24f627cae4ea8efd0"}, - {file = "msgpack-1.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:88d1e966c9235c1d4e2afac21ca83933ba59537e2e2727a999bf3f515ca2af26"}, - {file = "msgpack-1.1.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f6d58656842e1b2ddbe07f43f56b10a60f2ba5826164910968f5933e5178af75"}, - {file = "msgpack-1.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:96decdfc4adcbc087f5ea7ebdcfd3dee9a13358cae6e81d54be962efc38f6338"}, - {file = 
"msgpack-1.1.1-cp310-cp310-win32.whl", hash = "sha256:6640fd979ca9a212e4bcdf6eb74051ade2c690b862b679bfcb60ae46e6dc4bfd"}, - {file = "msgpack-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:8b65b53204fe1bd037c40c4148d00ef918eb2108d24c9aaa20bc31f9810ce0a8"}, - {file = "msgpack-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:71ef05c1726884e44f8b1d1773604ab5d4d17729d8491403a705e649116c9558"}, - {file = "msgpack-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:36043272c6aede309d29d56851f8841ba907a1a3d04435e43e8a19928e243c1d"}, - {file = "msgpack-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a32747b1b39c3ac27d0670122b57e6e57f28eefb725e0b625618d1b59bf9d1e0"}, - {file = "msgpack-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a8b10fdb84a43e50d38057b06901ec9da52baac6983d3f709d8507f3889d43f"}, - {file = "msgpack-1.1.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba0c325c3f485dc54ec298d8b024e134acf07c10d494ffa24373bea729acf704"}, - {file = "msgpack-1.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:88daaf7d146e48ec71212ce21109b66e06a98e5e44dca47d853cbfe171d6c8d2"}, - {file = "msgpack-1.1.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8b55ea20dc59b181d3f47103f113e6f28a5e1c89fd5b67b9140edb442ab67f2"}, - {file = "msgpack-1.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4a28e8072ae9779f20427af07f53bbb8b4aa81151054e882aee333b158da8752"}, - {file = "msgpack-1.1.1-cp311-cp311-win32.whl", hash = "sha256:7da8831f9a0fdb526621ba09a281fadc58ea12701bc709e7b8cbc362feabc295"}, - {file = "msgpack-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fd1b58e1431008a57247d6e7cc4faa41c3607e8e7d4aaf81f7c29ea013cb458"}, - {file = "msgpack-1.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ae497b11f4c21558d95de9f64fff7053544f4d1a17731c866143ed6bb4591238"}, - {file = "msgpack-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:33be9ab121df9b6b461ff91baac6f2731f83d9b27ed948c5b9d1978ae28bf157"}, - {file = "msgpack-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f64ae8fe7ffba251fecb8408540c34ee9df1c26674c50c4544d72dbf792e5ce"}, - {file = "msgpack-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a494554874691720ba5891c9b0b39474ba43ffb1aaf32a5dac874effb1619e1a"}, - {file = "msgpack-1.1.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb643284ab0ed26f6957d969fe0dd8bb17beb567beb8998140b5e38a90974f6c"}, - {file = "msgpack-1.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d275a9e3c81b1093c060c3837e580c37f47c51eca031f7b5fb76f7b8470f5f9b"}, - {file = "msgpack-1.1.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4fd6b577e4541676e0cc9ddc1709d25014d3ad9a66caa19962c4f5de30fc09ef"}, - {file = "msgpack-1.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bb29aaa613c0a1c40d1af111abf025f1732cab333f96f285d6a93b934738a68a"}, - {file = "msgpack-1.1.1-cp312-cp312-win32.whl", hash = "sha256:870b9a626280c86cff9c576ec0d9cbcc54a1e5ebda9cd26dab12baf41fee218c"}, - {file = "msgpack-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:5692095123007180dca3e788bb4c399cc26626da51629a31d40207cb262e67f4"}, - {file = "msgpack-1.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3765afa6bd4832fc11c3749be4ba4b69a0e8d7b728f78e68120a157a4c5d41f0"}, - {file = "msgpack-1.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:8ddb2bcfd1a8b9e431c8d6f4f7db0773084e107730ecf3472f1dfe9ad583f3d9"}, - {file = "msgpack-1.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:196a736f0526a03653d829d7d4c5500a97eea3648aebfd4b6743875f28aa2af8"}, - {file = "msgpack-1.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d592d06e3cc2f537ceeeb23d38799c6ad83255289bb84c2e5792e5a8dea268a"}, - {file = "msgpack-1.1.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4df2311b0ce24f06ba253fda361f938dfecd7b961576f9be3f3fbd60e87130ac"}, - {file = "msgpack-1.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e4141c5a32b5e37905b5940aacbc59739f036930367d7acce7a64e4dec1f5e0b"}, - {file = "msgpack-1.1.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b1ce7f41670c5a69e1389420436f41385b1aa2504c3b0c30620764b15dded2e7"}, - {file = "msgpack-1.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4147151acabb9caed4e474c3344181e91ff7a388b888f1e19ea04f7e73dc7ad5"}, - {file = "msgpack-1.1.1-cp313-cp313-win32.whl", hash = "sha256:500e85823a27d6d9bba1d057c871b4210c1dd6fb01fbb764e37e4e8847376323"}, - {file = "msgpack-1.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:6d489fba546295983abd142812bda76b57e33d0b9f5d5b71c09a583285506f69"}, - {file = "msgpack-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bba1be28247e68994355e028dcd668316db30c1f758d3241a7b903ac78dcd285"}, - {file = "msgpack-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8f93dcddb243159c9e4109c9750ba5b335ab8d48d9522c5308cd05d7e3ce600"}, - {file = "msgpack-1.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fbbc0b906a24038c9958a1ba7ae0918ad35b06cb449d398b76a7d08470b0ed9"}, - {file = "msgpack-1.1.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:61e35a55a546a1690d9d09effaa436c25ae6130573b6ee9829c37ef0f18d5e78"}, - {file = "msgpack-1.1.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:1abfc6e949b352dadf4bce0eb78023212ec5ac42f6abfd469ce91d783c149c2a"}, - {file = "msgpack-1.1.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:996f2609ddf0142daba4cefd767d6db26958aac8439ee41db9cc0db9f4c4c3a6"}, - {file = "msgpack-1.1.1-cp38-cp38-win32.whl", hash = "sha256:4d3237b224b930d58e9d83c81c0dba7aacc20fcc2f89c1e5423aa0529a4cd142"}, - {file = "msgpack-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:da8f41e602574ece93dbbda1fab24650d6bf2a24089f9e9dbb4f5730ec1e58ad"}, - {file = "msgpack-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5be6b6bc52fad84d010cb45433720327ce886009d862f46b26d4d154001994b"}, - {file = "msgpack-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3a89cd8c087ea67e64844287ea52888239cbd2940884eafd2dcd25754fb72232"}, - {file = "msgpack-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d75f3807a9900a7d575d8d6674a3a47e9f227e8716256f35bc6f03fc597ffbf"}, - {file = "msgpack-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d182dac0221eb8faef2e6f44701812b467c02674a322c739355c39e94730cdbf"}, - {file = "msgpack-1.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b13fe0fb4aac1aa5320cd693b297fe6fdef0e7bea5518cbc2dd5299f873ae90"}, - {file = "msgpack-1.1.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:435807eeb1bc791ceb3247d13c79868deb22184e1fc4224808750f0d7d1affc1"}, - {file = 
"msgpack-1.1.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4835d17af722609a45e16037bb1d4d78b7bdf19d6c0128116d178956618c4e88"}, - {file = "msgpack-1.1.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a8ef6e342c137888ebbfb233e02b8fbd689bb5b5fcc59b34711ac47ebd504478"}, - {file = "msgpack-1.1.1-cp39-cp39-win32.whl", hash = "sha256:61abccf9de335d9efd149e2fff97ed5974f2481b3353772e8e2dd3402ba2bd57"}, - {file = "msgpack-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:40eae974c873b2992fd36424a5d9407f93e97656d999f43fca9d29f820899084"}, - {file = "msgpack-1.1.1.tar.gz", hash = "sha256:77b79ce34a2bdab2594f490c8e80dd62a02d650b91a75159a63ec413b8d104cd"}, + {file = "msgpack-1.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0051fffef5a37ca2cd16978ae4f0aef92f164df86823871b5162812bebecd8e2"}, + {file = "msgpack-1.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a605409040f2da88676e9c9e5853b3449ba8011973616189ea5ee55ddbc5bc87"}, + {file = "msgpack-1.1.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b696e83c9f1532b4af884045ba7f3aa741a63b2bc22617293a2c6a7c645f251"}, + {file = "msgpack-1.1.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:365c0bbe981a27d8932da71af63ef86acc59ed5c01ad929e09a0b88c6294e28a"}, + {file = "msgpack-1.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:41d1a5d875680166d3ac5c38573896453bbbea7092936d2e107214daf43b1d4f"}, + {file = "msgpack-1.1.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:354e81bcdebaab427c3df4281187edc765d5d76bfb3a7c125af9da7a27e8458f"}, + {file = "msgpack-1.1.2-cp310-cp310-win32.whl", hash = "sha256:e64c8d2f5e5d5fda7b842f55dec6133260ea8f53c4257d64494c534f306bf7a9"}, + {file = "msgpack-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:db6192777d943bdaaafb6ba66d44bf65aa0e9c5616fa1d2da9bb08828c6b39aa"}, + {file = "msgpack-1.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e86a607e558d22985d856948c12a3fa7b42efad264dca8a3ebbcfa2735d786c"}, + {file = "msgpack-1.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:283ae72fc89da59aa004ba147e8fc2f766647b1251500182fac0350d8af299c0"}, + {file = "msgpack-1.1.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:61c8aa3bd513d87c72ed0b37b53dd5c5a0f58f2ff9f26e1555d3bd7948fb7296"}, + {file = "msgpack-1.1.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:454e29e186285d2ebe65be34629fa0e8605202c60fbc7c4c650ccd41870896ef"}, + {file = "msgpack-1.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7bc8813f88417599564fafa59fd6f95be417179f76b40325b500b3c98409757c"}, + {file = "msgpack-1.1.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bafca952dc13907bdfdedfc6a5f579bf4f292bdd506fadb38389afa3ac5b208e"}, + {file = "msgpack-1.1.2-cp311-cp311-win32.whl", hash = "sha256:602b6740e95ffc55bfb078172d279de3773d7b7db1f703b2f1323566b878b90e"}, + {file = "msgpack-1.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:d198d275222dc54244bf3327eb8cbe00307d220241d9cec4d306d49a44e85f68"}, + {file = "msgpack-1.1.2-cp311-cp311-win_arm64.whl", hash = "sha256:86f8136dfa5c116365a8a651a7d7484b65b13339731dd6faebb9a0242151c406"}, + {file = "msgpack-1.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:70a0dff9d1f8da25179ffcf880e10cf1aad55fdb63cd59c9a49a1b82290062aa"}, + {file = "msgpack-1.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:446abdd8b94b55c800ac34b102dffd2f6aa0ce643c55dfc017ad89347db3dbdb"}, + {file = "msgpack-1.1.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c63eea553c69ab05b6747901b97d620bb2a690633c77f23feb0c6a947a8a7b8f"}, + {file = "msgpack-1.1.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:372839311ccf6bdaf39b00b61288e0557916c3729529b301c52c2d88842add42"}, + {file = "msgpack-1.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2929af52106ca73fcb28576218476ffbb531a036c2adbcf54a3664de124303e9"}, + {file = "msgpack-1.1.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be52a8fc79e45b0364210eef5234a7cf8d330836d0a64dfbb878efa903d84620"}, + {file = "msgpack-1.1.2-cp312-cp312-win32.whl", hash = "sha256:1fff3d825d7859ac888b0fbda39a42d59193543920eda9d9bea44d958a878029"}, + {file = "msgpack-1.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:1de460f0403172cff81169a30b9a92b260cb809c4cb7e2fc79ae8d0510c78b6b"}, + {file = "msgpack-1.1.2-cp312-cp312-win_arm64.whl", hash = "sha256:be5980f3ee0e6bd44f3a9e9dea01054f175b50c3e6cdb692bc9424c0bbb8bf69"}, + {file = "msgpack-1.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4efd7b5979ccb539c221a4c4e16aac1a533efc97f3b759bb5a5ac9f6d10383bf"}, + {file = "msgpack-1.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:42eefe2c3e2af97ed470eec850facbe1b5ad1d6eacdbadc42ec98e7dcf68b4b7"}, + {file = "msgpack-1.1.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1fdf7d83102bf09e7ce3357de96c59b627395352a4024f6e2458501f158bf999"}, + {file = "msgpack-1.1.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fac4be746328f90caa3cd4bc67e6fe36ca2bf61d5c6eb6d895b6527e3f05071e"}, + {file = "msgpack-1.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fffee09044073e69f2bad787071aeec727183e7580443dfeb8556cbf1978d162"}, + {file = "msgpack-1.1.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5928604de9b032bc17f5099496417f113c45bc6bc21b5c6920caf34b3c428794"}, + {file = "msgpack-1.1.2-cp313-cp313-win32.whl", hash = "sha256:a7787d353595c7c7e145e2331abf8b7ff1e6673a6b974ded96e6d4ec09f00c8c"}, + {file = "msgpack-1.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:a465f0dceb8e13a487e54c07d04ae3ba131c7c5b95e2612596eafde1dccf64a9"}, + {file = "msgpack-1.1.2-cp313-cp313-win_arm64.whl", hash = "sha256:e69b39f8c0aa5ec24b57737ebee40be647035158f14ed4b40e6f150077e21a84"}, + {file = "msgpack-1.1.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e23ce8d5f7aa6ea6d2a2b326b4ba46c985dbb204523759984430db7114f8aa00"}, + {file = "msgpack-1.1.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:6c15b7d74c939ebe620dd8e559384be806204d73b4f9356320632d783d1f7939"}, + {file = "msgpack-1.1.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99e2cb7b9031568a2a5c73aa077180f93dd2e95b4f8d3b8e14a73ae94a9e667e"}, + {file = "msgpack-1.1.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:180759d89a057eab503cf62eeec0aa61c4ea1200dee709f3a8e9397dbb3b6931"}, + {file = "msgpack-1.1.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:04fb995247a6e83830b62f0b07bf36540c213f6eac8e851166d8d86d83cbd014"}, + {file = "msgpack-1.1.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8e22ab046fa7ede9e36eeb4cfad44d46450f37bb05d5ec482b02868f451c95e2"}, + {file = "msgpack-1.1.2-cp314-cp314-win32.whl", 
hash = "sha256:80a0ff7d4abf5fecb995fcf235d4064b9a9a8a40a3ab80999e6ac1e30b702717"}, + {file = "msgpack-1.1.2-cp314-cp314-win_amd64.whl", hash = "sha256:9ade919fac6a3e7260b7f64cea89df6bec59104987cbea34d34a2fa15d74310b"}, + {file = "msgpack-1.1.2-cp314-cp314-win_arm64.whl", hash = "sha256:59415c6076b1e30e563eb732e23b994a61c159cec44deaf584e5cc1dd662f2af"}, + {file = "msgpack-1.1.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:897c478140877e5307760b0ea66e0932738879e7aa68144d9b78ea4c8302a84a"}, + {file = "msgpack-1.1.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a668204fa43e6d02f89dbe79a30b0d67238d9ec4c5bd8a940fc3a004a47b721b"}, + {file = "msgpack-1.1.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5559d03930d3aa0f3aacb4c42c776af1a2ace2611871c84a75afe436695e6245"}, + {file = "msgpack-1.1.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:70c5a7a9fea7f036b716191c29047374c10721c389c21e9ffafad04df8c52c90"}, + {file = "msgpack-1.1.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f2cb069d8b981abc72b41aea1c580ce92d57c673ec61af4c500153a626cb9e20"}, + {file = "msgpack-1.1.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d62ce1f483f355f61adb5433ebfd8868c5f078d1a52d042b0a998682b4fa8c27"}, + {file = "msgpack-1.1.2-cp314-cp314t-win32.whl", hash = "sha256:1d1418482b1ee984625d88aa9585db570180c286d942da463533b238b98b812b"}, + {file = "msgpack-1.1.2-cp314-cp314t-win_amd64.whl", hash = "sha256:5a46bf7e831d09470ad92dff02b8b1ac92175ca36b087f904a0519857c6be3ff"}, + {file = "msgpack-1.1.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d99ef64f349d5ec3293688e91486c5fdb925ed03807f64d98d205d2713c60b46"}, + {file = "msgpack-1.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ea5405c46e690122a76531ab97a079e184c0daf491e588592d6a23d3e32af99e"}, + {file = "msgpack-1.1.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9fba231af7a933400238cb357ecccf8ab5d51535ea95d94fc35b7806218ff844"}, + {file = "msgpack-1.1.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a8f6e7d30253714751aa0b0c84ae28948e852ee7fb0524082e6716769124bc23"}, + {file = "msgpack-1.1.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:94fd7dc7d8cb0a54432f296f2246bc39474e017204ca6f4ff345941d4ed285a7"}, + {file = "msgpack-1.1.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:350ad5353a467d9e3b126d8d1b90fe05ad081e2e1cef5753f8c345217c37e7b8"}, + {file = "msgpack-1.1.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6bde749afe671dc44893f8d08e83bf475a1a14570d67c4bb5cec5573463c8833"}, + {file = "msgpack-1.1.2-cp39-cp39-win32.whl", hash = "sha256:ad09b984828d6b7bb52d1d1d0c9be68ad781fa004ca39216c8a1e63c0f34ba3c"}, + {file = "msgpack-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:67016ae8c8965124fdede9d3769528ad8284f14d635337ffa6a713a580f6c030"}, + {file = "msgpack-1.1.2.tar.gz", hash = "sha256:3b60763c1373dd60f398488069bcdc703cd08a711477b5d480eecc9f9626f47e"}, ] [[package]] From 1419b35a405a4207eeb3a6e3eaaade3bc136603f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Oct 2025 13:18:25 +0100 Subject: [PATCH 077/149] Bump ijson from 3.4.0 to 3.4.0.post0 (#19051) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 182 +++++++++++++++++++++++++++------------------------- 1 file changed, 96 insertions(+), 86 deletions(-) 
diff --git a/poetry.lock b/poetry.lock index 2589390c06..780d6528dd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -688,97 +688,107 @@ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2 [[package]] name = "ijson" -version = "3.4.0" +version = "3.4.0.post0" description = "Iterative JSON parser with standard Python iterator interfaces" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "ijson-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e27e50f6dcdee648f704abc5d31b976cd2f90b4642ed447cf03296d138433d09"}, - {file = "ijson-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2a753be681ac930740a4af9c93cfb4edc49a167faed48061ea650dc5b0f406f1"}, - {file = "ijson-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a07c47aed534e0ec198e6a2d4360b259d32ac654af59c015afc517ad7973b7fb"}, - {file = "ijson-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c55f48181e11c597cd7146fb31edc8058391201ead69f8f40d2ecbb0b3e4fc6"}, - {file = "ijson-3.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd5669f96f79d8a2dd5ae81cbd06770a4d42c435fd4a75c74ef28d9913b697d"}, - {file = "ijson-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e3ddd46d16b8542c63b1b8af7006c758d4e21cc1b86122c15f8530fae773461"}, - {file = "ijson-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1504cec7fe04be2bb0cc33b50c9dd3f83f98c0540ad4991d4017373b7853cfe6"}, - {file = "ijson-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:2f2ff456adeb216603e25d7915f10584c1b958b6eafa60038d76d08fc8a5fb06"}, - {file = "ijson-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0ab00d75d61613a125fbbb524551658b1ad6919a52271ca16563ca5bc2737bb1"}, - {file = "ijson-3.4.0-cp310-cp310-win32.whl", hash = "sha256:ada421fd59fe2bfa4cfa64ba39aeba3f0753696cdcd4d50396a85f38b1d12b01"}, - {file = "ijson-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:8c75e82cec05d00ed3a4af5f4edf08f59d536ed1a86ac7e84044870872d82a33"}, - {file = "ijson-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9e369bf5a173ca51846c243002ad8025d32032532523b06510881ecc8723ee54"}, - {file = "ijson-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:26e7da0a3cd2a56a1fde1b34231867693f21c528b683856f6691e95f9f39caec"}, - {file = "ijson-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1c28c7f604729be22aa453e604e9617b665fa0c24cd25f9f47a970e8130c571a"}, - {file = "ijson-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bed8bcb84d3468940f97869da323ba09ae3e6b950df11dea9b62e2b231ca1e3"}, - {file = "ijson-3.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:296bc824f4088f2af814aaf973b0435bc887ce3d9f517b1577cc4e7d1afb1cb7"}, - {file = "ijson-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8145f8f40617b6a8aa24e28559d0adc8b889e56a203725226a8a60fa3501073f"}, - {file = "ijson-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b674a97bd503ea21bc85103e06b6493b1b2a12da3372950f53e1c664566a33a4"}, - {file = "ijson-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8bc731cf1c3282b021d3407a601a5a327613da9ad3c4cecb1123232623ae1826"}, - {file = "ijson-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:42ace5e940e0cf58c9de72f688d6829ddd815096d07927ee7e77df2648006365"}, - {file = "ijson-3.4.0-cp311-cp311-win32.whl", hash = 
"sha256:5be39a0df4cd3f02b304382ea8885391900ac62e95888af47525a287c50005e9"}, - {file = "ijson-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:0b1be1781792291e70d2e177acf564ec672a7907ba74f313583bdf39fe81f9b7"}, - {file = "ijson-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:956b148f88259a80a9027ffbe2d91705fae0c004fbfba3e5a24028fbe72311a9"}, - {file = "ijson-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:06b89960f5c721106394c7fba5760b3f67c515b8eb7d80f612388f5eca2f4621"}, - {file = "ijson-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9a0bb591cf250dd7e9dfab69d634745a7f3272d31cfe879f9156e0a081fd97ee"}, - {file = "ijson-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72e92de999977f4c6b660ffcf2b8d59604ccd531edcbfde05b642baf283e0de8"}, - {file = "ijson-3.4.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e9602157a5b869d44b6896e64f502c712a312fcde044c2e586fccb85d3e316e"}, - {file = "ijson-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e83660edb931a425b7ff662eb49db1f10d30ca6d4d350e5630edbed098bc01"}, - {file = "ijson-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:49bf8eac1c7b7913073865a859c215488461f7591b4fa6a33c14b51cb73659d0"}, - {file = "ijson-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:160b09273cb42019f1811469508b0a057d19f26434d44752bde6f281da6d3f32"}, - {file = "ijson-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2019ff4e6f354aa00c76c8591bd450899111c61f2354ad55cc127e2ce2492c44"}, - {file = "ijson-3.4.0-cp312-cp312-win32.whl", hash = "sha256:931c007bf6bb8330705429989b2deed6838c22b63358a330bf362b6e458ba0bf"}, - {file = "ijson-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:71523f2b64cb856a820223e94d23e88369f193017ecc789bb4de198cc9d349eb"}, - {file = "ijson-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e8d96f88d75196a61c9d9443de2b72c2d4a7ba9456ff117b57ae3bba23a54256"}, - {file = "ijson-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c45906ce2c1d3b62f15645476fc3a6ca279549127f01662a39ca5ed334a00cf9"}, - {file = "ijson-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4ab4bc2119b35c4363ea49f29563612237cae9413d2fbe54b223be098b97bc9e"}, - {file = "ijson-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97b0a9b5a15e61dfb1f14921ea4e0dba39f3a650df6d8f444ddbc2b19b479ff1"}, - {file = "ijson-3.4.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3047bb994dabedf11de11076ed1147a307924b6e5e2df6784fb2599c4ad8c60"}, - {file = "ijson-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68c83161b052e9f5dc8191acbc862bb1e63f8a35344cb5cd0db1afd3afd487a6"}, - {file = "ijson-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1eebd9b6c20eb1dffde0ae1f0fbb4aeacec2eb7b89adb5c7c0449fc9fd742760"}, - {file = "ijson-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:13fb6d5c35192c541421f3ee81239d91fc15a8d8f26c869250f941f4b346a86c"}, - {file = "ijson-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:28b7196ff7b37c4897c547a28fa4876919696739fc91c1f347651c9736877c69"}, - {file = "ijson-3.4.0-cp313-cp313-win32.whl", hash = "sha256:3c2691d2da42629522140f77b99587d6f5010440d58d36616f33bc7bdc830cc3"}, - {file = "ijson-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:c4554718c275a044c47eb3874f78f2c939f300215d9031e785a6711cc51b83fc"}, - {file = "ijson-3.4.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = 
"sha256:915a65e3f3c0eee2ea937bc62aaedb6c14cc1e8f0bb9f3f4fb5a9e2bbfa4b480"}, - {file = "ijson-3.4.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:afbe9748707684b6c5adc295c4fdcf27765b300aec4d484e14a13dca4e5c0afa"}, - {file = "ijson-3.4.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d823f8f321b4d8d5fa020d0a84f089fec5d52b7c0762430476d9f8bf95bbc1a9"}, - {file = "ijson-3.4.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8a0a2c54f3becf76881188beefd98b484b1d3bd005769a740d5b433b089fa23"}, - {file = "ijson-3.4.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ced19a83ab09afa16257a0b15bc1aa888dbc555cb754be09d375c7f8d41051f2"}, - {file = "ijson-3.4.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8100f9885eff1f38d35cef80ef759a1bbf5fc946349afa681bd7d0e681b7f1a0"}, - {file = "ijson-3.4.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d7bcc3f7f21b0f703031ecd15209b1284ea51b2a329d66074b5261de3916c1eb"}, - {file = "ijson-3.4.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2dcb190227b09dd171bdcbfe4720fddd574933c66314818dfb3960c8a6246a77"}, - {file = "ijson-3.4.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:eda4cfb1d49c6073a901735aaa62e39cb7ab47f3ad7bb184862562f776f1fa8a"}, - {file = "ijson-3.4.0-cp313-cp313t-win32.whl", hash = "sha256:0772638efa1f3b72b51736833404f1cbd2f5beeb9c1a3d392e7d385b9160cba7"}, - {file = "ijson-3.4.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3d8a0d67f36e4fb97c61a724456ef0791504b16ce6f74917a31c2e92309bbeb9"}, - {file = "ijson-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8a990401dc7350c1739f42187823e68d2ef6964b55040c6e9f3a29461f9929e2"}, - {file = "ijson-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:80f50e0f5da4cd6b65e2d8ff38cb61b26559608a05dd3a3f9cfa6f19848e6f22"}, - {file = "ijson-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2d9ca52f5650d820a2e7aa672dea1c560f609e165337e5b3ed7cf56d696bf309"}, - {file = "ijson-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:940c8c5fd20fb89b56dde9194a4f1c7b779149f1ab26af6d8dc1da51a95d26dd"}, - {file = "ijson-3.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41dbb525666017ad856ac9b4f0f4b87d3e56b7dfde680d5f6d123556b22e2172"}, - {file = "ijson-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9f84f5e2eea5c2d271c97221c382db005534294d1175ddd046a12369617c41c"}, - {file = "ijson-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0cd126c11835839bba8ac0baaba568f67d701fc4f717791cf37b10b74a2ebd7"}, - {file = "ijson-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f9a9d3bbc6d91c24a2524a189d2aca703cb5f7e8eb34ad0aff3c91702404a983"}, - {file = "ijson-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:56679ee133470d0f1f598a8ad109d760fcfebeef4819531e29335aefb7e4cb1a"}, - {file = "ijson-3.4.0-cp39-cp39-win32.whl", hash = "sha256:583c15ded42ba80104fa1d0fa0dfdd89bb47922f3bb893a931bb843aeb55a3f3"}, - {file = "ijson-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:4563e603e56f4451572d96b47311dffef5b933d825f3417881d4d3630c6edac2"}, - {file = "ijson-3.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:54e989c35dba9cf163d532c14bcf0c260897d5f465643f0cd1fba9c908bed7ef"}, - {file = "ijson-3.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:494eeb8e87afef22fbb969a4cb81ac2c535f30406f334fb6136e9117b0bb5380"}, - {file = 
"ijson-3.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81603de95de1688958af65cd2294881a4790edae7de540b70c65c8253c5dc44a"}, - {file = "ijson-3.4.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8524be12c1773e1be466034cc49c1ecbe3d5b47bb86217bd2a57f73f970a6c19"}, - {file = "ijson-3.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17994696ec895d05e0cfa21b11c68c920c82634b4a3d8b8a1455d6fe9fdee8f7"}, - {file = "ijson-3.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0b67727aaee55d43b2e82b6a866c3cbcb2b66a5e9894212190cbd8773d0d9857"}, - {file = "ijson-3.4.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cdc8c5ca0eec789ed99db29c68012dda05027af0860bb360afd28d825238d69d"}, - {file = "ijson-3.4.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:8e6b44b6ec45d5b1a0ee9d97e0e65ab7f62258727004cbbe202bf5f198bc21f7"}, - {file = "ijson-3.4.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b51e239e4cb537929796e840d349fc731fdc0d58b1a0683ce5465ad725321e0f"}, - {file = "ijson-3.4.0-pp311-pypy311_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed05d43ec02be8ddb1ab59579761f6656b25d241a77fd74f4f0f7ec09074318a"}, - {file = "ijson-3.4.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfeca1aaa59d93fd0a3718cbe5f7ef0effff85cf837e0bceb71831a47f39cc14"}, - {file = "ijson-3.4.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:7ca72ca12e9a1dd4252c97d952be34282907f263f7e28fcdff3a01b83981e837"}, - {file = "ijson-3.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0f79b2cd52bd220fff83b3ee4ef89b54fd897f57cc8564a6d8ab7ac669de3930"}, - {file = "ijson-3.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d16eed737610ad5ad8989b5864fbe09c64133129734e840c29085bb0d497fb03"}, - {file = "ijson-3.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b3aac1d7a27e1e3bdec5bd0689afe55c34aa499baa06a80852eda31f1ffa6dc"}, - {file = "ijson-3.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:784ae654aa9851851e87f323e9429b20b58a5399f83e6a7e348e080f2892081f"}, - {file = "ijson-3.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d05bd8fa6a8adefb32bbf7b993d2a2f4507db08453dd1a444c281413a6d9685"}, - {file = "ijson-3.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b5a05fd935cc28786b88c16976313086cd96414c6a3eb0a3822c47ab48b1793e"}, - {file = "ijson-3.4.0.tar.gz", hash = "sha256:5f74dcbad9d592c428d3ca3957f7115a42689ee7ee941458860900236ae9bb13"}, + {file = "ijson-3.4.0.post0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8f904a405b58a04b6ef0425f1babbc5c65feb66b0a4cc7f214d4ad7de106f77d"}, + {file = "ijson-3.4.0.post0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a07dcc1a8a1ddd76131a7c7528cbd12951c2e34eb3c3d63697b905069a2d65b1"}, + {file = "ijson-3.4.0.post0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ab3be841b8c430c1883b8c0775eb551f21b5500c102c7ee828afa35ddd701bdd"}, + {file = "ijson-3.4.0.post0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:43059ae0d657b11c5ddb11d149bc400c44f9e514fb8663057e9b2ea4d8d44c1f"}, + {file = "ijson-3.4.0.post0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0d3e82963096579d1385c06b2559570d7191e225664b7fa049617da838e1a4a4"}, + {file = 
"ijson-3.4.0.post0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:461ce4e87a21a261b60c0a68a2ad17c7dd214f0b90a0bec7e559a66b6ae3bd7e"}, + {file = "ijson-3.4.0.post0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:890cf6610c9554efcb9765a93e368efeb5bb6135f59ce0828d92eaefff07fde5"}, + {file = "ijson-3.4.0.post0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6793c29a5728e7751a7df01be58ba7da9b9690c12bf79d32094c70a908fa02b9"}, + {file = "ijson-3.4.0.post0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a56b6674d7feec0401c91f86c376f4e3d8ff8129128a8ad21ca43ec0b1242f79"}, + {file = "ijson-3.4.0.post0-cp310-cp310-win32.whl", hash = "sha256:01767fcbd75a5fa5a626069787b41f04681216b798510d5f63bcf66884386368"}, + {file = "ijson-3.4.0.post0-cp310-cp310-win_amd64.whl", hash = "sha256:09127c06e5dec753feb9e4b8c5f6a23603d1cd672d098159a17e53a73b898eec"}, + {file = "ijson-3.4.0.post0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0b473112e72c0c506da425da3278367b6680f340ecc093084693a1e819d28435"}, + {file = "ijson-3.4.0.post0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:043f9b7cf9cc744263a78175e769947733710d2412d25180df44b1086b23ebd5"}, + {file = "ijson-3.4.0.post0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b55e49045f4c8031f3673f56662fd828dc9e8d65bd3b03a9420dda0d370e64ba"}, + {file = "ijson-3.4.0.post0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:11f13b73194ea2a5a8b4a2863f25b0b4624311f10db3a75747b510c4958179b0"}, + {file = "ijson-3.4.0.post0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:659acb2843433e080c271ecedf7d19c71adde1ee5274fc7faa2fec0a793f9f1c"}, + {file = "ijson-3.4.0.post0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:deda4cfcaafa72ca3fa845350045b1d0fef9364ec9f413241bb46988afbe6ee6"}, + {file = "ijson-3.4.0.post0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47352563e8c594360bacee2e0753e97025f0861234722d02faace62b1b6d2b2a"}, + {file = "ijson-3.4.0.post0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5a48b9486242d1295abe7fd0fbb6308867da5ca3f69b55c77922a93c2b6847aa"}, + {file = "ijson-3.4.0.post0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9c0886234d1fae15cf4581a430bdba03d79251c1ab3b07e30aa31b13ef28d01c"}, + {file = "ijson-3.4.0.post0-cp311-cp311-win32.whl", hash = "sha256:fecae19b5187d92900c73debb3a979b0b3290a53f85df1f8f3c5ba7d1e9fb9cb"}, + {file = "ijson-3.4.0.post0-cp311-cp311-win_amd64.whl", hash = "sha256:b39dbf87071f23a23c8077eea2ae7cfeeca9ff9ffec722dfc8b5f352e4dd729c"}, + {file = "ijson-3.4.0.post0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b607a500fca26101be47d2baf7cddb457b819ab60a75ce51ed1092a40da8b2f9"}, + {file = "ijson-3.4.0.post0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4827d9874a6a81625412c59f7ca979a84d01f7f6bfb3c6d4dc4c46d0382b14e0"}, + {file = "ijson-3.4.0.post0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d4d4afec780881edb2a0d2dd40b1cdbe246e630022d5192f266172a0307986a7"}, + {file = "ijson-3.4.0.post0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:432fb60ffb952926f9438e0539011e2dfcd108f8426ee826ccc6173308c3ff2c"}, + {file = "ijson-3.4.0.post0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:54a0e3e05d9a0c95ecba73d9579f146cf6d5c5874116c849dba2d39a5f30380e"}, + {file = 
"ijson-3.4.0.post0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05807edc0bcbd222dc6ea32a2b897f0c81dc7f12c8580148bc82f6d7f5e7ec7b"}, + {file = "ijson-3.4.0.post0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a5269af16f715855d9864937f9dd5c348ca1ac49cee6a2c7a1b7091c159e874f"}, + {file = "ijson-3.4.0.post0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b200df83c901f5bfa416d069ac71077aa1608f854a4c50df1b84ced560e9c9ec"}, + {file = "ijson-3.4.0.post0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6458bd8e679cdff459a0a5e555b107c3bbacb1f382da3fe0f40e392871eb518d"}, + {file = "ijson-3.4.0.post0-cp312-cp312-win32.whl", hash = "sha256:55f7f656b5986326c978cbb3a9eea9e33f3ef6ecc4535b38f1d452c731da39ab"}, + {file = "ijson-3.4.0.post0-cp312-cp312-win_amd64.whl", hash = "sha256:e15833dcf6f6d188fdc624a31cd0520c3ba21b6855dc304bc7c1a8aeca02d4ac"}, + {file = "ijson-3.4.0.post0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:114ed248166ac06377e87a245a158d6b98019d2bdd3bb93995718e0bd996154f"}, + {file = "ijson-3.4.0.post0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ffb21203736b08fe27cb30df6a4f802fafb9ef7646c5ff7ef79569b63ea76c57"}, + {file = "ijson-3.4.0.post0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:07f20ecd748602ac7f18c617637e53bd73ded7f3b22260bba3abe401a7fc284e"}, + {file = "ijson-3.4.0.post0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:27aa193d47ffc6bc4e45453896ad98fb089a367e8283b973f1fe5c0198b60b4e"}, + {file = "ijson-3.4.0.post0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ccddb2894eb7af162ba43b9475ac5825d15d568832f82eb8783036e5d2aebd42"}, + {file = "ijson-3.4.0.post0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:61ab0b8c5bf707201dc67e02c116f4b6545c4afd7feb2264b989d242d9c4348a"}, + {file = "ijson-3.4.0.post0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:254cfb8c124af68327a0e7a49b50bbdacafd87c4690a3d62c96eb01020a685ef"}, + {file = "ijson-3.4.0.post0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:04ac9ca54db20f82aeda6379b5f4f6112fdb150d09ebce04affeab98a17b4ed3"}, + {file = "ijson-3.4.0.post0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a603d7474bf35e7b3a8e49c8dabfc4751841931301adff3f3318171c4e407f32"}, + {file = "ijson-3.4.0.post0-cp313-cp313-win32.whl", hash = "sha256:ec5bb1520cb212ebead7dba048bb9b70552c3440584f83b01b0abc96862e2a09"}, + {file = "ijson-3.4.0.post0-cp313-cp313-win_amd64.whl", hash = "sha256:3505dff18bdeb8b171eb28af6df34857e2be80dc01e2e3b624e77215ad58897f"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:45a0b1c833ed2620eaf8da958f06ac8351c59e5e470e078400d23814670ed708"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7809ec8c8f40228edaaa089f33e811dff4c5b8509702652870d3f286c9682e27"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:cf4a34c2cfe852aee75c89c05b0a4531c49dc0be27eeed221afd6fbf9c3e149c"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a39d5d36067604b26b78de70b8951c90e9272450642661fe531a8f7a6936a7fa"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83fc738d81c9ea686b452996110b8a6678296c481e0546857db24785bff8da92"}, + {file = 
"ijson-3.4.0.post0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b2a81aee91633868f5b40280e2523f7c5392e920a5082f47c5e991e516b483f6"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:56169e298c5a2e7196aaa55da78ddc2415876a74fe6304f81b1eb0d3273346f7"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:eeb9540f0b1a575cbb5968166706946458f98c16e7accc6f2fe71efa29864241"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ba3478ff0bb49d7ba88783f491a99b6e3fa929c930ab062d2bb7837e6a38fe88"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-win32.whl", hash = "sha256:b005ce84e82f28b00bf777a464833465dfe3efa43a0a26c77b5ac40723e1a728"}, + {file = "ijson-3.4.0.post0-cp313-cp313t-win_amd64.whl", hash = "sha256:fe9c84c9b1c8798afa407be1cea1603401d99bfc7c34497e19f4f5e5ddc9b441"}, + {file = "ijson-3.4.0.post0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da6a21b88cbf5ecbc53371283988d22c9643aa71ae2873bbeaefd2dea3b6160b"}, + {file = "ijson-3.4.0.post0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cf24a48a1c3ca9d44a04feb59ccefeb9aa52bb49b9cb70ad30518c25cce74bb7"}, + {file = "ijson-3.4.0.post0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d14427d366f95f21adcb97d0ed1f6d30f6fdc04d0aa1e4de839152c50c2b8d65"}, + {file = "ijson-3.4.0.post0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:339d49f6c5d24051c85d9226be96d2d56e633cb8b7d09dd8099de8d8b51a97e2"}, + {file = "ijson-3.4.0.post0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7206afcb396aaef66c2b066997b4e9d9042c4b7d777f4d994e9cec6d322c2fe6"}, + {file = "ijson-3.4.0.post0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c8dd327da225887194fe8b93f2b3c9c256353e14a6b9eefc940ed17fde38f5b8"}, + {file = "ijson-3.4.0.post0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4810546e66128af51fd4a0c9a640e84e8508e9c15c4f247d8a3e3253b20e1465"}, + {file = "ijson-3.4.0.post0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:103a0838061297d063bca81d724b0958b616f372bd893bbc278320152252c652"}, + {file = "ijson-3.4.0.post0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:40007c977e230e04118b27322f25a72ae342a3d61464b2057fcd9b21eeb7427a"}, + {file = "ijson-3.4.0.post0-cp314-cp314-win32.whl", hash = "sha256:f932969fc1fd4449ca141cf5f47ff357656a154a361f28d9ebca0badc5b02297"}, + {file = "ijson-3.4.0.post0-cp314-cp314-win_amd64.whl", hash = "sha256:3ed19b1e4349240773a8ce4a4bfa450892d4a57949c02c515cd6be5a46b7696a"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:226447e40ca9340a39ed07d68ea02ee14b52cb4fe649425b256c1f0073531c83"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2c88f0669d45d4b1aa017c9b68d378e7cd15d188dfb6f0209adc78b7f45590a7"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:56b3089dc28c12492d92cc4896d2be585a89ecae34e25d08c1df88f21815cb50"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c117321cfa7b749cc1213f9b4c80dc958f0a206df98ec038ae4bcbbdb8463a15"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8311f48db6a33116db5c81682f08b6e2405501a4b4e460193ae69fec3cd1f87a"}, + {file = 
"ijson-3.4.0.post0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:91c61a3e63e04da648737e6b4abd537df1b46fb8cdf3219b072e790bb3c1a46b"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1709171023ce82651b2f132575c2e6282e47f64ad67bd3260da476418d0e7895"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:5f0a72b1e3c0f78551670c12b2fdc1bf05f2796254d9c2055ba319bec2216020"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b982a3597b0439ce9c8f4cfc929d86c6ed43907908be1e8463a34dc35fe5b258"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-win32.whl", hash = "sha256:4e39bfdc36b0b460ef15a06550a6a385c64c81f7ac205ccff39bd45147918912"}, + {file = "ijson-3.4.0.post0-cp314-cp314t-win_amd64.whl", hash = "sha256:17e45262a5ddef39894013fb1548ee7094e444c8389eb1a97f86708b19bea03e"}, + {file = "ijson-3.4.0.post0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:35eb2760a42fd9461358b4be131287587b49ff504fc37fa3014dca6c27c343f4"}, + {file = "ijson-3.4.0.post0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f82ca7abfb3ef3cf2194c71dad634572bcccd62a5dd466649f78fe73d492c860"}, + {file = "ijson-3.4.0.post0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:97f5ef3d839fc24b0ad47e8b31b4751ae72c5d83606e3ee4c92bb25965c03a4f"}, + {file = "ijson-3.4.0.post0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a2c873742e9f7e21378516217d81d6fa11d34bae860ed364832c00ab1dbf37ed"}, + {file = "ijson-3.4.0.post0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2f8b9ffa2c2dfe3289da9aec4e5ab52684fa2b2da2c853c7891b360ec46fba07"}, + {file = "ijson-3.4.0.post0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0634b21188c67e5cf471cc1d30d193d19f521d89e2125ab1fb602aa8ae61e050"}, + {file = "ijson-3.4.0.post0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3752dd6f51ef58a71799de745649deff293e959700f1b7f5b1989618da366f24"}, + {file = "ijson-3.4.0.post0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:57db77f4ea3eca09f519f627d9f9c76eb862b30edef5d899f031feeed94f05a1"}, + {file = "ijson-3.4.0.post0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:435270a4b75667305f6df3226e5224e83cd6906022d7fdcc9df05caae725f796"}, + {file = "ijson-3.4.0.post0-cp39-cp39-win32.whl", hash = "sha256:742c211b004ab51ccad2b301525d8a6eb2cf68a5fb82d78836f3a351eec44d4e"}, + {file = "ijson-3.4.0.post0-cp39-cp39-win_amd64.whl", hash = "sha256:35aaa979da875fa92bea5dc5969b1541b4912b165091761785459a43f0c20946"}, + {file = "ijson-3.4.0.post0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:add9242f886eae844a7410b84aee2bbb8bdc83c624f227cb1fdb2d0476a96cb1"}, + {file = "ijson-3.4.0.post0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:69718ed41710dfcaa7564b0af42abc05875d4f7aaa24627c808867ef32634bc7"}, + {file = "ijson-3.4.0.post0-pp311-pypy311_pp73-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:636b6eca96c6c43c04629c6b37fad0181662eaacf9877c71c698485637f752f9"}, + {file = "ijson-3.4.0.post0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb5e73028f6e63d27b3d286069fe350ed80a4ccc493b022b590fea4bb086710d"}, + {file = "ijson-3.4.0.post0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:461acf4320219459dabe5ed90a45cb86c9ba8cc6d6db9dad0d9427d42f57794c"}, + {file = "ijson-3.4.0.post0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a0fedf09c0f6ffa2a99e7e7fd9c5f3caf74e655c1ee015a0797383e99382ebc3"}, + {file = "ijson-3.4.0.post0.tar.gz", hash = "sha256:9aa02dc70bb245670a6ca7fba737b992aeeb4895360980622f7e568dbf23e41e"}, ] [[package]] From 40893be93c2c7e566134c2df99ad311aeb5a5cdb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Oct 2025 13:24:16 +0100 Subject: [PATCH 078/149] Bump idna from 3.10 to 3.11 (#19053) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 780d6528dd..175039a69c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -673,14 +673,14 @@ test = ["coverage[toml]", "pretend", "pytest", "pytest-cov"] [[package]] name = "idna" -version = "3.10" +version = "3.11" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" groups = ["main", "dev"] files = [ - {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, - {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, + {file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"}, + {file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"}, ] [package.extras] From 9d81bb703c2d07d006fb7fdf975f585e72dca642 Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Fri, 24 Oct 2025 14:21:49 -0400 Subject: [PATCH 079/149] Always treat `RETURNING` as supported by SQL engines (#19047) Can do this now that SQLite 3.35.0 added support for `RETURNING`. > The RETURNING syntax has been supported by SQLite since version 3.35.0 (2021-03-12). > > *-- https://sqlite.org/lang_returning.html* This also bumps the minimum supported SQLite version according to Synapse's [deprecation policy](https://element-hq.github.io/synapse/latest/deprecation_policy.html#platform-dependencies). Fix https://github.com/element-hq/synapse/issues/17577 --- .ci/scripts/calculate_jobs.py | 14 ++-- .github/workflows/latest_deps.yml | 4 +- .github/workflows/twisted_trunk.yml | 4 +- changelog.d/19047.doc | 1 + changelog.d/19047.misc | 1 + changelog.d/19047.removal | 1 + docs/deprecation_policy.md | 2 +- docs/development/contributing_guide.md | 2 +- synapse/storage/database.py | 39 +++-------- .../storage/databases/main/delayed_events.py | 35 ++++------ .../databases/main/event_federation.py | 68 +++++-------------- .../storage/databases/main/registration.py | 32 ++------- .../storage/databases/main/user_directory.py | 43 ++++-------- synapse/storage/engines/_base.py | 6 -- synapse/storage/engines/postgres.py | 5 -- synapse/storage/engines/sqlite.py | 9 +-- 16 files changed, 80 insertions(+), 186 deletions(-) create mode 100644 changelog.d/19047.doc create mode 100644 changelog.d/19047.misc create mode 100644 changelog.d/19047.removal diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 5249acdc5d..f3b1bb1503 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -99,24 +99,24 @@ set_output("trial_test_matrix", test_matrix) # First calculate the various sytest jobs. 
# -# For each type of test we only run on bullseye on PRs +# For each type of test we only run on bookworm on PRs sytest_tests = [ { - "sytest-tag": "bullseye", + "sytest-tag": "bookworm", }, { - "sytest-tag": "bullseye", + "sytest-tag": "bookworm", "postgres": "postgres", }, { - "sytest-tag": "bullseye", + "sytest-tag": "bookworm", "postgres": "multi-postgres", "workers": "workers", }, { - "sytest-tag": "bullseye", + "sytest-tag": "bookworm", "postgres": "multi-postgres", "workers": "workers", "reactor": "asyncio", @@ -127,11 +127,11 @@ if not IS_PR: sytest_tests.extend( [ { - "sytest-tag": "bullseye", + "sytest-tag": "bookworm", "reactor": "asyncio", }, { - "sytest-tag": "bullseye", + "sytest-tag": "bookworm", "postgres": "postgres", "reactor": "asyncio", }, diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index a00c52fcb2..526546531a 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -139,9 +139,9 @@ jobs: fail-fast: false matrix: include: - - sytest-tag: bullseye + - sytest-tag: bookworm - - sytest-tag: bullseye + - sytest-tag: bookworm postgres: postgres workers: workers redis: redis diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 04a8db2cc7..3f14219bbc 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -108,11 +108,11 @@ jobs: if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest container: - # We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version. + # We're using bookworm because that's what Debian oldstable is at the time of writing. # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest # version, assuming that any incompatibilities on newer versions would also be present on the oldest. - image: matrixdotorg/sytest-synapse:bullseye + image: matrixdotorg/sytest-synapse:bookworm volumes: - ${{ github.workspace }}:/src diff --git a/changelog.d/19047.doc b/changelog.d/19047.doc new file mode 100644 index 0000000000..fee241f2a5 --- /dev/null +++ b/changelog.d/19047.doc @@ -0,0 +1 @@ +Update the link to the Debian oldstable package for SQLite. diff --git a/changelog.d/19047.misc b/changelog.d/19047.misc new file mode 100644 index 0000000000..47f686a158 --- /dev/null +++ b/changelog.d/19047.misc @@ -0,0 +1 @@ +Always treat `RETURNING` as supported by SQL engines, now that the minimum-supported versions of both SQLite and PostgreSQL support it. diff --git a/changelog.d/19047.removal b/changelog.d/19047.removal new file mode 100644 index 0000000000..da7a161868 --- /dev/null +++ b/changelog.d/19047.removal @@ -0,0 +1 @@ +Remove support for SQLite < 3.37.2. diff --git a/docs/deprecation_policy.md b/docs/deprecation_policy.md index 2f3a09723e..06c724d348 100644 --- a/docs/deprecation_policy.md +++ b/docs/deprecation_policy.md @@ -21,7 +21,7 @@ people building from source should ensure they can fetch recent versions of Rust (e.g. by using [rustup](https://rustup.rs/)). The oldest supported version of SQLite is the version -[provided](https://packages.debian.org/bullseye/libsqlite3-0) by +[provided](https://packages.debian.org/oldstable/libsqlite3-0) by [Debian oldstable](https://wiki.debian.org/DebianOldStable). 
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index eb6f04e301..41fff1d6a3 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -320,7 +320,7 @@ The following command will let you run the integration test with the most common configuration: ```sh -$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye +$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bookworm ``` (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 764ca9f229..b7f870bd26 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1161,36 +1161,17 @@ class DatabasePool: SQLite versions that don't support it). """ - if txn.database_engine.supports_returning: - sql = "INSERT INTO %s (%s) VALUES(%s) RETURNING %s" % ( - table, - ", ".join(k for k in values.keys()), - ", ".join("?" for _ in values.keys()), - ", ".join(k for k in returning), - ) + sql = "INSERT INTO %s (%s) VALUES(%s) RETURNING %s" % ( + table, + ", ".join(k for k in values.keys()), + ", ".join("?" for _ in values.keys()), + ", ".join(k for k in returning), + ) - txn.execute(sql, list(values.values())) - row = txn.fetchone() - assert row is not None - return row - else: - # For old versions of SQLite we do a standard insert and then can - # use `last_insert_rowid` to get at the row we just inserted - DatabasePool.simple_insert_txn( - txn, - table=table, - values=values, - ) - txn.execute("SELECT last_insert_rowid()") - row = txn.fetchone() - assert row is not None - (rowid,) = row - - row = DatabasePool.simple_select_one_txn( - txn, table=table, keyvalues={"rowid": rowid}, retcols=returning - ) - assert row is not None - return row + txn.execute(sql, list(values.values())) + row = txn.fetchone() + assert row is not None + return row async def simple_insert_many( self, diff --git a/synapse/storage/databases/main/delayed_events.py b/synapse/storage/databases/main/delayed_events.py index 33101327f5..6ad161db33 100644 --- a/synapse/storage/databases/main/delayed_events.py +++ b/synapse/storage/databases/main/delayed_events.py @@ -347,33 +347,28 @@ class DelayedEventsStore(SQLBaseStore): EventDetails, Optional[Timestamp], ]: - sql_cols = ", ".join( - ( - "room_id", - "event_type", - "state_key", - "origin_server_ts", - "content", - "device_id", - ) - ) - sql_update = "UPDATE delayed_events SET is_processed = TRUE" - sql_where = "WHERE delay_id = ? AND user_localpart = ? AND NOT is_processed" - sql_args = (delay_id, user_localpart) txn.execute( + """ + UPDATE delayed_events + SET is_processed = TRUE + WHERE delay_id = ? AND user_localpart = ? 
+ AND NOT is_processed + RETURNING + room_id, + event_type, + state_key, + origin_server_ts, + content, + device_id + """, ( - f"{sql_update} {sql_where} RETURNING {sql_cols}" - if self.database_engine.supports_returning - else f"SELECT {sql_cols} FROM delayed_events {sql_where}" + delay_id, + user_localpart, ), - sql_args, ) row = txn.fetchone() if row is None: raise NotFoundError("Delayed event not found") - elif not self.database_engine.supports_returning: - txn.execute(f"{sql_update} {sql_where}", sql_args) - assert txn.rowcount == 1 event = EventDetails( RoomID.from_string(row[0]), diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index d889e8eceb..0a8571f0c8 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -2040,61 +2040,29 @@ class EventFederationWorkerStore( Returns: The received_ts of the row that was deleted, if any. """ - if self.db_pool.engine.supports_returning: - def _remove_received_event_from_staging_txn( - txn: LoggingTransaction, - ) -> Optional[int]: - sql = """ - DELETE FROM federation_inbound_events_staging - WHERE origin = ? AND event_id = ? - RETURNING received_ts - """ + def _remove_received_event_from_staging_txn( + txn: LoggingTransaction, + ) -> Optional[int]: + sql = """ + DELETE FROM federation_inbound_events_staging + WHERE origin = ? AND event_id = ? + RETURNING received_ts + """ - txn.execute(sql, (origin, event_id)) - row = cast(Optional[tuple[int]], txn.fetchone()) + txn.execute(sql, (origin, event_id)) + row = cast(Optional[tuple[int]], txn.fetchone()) - if row is None: - return None + if row is None: + return None - return row[0] + return row[0] - return await self.db_pool.runInteraction( - "remove_received_event_from_staging", - _remove_received_event_from_staging_txn, - db_autocommit=True, - ) - - else: - - def _remove_received_event_from_staging_txn( - txn: LoggingTransaction, - ) -> Optional[int]: - received_ts = self.db_pool.simple_select_one_onecol_txn( - txn, - table="federation_inbound_events_staging", - keyvalues={ - "origin": origin, - "event_id": event_id, - }, - retcol="received_ts", - allow_none=True, - ) - self.db_pool.simple_delete_txn( - txn, - table="federation_inbound_events_staging", - keyvalues={ - "origin": origin, - "event_id": event_id, - }, - ) - - return received_ts - - return await self.db_pool.runInteraction( - "remove_received_event_from_staging", - _remove_received_event_from_staging_txn, - ) + return await self.db_pool.runInteraction( + "remove_received_event_from_staging", + _remove_received_event_from_staging_txn, + db_autocommit=True, + ) async def get_next_staged_event_id_for_room( self, diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 7ce9bf43e6..bad2d0b63a 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -2544,31 +2544,13 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): ) args.append(user_id) - if self.database_engine.supports_returning: - sql = f""" - DELETE FROM access_tokens - WHERE {clause} AND user_id = ? 
- RETURNING token, id, device_id - """ - txn.execute(sql, args) - tokens_and_devices = txn.fetchall() - else: - tokens_and_devices = self.db_pool.simple_select_many_txn( - txn, - table="access_tokens", - column="device_id", - iterable=batch_device_ids, - keyvalues={"user_id": user_id}, - retcols=("token", "id", "device_id"), - ) - - self.db_pool.simple_delete_many_txn( - txn, - table="access_tokens", - keyvalues={"user_id": user_id}, - column="device_id", - values=batch_device_ids, - ) + sql = f""" + DELETE FROM access_tokens + WHERE {clause} AND user_id = ? + RETURNING token, id, device_id + """ + txn.execute(sql, args) + tokens_and_devices = txn.fetchall() self._invalidate_cache_and_stream_bulk( txn, diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 895d7e6148..7a57beee71 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -353,27 +353,19 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): def _populate_user_directory_process_users_txn( txn: LoggingTransaction, ) -> Optional[int]: - if self.database_engine.supports_returning: - # Note: we use an ORDER BY in the SELECT to force usage of an - # index. Otherwise, postgres does a sequential scan that is - # surprisingly slow (I think due to the fact it will read/skip - # over lots of already deleted rows). - sql = f""" - DELETE FROM {TEMP_TABLE + "_users"} - WHERE user_id IN ( - SELECT user_id FROM {TEMP_TABLE + "_users"} ORDER BY user_id LIMIT ? - ) - RETURNING user_id - """ - txn.execute(sql, (batch_size,)) - user_result = cast(list[tuple[str]], txn.fetchall()) - else: - sql = "SELECT user_id FROM %s ORDER BY user_id LIMIT %s" % ( - TEMP_TABLE + "_users", - str(batch_size), + # Note: we use an ORDER BY in the SELECT to force usage of an + # index. Otherwise, postgres does a sequential scan that is + # surprisingly slow (I think due to the fact it will read/skip + # over lots of already deleted rows). + sql = f""" + DELETE FROM {TEMP_TABLE + "_users"} + WHERE user_id IN ( + SELECT user_id FROM {TEMP_TABLE + "_users"} ORDER BY user_id LIMIT ? ) - txn.execute(sql) - user_result = cast(list[tuple[str]], txn.fetchall()) + RETURNING user_id + """ + txn.execute(sql, (batch_size,)) + user_result = cast(list[tuple[str]], txn.fetchall()) if not user_result: return None @@ -432,17 +424,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): # Actually insert the users with their profiles into the directory. self._update_profiles_in_user_dir_txn(txn, profiles_to_insert) - # We've finished processing the users. Delete it from the table, if - # we haven't already. - if not self.database_engine.supports_returning: - self.db_pool.simple_delete_many_txn( - txn, - table=TEMP_TABLE + "_users", - column="user_id", - values=users_to_work_on, - keyvalues={}, - ) - # Update the remaining counter. progress["remaining"] -= len(users_to_work_on) self.db_pool.updates._background_update_progress_txn( diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index 9fec42c2e0..be6981f77c 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -63,12 +63,6 @@ class BaseDatabaseEngine(Generic[ConnectionType, CursorType], metaclass=abc.ABCM """ ... - @property - @abc.abstractmethod - def supports_returning(self) -> bool: - """Do we support the `RETURNING` clause in insert/update/delete?""" - ... 
- @abc.abstractmethod def check_database( self, db_conn: ConnectionType, allow_outdated_version: bool = False diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 8a1bbfa0f5..b059b924c2 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -193,11 +193,6 @@ class PostgresEngine( """Do we support using `a = ANY(?)` and passing a list""" return True - @property - def supports_returning(self) -> bool: - """Do we support the `RETURNING` clause in insert/update/delete?""" - return True - def is_deadlock(self, error: Exception) -> bool: if isinstance(error, psycopg2.DatabaseError): # https://www.postgresql.org/docs/current/static/errcodes-appendix.html diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index ac3dc25bb5..b49d230eed 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -68,11 +68,6 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): """Do we support using `a = ANY(?)` and passing a list""" return False - @property - def supports_returning(self) -> bool: - """Do we support the `RETURNING` clause in insert/update/delete?""" - return sqlite3.sqlite_version_info >= (3, 35, 0) - def check_database( self, db_conn: sqlite3.Connection, allow_outdated_version: bool = False ) -> None: @@ -80,8 +75,8 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): # Synapse is untested against older SQLite versions, and we don't want # to let users upgrade to a version of Synapse with broken support for their # sqlite version, because it risks leaving them with a half-upgraded db. - if sqlite3.sqlite_version_info < (3, 27, 0): - raise RuntimeError("Synapse requires sqlite 3.27 or above.") + if sqlite3.sqlite_version_info < (3, 37, 2): + raise RuntimeError("Synapse requires sqlite 3.37.2 or above.") def check_new_database(self, txn: Cursor) -> None: """Gets called when setting up a brand new database. This allows us to From f1695ac20efdbf24543c1ce7b2795264d612b7fb Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 24 Oct 2025 13:32:16 -0700 Subject: [PATCH 080/149] Add an admin API to get the space hierarchy (#19021) It is often useful when investigating a space to get information about that space and it's children. This PR adds an Admin API to return information about a space and it's children, regardless of room membership. Will not fetch information over federation about remote rooms that the server is not participating in. --- changelog.d/19021.feature | 2 + docs/admin_api/rooms.md | 73 ++++++++ synapse/handlers/room_summary.py | 71 ++++++-- synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/rooms.py | 44 +++++ tests/rest/admin/test_room.py | 304 ++++++++++++++++++++++++++++++- 6 files changed, 475 insertions(+), 21 deletions(-) create mode 100644 changelog.d/19021.feature diff --git a/changelog.d/19021.feature b/changelog.d/19021.feature new file mode 100644 index 0000000000..dea4748769 --- /dev/null +++ b/changelog.d/19021.feature @@ -0,0 +1,2 @@ +Add an [Admin API](https://element-hq.github.io/synapse/latest/usage/administration/admin_api/index.html) +to allow an admin to fetch the space/room hierarchy for a given space. 
\ No newline at end of file diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 12af87148d..11e787c236 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -1115,3 +1115,76 @@ Example response: ] } ``` + +# Admin Space Hierarchy Endpoint + +This API allows an admin to fetch the space/room hierarchy for a given space, +returning details about that room and any children the room may have, paginating +over the space tree in a depth-first manner to locate child rooms. This is +functionally similar to the [CS Hierarchy](https://spec.matrix.org/v1.16/client-server-api/#get_matrixclientv1roomsroomidhierarchy) endpoint but does not check for +room membership when returning room summaries. + +The endpoint does not query other servers over federation about remote rooms +that the server has not joined. This is a deliberate trade-off: while this +means it will leave some holes in the hierarchy that we could otherwise +sometimes fill in, it significantly improves the endpoint's response time and +the admin endpoint is designed for managing rooms local to the homeserver +anyway. + +**Parameters** + +The following query parameters are available: + +* `from` - An optional pagination token, provided when there are more rooms to + return than the limit. +* `limit` - Maximum amount of rooms to return. Must be a non-negative integer, + defaults to `50`. +* `max_depth` - The maximum depth in the tree to explore, must be a non-negative + integer. 0 would correspond to just the root room, 1 would include just the + root room's children, etc. If not provided will recurse into the space tree without limit. + +Request: + +```http +GET /_synapse/admin/v1/rooms//hierarchy +``` + +Response: + +```json +{ + "rooms": + [ + { "children_state": [ + { + "content": { + "via": ["local_test_server"] + }, + "origin_server_ts": 1500, + "sender": "@user:test", + "state_key": "!QrMkkqBSwYRIFNFCso:test", + "type": "m.space.child" + } + ], + "name": "space room", + "guest_can_join": false, + "join_rule": "public", + "num_joined_members": 1, + "room_id": "!sPOpNyMHbZAoAOsOFL:test", + "room_type": "m.space", + "world_readable": false + }, + + { + "children_state": [], + "guest_can_join": true, + "join_rule": "invite", + "name": "nefarious", + "num_joined_members": 1, + "room_id": "!QrMkkqBSwYRIFNFCso:test", + "topic": "being bad", + "world_readable": false} + ], + "next_batch": "KUYmRbeSpAoaAIgOKGgyaCEn" +} +``` diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index a948202056..a3247d3cda 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -116,6 +116,8 @@ class RoomSummaryHandler: str, str, bool, + bool, + bool, Optional[int], Optional[int], Optional[str], @@ -133,6 +135,8 @@ class RoomSummaryHandler: requester: Requester, requested_room_id: str, suggested_only: bool = False, + omit_remote_room_hierarchy: bool = False, + admin_skip_room_visibility_check: bool = False, max_depth: Optional[int] = None, limit: Optional[int] = None, from_token: Optional[str] = None, @@ -146,6 +150,11 @@ class RoomSummaryHandler: requested_room_id: The room ID to start the hierarchy at (the "root" room). suggested_only: Whether we should only return children with the "suggested" flag set. 
+ omit_remote_room_hierarchy: Whether to skip reaching out over + federation to get information on rooms which the server + is not currently joined to + admin_skip_room_visibility_check: Whether to skip checking if the room can + be accessed by the requester, used for the admin endpoints. max_depth: The maximum depth in the tree to explore, must be a non-negative integer. @@ -173,6 +182,8 @@ class RoomSummaryHandler: requester.user.to_string(), requested_room_id, suggested_only, + omit_remote_room_hierarchy, + admin_skip_room_visibility_check, max_depth, limit, from_token, @@ -182,6 +193,8 @@ class RoomSummaryHandler: requester.user.to_string(), requested_room_id, suggested_only, + omit_remote_room_hierarchy, + admin_skip_room_visibility_check, max_depth, limit, from_token, @@ -193,6 +206,8 @@ class RoomSummaryHandler: requester: str, requested_room_id: str, suggested_only: bool = False, + omit_remote_room_hierarchy: bool = False, + admin_skip_room_visibility_check: bool = False, max_depth: Optional[int] = None, limit: Optional[int] = None, from_token: Optional[str] = None, @@ -204,17 +219,18 @@ class RoomSummaryHandler: local_room = await self._store.is_host_joined( requested_room_id, self._server_name ) - if local_room and not await self._is_local_room_accessible( - requested_room_id, requester - ): - raise UnstableSpecAuthError( - 403, - "User %s not in room %s, and room previews are disabled" - % (requester, requested_room_id), - errcode=Codes.NOT_JOINED, - ) + if not admin_skip_room_visibility_check: + if local_room and not await self._is_local_room_accessible( + requested_room_id, requester + ): + raise UnstableSpecAuthError( + 403, + "User %s not in room %s, and room previews are disabled" + % (requester, requested_room_id), + errcode=Codes.NOT_JOINED, + ) - if not local_room: + if not local_room and not omit_remote_room_hierarchy: room_hierarchy = await self._summarize_remote_room_hierarchy( _RoomQueueEntry(requested_room_id, remote_room_hosts or ()), False, @@ -223,12 +239,13 @@ class RoomSummaryHandler: if not root_room_entry or not await self._is_remote_room_accessible( requester, requested_room_id, root_room_entry.room ): - raise UnstableSpecAuthError( - 403, - "User %s not in room %s, and room previews are disabled" - % (requester, requested_room_id), - errcode=Codes.NOT_JOINED, - ) + if not admin_skip_room_visibility_check: + raise UnstableSpecAuthError( + 403, + "User %s not in room %s, and room previews are disabled" + % (requester, requested_room_id), + errcode=Codes.NOT_JOINED, + ) # If this is continuing a previous session, pull the persisted data. if from_token: @@ -240,13 +257,18 @@ class RoomSummaryHandler: except StoreError: raise SynapseError(400, "Unknown pagination token", Codes.INVALID_PARAM) - # If the requester, room ID, suggested-only, or max depth were modified - # the session is invalid. + # If the requester, room ID, suggested-only, max depth, + # omit_remote_room_hierarchy, or admin_skip_room_visibility_check + # were modified the session is invalid. 
if ( requester != pagination_session["requester"] or requested_room_id != pagination_session["room_id"] or suggested_only != pagination_session["suggested_only"] or max_depth != pagination_session["max_depth"] + or omit_remote_room_hierarchy + != pagination_session["omit_remote_room_hierarchy"] + or admin_skip_room_visibility_check + != pagination_session["admin_skip_room_visibility_check"] ): raise SynapseError(400, "Unknown pagination token", Codes.INVALID_PARAM) @@ -301,6 +323,7 @@ class RoomSummaryHandler: None, room_id, suggested_only, + admin_skip_room_visibility_check=admin_skip_room_visibility_check, ) # Otherwise, attempt to use information for federation. @@ -321,7 +344,7 @@ class RoomSummaryHandler: # If the above isn't true, attempt to fetch the room # information over federation. - else: + elif not omit_remote_room_hierarchy: ( room_entry, children_room_entries, @@ -378,6 +401,8 @@ class RoomSummaryHandler: "room_id": requested_room_id, "suggested_only": suggested_only, "max_depth": max_depth, + "omit_remote_room_hierarchy": omit_remote_room_hierarchy, + "admin_skip_room_visibility_check": admin_skip_room_visibility_check, # The stored state. "room_queue": [ attr.astuple(room_entry) for room_entry in room_queue @@ -460,6 +485,7 @@ class RoomSummaryHandler: room_id: str, suggested_only: bool, include_children: bool = True, + admin_skip_room_visibility_check: bool = False, ) -> Optional["_RoomEntry"]: """ Generate a room entry and a list of event entries for a given room. @@ -476,11 +502,16 @@ class RoomSummaryHandler: Otherwise, all children are returned. include_children: Whether to include the events of any children. + admin_skip_room_visibility_check: Whether to skip checking if the room + can be accessed by the requester, used for the admin endpoints. Returns: A room entry if the room should be returned. None, otherwise. """ - if not await self._is_local_room_accessible(room_id, requester, origin): + if ( + not admin_skip_room_visibility_check + and not await self._is_local_room_accessible(room_id, requester, origin) + ): return None room_entry = await self._build_room_entry(room_id, for_federation=bool(origin)) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 5e75dc4c00..bcaba85da3 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -74,6 +74,7 @@ from synapse.rest.admin.registration_tokens import ( RegistrationTokenRestServlet, ) from synapse.rest.admin.rooms import ( + AdminRoomHierarchy, BlockRoomRestServlet, DeleteRoomStatusByDeleteIdRestServlet, DeleteRoomStatusByRoomIdRestServlet, @@ -342,6 +343,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ExperimentalFeaturesRestServlet(hs).register(http_server) SuspendAccountRestServlet(hs).register(http_server) ScheduledTasksRestServlet(hs).register(http_server) + AdminRoomHierarchy(hs).register(http_server) EventRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 216af29f9b..e1bfca3c03 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -63,6 +63,50 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +class AdminRoomHierarchy(RestServlet): + """ + Given a room, returns room details on that room and any space children of + the provided room. 
Does not reach out over federation to fetch information about + any remote rooms which the server is not currently participating in + """ + + PATTERNS = admin_patterns("/rooms/(?P[^/]*)/hierarchy$") + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self._room_summary_handler = hs.get_room_summary_handler() + self._store = hs.get_datastores().main + self._storage_controllers = hs.get_storage_controllers() + + async def on_GET( + self, request: SynapseRequest, room_id: str + ) -> tuple[int, JsonDict]: + requester = await self._auth.get_user_by_req(request) + await assert_user_is_admin(self._auth, requester) + + max_depth = parse_integer(request, "max_depth") + limit = parse_integer(request, "limit") + + room_entry_summary = await self._room_summary_handler.get_room_hierarchy( + requester, + room_id, + # We omit details about remote rooms because we only care + # about managing rooms local to the homeserver. This + # also immensely helps with the response time of the + # endpoint since we don't need to reach out over federation. + # There is a trade-off as this will leave holes where + # information about public/peekable remote rooms the + # server is not participating in will be omitted. + omit_remote_room_hierarchy=True, + admin_skip_room_visibility_check=True, + max_depth=max_depth, + limit=limit, + from_token=parse_string(request, "from"), + ) + + return HTTPStatus.OK, room_entry_summary + + class RoomRestV2Servlet(RestServlet): """Delete a room from server asynchronously with a background task. diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 6bd21630db..5b95262365 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -31,7 +31,7 @@ from twisted.internet.task import deferLater from twisted.internet.testing import MemoryReactor import synapse.rest.admin -from synapse.api.constants import EventTypes, Membership, RoomTypes +from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes from synapse.api.errors import Codes from synapse.api.room_versions import RoomVersions from synapse.handlers.pagination import ( @@ -56,6 +56,308 @@ from tests import unittest ONE_HOUR_IN_S = 3600 +class AdminHierarchyTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + # create some users + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + self.third_user = self.register_user("third_user", "pass") + self.third_user_tok = self.login("third_user", "pass") + + # mock out the function which pulls room information in over federation. 
+ self._room_summary_handler = hs.get_room_summary_handler() + self._room_summary_handler._summarize_remote_room_hierarchy = Mock() # type: ignore[method-assign] + + # create some rooms with different options + self.room_id1 = self.helper.create_room_as( + self.other_user, + is_public=False, + tok=self.other_user_tok, + extra_content={"name": "nefarious", "topic": "being bad"}, + ) + + self.room_id2 = self.helper.create_room_as( + self.third_user, + tok=self.third_user_tok, + extra_content={"name": "also nefarious"}, + ) + + self.room_id3 = self.helper.create_room_as( + self.admin_user, + is_public=False, + tok=self.admin_user_tok, + extra_content={ + "name": "not nefarious", + "topic": "happy things", + "creation_content": { + "additional_creators": [self.other_user, self.third_user] + }, + }, + room_version="12", + ) + + self.not_in_space_room_id = self.helper.create_room_as( + self.other_user, + tok=self.other_user_tok, + extra_content={"name": "not related to other rooms"}, + ) + + # create a space room + self.space_room_id = self.helper.create_room_as( + self.other_user, + is_public=True, + extra_content={ + "visibility": "public", + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}, + "name": "space_room", + }, + tok=self.other_user_tok, + ) + + # and an unjoined remote room + self.remote_room_id = "!remote_room" + + self.room_id_to_human_name_map = { + self.room_id1: "room1", + self.room_id2: "room2", + self.room_id3: "room3", + self.not_in_space_room_id: "room4", + self.space_room_id: "space_room", + self.remote_room_id: "remote_room", + } + + # add three of the rooms to space + for state_key in [self.room_id1, self.room_id2, self.room_id3]: + self.helper.send_state( + self.space_room_id, + EventTypes.SpaceChild, + body={"via": ["local_test_server"]}, + tok=self.other_user_tok, + state_key=state_key, + ) + + # and add remote room to space - ideally we'd add an actual remote + # space with rooms in it but the test framework doesn't currently + # support that. Instead we add a room which the server would have to + # reach out over federation to get details about and assert that the + # federation call was not made + self.helper.send_state( + self.space_room_id, + EventTypes.SpaceChild, + body={"via": ["remote_test_server"]}, + tok=self.other_user_tok, + state_key=self.remote_room_id, + ) + + def test_no_auth(self) -> None: + """ + If the requester does not provide authentication, a 401 is returned + """ + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{self.space_room_id}/hierarchy", + ) + + self.assertEqual(401, channel.code, msg=channel.json_body) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self) -> None: + """ + If the requester is not a server admin, an error 403 is returned. 
+ """ + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{self.space_room_id}/hierarchy", + access_token=self.other_user_tok, + ) + + self.assertEqual(403, channel.code, msg=channel.json_body) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_bad_request(self) -> None: + """ + Test that invalid param values raise an error + """ + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{self.space_room_id}/hierarchy?limit=ten", + access_token=self.admin_user_tok, + ) + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{self.space_room_id}/hierarchy?max_depth=four", + access_token=self.admin_user_tok, + ) + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + def test_room_summary(self) -> None: + """ + Test that details of room and details of children of room are + provided correctly + """ + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{self.space_room_id}/hierarchy", + access_token=self.admin_user_tok, + ) + self.assertEqual(channel.code, 200, msg=channel.json_body) + rooms = channel.json_body["rooms"] + self.assertCountEqual( + { + self.room_id_to_human_name_map.get( + room["room_id"], f"Unknown room: {room['room_id']}" + ) + for room in rooms + }, + {"space_room", "room1", "room2", "room3"}, + ) + + for room_result in rooms: + room_id = room_result["room_id"] + if room_id == self.room_id1: + self.assertEqual(room_result["name"], "nefarious") + self.assertEqual(room_result["topic"], "being bad") + self.assertEqual(room_result["join_rule"], "invite") + self.assertEqual(len(room_result["children_state"]), 0) + self.assertEqual(room_result["world_readable"], False) + self.assertEqual(room_result["guest_can_join"], True) + self.assertEqual(room_result["num_joined_members"], 1) + elif room_id == self.room_id2: + self.assertEqual(room_result["name"], "also nefarious") + self.assertEqual(room_result["join_rule"], "public") + self.assertEqual(len(room_result["children_state"]), 0) + self.assertEqual(room_result["world_readable"], False) + self.assertEqual(room_result["guest_can_join"], False) + self.assertEqual(room_result["num_joined_members"], 1) + elif room_id == self.room_id3: + self.assertEqual(room_result["name"], "not nefarious") + self.assertEqual(room_result["join_rule"], "invite") + self.assertEqual(room_result["topic"], "happy things") + self.assertEqual(len(room_result["children_state"]), 0) + self.assertEqual(room_result["world_readable"], False) + self.assertEqual(room_result["guest_can_join"], True) + self.assertEqual(room_result["num_joined_members"], 1) + elif room_id == self.not_in_space_room_id: + self.fail("this room should not have been returned") + elif room_id == self.space_room_id: + self.assertEqual(room_result["join_rule"], "public") + self.assertEqual(len(room_result["children_state"]), 4) + self.assertEqual(room_result["room_type"], "m.space") + self.assertEqual(room_result["world_readable"], False) + self.assertEqual(room_result["guest_can_join"], False) + self.assertEqual(room_result["num_joined_members"], 1) + self.assertEqual(room_result["name"], "space_room") + else: + self.fail("unknown room returned") + + # Assert that a federation function to look up details about + # this room has not been called. 
We never expect the admin + # hierarchy endpoint to reach out over federation. + self._room_summary_handler._summarize_remote_room_hierarchy.assert_not_called() # type: ignore[attr-defined] + + def test_room_summary_pagination(self) -> None: + """ + Test that details of room and details of children of room are provided + correctly when paginating + """ + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{self.space_room_id}/hierarchy?limit=2", + access_token=self.admin_user_tok, + ) + self.assertEqual(channel.code, 200, msg=channel.json_body) + rooms = channel.json_body["rooms"] + self.assertCountEqual( + { + self.room_id_to_human_name_map.get( + room["room_id"], f"Unknown room: {room['room_id']}" + ) + for room in rooms + }, + {"space_room", "room1"}, + ) + next_batch = channel.json_body["next_batch"] + + channel2 = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{self.space_room_id}/hierarchy?from={next_batch}", + access_token=self.admin_user_tok, + ) + self.assertEqual(channel2.code, 200, msg=channel2.json_body) + new_rooms = channel2.json_body["rooms"] + self.assertCountEqual( + { + self.room_id_to_human_name_map.get( + room["room_id"], f"Unknown room: {room['room_id']}" + ) + for room in new_rooms + }, + {"room2", "room3"}, + ) + + rooms_to_check = rooms + new_rooms + for room_result in rooms_to_check: + room_id = room_result["room_id"] + if room_id == self.room_id1: + self.assertEqual(room_result["name"], "nefarious") + self.assertEqual(room_result["topic"], "being bad") + self.assertEqual(room_result["join_rule"], "invite") + self.assertEqual(len(room_result["children_state"]), 0) + self.assertEqual(room_result["world_readable"], False) + self.assertEqual(room_result["guest_can_join"], True) + self.assertEqual(room_result["num_joined_members"], 1) + elif room_id == self.room_id2: + self.assertEqual(room_result["name"], "also nefarious") + self.assertEqual(room_result["join_rule"], "public") + self.assertEqual(len(room_result["children_state"]), 0) + self.assertEqual(room_result["world_readable"], False) + self.assertEqual(room_result["guest_can_join"], False) + self.assertEqual(room_result["num_joined_members"], 1) + elif room_id == self.room_id3: + self.assertEqual(room_result["name"], "not nefarious") + self.assertEqual(room_result["join_rule"], "invite") + self.assertEqual(room_result["topic"], "happy things") + self.assertEqual(len(room_result["children_state"]), 0) + self.assertEqual(room_result["world_readable"], False) + self.assertEqual(room_result["guest_can_join"], True) + self.assertEqual(room_result["num_joined_members"], 1) + elif room_id == self.not_in_space_room_id: + self.fail("this room should not have been returned") + elif room_id == self.space_room_id: + self.assertEqual(room_result["join_rule"], "public") + self.assertEqual(len(room_result["children_state"]), 4) + self.assertEqual(room_result["room_type"], "m.space") + self.assertEqual(room_result["world_readable"], False) + self.assertEqual(room_result["guest_can_join"], False) + self.assertEqual(room_result["num_joined_members"], 1) + self.assertEqual(room_result["name"], "space_room") + else: + self.fail("unknown room returned") + + # Assert that a federation function to look up details about + # this room has not been called. We never expect the admin + # hierarchy endpoint to reach out over federation. 
+ self._room_summary_handler._summarize_remote_room_hierarchy.assert_not_called() # type: ignore[attr-defined] + + class DeleteRoomTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, From f6ef9c129a64b9679f70b9f400041bd03ed98b74 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 16:58:05 +0000 Subject: [PATCH 081/149] Bump stefanzweifel/git-auto-commit-action from 6.0.1 to 7.0.0 (#19052) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/fix_lint.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml index 87f46078be..c33481a51e 100644 --- a/.github/workflows/fix_lint.yaml +++ b/.github/workflows/fix_lint.yaml @@ -47,6 +47,6 @@ jobs: - run: cargo fmt continue-on-error: true - - uses: stefanzweifel/git-auto-commit-action@778341af668090896ca464160c2def5d1d1a3eb0 # v6.0.1 + - uses: stefanzweifel/git-auto-commit-action@28e16e81777b558cc906c8750092100bbb34c5e3 # v7.0.0 with: commit_message: "Attempt to fix linting" From 634f7cf18b3bf359b5112355073d941b68f08ecc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 16:58:33 +0000 Subject: [PATCH 082/149] Bump types-psycopg2 from 2.9.21.20250915 to 2.9.21.20251012 (#19054) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 175039a69c..3ca3facd3d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3021,14 +3021,14 @@ files = [ [[package]] name = "types-psycopg2" -version = "2.9.21.20250915" +version = "2.9.21.20251012" description = "Typing stubs for psycopg2" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "types_psycopg2-2.9.21.20250915-py3-none-any.whl", hash = "sha256:eefe5ccdc693fc086146e84c9ba437bb278efe1ef330b299a0cb71169dc6c55f"}, - {file = "types_psycopg2-2.9.21.20250915.tar.gz", hash = "sha256:bfeb8f54c32490e7b5edc46215ab4163693192bc90407b4a023822de9239f5c8"}, + {file = "types_psycopg2-2.9.21.20251012-py3-none-any.whl", hash = "sha256:712bad5c423fe979e357edbf40a07ca40ef775d74043de72bd4544ca328cc57e"}, + {file = "types_psycopg2-2.9.21.20251012.tar.gz", hash = "sha256:4cdafd38927da0cfde49804f39ab85afd9c6e9c492800e42f1f0c1a1b0312935"}, ] [[package]] From db9a61c30f1066f87047eb60e435917c6478311f Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:16:02 +0100 Subject: [PATCH 083/149] Fix `bcrypt` errors preventing users from being able to log in (#19101) --- changelog.d/19101.bugfix | 1 + docs/usage/configuration/config_documentation.md | 2 +- schema/synapse-config.schema.yaml | 5 +++-- synapse/_scripts/hash_password.py | 2 +- synapse/handlers/auth.py | 15 +++++++++++++-- 5 files changed, 19 insertions(+), 6 deletions(-) create mode 100644 changelog.d/19101.bugfix diff --git a/changelog.d/19101.bugfix b/changelog.d/19101.bugfix new file mode 100644 index 0000000000..9f481b4620 --- /dev/null +++ b/changelog.d/19101.bugfix @@ -0,0 +1 @@ +Fix users being unable to log in if their password, or the server's configured pepper, was too long. 
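For background on the change below: bcrypt only uses the first 72 bytes of its input, and newer releases of the Python `bcrypt` package can reject longer inputs outright rather than silently truncating them, which is what broke logins for long password + pepper combinations. The following sketch is an illustration rather than Synapse's own code; it shows why applying the same 72-byte truncation when hashing and when checking keeps such passwords verifiable.

```python
# Illustration: truncate password + pepper to 72 bytes on both sides of bcrypt.
# Assumes the `bcrypt` package is installed; function names are illustrative only.
import bcrypt


def hash_password(password: str, pepper: str) -> bytes:
    material = (password + pepper).encode("utf8")
    # bcrypt ignores (or, in newer releases, rejects) input beyond 72 bytes,
    # so truncate explicitly before hashing.
    return bcrypt.hashpw(material[:72], bcrypt.gensalt())


def check_password(password: str, pepper: str, hashed: bytes) -> bool:
    # The identical truncation must happen at verification time, otherwise a
    # long password + pepper combination could never match its stored hash.
    material = (password + pepper).encode("utf8")
    return bcrypt.checkpw(material[:72], hashed)


long_password = "correct horse battery staple" * 4   # well over 72 bytes
stored = hash_password(long_password, pepper="server-pepper")
assert check_password(long_password, "server-pepper", stored)
```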
\ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index fec8d468a8..7509e4d715 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3815,7 +3815,7 @@ This setting has the following sub-options: * `localdb_enabled` (boolean): Set to false to disable authentication against the local password database. This is ignored if `enabled` is false, and is only useful if you have other `password_providers`. Defaults to `true`. -* `pepper` (string|null): Set the value here to a secret random string for extra security. DO NOT CHANGE THIS AFTER INITIAL SETUP! Defaults to `null`. +* `pepper` (string|null): A secret random string that will be appended to user's passwords before they are hashed. This improves the security of short passwords. DO NOT CHANGE THIS AFTER INITIAL SETUP! Defaults to `null`. * `policy` (object): Define and enforce a password policy, such as minimum lengths for passwords, etc. This is an implementation of MSC2000. diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml index 419a0ab91b..75a9a0aac5 100644 --- a/schema/synapse-config.schema.yaml +++ b/schema/synapse-config.schema.yaml @@ -4695,8 +4695,9 @@ properties: pepper: type: ["string", "null"] description: >- - Set the value here to a secret random string for extra security. DO - NOT CHANGE THIS AFTER INITIAL SETUP! + A secret random string that will be appended to user's passwords + before they are hashed. This improves the security of short passwords. + DO NOT CHANGE THIS AFTER INITIAL SETUP! default: null policy: type: object diff --git a/synapse/_scripts/hash_password.py b/synapse/_scripts/hash_password.py index 6a87303fc9..ae475b8490 100755 --- a/synapse/_scripts/hash_password.py +++ b/synapse/_scripts/hash_password.py @@ -77,7 +77,7 @@ def main() -> None: if len(bytes_to_hash) > 72: # bcrypt only looks at the first 72 bytes print( - f"Password is too long ({len(bytes_to_hash)} bytes); truncating to 72 bytes for bcrypt. " + f"Password + pepper is too long ({len(bytes_to_hash)} bytes); truncating to 72 bytes for bcrypt. " "This is expected behaviour and will not affect a user's ability to log in. 72 bytes is " "sufficient entropy for a password." ) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index f4583e33c3..564d5f723b 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1691,7 +1691,7 @@ class AuthHandler: # # Note: we explicitly DO NOT log the length of the user's password here. logger.debug( - "Password is too long; truncating to 72 bytes for bcrypt. " + "Password + pepper is too long; truncating to 72 bytes for bcrypt. " "This is expected behaviour and will not affect a user's ability to log in. 72 bytes is " "sufficient entropy for a password." ) @@ -1720,9 +1720,20 @@ class AuthHandler: def _do_validate_hash(checked_hash: bytes) -> bool: # Normalise the Unicode in the password pw = unicodedata.normalize("NFKC", password) + password_pepper = self.hs.config.auth.password_pepper + + bytes_to_hash = pw.encode("utf8") + password_pepper.encode("utf8") + if len(bytes_to_hash) > 72: + # bcrypt only looks at the first 72 bytes + logger.debug( + "Password + pepper is too long; truncating to 72 bytes for bcrypt. " + "This is expected behaviour and will not affect a user's ability to log in. 72 bytes is " + "sufficient entropy for a password." 
+ ) + bytes_to_hash = bytes_to_hash[:72] return bcrypt.checkpw( - pw.encode("utf8") + self.hs.config.auth.password_pepper.encode("utf8"), + bytes_to_hash, checked_hash, ) From d8e6fcced78477e9958fbb36733ca3c9a5aff0dd Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 28 Oct 2025 10:20:39 +0000 Subject: [PATCH 084/149] 1.141.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/19101.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/19101.bugfix diff --git a/CHANGES.md b/CHANGES.md index a26f012c64..0404245e6b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +# Synapse 1.141.0rc2 (2025-10-28) + +## Bugfixes + +- Fix users being unable to log in if their password, or the server's configured pepper, was too long. ([\#19101](https://github.com/element-hq/synapse/issues/19101)) + + + + # Synapse 1.141.0rc1 (2025-10-21) ## Deprecation of MacOS Python wheels diff --git a/changelog.d/19101.bugfix b/changelog.d/19101.bugfix deleted file mode 100644 index 9f481b4620..0000000000 --- a/changelog.d/19101.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix users being unable to log in if their password, or the server's configured pepper, was too long. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 0f61e38b1f..012d59aa93 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.141.0~rc2) stable; urgency=medium + + * New Synapse release 1.141.0rc2. + + -- Synapse Packaging team Tue, 28 Oct 2025 10:20:26 +0000 + matrix-synapse-py3 (1.141.0~rc1) stable; urgency=medium * New Synapse release 1.141.0rc1. diff --git a/pyproject.toml b/pyproject.toml index ee7016b1d0..affea7fc5e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.141.0rc1" +version = "1.141.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial" From 7106f674700e7ee7e7520abda0d7985999bca0ff Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 28 Oct 2025 10:22:23 +0000 Subject: [PATCH 085/149] Move MacOS wheel deprecation message to top of changelog --- CHANGES.md | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 0404245e6b..8fa31fa8e8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,14 +1,5 @@ # Synapse 1.141.0rc2 (2025-10-28) -## Bugfixes - -- Fix users being unable to log in if their password, or the server's configured pepper, was too long. ([\#19101](https://github.com/element-hq/synapse/issues/19101)) - - - - -# Synapse 1.141.0rc1 (2025-10-21) - ## Deprecation of MacOS Python wheels The team has decided to deprecate and eventually stop publishing python wheels @@ -21,6 +12,16 @@ do make use of these wheels downstream, please reach out to us in [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org). We'd love to hear from you! + +## Bugfixes + +- Fix users being unable to log in if their password, or the server's configured pepper, was too long. ([\#19101](https://github.com/element-hq/synapse/issues/19101)) + + + + +# Synapse 1.141.0rc1 (2025-10-21) + ## Features - Allow using [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190) behavior without the opt-in registration flag. Contributed by @tulir @ Beeper. 
([\#19031](https://github.com/element-hq/synapse/issues/19031)) From 77c6905805542ae894f58dca762f4bec8365515e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:47:23 +0000 Subject: [PATCH 086/149] Bump regex from 1.11.3 to 1.12.2 (#19074) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 321f1c7933..0ac32101c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1062,9 +1062,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.3" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", @@ -1074,9 +1074,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", From 5cfe873146156551720aaeedd68affe8cfc9a72d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:48:44 +0000 Subject: [PATCH 087/149] Bump tokio from 1.47.1 to 1.48.0 (#19076) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 95 +++++++++++------------------------------------------- 1 file changed, 18 insertions(+), 77 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ac32101c7..a9cdf70ba0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,21 +2,6 @@ # It is not intended for manual editing. 
version = 3 -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - [[package]] name = "aho-corasick" version = "1.1.3" @@ -50,21 +35,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" -[[package]] -name = "backtrace" -version = "0.3.75" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets", -] - [[package]] name = "base64" version = "0.22.1" @@ -341,12 +311,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "gimli" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" - [[package]] name = "h2" version = "0.4.11" @@ -684,17 +648,6 @@ version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" -[[package]] -name = "io-uring" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" -dependencies = [ - "bitflags", - "cfg-if", - "libc", -] - [[package]] name = "ipnet" version = "2.11.0" @@ -784,15 +737,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", -] - [[package]] name = "mio" version = "1.0.4" @@ -804,15 +748,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "object" -version = "0.36.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" version = "1.21.3" @@ -1145,12 +1080,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rustc-demangle" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" - [[package]] name = "rustc-hash" version = "2.1.1" @@ -1489,19 +1418,16 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.47.1" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", - "io-uring", "libc", "mio", "pin-project-lite", - "slab", "socket2 0.6.0", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -1782,6 +1708,12 @@ dependencies = 
[ "wasm-bindgen", ] +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + [[package]] name = "windows-sys" version = "0.52.0" @@ -1800,6 +1732,15 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-targets" version = "0.52.6" From 0d20f762cbd6671c4a9684575cd1145e3bc7e98b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:49:26 +0000 Subject: [PATCH 088/149] Bump reqwest from 0.12.23 to 0.12.24 (#19077) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a9cdf70ba0..35f62fe4e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1026,9 +1026,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.23" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ "base64", "bytes", From cb0ed5ec76bd393173b45f0c8ec4e91fee8d6b3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:53:36 +0000 Subject: [PATCH 089/149] Bump actions/download-artifact from 5.0.0 to 6.0.0 (#19102) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- .github/workflows/release-artifacts.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c88312f050..09832fae5c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -95,7 +95,7 @@ jobs: - build steps: - name: Download digests - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: path: ${{ runner.temp }}/digests pattern: digests-* diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index da6996742b..fc291ad771 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -191,7 +191,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download all workflow run artifacts - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 - name: Build a tarball for the debs # We need to merge all the debs uploads into one folder, then compress # that. 
From 66a42d4e54add85f11f63e116eb5702ca6c61778 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:54:00 +0000 Subject: [PATCH 090/149] Bump hiredis from 3.2.1 to 3.3.0 (#19103) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 281 ++++++++++++++++++++++++++++------------------------ 1 file changed, 149 insertions(+), 132 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3ca3facd3d..7b1b3b4cbc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -39,7 +39,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\"" files = [ {file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"}, {file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"}, @@ -447,7 +447,7 @@ description = "XML bomb protection for Python stdlib modules" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -472,7 +472,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"}, {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"}, @@ -518,122 +518,118 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock ; python_version < \"3. 
[[package]] name = "hiredis" -version = "3.2.1" +version = "3.3.0" description = "Python wrapper for hiredis" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"redis\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"redis\"" files = [ - {file = "hiredis-3.2.1-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:add17efcbae46c5a6a13b244ff0b4a8fa079602ceb62290095c941b42e9d5dec"}, - {file = "hiredis-3.2.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:5fe955cc4f66c57df1ae8e5caf4de2925d43b5efab4e40859662311d1bcc5f54"}, - {file = "hiredis-3.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f9ad63cd9065820a43fb1efb8ed5ae85bb78f03ef5eb53f6bde47914708f5718"}, - {file = "hiredis-3.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e7f9e5fdba08841d78d4e1450cae03a4dbed2eda8a4084673cafa5615ce24a"}, - {file = "hiredis-3.2.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1dce2508eca5d4e47ef38bc7c0724cb45abcdb0089f95a2ef49baf52882979a8"}, - {file = "hiredis-3.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:186428bf353e4819abae15aa2ad64c3f40499d596ede280fe328abb9e98e72ce"}, - {file = "hiredis-3.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:74f2500d90a0494843aba7abcdc3e77f859c502e0892112d708c02e1dcae8f90"}, - {file = "hiredis-3.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32822a94d2fdd1da96c05b22fdeef6d145d8fdbd865ba2f273f45eb949e4a805"}, - {file = "hiredis-3.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ead809fb08dd4fdb5b4b6e2999c834e78c3b0c450a07c3ed88983964432d0c64"}, - {file = "hiredis-3.2.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b90fada20301c3a257e868dd6a4694febc089b2b6d893fa96a3fc6c1f9ab4340"}, - {file = "hiredis-3.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:6d8bff53f526da3d9db86c8668011e4f7ca2958ee3a46c648edab6fe2cd1e709"}, - {file = "hiredis-3.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:043d929ae262d03e1db0f08616e14504a9119c1ff3de13d66f857d85cd45caff"}, - {file = "hiredis-3.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8d470fef39d02dbe5c541ec345cc4ffd7d2baec7d6e59c92bd9d9545dc221829"}, - {file = "hiredis-3.2.1-cp310-cp310-win32.whl", hash = "sha256:efa4c76c45cc8c42228c7989b279fa974580e053b5e6a4a834098b5324b9eafa"}, - {file = "hiredis-3.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:cbac5ec3a620b095c46ef3a8f1f06da9c86c1cdc411d44a5f538876c39a2b321"}, - {file = "hiredis-3.2.1-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:e4ae0be44cab5e74e6e4c4a93d04784629a45e781ff483b136cc9e1b9c23975c"}, - {file = "hiredis-3.2.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:24647e84c9f552934eb60b7f3d2116f8b64a7020361da9369e558935ca45914d"}, - {file = "hiredis-3.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6fb3e92d1172da8decc5f836bf8b528c0fc9b6d449f1353e79ceeb9dc1801132"}, - {file = "hiredis-3.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38ba7a32e51e518b6b3e470142e52ed2674558e04d7d73d86eb19ebcb37d7d40"}, - {file = "hiredis-3.2.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4fc632be73174891d6bb71480247e57b2fd8f572059f0a1153e4d0339e919779"}, - {file = "hiredis-3.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f03e6839ff21379ad3c195e0700fc9c209e7f344946dea0f8a6d7b5137a2a141"}, - {file = 
"hiredis-3.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99983873e37c71bb71deb544670ff4f9d6920dab272aaf52365606d87a4d6c73"}, - {file = "hiredis-3.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd982c419f48e3a57f592678c72474429465bb4bfc96472ec805f5d836523f0"}, - {file = "hiredis-3.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bc993f4aa4abc029347f309e722f122e05a3b8a0c279ae612849b5cc9dc69f2d"}, - {file = "hiredis-3.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:dde790d420081f18b5949227649ccb3ed991459df33279419a25fcae7f97cd92"}, - {file = "hiredis-3.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b0c8cae7edbef860afcf3177b705aef43e10b5628f14d5baf0ec69668247d08d"}, - {file = "hiredis-3.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e8a90eaca7e1ce7f175584f07a2cdbbcab13f4863f9f355d7895c4d28805f65b"}, - {file = "hiredis-3.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:476031958fa44e245e803827e0787d49740daa4de708fe514370293ce519893a"}, - {file = "hiredis-3.2.1-cp311-cp311-win32.whl", hash = "sha256:eb3f5df2a9593b4b4b676dce3cea53b9c6969fc372875188589ddf2bafc7f624"}, - {file = "hiredis-3.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:1402e763d8a9fdfcc103bbf8b2913971c0a3f7b8a73deacbda3dfe5f3a9d1e0b"}, - {file = "hiredis-3.2.1-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:3742d8b17e73c198cabeab11da35f2e2a81999d406f52c6275234592256bf8e8"}, - {file = "hiredis-3.2.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9c2f3176fb617a79f6cccf22cb7d2715e590acb534af6a82b41f8196ad59375d"}, - {file = "hiredis-3.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a8bd46189c7fa46174e02670dc44dfecb60f5bd4b67ed88cb050d8f1fd842f09"}, - {file = "hiredis-3.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f86ee4488c8575b58139cdfdddeae17f91e9a893ffee20260822add443592e2f"}, - {file = "hiredis-3.2.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3717832f4a557b2fe7060b9d4a7900e5de287a15595e398c3f04df69019ca69d"}, - {file = "hiredis-3.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e5cb12c21fb9e2403d28c4e6a38120164973342d34d08120f2d7009b66785644"}, - {file = "hiredis-3.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:080fda1510bbd389af91f919c11a4f2aa4d92f0684afa4709236faa084a42cac"}, - {file = "hiredis-3.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1252e10a1f3273d1c6bf2021e461652c2e11b05b83e0915d6eb540ec7539afe2"}, - {file = "hiredis-3.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d9e320e99ab7d2a30dc91ff6f745ba38d39b23f43d345cdee9881329d7b511d6"}, - {file = "hiredis-3.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:641668f385f16550fdd6fdc109b0af6988b94ba2acc06770a5e06a16e88f320c"}, - {file = "hiredis-3.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1e1f44208c39d6c345ff451f82f21e9eeda6fe9af4ac65972cc3eeb58d41f7cb"}, - {file = "hiredis-3.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f882a0d6415fffe1ffcb09e6281d0ba8b1ece470e866612bbb24425bf76cf397"}, - {file = "hiredis-3.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b4e78719a0730ebffe335528531d154bc8867a246418f74ecd88adbc4d938c49"}, - {file = "hiredis-3.2.1-cp312-cp312-win32.whl", hash = "sha256:33c4604d9f79a13b84da79950a8255433fca7edaf292bbd3364fd620864ed7b2"}, - {file = "hiredis-3.2.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:7b9749375bf9d171aab8813694f379f2cff0330d7424000f5e92890ad4932dc9"}, - {file = "hiredis-3.2.1-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:7cabf7f1f06be221e1cbed1f34f00891a7bdfad05b23e4d315007dd42148f3d4"}, - {file = "hiredis-3.2.1-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:db85cb86f8114c314d0ec6d8de25b060a2590b4713135240d568da4f7dea97ac"}, - {file = "hiredis-3.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c9a592a49b7b8497e4e62c3ff40700d0c7f1a42d145b71e3e23c385df573c964"}, - {file = "hiredis-3.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0079ef1e03930b364556b78548e67236ab3def4e07e674f6adfc52944aa972dd"}, - {file = "hiredis-3.2.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d6a290ed45d9c14f4c50b6bda07afb60f270c69b5cb626fd23a4c2fde9e3da1"}, - {file = "hiredis-3.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79dd5fe8c0892769f82949adeb021342ca46871af26e26945eb55d044fcdf0d0"}, - {file = "hiredis-3.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:998a82281a159f4aebbfd4fb45cfe24eb111145206df2951d95bc75327983b58"}, - {file = "hiredis-3.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41fc3cd52368ffe7c8e489fb83af5e99f86008ed7f9d9ba33b35fec54f215c0a"}, - {file = "hiredis-3.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8d10df3575ce09b0fa54b8582f57039dcbdafde5de698923a33f601d2e2a246c"}, - {file = "hiredis-3.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1ab010d04be33735ad8e643a40af0d68a21d70a57b1d0bff9b6a66b28cca9dbf"}, - {file = "hiredis-3.2.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ec3b5f9ea34f70aaba3e061cbe1fa3556fea401d41f5af321b13e326792f3017"}, - {file = "hiredis-3.2.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:158dfb505fff6bffd17f823a56effc0c2a7a8bc4fb659d79a52782f22eefc697"}, - {file = "hiredis-3.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d632cd0ddd7895081be76748e6fb9286f81d2a51c371b516541c6324f2fdac9"}, - {file = "hiredis-3.2.1-cp313-cp313-win32.whl", hash = "sha256:e9726d03e7df068bf755f6d1ecc61f7fc35c6b20363c7b1b96f39a14083df940"}, - {file = "hiredis-3.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:b5b1653ad7263a001f2e907e81a957d6087625f9700fa404f1a2268c0a4f9059"}, - {file = "hiredis-3.2.1-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:ef27728a8ceaa038ef4b6efc0e4473b7643b5c873c2fff5475e2c8b9c8d2e0d5"}, - {file = "hiredis-3.2.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:1039d8d2e1d2a1528ad9f9e289e8aa8eec9bf4b4759be4d453a2ab406a70a800"}, - {file = "hiredis-3.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83a8cd0eb6e535c93aad9c21e3e85bcb7dd26d3ff9b8ab095287be86e8af2f59"}, - {file = "hiredis-3.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6fc1e8f78bcdc7e25651b7d96d19b983b843b575904d96642f97ae157797ae4"}, - {file = "hiredis-3.2.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ddfa9a10fda3bea985a3b371a64553731141aaa0a20cbcc62a0e659f05e6c01"}, - {file = "hiredis-3.2.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e789ee008752b9be82a7bed82e36b62053c7cc06a0179a5a403ba5b2acba5bd8"}, - {file = "hiredis-3.2.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bf271877947a0f3eb9dc331688404a2e4cc246bca61bc5a1e2d62da9a1caad8"}, - {file = "hiredis-3.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c9ad404fd0fdbdfe74e55ebb0592ab4169eecfe70ccf0db80eedc1d9943dd6d7"}, - {file = "hiredis-3.2.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:979572c602bdea0c3df255545c8c257f2163dd6c10d1f172268ffa7a6e1287d6"}, - {file = "hiredis-3.2.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f74e3d899be057fb00444ea5f7ae1d7389d393bddf0f3ed698997aa05563483b"}, - {file = "hiredis-3.2.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:a015666d5fdc3ca704f68db9850d0272ddcfb27e9f26a593013383f565ed2ad7"}, - {file = "hiredis-3.2.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:259a3389dfe3390e356c2796b6bc96a778695e9d7d40c82121096a6b8a2dd3c6"}, - {file = "hiredis-3.2.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:39f469891d29f0522712265de76018ab83a64b85ac4b4f67e1f692cbd42a03f9"}, - {file = "hiredis-3.2.1-cp38-cp38-win32.whl", hash = "sha256:73aa0508f26cd6cb4dfdbe189b28fb3162fd171532e526e90a802363b88027f8"}, - {file = "hiredis-3.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:2b910f12d7bcaf5ffc056087fc7b2d23e688f166462c31b73a0799d12891378d"}, - {file = "hiredis-3.2.1-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:523a241d9f268bc0c7306792f58f9c633185f939a19abc0356c55f078d3901c5"}, - {file = "hiredis-3.2.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:fec453a038c262e18d7de4919220b2916e0b17d1eadd12e7a800f09f78f84f39"}, - {file = "hiredis-3.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e75a49c5927453c316665cfa39f4274081d00ce69b137b393823eb90c66a8371"}, - {file = "hiredis-3.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd974cbe8b3ae8d3e7f60675e6da10383da69f029147c2c93d1a7e44b36d1290"}, - {file = "hiredis-3.2.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12d3b8fff9905e44f357417159d64138a32500dbd0d5cffaddbb2600d3ce33b1"}, - {file = "hiredis-3.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e21985804a40cb91e69e35ae321eb4e3610cd61a2cbc0328ab73a245f608fa1c"}, - {file = "hiredis-3.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e26e2b49a9569f44a2a2d743464ff0786b46fb1124ed33d2a1bd8b1c660c25b"}, - {file = "hiredis-3.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef1ebf9ee8e0b4a895b86a02a8b7e184b964c43758393532966ecb8a256f37c"}, - {file = "hiredis-3.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c936b690dd31d7af74f707fc9003c500315b4c9ad70fa564aff73d1283b3b37a"}, - {file = "hiredis-3.2.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4909666bcb73270bb806aa00d0eee9e81f7a1aca388aafb4ba7dfcf5d344d23a"}, - {file = "hiredis-3.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d74a2ad25bc91ca9639e4485099852e6263b360b2c3650fdd3cc47762c5db3fa"}, - {file = "hiredis-3.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e99910088df446ee64d64b160835f592fb4d36189fcc948dd204e903d91fffa3"}, - {file = "hiredis-3.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:54423bd7af93a773edc6f166341cfb0e5f35ef42ca07b93f568f672a6f445e40"}, - {file = "hiredis-3.2.1-cp39-cp39-win32.whl", hash = "sha256:4a5365cb6d7be82d3c6d523b369bc0bc1a64987e88ed6ecfabadda2aa1cf4fa4"}, - {file = "hiredis-3.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:0a2eb02b6aaf4f1425a408e892c0378ba6cb6b45b1412c30dd258df1322d88c0"}, - {file = "hiredis-3.2.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:73913d2fa379e722d17ba52f21ce12dd578140941a08efd73e73b6fab1dea4d8"}, - {file = "hiredis-3.2.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:15a3dff3eca31ecbf3d7d6d104cf1b318dc2b013bad3f4bdb2839cb9ea2e1584"}, - {file = "hiredis-3.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c78258032c2f9fc6f39fee7b07882ce26de281e09178266ce535992572132d95"}, - {file = "hiredis-3.2.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:578d6a881e64e46db065256355594e680202c3bacf3270be3140057171d2c23e"}, - {file = "hiredis-3.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b7f34b170093c077c972b8cc0ceb15d8ff88ad0079751a8ae9733e94d77e733"}, - {file = "hiredis-3.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:291a18b228fc90f6720d178de2fac46522082c96330b4cc2d3dd8cb2c1cb2815"}, - {file = "hiredis-3.2.1-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f53d2af5a7cd33a4b4d7ba632dce80c17823df6814ef5a8d328ed44c815a68e7"}, - {file = "hiredis-3.2.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:20bdf6dbdf77eb43b98bc53950f7711983042472199245d4c36448e6b4cb460f"}, - {file = "hiredis-3.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f43e5c50d76da15118c72b757216cf26c643d55bb1b3c86cad1ae49173971780"}, - {file = "hiredis-3.2.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e5bb5fe9834851d56c8543e52dcd2ac5275fb6772ebc97876e18c2e05a3300b"}, - {file = "hiredis-3.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53e348438b6452e3d14dddb95d071fe8eaf6f264f641cba999c10bf6359cf1d2"}, - {file = "hiredis-3.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e305f6c63a2abcbde6ce28958de2bb4dd0fd34c6ab3bde5a4410befd5df8c6b2"}, - {file = "hiredis-3.2.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:33f24b1152f684b54d6b9d09135d849a6df64b6982675e8cf972f8adfa2de9aa"}, - {file = "hiredis-3.2.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:01dd8ea88bf8363751857ca2eb8f13faad0c7d57a6369663d4d1160f225ab449"}, - {file = "hiredis-3.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b16946533535cbb5cc7d4b6fc009d32d22b0f9ac58e8eb6f144637b64f9a61d"}, - {file = "hiredis-3.2.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9a03886cad1076e9f7e9e411c402826a8eac6f56ba426ee84b88e6515574b7b"}, - {file = "hiredis-3.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a4f6340f1c378bce17c195d46288a796fcf213dd3e2a008c2c942b33ab58993"}, - {file = "hiredis-3.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:9d64ddf29016d34e7e3bc4b3d36ca9ac8a94f9b2c13ac4b9d8a486862d91b95c"}, - {file = "hiredis-3.2.1.tar.gz", hash = "sha256:5a5f64479bf04dd829fe7029fad0ea043eac4023abc6e946668cbbec3493a78d"}, + {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"}, + {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"}, + {file = "hiredis-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1d00bce25c813eec45a2f524249f58daf51d38c9d3347f6f643ae53826fc735a"}, + {file = "hiredis-3.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ef840d9f142556ed384180ed8cdf14ff875fcae55c980cbe5cec7adca2ef4d8"}, + {file = "hiredis-3.3.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:88bc79d7e9b94d17ed1bd8b7f2815ed0eada376ed5f48751044e5e4d179aa2f2"}, + {file = "hiredis-3.3.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7165c7363e59b258e1875c51f35c0b2b9901e6c691037b487d8a0ace2c137ed2"}, + {file = "hiredis-3.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8c3be446f0c38fbe6863a7cf4522c9a463df6e64bee87c4402e9f6d7d2e7f869"}, + {file = "hiredis-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:96f9a27643279853b91a1fb94a88b559e55fdecec86f1fcd5f2561492be52e47"}, + {file = "hiredis-3.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0a5eebb170de1b415c78ae5ca3aee17cff8b885df93c2055d54320e789d838f4"}, + {file = "hiredis-3.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:200678547ac3966bac3e38df188211fdc13d5f21509c23267e7def411710e112"}, + {file = "hiredis-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dd9d78c5363a858f9dc5e698e5e1e402b83c00226cba294f977a92c53092b549"}, + {file = "hiredis-3.3.0-cp310-cp310-win32.whl", hash = "sha256:a0d31ff178b913137a7a08c7377e93805914755a15c3585e203d0d74496456c0"}, + {file = "hiredis-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:7b41833c8f0d4c7fbfaa867c8ed9a4e4aaa71d7c54e4806ed62da2d5cd27b40d"}, + {file = "hiredis-3.3.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:63ee6c1ae6a2462a2439eb93c38ab0315cd5f4b6d769c6a34903058ba538b5d6"}, + {file = "hiredis-3.3.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:31eda3526e2065268a8f97fbe3d0e9a64ad26f1d89309e953c80885c511ea2ae"}, + {file = "hiredis-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a26bae1b61b7bcafe3d0d0c7d012fb66ab3c95f2121dbea336df67e344e39089"}, + {file = "hiredis-3.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b9546079f7fd5c50fbff9c791710049b32eebe7f9b94debec1e8b9f4c048cba2"}, + {file = "hiredis-3.3.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ae327fc13b1157b694d53f92d50920c0051e30b0c245f980a7036e299d039ab4"}, + {file = "hiredis-3.3.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4016e50a8be5740a59c5af5252e5ad16c395021a999ad24c6604f0d9faf4d346"}, + {file = "hiredis-3.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c17b473f273465a3d2168a57a5b43846165105ac217d5652a005e14068589ddc"}, + {file = "hiredis-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9ecd9b09b11bd0b8af87d29c3f5da628d2bdc2a6c23d2dd264d2da082bd4bf32"}, + {file = "hiredis-3.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:00fb04eac208cd575d14f246e74a468561081ce235937ab17d77cde73aefc66c"}, + {file = "hiredis-3.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:60814a7d0b718adf3bfe2c32c6878b0e00d6ae290ad8e47f60d7bba3941234a6"}, + {file = "hiredis-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fcbd1a15e935aa323b5b2534b38419511b7909b4b8ee548e42b59090a1b37bb1"}, + {file = "hiredis-3.3.0-cp311-cp311-win32.whl", hash = "sha256:73679607c5a19f4bcfc9cf6eb54480bcd26617b68708ac8b1079da9721be5449"}, + {file = "hiredis-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:30a4df3d48f32538de50648d44146231dde5ad7f84f8f08818820f426840ae97"}, + {file = "hiredis-3.3.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5b8e1d6a2277ec5b82af5dce11534d3ed5dffeb131fd9b210bc1940643b39b5f"}, + {file = 
"hiredis-3.3.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:c4981de4d335f996822419e8a8b3b87367fcef67dc5fb74d3bff4df9f6f17783"}, + {file = "hiredis-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1706480a683e328ae9ba5d704629dee2298e75016aa0207e7067b9c40cecc271"}, + {file = "hiredis-3.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a95cef9989736ac313639f8f545b76b60b797e44e65834aabbb54e4fad8d6c8"}, + {file = "hiredis-3.3.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca2802934557ccc28a954414c245ba7ad904718e9712cb67c05152cf6b9dd0a3"}, + {file = "hiredis-3.3.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fe730716775f61e76d75810a38ee4c349d3af3896450f1525f5a4034cf8f2ed7"}, + {file = "hiredis-3.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:749faa69b1ce1f741f5eaf743435ac261a9262e2d2d66089192477e7708a9abc"}, + {file = "hiredis-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:95c9427f2ac3f1dd016a3da4e1161fa9d82f221346c8f3fdd6f3f77d4e28946c"}, + {file = "hiredis-3.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c863ee44fe7bff25e41f3a5105c936a63938b76299b802d758f40994ab340071"}, + {file = "hiredis-3.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2213c7eb8ad5267434891f3241c7776e3bafd92b5933fc57d53d4456247dc542"}, + {file = "hiredis-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a172bae3e2837d74530cd60b06b141005075db1b814d966755977c69bd882ce8"}, + {file = "hiredis-3.3.0-cp312-cp312-win32.whl", hash = "sha256:cb91363b9fd6d41c80df9795e12fffbaf5c399819e6ae8120f414dedce6de068"}, + {file = "hiredis-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:04ec150e95eea3de9ff8bac754978aa17b8bf30a86d4ab2689862020945396b0"}, + {file = "hiredis-3.3.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:b7048b4ec0d5dddc8ddd03da603de0c4b43ef2540bf6e4c54f47d23e3480a4fa"}, + {file = "hiredis-3.3.0-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:e5f86ce5a779319c15567b79e0be806e8e92c18bb2ea9153e136312fafa4b7d6"}, + {file = "hiredis-3.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:fbdb97a942e66016fff034df48a7a184e2b7dc69f14c4acd20772e156f20d04b"}, + {file = "hiredis-3.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b0fb4bea72fe45ff13e93ddd1352b43ff0749f9866263b5cca759a4c960c776f"}, + {file = "hiredis-3.3.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:85b9baf98050e8f43c2826ab46aaf775090d608217baf7af7882596aef74e7f9"}, + {file = "hiredis-3.3.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:69079fb0f0ebb61ba63340b9c4bce9388ad016092ca157e5772eb2818209d930"}, + {file = "hiredis-3.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c17f77b79031ea4b0967d30255d2ae6e7df0603ee2426ad3274067f406938236"}, + {file = "hiredis-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d14f745fc177bc05fc24bdf20e2b515e9a068d3d4cce90a0fb78d04c9c9d9a"}, + {file = "hiredis-3.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ba063fdf1eff6377a0c409609cbe890389aefddfec109c2d20fcc19cfdafe9da"}, + {file = "hiredis-3.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1799cc66353ad066bfdd410135c951959da9f16bcb757c845aab2f21fc4ef099"}, + 
{file = "hiredis-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2cbf71a121996ffac82436b6153290815b746afb010cac19b3290a1644381b07"}, + {file = "hiredis-3.3.0-cp313-cp313-win32.whl", hash = "sha256:a7cbbc6026bf03659f0b25e94bbf6e64f6c8c22f7b4bc52fe569d041de274194"}, + {file = "hiredis-3.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:a8def89dd19d4e2e4482b7412d453dec4a5898954d9a210d7d05f60576cedef6"}, + {file = "hiredis-3.3.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:c135bda87211f7af9e2fd4e046ab433c576cd17b69e639a0f5bb2eed5e0e71a9"}, + {file = "hiredis-3.3.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2f855c678230aed6fc29b962ce1cc67e5858a785ef3a3fd6b15dece0487a2e60"}, + {file = "hiredis-3.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4059c78a930cbb33c391452ccce75b137d6f89e2eebf6273d75dafc5c2143c03"}, + {file = "hiredis-3.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:334a3f1d14c253bb092e187736c3384203bd486b244e726319bbb3f7dffa4a20"}, + {file = "hiredis-3.3.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd137b147235447b3d067ec952c5b9b95ca54b71837e1b38dbb2ec03b89f24fc"}, + {file = "hiredis-3.3.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8f88f4f2aceb73329ece86a1cb0794fdbc8e6d614cb5ca2d1023c9b7eb432db8"}, + {file = "hiredis-3.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:550f4d1538822fc75ebf8cf63adc396b23d4958bdbbad424521f2c0e3dfcb169"}, + {file = "hiredis-3.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:54b14211fbd5930fc696f6fcd1f1f364c660970d61af065a80e48a1fa5464dd6"}, + {file = "hiredis-3.3.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c9e96f63dbc489fc86f69951e9f83dadb9582271f64f6822c47dcffa6fac7e4a"}, + {file = "hiredis-3.3.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:106e99885d46684d62ab3ec1d6b01573cc0e0083ac295b11aaa56870b536c7ec"}, + {file = "hiredis-3.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:087e2ef3206361281b1a658b5b4263572b6ba99465253e827796964208680459"}, + {file = "hiredis-3.3.0-cp314-cp314-win32.whl", hash = "sha256:80638ebeab1cefda9420e9fedc7920e1ec7b4f0513a6b23d58c9d13c882f8065"}, + {file = "hiredis-3.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a68aaf9ba024f4e28cf23df9196ff4e897bd7085872f3a30644dca07fa787816"}, + {file = "hiredis-3.3.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:f7f80442a32ce51ee5d89aeb5a84ee56189a0e0e875f1a57bbf8d462555ae48f"}, + {file = "hiredis-3.3.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:a1a67530da714954ed50579f4fe1ab0ddbac9c43643b1721c2cb226a50dde263"}, + {file = "hiredis-3.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:616868352e47ab355559adca30f4f3859f9db895b4e7bc71e2323409a2add751"}, + {file = "hiredis-3.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e799b79f3150083e9702fc37e6243c0bd47a443d6eae3f3077b0b3f510d6a145"}, + {file = "hiredis-3.3.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9ef1dfb0d2c92c3701655e2927e6bbe10c499aba632c7ea57b6392516df3864b"}, + {file = "hiredis-3.3.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c290da6bc2a57e854c7da9956cd65013483ede935677e84560da3b848f253596"}, + {file = 
"hiredis-3.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd8c438d9e1728f0085bf9b3c9484d19ec31f41002311464e75b69550c32ffa8"}, + {file = "hiredis-3.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1bbc6b8a88bbe331e3ebf6685452cebca6dfe6d38a6d4efc5651d7e363ba28bd"}, + {file = "hiredis-3.3.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:55d8c18fe9a05496c5c04e6eccc695169d89bf358dff964bcad95696958ec05f"}, + {file = "hiredis-3.3.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:4ddc79afa76b805d364e202a754666cb3c4d9c85153cbfed522871ff55827838"}, + {file = "hiredis-3.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8e8a4b8540581dcd1b2b25827a54cfd538e0afeaa1a0e3ca87ad7126965981cc"}, + {file = "hiredis-3.3.0-cp314-cp314t-win32.whl", hash = "sha256:298593bb08487753b3afe6dc38bac2532e9bac8dcee8d992ef9977d539cc6776"}, + {file = "hiredis-3.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b442b6ab038a6f3b5109874d2514c4edf389d8d8b553f10f12654548808683bc"}, + {file = "hiredis-3.3.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:114c0b9f1b5fad99edae38e747018aead358a4f4e9720cc1876495d78cdb8276"}, + {file = "hiredis-3.3.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:c6d91a5e6904ed7eca21d74b041e03f2ad598dd08a6065b06a776974fe5d003c"}, + {file = "hiredis-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:76374faa075e996c895cbe106ba923852a9f8146f2aa59eba22111c5e5ec6316"}, + {file = "hiredis-3.3.0-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:50a54397bd104c2e2f5b7696bbdab8ba2973d3075e4deb932adb025b8863de91"}, + {file = "hiredis-3.3.0-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:15edee02cc9cc06e07e2bcfae07e283e640cc1aeedd08b4c6934bf1a0113c607"}, + {file = "hiredis-3.3.0-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff3179a57745d0f8d71fa8bf3ea3944d3f557dcfa4431304497987fecad381dd"}, + {file = "hiredis-3.3.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bdb7cd9e1e73db78f145a09bb837732790d0912eb963dee5768631faf2ece162"}, + {file = "hiredis-3.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4d3b4e0d4445faf9041c52a98cb5d2b65c4fcaebb2aa02efa7c6517c4917f7e8"}, + {file = "hiredis-3.3.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ffea6c407cff532c7599d3ec9e8502c2c865753cebab044f3dfce9afbf71a8df"}, + {file = "hiredis-3.3.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:bcd745a28e1b3216e42680d91e142a42569dfad68a6f40535080c47b0356c796"}, + {file = "hiredis-3.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4c18a97ea55d1a58f5c3adfe236b3e7cccedc6735cbd36ab1c786c52fd823667"}, + {file = "hiredis-3.3.0-cp38-cp38-win32.whl", hash = "sha256:77eacd969e3c6ff50c2b078c27d2a773c652248a5d81af5765a8663478d0bc02"}, + {file = "hiredis-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:161a4a595a53475587aef8dc549d0527962879b0c5d62f7947b44ba7e5084b76"}, + {file = "hiredis-3.3.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:1203697a7ebadc7cf873acc189df9e44fcb377b636e6660471707ac8d5bcba68"}, + {file = "hiredis-3.3.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:9a7ea2344d277317160da4911f885bcf7dfd8381b830d76b442f7775b41544b3"}, + {file = "hiredis-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9bd7c9a089cf4e4f4b5a61f412c76293449bac6b0bf92bb49a3892850bd5c899"}, + {file = 
"hiredis-3.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:294de11e3995128c784534e327d1f9382b88dc5407356465df7934c710e8392d"}, + {file = "hiredis-3.3.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4a3aab895358368f81f9546a7cd192b6fb427f785cb1a8853cf9db38df01e9ca"}, + {file = "hiredis-3.3.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:eaf8418e33e23d6d7ef0128eff4c06ab3040d40b9bbc8a24d6265d751a472596"}, + {file = "hiredis-3.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41aea51949142bad4e40badb0396392d7f4394791e4097a0951ab75bcc58ff84"}, + {file = "hiredis-3.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1f9a5f84a8bd29ac5b9953b27e8ba5508396afeabf1d165611a1e31fbd90a0e1"}, + {file = "hiredis-3.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a5f9fde56550ebbe962f437a4c982b0856d03aea7fab09e30fa6c0f9be992b40"}, + {file = "hiredis-3.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c567aab02612d91f3e747fc492100ae894515194f85d6fb6bb68958c0e718721"}, + {file = "hiredis-3.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ca97c5e6f9e9b9f0aed61b70fed2d594ce2f7472905077d2d10b307c50a41008"}, + {file = "hiredis-3.3.0-cp39-cp39-win32.whl", hash = "sha256:776dc5769d5eb05e969216de095377ff61c802414a74bd3c24a4ca8526c897ab"}, + {file = "hiredis-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:538a9f5fbb3a8a4ef0c3abd309cccb90cd2ba9976fcc2b44193af9507d005b48"}, + {file = "hiredis-3.3.0.tar.gz", hash = "sha256:105596aad9249634361815c574351f1bd50455dc23b537c2940066c4a9dea685"}, ] [[package]] @@ -870,7 +866,7 @@ description = "Jaeger Python OpenTracing Tracer implementation" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, ] @@ -1008,7 +1004,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" files = [ {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, @@ -1024,7 +1020,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"url-preview\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"url-preview\"" files = [ {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"}, {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"}, @@ -1311,7 +1307,7 @@ description = "An LDAP3 auth provider for Synapse" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" files = [ {file = 
"matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, @@ -1553,7 +1549,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, ] @@ -1622,6 +1618,8 @@ groups = ["main"] files = [ {file = "pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860"}, {file = "pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b"}, {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50"}, {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae"}, {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9"}, @@ -1631,6 +1629,8 @@ files = [ {file = "pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f"}, {file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"}, {file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494"}, {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"}, {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"}, {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"}, @@ -1640,6 +1640,8 @@ files = [ {file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"}, {file = "pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4"}, {file = "pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69"}, + 
{file = "pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6"}, {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7"}, {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024"}, {file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809"}, @@ -1652,6 +1654,8 @@ files = [ {file = "pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f"}, {file = "pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c"}, {file = "pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1"}, {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805"}, {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8"}, {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2"}, @@ -1661,6 +1665,8 @@ files = [ {file = "pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580"}, {file = "pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e"}, {file = "pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c"}, {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8"}, {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59"}, {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe"}, @@ -1670,6 +1676,8 @@ files = [ {file = "pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = 
"sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e"}, {file = "pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12"}, {file = "pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673"}, {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027"}, {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77"}, {file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874"}, @@ -1679,6 +1687,8 @@ files = [ {file = "pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6"}, {file = "pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae"}, {file = "pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36"}, {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b"}, {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477"}, {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50"}, @@ -1688,6 +1698,8 @@ files = [ {file = "pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa"}, {file = "pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f"}, {file = "pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc"}, {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06"}, {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a"}, {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978"}, @@ -1697,11 +1709,15 @@ files = [ {file = "pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"}, @@ -1739,7 +1755,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"postgres\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"postgres\"" files = [ {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, @@ -1747,6 +1763,7 @@ files = [ {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, {file = 
"psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, + {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"}, {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, @@ -1759,7 +1776,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" files = [ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, ] @@ -1775,7 +1792,7 @@ description = "A Simple library to enable psycopg2 compatability" optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" files = [ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, ] @@ -2034,7 +2051,7 @@ description = "A development tool to measure, monitor and analyze the memory beh optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"cache-memory\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"cache-memory\"" files = [ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, @@ -2094,7 +2111,7 @@ description = "Python implementation of SAML Version 2 Standard" optional = true python-versions = ">=3.9,<4.0" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"}, {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"}, @@ -2119,7 +2136,7 @@ description = "Extensions to the standard Python datetime module" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, @@ -2147,7 +2164,7 @@ description = "World timezone definitions, modern and historical" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = 
"sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"}, {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, @@ -2513,7 +2530,7 @@ description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"sentry\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"sentry\"" files = [ {file = "sentry_sdk-2.34.1-py2.py3-none-any.whl", hash = "sha256:b7a072e1cdc5abc48101d5146e1ae680fa81fe886d8d95aaa25a0b450c818d32"}, {file = "sentry_sdk-2.34.1.tar.gz", hash = "sha256:69274eb8c5c38562a544c3e9f68b5be0a43be4b697f5fd385bf98e4fbe672687"}, @@ -2701,7 +2718,7 @@ description = "Tornado IOLoop Backed Concurrent Futures" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, @@ -2717,7 +2734,7 @@ description = "Python bindings for the Apache Thrift RPC system" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, ] @@ -2779,7 +2796,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"}, {file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"}, @@ -2916,7 +2933,7 @@ description = "non-blocking redis client for python" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"redis\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"redis\"" files = [ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"}, {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"}, @@ -3162,7 +3179,7 @@ description = "An XML Schema validator and decoder" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"}, {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"}, From 1a78fc8a65d215959f611c9e4d0643edcf07b68e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:55:12 +0000 Subject: [PATCH 091/149] Bump pyyaml from 6.0.2 to 6.0.3 (#19105) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 128 
++++++++++++++++++++++++++++++---------------------- 1 file changed, 74 insertions(+), 54 deletions(-) diff --git a/poetry.lock b/poetry.lock index 7b1b3b4cbc..dd86fe8159 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2185,65 +2185,85 @@ files = [ [[package]] name = "pyyaml" -version = "6.0.2" +version = "6.0.3" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, - {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, - {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, - {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, - {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, - {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, - {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, - {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, - {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, - {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, - {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, - {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, - {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, - {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, - {file = 
"PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, - {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, - {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, - {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, + {file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"}, + {file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"}, + {file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"}, + {file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"}, + {file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"}, + {file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b"}, + {file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0"}, + {file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69"}, + {file = "pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e"}, + {file = "pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c"}, + {file = "pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e"}, + {file = "pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d"}, + {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a"}, + {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4"}, + {file = "pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b"}, + {file = "pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf"}, + {file = "pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196"}, + {file = "pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc"}, + {file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e"}, + {file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea"}, + {file = "pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5"}, + {file = "pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b"}, + {file = "pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd"}, + {file = "pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8"}, + {file = "pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6"}, + {file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6"}, + {file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be"}, + {file = "pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26"}, + {file = "pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c"}, + {file = "pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb"}, + {file = "pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac"}, + {file = "pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5"}, + {file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764"}, + {file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35"}, + {file = "pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac"}, + {file = "pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3"}, + {file = "pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3"}, + {file = "pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c"}, + {file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065"}, + {file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65"}, + {file = "pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9"}, + {file = "pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b"}, + {file = "pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da"}, + {file = "pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a"}, + {file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926"}, + {file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7"}, + {file = "pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0"}, + {file = "pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007"}, + {file = "pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f"}, ] [[package]] From e23e7ae48fa75bb6700931f4911e2e5d05072867 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:56:29 +0000 Subject: [PATCH 092/149] Bump actions/upload-artifact from 4 to 5 (#19106) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- .github/workflows/docs-pr.yaml | 2 +- .github/workflows/latest_deps.yml | 2 +- .github/workflows/release-artifacts.yml | 6 +++--- .github/workflows/tests.yml | 4 ++-- .github/workflows/twisted_trunk.yml | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 09832fae5c..987fb76ae1 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -75,7 +75,7 @@ jobs: touch "${{ runner.temp }}/digests/${digest#sha256:}" - name: Upload digest - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: digests-${{ matrix.suffix }} path: ${{ runner.temp }}/digests/* diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index a0af38a6c5..6a61dd5fb1 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -39,7 +39,7 @@ jobs: cp book/welcome_and_overview.html book/index.html - name: Upload Artifact - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: book path: book diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 526546531a..2076a1c1e1 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -173,7 +173,7 @@ jobs: if: ${{ always() }} run: /sytest/scripts/tap_to_gha.pl /logs/results.tap - name: Upload SyTest logs - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ always() }} with: name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }}) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index fc291ad771..a19bde0a60 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -101,7 +101,7 @@ jobs: echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT" - name: Upload debs as artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }} path: debs/* @@ -154,7 +154,7 @@ jobs: # for, and so need extra build deps. CIBW_TEST_SKIP: pp3*-* *i686* *musl* - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: Wheel-${{ matrix.os }} path: ./wheelhouse/*.whl @@ -175,7 +175,7 @@ jobs: - name: Build sdist run: python -m build --sdist - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: Sdist path: dist/*.tar.gz diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6b8cb3c585..f75435bedf 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -585,7 +585,7 @@ jobs: if: ${{ always() }} run: /sytest/scripts/tap_to_gha.pl /logs/results.tap - name: Upload SyTest logs - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ always() }} with: name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }}) @@ -683,7 +683,7 @@ jobs: PGPASSWORD: postgres PGDATABASE: postgres - name: "Upload schema differences" - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }} with: name: Schema dumps diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 3f14219bbc..11b7bfe143 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -147,7 +147,7 @@ jobs: if: ${{ always() }} run: /sytest/scripts/tap_to_gha.pl /logs/results.tap - name: Upload SyTest logs - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ always() }} with: name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }}) From a07dd43ac40cf5c3e0e357227d8d6e48759964e9 Mon Sep 17 
00:00:00 2001 From: Andrew Ferrazzutti Date: Tue, 28 Oct 2025 14:11:45 -0400 Subject: [PATCH 093/149] Use Pillow's non-experimental getexif (#19098) It has been available since Pillow 6, and Synapse is now pinned on Pillow >=10.0.1. Found this while looking at Debian-shipped dependencies, and figured this may as well be updated. --- changelog.d/19098.misc | 1 + synapse/media/thumbnailer.py | 11 +---------- 2 files changed, 2 insertions(+), 10 deletions(-) create mode 100644 changelog.d/19098.misc diff --git a/changelog.d/19098.misc b/changelog.d/19098.misc new file mode 100644 index 0000000000..a6933348a3 --- /dev/null +++ b/changelog.d/19098.misc @@ -0,0 +1 @@ +Use Pillow's `Image.getexif` method instead of the experimental `Image._getexif`. diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index cc2fe7318b..a42d39c319 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -97,16 +97,7 @@ class Thumbnailer: self.transpose_method = None try: # We don't use ImageOps.exif_transpose since it crashes with big EXIF - # - # Ignore safety: Pillow seems to acknowledge that this method is - # "private, experimental, but generally widely used". Pillow 6 - # includes a public getexif() method (no underscore) that we might - # consider using instead when we can bump that dependency. - # - # At the time of writing, Debian buster (currently oldstable) - # provides version 5.4.1. It's expected to EOL in mid-2022, see - # https://wiki.debian.org/DebianReleases#Production_Releases - image_exif = self.image._getexif() # type: ignore + image_exif = self.image.getexif() if image_exif is not None: image_orientation = image_exif.get(EXIF_ORIENTATION_TAG) assert type(image_orientation) is int # noqa: E721 From dc33ef90d373ed7f8abd85fbd38bdddc8d984e27 Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Tue, 28 Oct 2025 18:25:16 -0400 Subject: [PATCH 094/149] Update docs on downstream Debian package (#19100) --- changelog.d/19100.doc | 1 + docs/setup/installation.md | 8 ++------ 2 files changed, 3 insertions(+), 6 deletions(-) create mode 100644 changelog.d/19100.doc diff --git a/changelog.d/19100.doc b/changelog.d/19100.doc new file mode 100644 index 0000000000..a723f34c4f --- /dev/null +++ b/changelog.d/19100.doc @@ -0,0 +1 @@ +Update the list of Debian releases that the downstream Debian package is maintained for. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 0840f532b0..68f224d33a 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -87,17 +87,13 @@ file when you upgrade the Debian package to a later version. Andrej Shadura maintains a [`matrix-synapse`](https://packages.debian.org/sid/matrix-synapse) package in the Debian repositories. -For `bookworm` and `sid`, it can be installed simply with: +For `forky` (14) and `sid` (rolling release), it can be installed simply with: ```sh sudo apt install matrix-synapse ``` -Synapse is also available in `bullseye-backports`. Please -see the [Debian documentation](https://backports.debian.org/Instructions/) -for information on how to use backports. - -`matrix-synapse` is no longer maintained for `buster` and older. +The downstream Debian `matrix-synapse` package is not available for `trixie` (13) and older. Consider using the Matrix.org packages (above). 
##### Downstream Ubuntu packages From 5f9f3d72b8af7dda96a4be55a9e914c611e377e8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 29 Oct 2025 11:01:53 +0000 Subject: [PATCH 095/149] 1.141.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 8fa31fa8e8..3ecd5c3ca7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.141.0 (2025-10-29) + +No significant changes since 1.141.0rc2. + + + + # Synapse 1.141.0rc2 (2025-10-28) ## Deprecation of MacOS Python wheels diff --git a/debian/changelog b/debian/changelog index 012d59aa93..14278968a8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.141.0) stable; urgency=medium + + * New Synapse release 1.141.0. + + -- Synapse Packaging team Wed, 29 Oct 2025 11:01:43 +0000 + matrix-synapse-py3 (1.141.0~rc2) stable; urgency=medium * New Synapse release 1.141.0rc2. diff --git a/pyproject.toml b/pyproject.toml index affea7fc5e..b4514481c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.141.0rc2" +version = "1.141.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial" From 1090c3ec819818469c6bfa91ccafdaee8f8fd8f9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 29 Oct 2025 11:02:46 +0000 Subject: [PATCH 096/149] Move MacOS wheels deprecation notice to the top --- CHANGES.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 3ecd5c3ca7..c1c517dad5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,12 +1,5 @@ # Synapse 1.141.0 (2025-10-29) -No significant changes since 1.141.0rc2. - - - - -# Synapse 1.141.0rc2 (2025-10-28) - ## Deprecation of MacOS Python wheels The team has decided to deprecate and eventually stop publishing python wheels @@ -19,6 +12,12 @@ do make use of these wheels downstream, please reach out to us in [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org). We'd love to hear from you! +No significant changes since 1.141.0rc2. + + + + +# Synapse 1.141.0rc2 (2025-10-28) ## Bugfixes From facb81d97b06c81063795e823b41985cc7fc9c85 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 29 Oct 2025 11:03:56 +0000 Subject: [PATCH 097/149] Add Debian trixie notice to the top of the release notes --- CHANGES.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index c1c517dad5..eead7e35cd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -12,6 +12,13 @@ do make use of these wheels downstream, please reach out to us in [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org). We'd love to hear from you! + +## Docker images now based on Debian `trixie` with Python 3.13 + +The Docker images are now based on Debian `trixie` and use Python 3.13. If you +are using the Docker images as a base image you may need to e.g. adjust the +paths you mount any additional Python packages at. + No significant changes since 1.141.0rc2. 
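For readers extending the Docker image, the simplest way to confirm where additional Python packages should now be mounted is to ask the interpreter inside the image itself. The snippet below is an illustrative sketch only (it is not part of any patch in this series) and assumes the official `matrixdotorg/synapse` image, where `python` is Python 3.13:

```python
# Illustrative sketch: print the directory that holds pure-Python packages inside
# the image, i.e. where any additional packages would need to be mounted.
# Assumed invocation (not taken from the patches):
#   docker run --rm --entrypoint python matrixdotorg/synapse:latest \
#     -c "import sysconfig; print(sysconfig.get_paths()['purelib'])"
import sysconfig

# Prints e.g. /usr/local/lib/python3.13/site-packages in the trixie-based images.
print(sysconfig.get_paths()["purelib"])
```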
From 7897c8f6af3007b8a537247fe150e390a4202865 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 29 Oct 2025 12:32:33 +0100 Subject: [PATCH 098/149] Add a docs page with common steps to review the release notes (#19109) --- changelog.d/19109.doc | 1 + docs/SUMMARY.md | 2 ++ .../release_notes_review_checklist.md | 12 ++++++++++++ scripts-dev/release.py | 5 ++++- 4 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19109.doc create mode 100644 docs/development/internal_documentation/release_notes_review_checklist.md diff --git a/changelog.d/19109.doc b/changelog.d/19109.doc new file mode 100644 index 0000000000..4cce54b486 --- /dev/null +++ b/changelog.d/19109.doc @@ -0,0 +1 @@ +Add [a page](https://element-hq.github.io/synapse/latest/development/internal_documentation/release_notes_review_checklist.html) to the documentation describing the steps the Synapse team takes to review the release notes before publishing them. \ No newline at end of file diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 64869eca8e..926a6eb848 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -116,6 +116,8 @@ - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md) - [Media Repository](media_repository.md) - [Room and User Statistics](room_and_user_statistics.md) + - [Releasing]() + - [Release Notes Review Checklist](development/internal_documentation/release_notes_review_checklist.md) - [Scripts]() # Other diff --git a/docs/development/internal_documentation/release_notes_review_checklist.md b/docs/development/internal_documentation/release_notes_review_checklist.md new file mode 100644 index 0000000000..c3d4d665be --- /dev/null +++ b/docs/development/internal_documentation/release_notes_review_checklist.md @@ -0,0 +1,12 @@ +# Release notes review checklist + +The Synapse release process includes a step to review the changelog before +publishing it. The following is a list of common points to check for: + +1. Check whether any similar entries that can be merged together (make sure to include all mentioned PRs at the end of the line, i.e. (#1234, #1235, ...)). +2. Link any MSCXXXX lines to the Matrix Spec Change itself: . +3. Wrap any class names, variable names, etc. in back-ticks, if needed. +4. Hoist any relevant security, deprecation, etc. announcements to the top of this version's changelog for visibility. This includes any announcements in RCs for this release. +5. Check the upgrade notes for any important announcements, and link to them from the changelog if warranted. +6. Quickly skim and check that each entry is in the appropriate section. +7. Entries under the Bugfixes section should ideally state what Synapse version the bug was introduced in. For example: "Fixed a bug introduced in v1.x.y" or if no version can be identified, "Fixed a long-standing bug ...". 
\ No newline at end of file diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 111c184ccb..262c1503c7 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -316,7 +316,10 @@ def _prepare() -> None: ) print("Opening the changelog in your browser...") - print("Please ask #synapse-dev to give it a check.") + print( + "Please review it using the release notes review checklist: https://element-hq.github.io/synapse/develop/development/internal_documentation/release_notes_review_checklist.html" + ) + print("And post it in #synapse-dev for cursory review from the team.") click.launch( f"https://github.com/element-hq/synapse/blob/{synapse_repo.active_branch.name}/CHANGES.md" ) From 0417296b9f3d3499bd00807a356809aa34a74559 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 29 Oct 2025 10:23:10 -0500 Subject: [PATCH 099/149] Remove logcontext problems caused by awaiting raw `deferLater(...)` (#19058) This is a normal problem where we `await` a deferred without wrapping it in `make_deferred_yieldable(...)`. But I've opted to replace the usage of `deferLater` with something more standard for the Synapse codebase. Part of https://github.com/element-hq/synapse/issues/18905 It's unclear why we're only now seeing these failures happen with the changes from https://github.com/element-hq/synapse/pull/19057 Example failures seen in https://github.com/element-hq/synapse/actions/runs/18477454390/job/52645183606?pr=19057 ``` builtins.AssertionError: Expected `looping_call` callback from the reactor to start with the sentinel logcontext but saw task-_resumable_task-0-IBzAmHUoepQfLnEA. In other words, another task shouldn't have leaked their logcontext to us. ``` --- changelog.d/19058.misc | 1 + synapse/util/task_scheduler.py | 4 ++-- tests/rest/admin/test_room.py | 3 +-- tests/util/test_task_scheduler.py | 9 ++++++--- 4 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 changelog.d/19058.misc diff --git a/changelog.d/19058.misc b/changelog.d/19058.misc new file mode 100644 index 0000000000..15bc4b39bd --- /dev/null +++ b/changelog.d/19058.misc @@ -0,0 +1 @@ +Remove logcontext problems caused by awaiting raw `deferLater(...)`. diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index f033d37579..22b3bf8c15 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -53,8 +53,8 @@ running_tasks_gauge = LaterGauge( class TaskScheduler: """ This is a simple task scheduler designed for resumable tasks. Normally, - you'd use `run_in_background` to start a background task or Twisted's - `deferLater` if you want to run it later. + you'd use `run_in_background` to start a background task or `clock.call_later` + if you want to run it later. The issue is that these tasks stop completely and won't resume if Synapse is shut down for any reason. diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 5b95262365..40b34f4433 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -27,7 +27,6 @@ from unittest.mock import AsyncMock, Mock from parameterized import parameterized -from twisted.internet.task import deferLater from twisted.internet.testing import MemoryReactor import synapse.rest.admin @@ -1163,7 +1162,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): # Mock PaginationHandler.purge_room to sleep for 100s, so we have time to do a second call # before the purge is over. Note that it doesn't purge anymore, but we don't care. 
async def purge_room(room_id: str, force: bool) -> None: - await deferLater(self.hs.get_reactor(), 100, lambda: None) + await self.hs.get_clock().sleep(100) self.pagination_handler.purge_room = AsyncMock(side_effect=purge_room) # type: ignore[method-assign] diff --git a/tests/util/test_task_scheduler.py b/tests/util/test_task_scheduler.py index 43c3ce52ea..de9e381489 100644 --- a/tests/util/test_task_scheduler.py +++ b/tests/util/test_task_scheduler.py @@ -20,9 +20,10 @@ # from typing import Optional -from twisted.internet.task import deferLater +from twisted.internet.defer import Deferred from twisted.internet.testing import MemoryReactor +from synapse.logging.context import make_deferred_yieldable from synapse.server import HomeServer from synapse.types import JsonMapping, ScheduledTask, TaskStatus from synapse.util.clock import Clock @@ -87,7 +88,7 @@ class TestTaskScheduler(HomeserverTestCase): self, task: ScheduledTask ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: # Sleep for a second - await deferLater(self.reactor, 1, lambda: None) + await self.hs.get_clock().sleep(1) return TaskStatus.COMPLETE, None, None def test_schedule_lot_of_tasks(self) -> None: @@ -170,8 +171,10 @@ class TestTaskScheduler(HomeserverTestCase): return TaskStatus.COMPLETE, {"success": True}, None else: await self.task_scheduler.update_task(task.id, result={"in_progress": True}) + # Create a deferred which we will never complete + incomplete_d: Deferred = Deferred() # Await forever to simulate an aborted task because of a restart - await deferLater(self.reactor, 2**16, lambda: None) + await make_deferred_yieldable(incomplete_d) # This should never been called return TaskStatus.ACTIVE, None, None From 6facf98a3ac83e03cf9880a057e4321c8be6070f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 30 Oct 2025 10:21:56 -0500 Subject: [PATCH 100/149] Be mindful of other `SIGHUP` handlers in 3rd-party code (#19095) Be mindful that Synapse can be run alongside other code in the same Python process. We shouldn't clobber other `SIGHUP` handlers as only one can be set at a time. (no clobber) ### Background As part of Element's plan to support a light form of vhosting (virtual host) (multiple instances of Synapse in the same Python process), we're currently diving into the details and implications of running multiple instances of Synapse in the same Python process. "Per-tenant logging" tracked internally by https://github.com/element-hq/synapse-small-hosts/issues/48 Relevant to logging as we use a `SIGHUP` to reload log config in Synapse. --- changelog.d/19095.misc | 1 + synapse/app/_base.py | 118 ++++++++++++++++++++++++--------------- synapse/config/logger.py | 4 +- 3 files changed, 74 insertions(+), 49 deletions(-) create mode 100644 changelog.d/19095.misc
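The chaining pattern this patch adopts can be illustrated with a minimal standalone sketch (illustrative only, not Synapse's actual helper code; it assumes a POSIX platform where `signal.SIGHUP` exists):

```python
import signal
from types import FrameType
from typing import Optional


def handle_sighup(signum: int, frame: Optional[FrameType]) -> None:
    # Do our own reload work first...
    print("reloading configuration")
    # ...then hand off to whatever handler was installed before us. It may be a
    # Python callable, or SIG_DFL/SIG_IGN/None, so only invoke it if it is callable.
    if callable(previous_handler):
        previous_handler(signum, frame)


if hasattr(signal, "SIGHUP"):
    # signal.signal() returns the previously installed handler, which lets us
    # chain to it instead of clobbering it.
    previous_handler = signal.signal(signal.SIGHUP, handle_sighup)
```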
diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 1954dbc1a0..c0fcf8ca29 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -29,6 +29,7 @@ import traceback import warnings from textwrap import indent from threading import Thread +from types import FrameType from typing import ( TYPE_CHECKING, Any, @@ -36,6 +37,7 @@ from typing import ( Callable, NoReturn, Optional, + Union, cast, ) from wsgiref.simple_server import WSGIServer @@ -72,7 +74,6 @@ from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext, PreserveLoggingContext from synapse.metrics import install_gc_manager, register_threadpool -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.jemalloc import setup_jemalloc_stats from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( @@ -108,7 +109,7 @@ P = ParamSpec("P") def register_sighup( - homeserver_instance_id: str, + hs: "HomeServer", func: Callable[P, None], *args: P.args, **kwargs: P.kwargs, @@ -123,19 +124,25 @@ def register_sighup( *args, **kwargs: args and kwargs to be passed to the target function. """ - _instance_id_to_sighup_callbacks_map.setdefault(homeserver_instance_id, []).append( - (func, args, kwargs) + # Wrap the function so we can run it within a logcontext + def _callback_wrapper(*args: P.args, **kwargs: P.kwargs) -> None: + with LoggingContext(name="sighup", server_name=hs.hostname): + func(*args, **kwargs) + + _instance_id_to_sighup_callbacks_map.setdefault(hs.get_instance_id(), []).append( + (_callback_wrapper, args, kwargs) ) -def unregister_sighups(instance_id: str) -> None: +def unregister_sighups(homeserver_instance_id: str) -> None: """ Unregister all sighup functions associated with this Synapse instance. Args: - instance_id: Unique ID for this Synapse process instance. + homeserver_instance_id: The unique ID for this Synapse process instance to + unregister hooks for (`hs.get_instance_id()`). """ - _instance_id_to_sighup_callbacks_map.pop(instance_id, []) + _instance_id_to_sighup_callbacks_map.pop(homeserver_instance_id, []) def start_worker_reactor( @@ -540,6 +547,61 @@ def refresh_certificate(hs: "HomeServer") -> None: logger.info("Context factories updated.") +_already_setup_sighup_handling = False +""" +Marks whether we've already successfully ran `setup_sighup_handling()`. +""" + + +def setup_sighup_handling() -> None: + """ + Set up SIGHUP handling to call registered callbacks. + + This can be called multiple times safely. + """ + global _already_setup_sighup_handling + # We only need to set things up once per process. + if _already_setup_sighup_handling: + return + + previous_sighup_handler: Union[ + Callable[[int, Optional[FrameType]], Any], int, None + ] = None + + # Set up the SIGHUP machinery. + if hasattr(signal, "SIGHUP"): + + def handle_sighup(*args: Any, **kwargs: Any) -> None: + # Tell systemd our state, if we're using it. This will silently fail if + # we're not using systemd. + sdnotify(b"RELOADING=1") + + if callable(previous_sighup_handler): + previous_sighup_handler(*args, **kwargs) + + for sighup_callbacks in _instance_id_to_sighup_callbacks_map.values(): + for func, args, kwargs in sighup_callbacks: + func(*args, **kwargs) + + sdnotify(b"READY=1") + + # We defer running the sighup handlers until next reactor tick. 
This + # is so that we're in a sane state, e.g. flushing the logs may fail + # if the sighup happens in the middle of writing a log entry. + def run_sighup(*args: Any, **kwargs: Any) -> None: + # `callFromThread` should be "signal safe" as well as thread + # safe. + reactor.callFromThread(handle_sighup, *args, **kwargs) + + # Register for the SIGHUP signal, chaining any existing handler as there can + # only be one handler per signal and we don't want to clobber any existing + # handlers (like the `multi_synapse` shard process in the context of Synapse Pro + # for small hosts) + previous_sighup_handler = signal.signal(signal.SIGHUP, run_sighup) + + _already_setup_sighup_handling = True + + async def start(hs: "HomeServer", freeze: bool = True) -> None: """ Start a Synapse server or worker. @@ -579,45 +641,9 @@ async def start(hs: "HomeServer", freeze: bool = True) -> None: name="gai_resolver", server_name=server_name, threadpool=resolver_threadpool ) - # Set up the SIGHUP machinery. - if hasattr(signal, "SIGHUP"): - - def handle_sighup(*args: Any, **kwargs: Any) -> "defer.Deferred[None]": - async def _handle_sighup(*args: Any, **kwargs: Any) -> None: - # Tell systemd our state, if we're using it. This will silently fail if - # we're not using systemd. - sdnotify(b"RELOADING=1") - - for sighup_callbacks in _instance_id_to_sighup_callbacks_map.values(): - for func, args, kwargs in sighup_callbacks: - func(*args, **kwargs) - - sdnotify(b"READY=1") - - # It's okay to ignore the linter error here and call - # `run_as_background_process` directly because `_handle_sighup` operates - # outside of the scope of a specific `HomeServer` instance and holds no - # references to it which would prevent a clean shutdown. - return run_as_background_process( # type: ignore[untracked-background-process] - "sighup", - server_name, - _handle_sighup, - *args, - **kwargs, - ) - - # We defer running the sighup handlers until next reactor tick. This - # is so that we're in a sane state, e.g. flushing the logs may fail - # if the sighup happens in the middle of writing a log entry. - def run_sighup(*args: Any, **kwargs: Any) -> None: - # `callFromThread` should be "signal safe" as well as thread - # safe. - reactor.callFromThread(handle_sighup, *args, **kwargs) - - signal.signal(signal.SIGHUP, run_sighup) - - register_sighup(hs.get_instance_id(), refresh_certificate, hs) - register_sighup(hs.get_instance_id(), reload_cache_config, hs.config) + setup_sighup_handling() + register_sighup(hs, refresh_certificate, hs) + register_sighup(hs, reload_cache_config, hs.config) # Apply the cache config. hs.config.caches.resize_all_caches() diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 8e355035a9..945236ed07 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -345,9 +345,7 @@ def setup_logging( # Add a SIGHUP handler to reload the logging configuration, if one is available. from synapse.app import _base as appbase - appbase.register_sighup( - hs.get_instance_id(), _reload_logging_config, log_config_path - ) + appbase.register_sighup(hs, _reload_logging_config, log_config_path) # Log immediately so we can grep backwards. 
logger.warning("***** STARTING SERVER *****") From 32998d07d2250ef4b82fef3aaeca7c83b87f38d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Oct 2025 15:39:07 +0000 Subject: [PATCH 101/149] Bump sigstore/cosign-installer from 3.10.0 to 4.0.0 (#19075) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 987fb76ae1..8bc045dc64 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -120,7 +120,7 @@ jobs: uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 - name: Install Cosign - uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0 + uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 - name: Calculate docker image tag uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0 From e0838c25674d1c46a514ebcb8f14fd058a595b2a Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Wed, 29 Oct 2025 13:15:00 -0400 Subject: [PATCH 102/149] Drop Python 3.9, bump tests/builds to Python 3.10 (#19099) Python 3.9 EOL is on 2025-10-31 --- .ci/scripts/calculate_jobs.py | 10 +++---- .github/workflows/release-artifacts.yml | 2 +- .github/workflows/tests.yml | 6 ++--- build_rust.py | 6 ++--- changelog.d/19099.removal | 1 + docker/editable.Dockerfile | 2 +- docs/development/dependencies.md | 8 +++--- docs/setup/installation.md | 11 +++++--- docs/upgrade.md | 12 +++++++++ mypy.ini | 2 +- poetry.lock | 35 +++---------------------- pyproject.toml | 16 ++++++----- rust/Cargo.toml | 2 +- scripts-dev/build_debian_packages.py | 5 ++-- synapse/__init__.py | 4 +-- tox.ini | 2 +- 16 files changed, 59 insertions(+), 65 deletions(-) create mode 100644 changelog.d/19099.removal diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index f3b1bb1503..2971b3c5c8 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -36,11 +36,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/") # First calculate the various trial jobs. 
# # For PRs, we only run each type of test with the oldest Python version supported (which -# is Python 3.9 right now) +# is Python 3.10 right now) trial_sqlite_tests = [ { - "python-version": "3.9", + "python-version": "3.10", "database": "sqlite", "extras": "all", } @@ -53,12 +53,12 @@ if not IS_PR: "database": "sqlite", "extras": "all", } - for version in ("3.10", "3.11", "3.12", "3.13") + for version in ("3.11", "3.12", "3.13") ) trial_postgres_tests = [ { - "python-version": "3.9", + "python-version": "3.10", "database": "postgres", "postgres-version": "13", "extras": "all", @@ -77,7 +77,7 @@ if not IS_PR: trial_no_extra_tests = [ { - "python-version": "3.9", + "python-version": "3.10", "database": "sqlite", "extras": "", } diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index a19bde0a60..f3e0da5aa4 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -145,7 +145,7 @@ jobs: - name: Only build a single wheel on PR if: startsWith(github.ref, 'refs/pull/') - run: echo "CIBW_BUILD="cp39-manylinux_*"" >> $GITHUB_ENV + run: echo "CIBW_BUILD="cp310-manylinux_*"" >> $GITHUB_ENV - name: Build wheels run: python -m cibuildwheel --output-dir wheelhouse diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f75435bedf..93c0e9415f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -470,7 +470,7 @@ jobs: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: '3.9' + python-version: '3.10' - name: Prepare old deps if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true' @@ -514,7 +514,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["pypy-3.9"] + python-version: ["pypy-3.10"] extras: ["all"] steps: @@ -638,7 +638,7 @@ jobs: strategy: matrix: include: - - python-version: "3.9" + - python-version: "3.10" postgres-version: "13" - python-version: "3.13" diff --git a/build_rust.py b/build_rust.py index af7bd2fdc5..a9e9265daf 100644 --- a/build_rust.py +++ b/build_rust.py @@ -27,12 +27,12 @@ def build(setup_kwargs: dict[str, Any]) -> None: setup_kwargs["zip_safe"] = False # We look up the minimum supported Python version with - # `python_requires` (e.g. ">=3.9.0,<4.0.0") and finding the first Python + # `python_requires` (e.g. ">=3.10.0,<4.0.0") and finding the first Python # version that matches. We then convert that into the `py_limited_api` form, - # e.g. cp39 for Python 3.9. + # e.g. cp310 for Python 3.10. py_limited_api: str python_bounds = SpecifierSet(setup_kwargs["python_requires"]) - for minor_version in itertools.count(start=8): + for minor_version in itertools.count(start=10): if f"3.{minor_version}.0" in python_bounds: py_limited_api = f"cp3{minor_version}" break diff --git a/changelog.d/19099.removal b/changelog.d/19099.removal new file mode 100644 index 0000000000..8279a1c7f9 --- /dev/null +++ b/changelog.d/19099.removal @@ -0,0 +1 @@ +Drop support for Python 3.9. diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile index 7e5da4e4f4..b2aff9cb53 100644 --- a/docker/editable.Dockerfile +++ b/docker/editable.Dockerfile @@ -3,7 +3,7 @@ # # Used by `complement.sh`. Not suitable for production use. 
-ARG PYTHON_VERSION=3.9 +ARG PYTHON_VERSION=3.10 ### ### Stage 0: generate requirements.txt diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md index e381b3d155..1b3348703f 100644 --- a/docs/development/dependencies.md +++ b/docs/development/dependencies.md @@ -79,17 +79,17 @@ phonenumbers = [ We can see this pinned version inside the docker image for that release: ``` -$ docker pull vectorim/synapse:v1.97.0 +$ docker pull matrixdotorg/synapse:latest ... -$ docker run --entrypoint pip vectorim/synapse:v1.97.0 show phonenumbers +$ docker run --entrypoint pip matrixdotorg/synapse:latest show phonenumbers Name: phonenumbers -Version: 8.12.44 +Version: 9.0.15 Summary: Python version of Google's common library for parsing, formatting, storing and validating international phone numbers. Home-page: https://github.com/daviddrysdale/python-phonenumbers Author: David Drysdale Author-email: dmd@lurklurk.org License: Apache License 2.0 -Location: /usr/local/lib/python3.9/site-packages +Location: /usr/local/lib/python3.12/site-packages Requires: Required-by: matrix-synapse ``` diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 68f224d33a..786672c689 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -204,7 +204,7 @@ When following this route please make sure that the [Platform-specific prerequis System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.9 or later, up to Python 3.13. +- Python 3.10 or later, up to Python 3.13. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org If building on an uncommon architecture for which pre-built wheels are @@ -307,11 +307,16 @@ sudo dnf group install "Development Tools" ##### Red Hat Enterprise Linux / Rocky Linux / Oracle Linux -*Note: The term "RHEL" below refers to Red Hat Enterprise Linux, Oracle Linux and Rocky Linux. The distributions are 1:1 binary compatible.* +*Note: The term "RHEL" below refers to Red Hat Enterprise Linux, Oracle Linux and Rocky Linux. +The distributions are 1:1 binary compatible.* It's recommended to use the latest Python versions. -RHEL 8 in particular ships with Python 3.6 by default which is EOL and therefore no longer supported by Synapse. RHEL 9 ships with Python 3.9 which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and they're available in official distributions' repositories. Therefore it's recommended to use them. +RHEL 8 & 9 in particular ship with Python 3.6 & 3.9 respectively by default +which are EOL and therefore no longer supported by Synapse. +However, newer Python versions provide significant performance improvements +and they're available in official distributions' repositories. +Therefore it's recommended to use them. Python 3.11 and 3.12 are available for both RHEL 8 and 9. diff --git a/docs/upgrade.md b/docs/upgrade.md index 63d567505f..faf6cbf8dc 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -117,6 +117,18 @@ each upgrade are complete before moving on to the next upgrade, to avoid stacking them up. You can monitor the currently running background updates with [the Admin API](usage/administration/admin_api/background_updates.html#status). +# Upgrading to v1.142.0 + +## Minimum supported Python version + +The minimum supported Python version has been increased from v3.9 to v3.10. +You will need Python 3.10+ to run Synapse v1.142.0. 
+ +If you use current versions of the +[matrixorg/synapse](setup/installation.html#docker-images-and-ansible-playbooks) +Docker images, no action is required. + + # Upgrading to v1.141.0 ## Docker images now based on Debian `trixie` with Python 3.13 diff --git a/mypy.ini b/mypy.ini index eefe405fe5..d6a3434293 100644 --- a/mypy.ini +++ b/mypy.ini @@ -37,7 +37,7 @@ strict_equality = True # Run mypy type checking with the minimum supported Python version to catch new usage # that isn't backwards-compatible (types, overloads, etc). -python_version = 3.9 +python_version = 3.10 files = docker/, diff --git a/poetry.lock b/poetry.lock index dd86fe8159..5a16dd5860 100644 --- a/poetry.lock +++ b/poetry.lock @@ -60,9 +60,6 @@ files = [ {file = "automat-25.4.16.tar.gz", hash = "sha256:0017591a5477066e90d26b0e696ddc143baafd87b588cfac8100bc6be9634de0"}, ] -[package.dependencies] -typing_extensions = {version = "*", markers = "python_version < \"3.10\""} - [package.extras] visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"] @@ -510,7 +507,6 @@ files = [ [package.dependencies] gitdb = ">=4.0.1,<5" -typing-extensions = {version = ">=3.10.0.2", markers = "python_version < \"3.10\""} [package.extras] doc = ["sphinx (>=7.1.2,<7.2)", "sphinx-autodoc-typehints", "sphinx_rtd_theme"] @@ -806,7 +802,7 @@ description = "Read metadata from Python packages" optional = false python-versions = ">=3.7" groups = ["dev"] -markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and python_version < \"3.12\" or python_version < \"3.10\"" +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and python_version < \"3.12\"" files = [ {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, @@ -820,26 +816,6 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker perf = ["ipython"] testing = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\"", "pytest-perf (>=0.9.2)", "pytest-ruff"] -[[package]] -name = "importlib-resources" -version = "5.12.0" -description = "Read resources from Python packages" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -markers = "python_version < \"3.10\"" -files = [ - {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, - {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, -] - -[package.dependencies] -zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8 ; python_version < \"3.12\"", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\""] - [[package]] name = "incremental" version = "24.7.2" @@ -2846,8 +2822,6 @@ files = 
[ [package.dependencies] click = "*" -importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} -importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""} jinja2 = "*" tomli = {version = "*", markers = "python_version < \"3.11\""} @@ -2893,7 +2867,6 @@ files = [ [package.dependencies] id = "*" -importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} keyring = {version = ">=21.2.0", markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\""} packaging = ">=24.0" readme-renderer = ">=35.0" @@ -3220,7 +3193,7 @@ description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" groups = ["dev"] -markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and python_version < \"3.12\" or python_version < \"3.10\"" +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and python_version < \"3.12\"" files = [ {file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"}, {file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"}, @@ -3342,5 +3315,5 @@ url-preview = ["lxml"] [metadata] lock-version = "2.1" -python-versions = "^3.9.0" -content-hash = "5d71c862b924bc2af936cb6fef264a023213153543f738af31357deaf6de19b8" +python-versions = "^3.10.0" +content-hash = "0122c5aa55099678f2ba5094ec393ebd814def15213388b33e5f1d7760392ffc" diff --git a/pyproject.toml b/pyproject.toml index 9a57a2b8d1..08b4b8af66 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ [tool.ruff] line-length = 88 -target-version = "py39" +target-version = "py310" [tool.ruff.lint] # See https://beta.ruff.rs/docs/rules/#error-e @@ -165,7 +165,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main" update_synapse_database = "synapse._scripts.update_synapse_database:main" [tool.poetry.dependencies] -python = "^3.9.0" +python = "^3.10.0" # Mandatory Dependencies # ---------------------- @@ -201,7 +201,8 @@ bcrypt = ">=3.1.7" # Packagers that already took care of libwebp can lower that down to 5.4.0. Pillow = ">=10.0.1" # We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2. -sortedcontainers = ">=1.5.2" +# 2.0.5 updates collections.abc imports to avoid Python 3.10 incompatibility. +sortedcontainers = ">=2.0.5" pymacaroons = ">=0.13.0" msgpack = ">=0.5.2" phonenumbers = ">=8.2.0" @@ -217,7 +218,8 @@ netaddr = ">=0.7.18" # end up with a broken installation, with recent MarkupSafe but old Jinja, we # add a lower bound to the Jinja2 dependency. Jinja2 = ">=3.0" -bleach = ">=1.4.3" +# 3.2.0 updates collections.abc imports to avoid Python 3.10 incompatibility. +bleach = ">=3.2.0" # We use `assert_never`, which were added in `typing-extensions` 4.1. typing-extensions = ">=4.1" # We enforce that we have a `cryptography` version that bundles an `openssl` @@ -258,10 +260,12 @@ authlib = { version = ">=0.15.1", optional = true } # `contrib/systemd/log_config.yaml`. # Note: systemd-python 231 appears to have been yanked from pypi systemd-python = { version = ">=231", optional = true } -lxml = { version = ">=4.5.2", optional = true } +# 4.6.3 removes usage of _PyGen_Send which is unavailable in CPython as of Python 3.10. 
+lxml = { version = ">=4.6.3", optional = true } sentry-sdk = { version = ">=0.7.2", optional = true } opentracing = { version = ">=2.2.0", optional = true } -jaeger-client = { version = ">=4.0.0", optional = true } +# 4.2.0 updates collections.abc imports to avoid Python 3.10 incompatibility. +jaeger-client = { version = ">=4.2.0", optional = true } txredisapi = { version = ">=1.4.7", optional = true } hiredis = { version = "*", optional = true } Pympler = { version = "*", optional = true } diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 0706357294..4f0319a7f5 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -34,7 +34,7 @@ pyo3 = { version = "0.25.1", features = [ "macros", "anyhow", "abi3", - "abi3-py39", + "abi3-py310", ] } pyo3-log = "0.12.4" pythonize = "0.25.0" diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index f94c5a37fc..60aa8a5796 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -21,13 +21,12 @@ from types import FrameType from typing import Collection, Optional, Sequence # These are expanded inside the dockerfile to be a fully qualified image name. -# e.g. docker.io/library/debian:bullseye +# e.g. docker.io/library/debian:bookworm # # If an EOL is forced by a Python version and we're dropping support for it, make sure -# to remove references to the distibution across Synapse (search for "bullseye" for +# to remove references to the distibution across Synapse (search for "bookworm" for # example) DISTS = ( - "debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05) "debian:bookworm", # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24) "debian:sid", # (rolling distro, no EOL) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04) diff --git a/synapse/__init__.py b/synapse/__init__.py index d1c306b8f3..2bed060878 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -39,8 +39,8 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True # Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the # if-statement completely. py_version = sys.version_info -if py_version < (3, 9): - print("Synapse requires Python 3.9 or above.") +if py_version < (3, 10): + print("Synapse requires Python 3.10 or above.") sys.exit(1) # Allow using the asyncio reactor via env var. diff --git a/tox.ini b/tox.ini index a506b5034d..a0e397bbbf 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py39, py310, py311, py312, py313 +envlist = py310, py311, py312, py313 # we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208 minversion = 2.3.2 From 728512918e169dd6a84f3c3a2b359c97fcde9b9a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 30 Oct 2025 11:17:35 +0100 Subject: [PATCH 103/149] Exclude `.lock` file from `/usr/local` when building docker images (#19107) --- changelog.d/19107.misc | 1 + docker/Dockerfile | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19107.misc diff --git a/changelog.d/19107.misc b/changelog.d/19107.misc new file mode 100644 index 0000000000..38cb9a9b3b --- /dev/null +++ b/changelog.d/19107.misc @@ -0,0 +1 @@ +Prevent uv `/usr/local/.lock` file from appearing in built Synapse docker images. 
\ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index f83486036d..6d10dee1aa 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -179,7 +179,12 @@ LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later OR LicenseRef-Element COPY --from=runtime-deps /install-${TARGETARCH}/etc /etc COPY --from=runtime-deps /install-${TARGETARCH}/usr /usr COPY --from=runtime-deps /install-${TARGETARCH}/var /var -COPY --from=builder /install /usr/local + +# Copy the installed python packages from the builder stage. +# +# uv will generate a `.lock` file when installing packages, which we don't want +# to copy to the final image. +COPY --from=builder --exclude=.lock /install /usr/local COPY ./docker/start.py /start.py COPY ./docker/conf /conf From f54ddbcace2a42f656b739ae0520ac1c8f2205a5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 30 Oct 2025 13:40:53 +0100 Subject: [PATCH 104/149] Prevent duplicate GH releases being created during Synapse release process (#19096) --- .github/workflows/release-artifacts.yml | 17 ++++++----------- changelog.d/19096.misc | 1 + 2 files changed, 7 insertions(+), 11 deletions(-) create mode 100644 changelog.d/19096.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index f3e0da5aa4..4e38c0f35b 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -200,16 +200,11 @@ jobs: mv debs*/* debs/ tar -cvJf debs.tar.xz debs - name: Attach to release - # Pinned to work around https://github.com/softprops/action-gh-release/issues/445 - uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v0.1.15 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - files: | - Sdist/* - Wheel*/* - debs.tar.xz - # if it's not already published, keep the release as a draft. - draft: true - # mark it as a prerelease if the tag contains 'rc'. - prerelease: ${{ contains(github.ref, 'rc') }} + run: | + gh release upload "${{ github.ref_name }}" \ + Sdist/* \ + Wheel*/* \ + debs.tar.xz \ + --repo ${{ github.repository }} diff --git a/changelog.d/19096.misc b/changelog.d/19096.misc new file mode 100644 index 0000000000..0b7bdf0967 --- /dev/null +++ b/changelog.d/19096.misc @@ -0,0 +1 @@ +Prevent duplicate GitHub draft releases being created during the Synapse release process. \ No newline at end of file From 2c4057bf93c55ce0df16a8024f75b46eab70739d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 30 Oct 2025 10:21:56 -0500 Subject: [PATCH 105/149] Prevent duplicate logging setup when running multiple Synapse instances (#19067) Be mindful that it's possible to run Synapse multiple times in the same Python process. So we only need to do some part of the logging setup once. - We only need to setup the global log record factory and context filter once - We only need to redirect Twisted logging once ### Background As part of Element's plan to support a light form of vhosting (virtual host) (multiple instances of Synapse in the same Python process), we're currently diving into the details and implications of running multiple instances of Synapse in the same Python process. 
"Per-tenant logging" tracked internally by https://github.com/element-hq/synapse-small-hosts/issues/48 --- changelog.d/19067.misc | 1 + synapse/config/logger.py | 88 +++++++++++++++++++++++++-------------- synmark/suites/logging.py | 4 +- 3 files changed, 60 insertions(+), 33 deletions(-) create mode 100644 changelog.d/19067.misc diff --git a/changelog.d/19067.misc b/changelog.d/19067.misc new file mode 100644 index 0000000000..560fbfc668 --- /dev/null +++ b/changelog.d/19067.misc @@ -0,0 +1 @@ +Prevent duplicate logging setup when running multiple Synapse instances. diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 945236ed07..1f5c6da3ae 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -198,12 +198,27 @@ class LoggingConfig(Config): log_config_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=log_file)) -def _setup_stdlib_logging( - config: "HomeServerConfig", log_config_path: Optional[str], logBeginner: LogBeginner -) -> None: +_already_performed_one_time_logging_setup: bool = False +""" +Marks whether we've already successfully ran `one_time_logging_setup()`. +""" + + +def one_time_logging_setup(*, logBeginner: LogBeginner = globalLogBeginner) -> None: """ - Set up Python standard library logging. + Perform one-time logging configuration for the Python process. + + For example, we don't need to have multiple log record factories. Once we've + configured it once, we don't need to do it again. + + This matters because multiple Synapse instances can be run in the same Python + process (c.f. Synapse Pro for small hosts) """ + global _already_performed_one_time_logging_setup + + # We only need to set things up once. + if _already_performed_one_time_logging_setup: + return # We add a log record factory that runs all messages through the # LoggingContextFilter so that we get the context *at the time we log* @@ -221,26 +236,6 @@ def _setup_stdlib_logging( logging.setLogRecordFactory(factory) - # Configure the logger with the initial configuration. - if log_config_path is None: - log_format = ( - "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" - " - %(message)s" - ) - - logger = logging.getLogger("") - logger.setLevel(logging.INFO) - logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO) - - formatter = logging.Formatter(log_format) - - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) - else: - # Load the logging configuration. - _load_logging_config(log_config_path) - # Route Twisted's native logging through to the standard library logging # system. observer = STDLibLogObserver() @@ -281,6 +276,36 @@ def _setup_stdlib_logging( logBeginner.beginLoggingTo([_log], redirectStandardIO=False) + _already_performed_one_time_logging_setup = True + + +def _setup_stdlib_logging( + config: "HomeServerConfig", log_config_path: Optional[str] +) -> None: + """ + Set up Python standard library logging. + """ + + # Configure the logger with the initial configuration. + if log_config_path is None: + log_format = ( + "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" + " - %(message)s" + ) + + logger = logging.getLogger("") + logger.setLevel(logging.INFO) + logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO) + + formatter = logging.Formatter(log_format) + + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + else: + # Load the logging configuration. 
+ _load_logging_config(log_config_path) + def _load_logging_config(log_config_path: str) -> None: """ @@ -318,19 +343,14 @@ def setup_logging( hs: "HomeServer", config: "HomeServerConfig", use_worker_options: bool = False, - logBeginner: LogBeginner = globalLogBeginner, ) -> None: """ Set up the logging subsystem. Args: config: configuration data - use_worker_options: True to use the 'worker_log_config' option instead of 'log_config'. - - logBeginner: The Twisted logBeginner to use. - """ from twisted.internet import reactor @@ -341,11 +361,17 @@ def setup_logging( ) # Perform one-time logging configuration. - _setup_stdlib_logging(config, log_config_path, logBeginner=logBeginner) + one_time_logging_setup() + + # Configure logging. + _setup_stdlib_logging(config, log_config_path) # Add a SIGHUP handler to reload the logging configuration, if one is available. from synapse.app import _base as appbase - appbase.register_sighup(hs, _reload_logging_config, log_config_path) + # We only need to reload the config if there is a log config file path provided to + # reload from. + if log_config_path: + appbase.register_sighup(hs, _reload_logging_config, log_config_path) # Log immediately so we can grep backwards. logger.warning("***** STARTING SERVER *****") diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index cf9c836e06..db77484f4c 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -33,7 +33,7 @@ from twisted.internet.protocol import ServerFactory from twisted.logger import LogBeginner, LogPublisher from twisted.protocols.basic import LineOnlyReceiver -from synapse.config.logger import _setup_stdlib_logging +from synapse.config.logger import _setup_stdlib_logging, one_time_logging_setup from synapse.logging import RemoteHandler from synapse.synapse_rust import reset_logging_config from synapse.types import ISynapseReactor @@ -115,10 +115,10 @@ async def main(reactor: ISynapseReactor, loops: int) -> float: } logger = logging.getLogger("synapse") + one_time_logging_setup(logBeginner=beginner) _setup_stdlib_logging( hs_config, # type: ignore[arg-type] None, - logBeginner=beginner, ) # Force a new logging config without having to load it from a file. From 349599143eaf7f7b415a7a3d859d0488afffa1d1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 30 Oct 2025 16:22:52 +0100 Subject: [PATCH 106/149] Move reading of multipart response into `try` body (#19062) --- changelog.d/19062.bugfix | 1 + synapse/http/matrixfederationclient.py | 2 +- tests/http/test_matrixfederationclient.py | 59 +++++++++++++++++++++++ 3 files changed, 61 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19062.bugfix diff --git a/changelog.d/19062.bugfix b/changelog.d/19062.bugfix new file mode 100644 index 0000000000..c5231cbbc8 --- /dev/null +++ b/changelog.d/19062.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.111.0 where failed attempts to download authenticated remote media would not be handled correctly. 
\ No newline at end of file diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index d0e47cf8dc..562007c74f 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -1755,6 +1755,7 @@ class MatrixFederationHttpClient: response, output_stream, boundary, expected_size + 1 ) deferred.addTimeout(self.default_timeout_seconds, self.reactor) + multipart_response = await make_deferred_yieldable(deferred) except BodyExceededMaxSize: msg = "Requested file is too large > %r bytes" % (expected_size,) logger.warning( @@ -1791,7 +1792,6 @@ class MatrixFederationHttpClient: ) raise - multipart_response = await make_deferred_yieldable(deferred) if not multipart_response.url: assert multipart_response.length is not None length = multipart_response.length diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index 6accb03b9f..4792bdc9bc 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -414,6 +414,65 @@ class FederationClientTests(HomeserverTestCase): self.assertEqual(length, len(data)) self.assertEqual(output_stream.getvalue(), data) + @override_config( + { + "federation": { + # Set the timeout to a deterministic value, in case the defaults + # change. + "client_timeout": "10s", + } + } + ) + def test_authed_media_timeout_reading_body(self) -> None: + """ + If the HTTP request is connected, but gets no response before being + timed out, it'll give a RequestSendFailed with can_retry. + + Regression test for https://github.com/element-hq/synapse/issues/19061 + """ + limiter = Ratelimiter( + store=self.hs.get_datastores().main, + clock=self.clock, + cfg=RatelimitSettings(key="", per_second=0.17, burst_count=1048576), + ) + + output_stream = io.BytesIO() + + d = defer.ensureDeferred( + # timeout is set by `client_timeout`, which we override above. + self.cl.federation_get_file( + "testserv:8008", "path", output_stream, limiter, "127.0.0.1", 10000 + ) + ) + + self.pump() + + conn = Mock() + clients = self.reactor.tcpClients + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred does not have a result + self.assertNoResult(d) + + # Send it the HTTP response + client.dataReceived( + b"HTTP/1.1 200 OK\r\n" + b"Server: Fake\r\n" + # Set a large content length, prompting the federation client to + # wait to receive the rest of the body. + b"Content-Length: 1000\r\n" + b"Content-Type: multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a\r\n\r\n" + ) + + # Push by enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, RequestSendFailed) + self.assertTrue(f.value.can_retry) + self.assertIsInstance(f.value.inner_exception, defer.TimeoutError) + @parameterized.expand(["get_json", "post_json", "delete_json", "put_json"]) def test_timeout_reading_body(self, method_name: str) -> None: """ From f0aae62f8543fd4e1be295a14d3ecc874c1431ef Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 30 Oct 2025 11:47:53 -0500 Subject: [PATCH 107/149] Cheaper logcontext debug logs (`random_string_insecure_fast(...)`) (#19094) Follow-up to https://github.com/element-hq/synapse/pull/18966 During the weekly Backend team meeting, it was mentioned that `random_string(...)` was taking a significant amount of CPU on `matrix.org`. 
This makes sense as it relies on [`secrets.choice(...)`](https://docs.python.org/3/library/secrets.html#secrets.choice), a cryptographically secure function that is inherently computationally expensive. And since https://github.com/element-hq/synapse/pull/18966, we're calling `random_string(...)` as part of a bunch of logcontext utilities. Since we don't need cryptographically secure random strings for our debug logs, this PR is introducing a new `random_string_insecure_fast(...)` function that uses [`random.choice(...)`](https://docs.python.org/3/library/random.html#random.choice) which uses pseudo-random numbers that are "both fast and threadsafe". --- changelog.d/19094.misc | 1 + synapse/logging/context.py | 8 ++++---- synapse/util/stringutils.py | 17 +++++++++++++++++ 3 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 changelog.d/19094.misc diff --git a/changelog.d/19094.misc b/changelog.d/19094.misc new file mode 100644 index 0000000000..0d38d17483 --- /dev/null +++ b/changelog.d/19094.misc @@ -0,0 +1 @@ +Use cheaper random string function in logcontext utilities. diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 6a4425ff1d..5b87de6eb3 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -53,7 +53,7 @@ from twisted.internet import defer, threads from twisted.python.threadpool import ThreadPool from synapse.logging.loggers import ExplicitlyConfiguredLogger -from synapse.util.stringutils import random_string +from synapse.util.stringutils import random_string_insecure_fast if TYPE_CHECKING: from synapse.logging.scopecontextmanager import _LogContextScope @@ -657,7 +657,7 @@ class PreserveLoggingContext: self, new_context: LoggingContextOrSentinel = SENTINEL_CONTEXT ) -> None: self._new_context = new_context - self._instance_id = random_string(5) + self._instance_id = random_string_insecure_fast(5) def __enter__(self) -> None: logcontext_debug_logger.debug( @@ -859,7 +859,7 @@ def run_in_background( Note that the returned Deferred does not follow the synapse logcontext rules. """ - instance_id = random_string(5) + instance_id = random_string_insecure_fast(5) calling_context = current_context() logcontext_debug_logger.debug( "run_in_background(%s): called with logcontext=%s", instance_id, calling_context @@ -1012,7 +1012,7 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T] restores the old context once the awaitable completes (execution passes from the reactor back to the code). """ - instance_id = random_string(5) + instance_id = random_string_insecure_fast(5) logcontext_debug_logger.debug( "make_deferred_yieldable(%s): called with logcontext=%s", instance_id, diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 6b0d3677da..0dadafbc78 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -20,6 +20,7 @@ # # import itertools +import random import re import secrets import string @@ -56,6 +57,10 @@ def random_string(length: int) -> str: """Generate a cryptographically secure string of random letters. Drawn from the characters: `a-z` and `A-Z` + + Because this is generated from cryptographic sources, it takes a notable amount of + effort to generate (computationally expensive). If you don't need cryptographic + security, consider using `random_string_insecure_fast` for better performance. 
""" return "".join(secrets.choice(string.ascii_letters) for _ in range(length)) @@ -68,6 +73,18 @@ def random_string_with_symbols(length: int) -> str: return "".join(secrets.choice(_string_with_symbols) for _ in range(length)) +def random_string_insecure_fast(length: int) -> str: + """ + Generate a string of random letters (insecure, fast). This is a more performant but + insecure version of `random_string`. + + WARNING: Not for security or cryptographic uses. Use `random_string` instead. + + Drawn from the characters: `a-z` and `A-Z` + """ + return "".join(random.choice(string.ascii_letters) for _ in range(length)) + + def is_ascii(s: bytes) -> bool: try: s.decode("ascii").encode("ascii") From c0b9437ab6511cad351dc79cfabec3b8a1c767fa Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 30 Oct 2025 11:49:15 -0500 Subject: [PATCH 108/149] Fix lost logcontext when using `timeout_deferred(...)` (#19090) Fix lost logcontext when using `timeout_deferred(...)` and things actually timeout. Fix https://github.com/element-hq/synapse/issues/19087 (our HTTP client times out requests using `timeout_deferred(...)` Fix https://github.com/element-hq/synapse/issues/19066 (`/sync` uses `notifier.wait_for_events()` which uses `timeout_deferred(...)` under the hood) ### When/why did these lost logcontext warnings start happening? ``` synapse.logging.context - 107 - WARNING - sentinel - Expected logging context call_later but found POST-2453 synapse.logging.context - 107 - WARNING - sentinel - Expected logging context call_later was lost ``` In https://github.com/element-hq/synapse/pull/18828, we switched `timeout_deferred(...)` from using `reactor.callLater(...)` to [`clock.call_later(...)`](https://github.com/element-hq/synapse/blob/3b59ac3b69f6a2f73a504699b30313d8dcfe4709/synapse/util/clock.py#L224-L313) under the hood. This meant it started dealing with logcontexts but our `time_it_out()` callback didn't follow our [Synapse logcontext rules](https://github.com/element-hq/synapse/blob/3b59ac3b69f6a2f73a504699b30313d8dcfe4709/docs/log_contexts.md). --- changelog.d/19090.bugfix | 1 + synapse/logging/context.py | 22 ++++++++--- synapse/util/async_helpers.py | 3 +- synapse/util/clock.py | 2 +- tests/unittest.py | 3 ++ tests/util/test_async_helpers.py | 68 +++++++++++++++++++++++++++++++- 6 files changed, 89 insertions(+), 10 deletions(-) create mode 100644 changelog.d/19090.bugfix diff --git a/changelog.d/19090.bugfix b/changelog.d/19090.bugfix new file mode 100644 index 0000000000..077dafcbf8 --- /dev/null +++ b/changelog.d/19090.bugfix @@ -0,0 +1 @@ +Fix lost logcontext warnings from timeouts in sync and requests made by Synapse itself. diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 5b87de6eb3..86e994cbb4 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -896,7 +896,7 @@ def run_in_background( # If the function messes with logcontexts, we can assume it follows the Synapse # logcontext rules (Rules for functions returning awaitables: "If the awaitable # is already complete, the function returns with the same logcontext it started - # with."). If it function doesn't touch logcontexts at all, we can also assume + # with."). If the function doesn't touch logcontexts at all, we can also assume # the logcontext is unchanged. 
# # Either way, the function should have maintained the calling logcontext, so we @@ -905,11 +905,21 @@ def run_in_background( # to reset the logcontext to the sentinel logcontext as that would run # immediately (remember our goal is to maintain the calling logcontext when we # return). - logcontext_debug_logger.debug( - "run_in_background(%s): deferred already completed and the function should have maintained the logcontext %s", - instance_id, - calling_context, - ) + if current_context() != calling_context: + logcontext_error( + "run_in_background(%s): deferred already completed but the function did not maintain the calling logcontext %s (found %s)" + % ( + instance_id, + calling_context, + current_context(), + ) + ) + else: + logcontext_debug_logger.debug( + "run_in_background(%s): deferred already completed (maintained the calling logcontext %s)", + instance_id, + calling_context, + ) return d # Since the function we called may follow the Synapse logcontext rules (Rules for diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index c568b377d2..99e899d1ef 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -808,7 +808,8 @@ def timeout_deferred( timed_out[0] = True try: - deferred.cancel() + with PreserveLoggingContext(): + deferred.cancel() except Exception: # if we throw any exception it'll break time outs logger.exception("Canceller failed during timeout") diff --git a/synapse/util/clock.py b/synapse/util/clock.py index 6557582629..5b59cef60a 100644 --- a/synapse/util/clock.py +++ b/synapse/util/clock.py @@ -266,7 +266,7 @@ class Clock: # We use `PreserveLoggingContext` to prevent our new `call_later` # logcontext from finishing as soon as we exit this function, in case `f` # returns an awaitable/deferred which would continue running and may try to - # restore the `loop_call` context when it's done (because it's trying to + # restore the `call_later` context when it's done (because it's trying to # adhere to the Synapse logcontext rules.) # # This also ensures that we return to the `sentinel` context when we exit diff --git a/tests/unittest.py b/tests/unittest.py index 1007f40456..049a92caaa 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -341,6 +341,9 @@ def logcontext_clean(target: TV) -> TV: """ def logcontext_error(msg: str) -> NoReturn: + # Log so we can still see it in the logs like normal + logger.warning(msg) + # But also fail the test raise AssertionError("logcontext error: %s" % (msg)) patcher = patch("synapse.logging.context.logcontext_error", new=logcontext_error) diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index a02a2f0cef..8fbee12fb9 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -45,7 +45,7 @@ from synapse.util.async_helpers import ( ) from tests.server import get_clock -from tests.unittest import TestCase +from tests.unittest import TestCase, logcontext_clean logger = logging.getLogger(__name__) @@ -198,7 +198,12 @@ class TimeoutDeferredTest(TestCase): self.failureResultOf(timing_out_d, defer.TimeoutError) - async def test_logcontext_is_preserved_on_cancellation(self) -> None: + @logcontext_clean + async def test_logcontext_is_preserved_on_timeout_cancellation(self) -> None: + """ + Test that the logcontext is preserved when we timeout and the deferred is + cancelled. 
+ """ # Sanity check that we start in the sentinel context self.assertEqual(current_context(), SENTINEL_CONTEXT) @@ -270,6 +275,65 @@ class TimeoutDeferredTest(TestCase): # Back to the sentinel context self.assertEqual(current_context(), SENTINEL_CONTEXT) + @logcontext_clean + async def test_logcontext_is_not_lost_when_awaiting_on_timeout_cancellation( + self, + ) -> None: + """ + Test that the logcontext isn't lost when we `await make_deferred_yieldable(...)` + the deferred to complete/timeout and it times out. + """ + + # Sanity check that we start in the sentinel context + self.assertEqual(current_context(), SENTINEL_CONTEXT) + + # Create a deferred which we will never complete + incomplete_d: Deferred = Deferred() + + async def competing_task() -> None: + with LoggingContext( + name="competing", server_name="test_server" + ) as context_competing: + timing_out_d = timeout_deferred( + deferred=incomplete_d, + timeout=1.0, + clock=self.clock, + ) + self.assertNoResult(timing_out_d) + # We should still be in the logcontext we started in + self.assertIs(current_context(), context_competing) + + # Mimic the normal use case to wait for the work to complete or timeout. + # + # In this specific test, we expect the deferred to timeout and raise an + # exception at this point. + await make_deferred_yieldable(timing_out_d) + + self.fail( + "We should not make it to this point as the `timing_out_d` should have been cancelled" + ) + + d = defer.ensureDeferred(competing_task()) + + # Still in the sentinel context + self.assertEqual(current_context(), SENTINEL_CONTEXT) + + # Pump until we trigger the timeout + self.reactor.pump( + # We only need to pump `1.0` (seconds) as we set + # `timeout_deferred(timeout=1.0)` above + (1.0,) + ) + + # Still in the sentinel context + self.assertEqual(current_context(), SENTINEL_CONTEXT) + + # We expect a failure due to the timeout + self.failureResultOf(d, defer.TimeoutError) + + # Back to the sentinel context at the end of the day + self.assertEqual(current_context(), SENTINEL_CONTEXT) + class _TestException(Exception): # pass From 300c5558ab051a978e0abe04df963ef302eb0958 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 30 Oct 2025 22:33:29 +0100 Subject: [PATCH 109/149] Update `check_dependencies` to support markers (#19110) --- changelog.d/19110.misc | 1 + synapse/util/check_dependencies.py | 131 ++++++++++++++++++++++++-- tests/util/test_check_dependencies.py | 52 +++++++++- 3 files changed, 176 insertions(+), 8 deletions(-) create mode 100644 changelog.d/19110.misc diff --git a/changelog.d/19110.misc b/changelog.d/19110.misc new file mode 100644 index 0000000000..dc45eef17c --- /dev/null +++ b/changelog.d/19110.misc @@ -0,0 +1 @@ +Allow Synapse's runtime dependency checking code to take packaging markers (i.e. `python <= 3.14`) into account when checking dependencies. \ No newline at end of file diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 1c79c0be48..715240c8ce 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -28,8 +28,9 @@ require. But this is probably just symptomatic of Python's package management. 
import logging from importlib import metadata -from typing import Iterable, NamedTuple, Optional +from typing import Any, Iterable, NamedTuple, Optional, Sequence, cast +from packaging.markers import Marker, Value, Variable, default_environment from packaging.requirements import Requirement DISTRIBUTION_NAME = "matrix-synapse" @@ -65,9 +66,23 @@ RUNTIME_EXTRAS = set(ALL_EXTRAS) - DEV_EXTRAS VERSION = metadata.version(DISTRIBUTION_NAME) +def _marker_environment(extra: str) -> dict[str, str]: + """Return the marker environment for `extra`, seeded with the current interpreter.""" + + env = cast(dict[str, str], dict(default_environment())) + env["extra"] = extra + return env + + def _is_dev_dependency(req: Requirement) -> bool: - return req.marker is not None and any( - req.marker.evaluate({"extra": e}) for e in DEV_EXTRAS + """Return True if `req` is a development dependency.""" + if req.marker is None: + return False + + marker_extras = _extras_from_marker(req.marker) + return any( + extra in DEV_EXTRAS and req.marker.evaluate(_marker_environment(extra)) + for extra in marker_extras ) @@ -95,6 +110,7 @@ def _generic_dependencies() -> Iterable[Dependency]: """Yield pairs (requirement, must_be_installed).""" requirements = metadata.requires(DISTRIBUTION_NAME) assert requirements is not None + env_no_extra = _marker_environment("") for raw_requirement in requirements: req = Requirement(raw_requirement) if _is_dev_dependency(req) or _should_ignore_runtime_requirement(req): @@ -103,7 +119,7 @@ def _generic_dependencies() -> Iterable[Dependency]: # https://packaging.pypa.io/en/latest/markers.html#usage notes that # > Evaluating an extra marker with no environment is an error # so we pass in a dummy empty extra value here. - must_be_installed = req.marker is None or req.marker.evaluate({"extra": ""}) + must_be_installed = req.marker is None or req.marker.evaluate(env_no_extra) yield Dependency(req, must_be_installed) @@ -111,6 +127,8 @@ def _dependencies_for_extra(extra: str) -> Iterable[Dependency]: """Yield additional dependencies needed for a given `extra`.""" requirements = metadata.requires(DISTRIBUTION_NAME) assert requirements is not None + env_no_extra = _marker_environment("") + env_for_extra = _marker_environment(extra) for raw_requirement in requirements: req = Requirement(raw_requirement) if _is_dev_dependency(req): @@ -118,12 +136,84 @@ def _dependencies_for_extra(extra: str) -> Iterable[Dependency]: # Exclude mandatory deps by only selecting deps needed with this extra. 
if ( req.marker is not None - and req.marker.evaluate({"extra": extra}) - and not req.marker.evaluate({"extra": ""}) + and req.marker.evaluate(env_for_extra) + and not req.marker.evaluate(env_no_extra) ): yield Dependency(req, True) +def _values_from_marker_value(value: Value) -> set[str]: + """Extract text values contained in a marker `Value`.""" + + raw: Any = value.value + if isinstance(raw, str): + return {raw} + if isinstance(raw, (tuple, list)): + return {str(item) for item in raw} + return {str(raw)} + + +def _extras_from_marker(marker: Optional[Marker]) -> set[str]: + """Return every `extra` referenced in the supplied marker tree.""" + + extras: set[str] = set() + + if marker is None: + return extras + + def collect(tree: object) -> None: + if isinstance(tree, list): + for item in tree: + collect(item) + elif isinstance(tree, tuple) and len(tree) == 3: + lhs, _op, rhs = tree + if ( + isinstance(lhs, Variable) + and lhs.value == "extra" + and isinstance(rhs, Value) + ): + extras.update(_values_from_marker_value(rhs)) + elif ( + isinstance(rhs, Variable) + and rhs.value == "extra" + and isinstance(lhs, Value) + ): + extras.update(_values_from_marker_value(lhs)) + + collect(marker._markers) + return extras + + +def _extras_to_consider_for_requirement( + marker: Marker, base_candidates: Sequence[str] +) -> set[str]: + """ + Augment `base_candidates` with extras explicitly mentioned in `marker`. + + Markers can mention extras (e.g. `extra == "saml2"`). + """ + + # Avoid modifying the input sequence. + # Use a set to efficiently avoid duplicate extras. + extras = set(base_candidates) + + for candidate in _extras_from_marker(marker): + extras.add(candidate) + + return extras + + +def _marker_applies_for_any_extra(requirement: Requirement, extras: set[str]) -> bool: + """Check whether a requirement's marker matches any evaluated `extra`.""" + + if requirement.marker is None: + return True + + return any( + requirement.marker.evaluate(_marker_environment(extra)) for extra in extras + ) + + def _not_installed(requirement: Requirement, extra: Optional[str] = None) -> str: if extra: return ( @@ -164,7 +254,7 @@ def _no_reported_version(requirement: Requirement, extra: Optional[str] = None) def check_requirements(extra: Optional[str] = None) -> None: """Check Synapse's dependencies are present and correctly versioned. - If provided, `extra` must be the name of an pacakging extra (e.g. "saml2" in + If provided, `extra` must be the name of an packaging extra (e.g. "saml2" in `pip install matrix-synapse[saml2]`). If `extra` is None, this function checks that @@ -174,6 +264,15 @@ def check_requirements(extra: Optional[str] = None) -> None: If `extra` is not None, this function checks that - the dependencies needed for that extra are installed and correctly versioned. + `marker`s are optional attributes on each requirement which specify + conditions under which the requirement applies. For example, a requirement + might only be needed on Windows, or with Python < 3.14. Markers can + additionally mention `extras` themselves, meaning a requirement may not + apply if the marker mentions an extra that the user has not asked for. + + This function skips a requirement when its markers do not apply in the + current environment. + :raises DependencyException: if a dependency is missing or incorrectly versioned. :raises ValueError: if this extra does not exist. 
""" @@ -188,7 +287,25 @@ def check_requirements(extra: Optional[str] = None) -> None: deps_unfulfilled = [] errors = [] + if extra is None: + # Default to all mandatory dependencies (non-dev extras). + # "" means all dependencies that aren't conditional on an extra. + base_extra_candidates: Sequence[str] = ("", *RUNTIME_EXTRAS) + else: + base_extra_candidates = (extra,) + for requirement, must_be_installed in dependencies: + if requirement.marker is not None: + candidate_extras = _extras_to_consider_for_requirement( + requirement.marker, base_extra_candidates + ) + # Skip checking this dependency if the requirement's marker object + # (i.e. `python_version < "3.14" and os_name == "win32"`) does not + # apply for any of the extras we're considering. + if not _marker_applies_for_any_extra(requirement, candidate_extras): + continue + + # Check if the requirement is installed and correctly versioned. try: dist: metadata.Distribution = metadata.distribution(requirement.name) except metadata.PackageNotFoundError: diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index c052ba2b75..ab2e2f6291 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -22,9 +22,11 @@ from contextlib import contextmanager from os import PathLike from pathlib import Path -from typing import Generator, Optional, Union +from typing import Generator, Optional, Union, cast from unittest.mock import patch +from packaging.markers import default_environment as packaging_default_environment + from synapse.util.check_dependencies import ( DependencyException, check_requirements, @@ -80,6 +82,22 @@ class TestDependencyChecker(TestCase): ): yield + @contextmanager + def mock_python_version(self, version: str) -> Generator[None, None, None]: + """Override the marker environment to report the supplied `python_version`.""" + + def fake_default_environment() -> dict[str, str]: + env = cast(dict[str, str], dict(packaging_default_environment())) + env["python_version"] = version + env["python_full_version"] = f"{version}.0" + return env + + with patch( + "synapse.util.check_dependencies.default_environment", + side_effect=fake_default_environment, + ): + yield + def test_mandatory_dependency(self) -> None: """Complain if a required package is missing or old.""" with patch( @@ -191,3 +209,35 @@ class TestDependencyChecker(TestCase): with self.mock_installed_package(old): # We also ignore old versions of setuptools_rust check_requirements() + + def test_python_version_markers_respected(self) -> None: + """ + Tests that python_version markers are properly respected. + + Specifically that older versions of dependencies can be installed in + environments with older Python versions. 
+ """ + requirements = [ + "pydantic ~= 2.8; python_version < '3.14'", + "pydantic ~= 2.12; python_version >= '3.14'", + ] + + with patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=requirements, + ): + with self.mock_python_version("3.9"): + with self.mock_installed_package(DummyDistribution("2.12.3")): + check_requirements() + with self.mock_installed_package(DummyDistribution("2.8.1")): + check_requirements() + with self.mock_installed_package(DummyDistribution("2.7.0")): + self.assertRaises(DependencyException, check_requirements) + + with self.mock_python_version("3.14"): + with self.mock_installed_package(DummyDistribution("2.12.3")): + check_requirements() + with self.mock_installed_package(DummyDistribution("2.8.1")): + self.assertRaises(DependencyException, check_requirements) + with self.mock_installed_package(DummyDistribution("2.7.0")): + self.assertRaises(DependencyException, check_requirements) From 3595ff921f876ee6ccb03623ae93e21f723bd444 Mon Sep 17 00:00:00 2001 From: V02460 Date: Fri, 31 Oct 2025 10:22:22 +0100 Subject: [PATCH 110/149] Pydantic v2 (#19071) Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Co-authored-by: Andrew Morgan --- .github/workflows/tests.yml | 22 - changelog.d/19071.misc | 1 + poetry.lock | 294 ++++++----- pyproject.toml | 13 +- scripts-dev/check_pydantic_models.py | 474 ------------------ scripts-dev/lint.sh | 3 - synapse/_pydantic_compat.py | 104 ---- synapse/api/auth/mas.py | 49 +- synapse/config/_util.py | 4 +- synapse/config/mas.py | 29 +- synapse/config/matrixrtc.py | 13 +- synapse/config/workers.py | 4 +- synapse/events/validator.py | 2 +- synapse/http/servlet.py | 21 +- synapse/rest/admin/users.py | 8 +- synapse/rest/client/account.py | 11 +- synapse/rest/client/devices.py | 24 +- synapse/rest/client/directory.py | 3 +- synapse/rest/client/keys.py | 19 +- synapse/rest/client/reporting.py | 3 +- synapse/rest/client/thread_subscriptions.py | 2 +- synapse/rest/key/v2/remote_key_resource.py | 5 +- synapse/rest/synapse/mas/devices.py | 7 +- synapse/rest/synapse/mas/users.py | 6 +- synapse/storage/background_updates.py | 4 +- synapse/types/handlers/sliding_sync.py | 17 +- synapse/types/rest/client/__init__.py | 119 +++-- synapse/util/events.py | 25 +- synapse/util/pydantic_models.py | 65 +-- tests/config/test_oauth_delegation.py | 11 +- tests/rest/client/test_account.py | 4 +- tests/rest/client/test_models.py | 15 +- .../rest/client/test_thread_subscriptions.py | 46 +- 33 files changed, 422 insertions(+), 1005 deletions(-) create mode 100644 changelog.d/19071.misc delete mode 100755 scripts-dev/check_pydantic_models.py delete mode 100644 synapse/_pydantic_compat.py diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 93c0e9415f..494543e4b9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -207,26 +207,6 @@ jobs: env: PULL_REQUEST_NUMBER: ${{ github.event.number }} - lint-pydantic: - runs-on: ubuntu-latest - needs: changes - if: ${{ needs.changes.outputs.linting == 'true' }} - - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Install Rust - uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master - with: - toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 - with: - 
poetry-version: "2.1.1" - extras: "all" - - run: poetry run scripts-dev/check_pydantic_models.py - lint-clippy: runs-on: ubuntu-latest needs: changes @@ -341,7 +321,6 @@ jobs: - lint-mypy - lint-crlf - lint-newsfile - - lint-pydantic - check-sampleconfig - check-schema-delta - check-lockfile @@ -363,7 +342,6 @@ jobs: lint lint-mypy lint-newsfile - lint-pydantic lint-clippy lint-clippy-nightly lint-rust diff --git a/changelog.d/19071.misc b/changelog.d/19071.misc new file mode 100644 index 0000000000..d0930f339b --- /dev/null +++ b/changelog.d/19071.misc @@ -0,0 +1 @@ +Update pydantic to v2. \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 5a16dd5860..a1f133e164 100644 --- a/poetry.lock +++ b/poetry.lock @@ -6,7 +6,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -39,7 +39,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\"" +markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\"" files = [ {file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"}, {file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"}, @@ -444,7 +444,7 @@ description = "XML bomb protection for Python stdlib modules" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -469,7 +469,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"}, {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"}, @@ -519,7 +519,7 @@ description = "Python wrapper for hiredis" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"all\" or extra == \"redis\"" +markers = "extra == \"redis\" or extra == \"all\"" files = [ {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"}, {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"}, @@ -842,7 +842,7 @@ description = "Jaeger Python OpenTracing Tracer implementation" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" 
+markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, ] @@ -980,7 +980,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" +markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" files = [ {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, @@ -996,7 +996,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"all\" or extra == \"url-preview\"" +markers = "extra == \"url-preview\" or extra == \"all\"" files = [ {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"}, {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"}, @@ -1283,7 +1283,7 @@ description = "An LDAP3 auth provider for Synapse" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" +markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" files = [ {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, @@ -1525,7 +1525,7 @@ description = "OpenTracing API for Python. 
See documentation at http://opentraci optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, ] @@ -1731,7 +1731,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"all\" or extra == \"postgres\"" +markers = "extra == \"postgres\" or extra == \"all\"" files = [ {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, @@ -1739,7 +1739,6 @@ files = [ {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, - {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"}, {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, @@ -1752,7 +1751,7 @@ description = ".. 
image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" files = [ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, ] @@ -1768,7 +1767,7 @@ description = "A Simple library to enable psycopg2 compatability" optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" files = [ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, ] @@ -1817,21 +1816,21 @@ files = [ [[package]] name = "pydantic" -version = "2.11.10" +version = "2.12.3" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["main"] files = [ - {file = "pydantic-2.11.10-py3-none-any.whl", hash = "sha256:802a655709d49bd004c31e865ef37da30b540786a46bfce02333e0e24b5fe29a"}, - {file = "pydantic-2.11.10.tar.gz", hash = "sha256:dc280f0982fbda6c38fada4e476dc0a4f3aeaf9c6ad4c28df68a666ec3c61423"}, + {file = "pydantic-2.12.3-py3-none-any.whl", hash = "sha256:6986454a854bc3bc6e5443e1369e06a3a456af9d339eda45510f517d9ea5c6bf"}, + {file = "pydantic-2.12.3.tar.gz", hash = "sha256:1da1c82b0fc140bb0103bc1441ffe062154c8d38491189751ee00fd8ca65ce74"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.33.2" -typing-extensions = ">=4.12.2" -typing-inspection = ">=0.4.0" +pydantic-core = "2.41.4" +typing-extensions = ">=4.14.1" +typing-inspection = ">=0.4.2" [package.extras] email = ["email-validator (>=2.0.0)"] @@ -1839,115 +1838,133 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows [[package]] name = "pydantic-core" -version = "2.33.2" +version = "2.41.4" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["main"] files = [ - {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, - {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, - {file = 
"pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, - {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, - {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, - {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, - {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, - {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, - {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, - {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, - {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, - {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, - {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, - {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, - {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, - {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, - {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, - {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, - {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, - {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, - {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, - {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, - {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, - {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, - {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, - {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, - {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, - {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, - {file = 
"pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, - {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, - {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, - {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, - {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, - {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, - {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, - {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, - {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, - {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, - {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, - {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, - {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, - {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, - {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, + {file = "pydantic_core-2.41.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2442d9a4d38f3411f22eb9dd0912b7cbf4b7d5b6c92c4173b75d3e1ccd84e36e"}, + {file = "pydantic_core-2.41.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:30a9876226dda131a741afeab2702e2d127209bde3c65a2b8133f428bc5d006b"}, + {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d55bbac04711e2980645af68b97d445cdbcce70e5216de444a6c4b6943ebcccd"}, + {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1d778fb7849a42d0ee5927ab0f7453bf9f85eef8887a546ec87db5ddb178945"}, + {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b65077a4693a98b90ec5ad8f203ad65802a1b9b6d4a7e48066925a7e1606706"}, + {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62637c769dee16eddb7686bf421be48dfc2fae93832c25e25bc7242e698361ba"}, + {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfe3aa529c8f501babf6e502936b9e8d4698502b2cfab41e17a028d91b1ac7b"}, + {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca2322da745bf2eeb581fc9ea3bbb31147702163ccbcbf12a3bb630e4bf05e1d"}, + {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e8cd3577c796be7231dcf80badcf2e0835a46665eaafd8ace124d886bab4d700"}, + {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:1cae8851e174c83633f0833e90636832857297900133705ee158cf79d40f03e6"}, + {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a26d950449aae348afe1ac8be5525a00ae4235309b729ad4d3399623125b43c9"}, + {file = "pydantic_core-2.41.4-cp310-cp310-win32.whl", hash = "sha256:0cf2a1f599efe57fa0051312774280ee0f650e11152325e41dfd3018ef2c1b57"}, + {file = "pydantic_core-2.41.4-cp310-cp310-win_amd64.whl", hash = "sha256:a8c2e340d7e454dc3340d3d2e8f23558ebe78c98aa8f68851b04dcb7bc37abdc"}, + {file = "pydantic_core-2.41.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:28ff11666443a1a8cf2a044d6a545ebffa8382b5f7973f22c36109205e65dc80"}, + {file = "pydantic_core-2.41.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61760c3925d4633290292bad462e0f737b840508b4f722247d8729684f6539ae"}, + {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae547b7315d055b0de2ec3965643b0ab82ad0106a7ffd29615ee9f266a02827"}, + {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef9ee5471edd58d1fcce1c80ffc8783a650e3e3a193fe90d52e43bb4d87bff1f"}, + {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:15dd504af121caaf2c95cb90c0ebf71603c53de98305621b94da0f967e572def"}, + {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a926768ea49a8af4d36abd6a8968b8790f7f76dd7cbd5a4c180db2b4ac9a3a2"}, + {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916b9b7d134bff5440098a4deb80e4cb623e68974a87883299de9124126c2a8"}, + {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cf90535979089df02e6f17ffd076f07237efa55b7343d98760bde8743c4b265"}, + {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7533c76fa647fade2d7ec75ac5cc079ab3f34879626dae5689b27790a6cf5a5c"}, + {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:37e516bca9264cbf29612539801ca3cd5d1be465f940417b002905e6ed79d38a"}, + {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0c19cb355224037c83642429b8ce261ae108e1c5fbf5c028bac63c77b0f8646e"}, + {file = "pydantic_core-2.41.4-cp311-cp311-win32.whl", hash = "sha256:09c2a60e55b357284b5f31f5ab275ba9f7f70b7525e18a132ec1f9160b4f1f03"}, + {file = "pydantic_core-2.41.4-cp311-cp311-win_amd64.whl", hash = "sha256:711156b6afb5cb1cb7c14a2cc2c4a8b4c717b69046f13c6b332d8a0a8f41ca3e"}, + {file = "pydantic_core-2.41.4-cp311-cp311-win_arm64.whl", hash = "sha256:6cb9cf7e761f4f8a8589a45e49ed3c0d92d1d696a45a6feaee8c904b26efc2db"}, + {file = "pydantic_core-2.41.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ab06d77e053d660a6faaf04894446df7b0a7e7aba70c2797465a0a1af00fc887"}, + {file = "pydantic_core-2.41.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c53ff33e603a9c1179a9364b0a24694f183717b2e0da2b5ad43c316c956901b2"}, + {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:304c54176af2c143bd181d82e77c15c41cbacea8872a2225dd37e6544dce9999"}, + {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025ba34a4cf4fb32f917d5d188ab5e702223d3ba603be4d8aca2f82bede432a4"}, + {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f5f30c402ed58f90c70e12eff65547d3ab74685ffe8283c719e6bead8ef53f"}, + {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd96e5d15385d301733113bcaa324c8bcf111275b7675a9c6e88bfb19fc05e3b"}, + {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f348cbb44fae6e9653c1055db7e29de67ea6a9ca03a5fa2c2e11a47cff0e47"}, + {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec22626a2d14620a83ca583c6f5a4080fa3155282718b6055c2ea48d3ef35970"}, + {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a95d4590b1f1a43bf33ca6d647b990a88f4a3824a8c4572c708f0b45a5290ed"}, + {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:f9672ab4d398e1b602feadcffcdd3af44d5f5e6ddc15bc7d15d376d47e8e19f8"}, + {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:84d8854db5f55fead3b579f04bda9a36461dab0730c5d570e1526483e7bb8431"}, + {file = "pydantic_core-2.41.4-cp312-cp312-win32.whl", hash = "sha256:9be1c01adb2ecc4e464392c36d17f97e9110fbbc906bcbe1c943b5b87a74aabd"}, + {file = "pydantic_core-2.41.4-cp312-cp312-win_amd64.whl", hash = 
"sha256:d682cf1d22bab22a5be08539dca3d1593488a99998f9f412137bc323179067ff"}, + {file = "pydantic_core-2.41.4-cp312-cp312-win_arm64.whl", hash = "sha256:833eebfd75a26d17470b58768c1834dfc90141b7afc6eb0429c21fc5a21dcfb8"}, + {file = "pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746"}, + {file = "pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced"}, + {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a"}, + {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02"}, + {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1"}, + {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2"}, + {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84"}, + {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d"}, + {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d"}, + {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2"}, + {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab"}, + {file = "pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c"}, + {file = "pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4"}, + {file = "pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564"}, + {file = "pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4"}, + {file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2"}, + {file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf"}, + {file = "pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2"}, + {file = "pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89"}, + {file = "pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1"}, + {file = "pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac"}, + {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554"}, + {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e"}, + {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616"}, + {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af"}, + {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12"}, + {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d"}, + {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad"}, + {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a"}, + {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025"}, + {file = "pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e"}, + {file = "pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894"}, + {file = "pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d"}, + {file = "pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da"}, + {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e"}, + {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa"}, + {file = "pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d"}, + {file = "pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0"}, + {file = "pydantic_core-2.41.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:646e76293345954acea6966149683047b7b2ace793011922208c8e9da12b0062"}, + {file = "pydantic_core-2.41.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc8e85a63085a137d286e2791037f5fdfff0aabb8b899483ca9c496dd5797338"}, + {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:692c622c8f859a17c156492783902d8370ac7e121a611bd6fe92cc71acf9ee8d"}, + {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1e2906efb1031a532600679b424ef1d95d9f9fb507f813951f23320903adbd7"}, + {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:e04e2f7f8916ad3ddd417a7abdd295276a0bf216993d9318a5d61cc058209166"}, + {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df649916b81822543d1c8e0e1d079235f68acdc7d270c911e8425045a8cfc57e"}, + {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66c529f862fdba70558061bb936fe00ddbaaa0c647fd26e4a4356ef1d6561891"}, + {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc3b4c5a1fd3a311563ed866c2c9b62da06cb6398bee186484ce95c820db71cb"}, + {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6e0fc40d84448f941df9b3334c4b78fe42f36e3bf631ad54c3047a0cdddc2514"}, + {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:44e7625332683b6c1c8b980461475cde9595eff94447500e80716db89b0da005"}, + {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:170ee6835f6c71081d031ef1c3b4dc4a12b9efa6a9540f93f95b82f3c7571ae8"}, + {file = "pydantic_core-2.41.4-cp39-cp39-win32.whl", hash = "sha256:3adf61415efa6ce977041ba9745183c0e1f637ca849773afa93833e04b163feb"}, + {file = "pydantic_core-2.41.4-cp39-cp39-win_amd64.whl", hash = "sha256:a238dd3feee263eeaeb7dc44aea4ba1364682c4f9f9467e6af5596ba322c2332"}, + {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:a1b2cfec3879afb742a7b0bcfa53e4f22ba96571c9e54d6a3afe1052d17d843b"}, + {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:d175600d975b7c244af6eb9c9041f10059f20b8bbffec9e33fdd5ee3f67cdc42"}, + {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f184d657fa4947ae5ec9c47bd7e917730fa1cbb78195037e32dcbab50aca5ee"}, + {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed810568aeffed3edc78910af32af911c835cc39ebbfacd1f0ab5dd53028e5c"}, + {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:4f5d640aeebb438517150fdeec097739614421900e4a08db4a3ef38898798537"}, + {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:4a9ab037b71927babc6d9e7fc01aea9e66dc2a4a34dff06ef0724a4049629f94"}, + {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4dab9484ec605c3016df9ad4fd4f9a390bc5d816a3b10c6550f8424bb80b18c"}, + {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8a5028425820731d8c6c098ab642d7b8b999758e24acae03ed38a66eca8335"}, + {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e5ab4fc177dd41536b3c32b2ea11380dd3d4619a385860621478ac2d25ceb00"}, + {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d88d0054d3fa11ce936184896bed3c1c5441d6fa483b498fac6a5d0dd6f64a9"}, + {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2a054a8725f05b4b6503357e0ac1c4e8234ad3b0c2ac130d6ffc66f0e170e2"}, + {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0d9db5a161c99375a0c68c058e227bee1d89303300802601d76a3d01f74e258"}, + {file = 
"pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6273ea2c8ffdac7b7fda2653c49682db815aebf4a89243a6feccf5e36c18c347"}, + {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:4c973add636efc61de22530b2ef83a65f39b6d6f656df97f678720e20de26caa"}, + {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b69d1973354758007f46cf2d44a4f3d0933f10b6dc9bf15cf1356e037f6f731a"}, + {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3619320641fd212aaf5997b6ca505e97540b7e16418f4a241f44cdf108ffb50d"}, + {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:491535d45cd7ad7e4a2af4a5169b0d07bebf1adfd164b0368da8aa41e19907a5"}, + {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:54d86c0cada6aba4ec4c047d0e348cbad7063b87ae0f005d9f8c9ad04d4a92a2"}, + {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca1124aced216b2500dc2609eade086d718e8249cb9696660ab447d50a758bd"}, + {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c9024169becccf0cb470ada03ee578d7348c119a0d42af3dcf9eda96e3a247c"}, + {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:26895a4268ae5a2849269f4991cdc97236e4b9c010e51137becf25182daac405"}, + {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:ca4df25762cf71308c446e33c9b1fdca2923a3f13de616e2a949f38bf21ff5a8"}, + {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5a28fcedd762349519276c36634e71853b4541079cab4acaaac60c4421827308"}, + {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c173ddcd86afd2535e2b695217e82191580663a1d1928239f877f5a1649ef39f"}, + {file = "pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5"}, ] [package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +typing-extensions = ">=4.14.1" [[package]] name = "pygithub" @@ -2027,7 +2044,7 @@ description = "A development tool to measure, monitor and analyze the memory beh optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"all\" or extra == \"cache-memory\"" +markers = "extra == \"cache-memory\" or extra == \"all\"" files = [ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, @@ -2087,7 +2104,7 @@ description = "Python implementation of SAML Version 2 Standard" optional = true python-versions = ">=3.9,<4.0" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"}, {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"}, @@ -2112,7 +2129,7 @@ description = "Extensions to the standard Python datetime module" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = 
"python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, @@ -2140,7 +2157,7 @@ description = "World timezone definitions, modern and historical" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"}, {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, @@ -2526,7 +2543,7 @@ description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"all\" or extra == \"sentry\"" +markers = "extra == \"sentry\" or extra == \"all\"" files = [ {file = "sentry_sdk-2.34.1-py2.py3-none-any.whl", hash = "sha256:b7a072e1cdc5abc48101d5146e1ae680fa81fe886d8d95aaa25a0b450c818d32"}, {file = "sentry_sdk-2.34.1.tar.gz", hash = "sha256:69274eb8c5c38562a544c3e9f68b5be0a43be4b697f5fd385bf98e4fbe672687"}, @@ -2714,7 +2731,7 @@ description = "Tornado IOLoop Backed Concurrent Futures" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, @@ -2730,7 +2747,7 @@ description = "Python bindings for the Apache Thrift RPC system" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, ] @@ -2784,6 +2801,7 @@ files = [ {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] +markers = {main = "python_version < \"3.14\""} [[package]] name = "tornado" @@ -2792,7 +2810,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"}, {file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"}, @@ -2926,7 +2944,7 @@ description = "non-blocking redis client for python" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"redis\"" +markers = "extra == \"redis\" or extra == \"all\"" files = [ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"}, {file = "txredisapi-1.4.11.tar.gz", hash = 
"sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"}, @@ -3110,14 +3128,14 @@ files = [ [[package]] name = "typing-inspection" -version = "0.4.0" +version = "0.4.2" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["main"] files = [ - {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, - {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, + {file = "typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7"}, + {file = "typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464"}, ] [package.dependencies] @@ -3172,7 +3190,7 @@ description = "An XML Schema validator and decoder" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"}, {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"}, @@ -3316,4 +3334,4 @@ url-preview = ["lxml"] [metadata] lock-version = "2.1" python-versions = "^3.10.0" -content-hash = "0122c5aa55099678f2ba5094ec393ebd814def15213388b33e5f1d7760392ffc" +content-hash = "363f8059c998566788b0465c338a3a8aaa56d1e61cc347f2473b687ff34f2a8d" diff --git a/pyproject.toml b/pyproject.toml index 08b4b8af66..5fb0c88b4f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -220,8 +220,8 @@ netaddr = ">=0.7.18" Jinja2 = ">=3.0" # 3.2.0 updates collections.abc imports to avoid Python 3.10 incompatibility. bleach = ">=3.2.0" -# We use `assert_never`, which were added in `typing-extensions` 4.1. -typing-extensions = ">=4.1" +# pydantic 2.12 depends on typing-extensions>=4.14.1 +typing-extensions = ">=4.14.1" # We enforce that we have a `cryptography` version that bundles an `openssl` # with the latest security patches. cryptography = ">=3.4.7" @@ -230,9 +230,10 @@ ijson = ">=3.1.4" matrix-common = "^1.3.0" # We need packaging.verison.Version(...).major added in 20.0. packaging = ">=20.0" -# We support pydantic v1 and pydantic v2 via the pydantic.v1 compat module. -# See https://github.com/matrix-org/synapse/issues/15858 -pydantic = ">=1.7.4, <3" +pydantic = [ + { version = "~=2.8", python = "<3.14" }, + { version = "~=2.12", python = ">=3.14" }, +] # This is for building the rust components during "poetry install", which # currently ignores the `build-system.requires` directive (c.f. @@ -335,8 +336,6 @@ all = [ # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. ruff = "0.12.10" -# Type checking only works with the pydantic.v1 compat module from pydantic v2 -pydantic = "^2" # Typechecking lxml-stubs = ">=0.4.0" diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py deleted file mode 100755 index 69c49e258d..0000000000 --- a/scripts-dev/check_pydantic_models.py +++ /dev/null @@ -1,474 +0,0 @@ -#! /usr/bin/env python -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2022 The Matrix.org Foundation C.I.C. 
-# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# . -# -# Originally licensed under the Apache License, Version 2.0: -# . -# -# [This file includes modifications made by New Vector Limited] -# -# -""" -A script which enforces that Synapse always uses strict types when defining a Pydantic -model. - -Pydantic does not yet offer a strict mode, but it is planned for pydantic v2. See - - https://github.com/pydantic/pydantic/issues/1098 - https://pydantic-docs.helpmanual.io/blog/pydantic-v2/#strict-mode - -until then, this script is a best effort to stop us from introducing type coersion bugs -(like the infamous stringy power levels fixed in room version 10). -""" - -import argparse -import contextlib -import functools -import importlib -import logging -import os -import pkgutil -import sys -import textwrap -import traceback -import unittest.mock -from contextlib import contextmanager -from typing import ( - Any, - Callable, - Generator, - TypeVar, -) - -from parameterized import parameterized -from typing_extensions import ParamSpec - -from synapse._pydantic_compat import ( - BaseModel as PydanticBaseModel, - conbytes, - confloat, - conint, - constr, - get_args, -) - -logger = logging.getLogger(__name__) - -CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: list[Callable] = [ - constr, - conbytes, - conint, - confloat, -] - -TYPES_THAT_PYDANTIC_WILL_COERCE_TO = [ - str, - bytes, - int, - float, - bool, -] - - -P = ParamSpec("P") -R = TypeVar("R") - - -class ModelCheckerException(Exception): - """Dummy exception. Allows us to detect unwanted types during a module import.""" - - -class MissingStrictInConstrainedTypeException(ModelCheckerException): - factory_name: str - - def __init__(self, factory_name: str): - self.factory_name = factory_name - - -class FieldHasUnwantedTypeException(ModelCheckerException): - message: str - - def __init__(self, message: str): - self.message = message - - -def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]: - """We patch `constr` and friends with wrappers that enforce strict=True.""" - - @functools.wraps(factory) - def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: - if "strict" not in kwargs: - raise MissingStrictInConstrainedTypeException(factory.__name__) - if not kwargs["strict"]: - raise MissingStrictInConstrainedTypeException(factory.__name__) - return factory(*args, **kwargs) - - return wrapper - - -def field_type_unwanted(type_: Any) -> bool: - """Very rough attempt to detect if a type is unwanted as a Pydantic annotation. - - At present, we exclude types which will coerce, or any generic type involving types - which will coerce.""" - logger.debug("Is %s unwanted?") - if type_ in TYPES_THAT_PYDANTIC_WILL_COERCE_TO: - logger.debug("yes") - return True - logger.debug("Maybe. Subargs are %s", get_args(type_)) - rv = any(field_type_unwanted(t) for t in get_args(type_)) - logger.debug("Conclusion: %s %s unwanted", type_, "is" if rv else "is not") - return rv - - -class PatchedBaseModel(PydanticBaseModel): - """A patched version of BaseModel that inspects fields after models are defined. - - We complain loudly if we see an unwanted type. - - Beware: ModelField.type_ is presumably private; this is likely to be very brittle. 
- """ - - @classmethod - def __init_subclass__(cls: type[PydanticBaseModel], **kwargs: object): - for field in cls.__fields__.values(): - # Note that field.type_ and field.outer_type are computed based on the - # annotation type, see pydantic.fields.ModelField._type_analysis - if field_type_unwanted(field.outer_type_): - # TODO: this only reports the first bad field. Can we find all bad ones - # and report them all? - raise FieldHasUnwantedTypeException( - f"{cls.__module__}.{cls.__qualname__} has field '{field.name}' " - f"with unwanted type `{field.outer_type_}`" - ) - - -@contextmanager -def monkeypatch_pydantic() -> Generator[None, None, None]: - """Patch pydantic with our snooping versions of BaseModel and the con* functions. - - If the snooping functions see something they don't like, they'll raise a - ModelCheckingException instance. - """ - with contextlib.ExitStack() as patches: - # Most Synapse code ought to import the patched objects directly from - # `pydantic`. But we also patch their containing modules `pydantic.main` and - # `pydantic.types` for completeness. - patch_basemodel = unittest.mock.patch( - "synapse._pydantic_compat.BaseModel", new=PatchedBaseModel - ) - patches.enter_context(patch_basemodel) - for factory in CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: - wrapper: Callable = make_wrapper(factory) - patch = unittest.mock.patch( - f"synapse._pydantic_compat.{factory.__name__}", new=wrapper - ) - patches.enter_context(patch) - yield - - -def format_model_checker_exception(e: ModelCheckerException) -> str: - """Work out which line of code caused e. Format the line in a human-friendly way.""" - # TODO. FieldHasUnwantedTypeException gives better error messages. Can we ditch the - # patches of constr() etc, and instead inspect fields to look for ConstrainedStr - # with strict=False? There is some difficulty with the inheritance hierarchy - # because StrictStr < ConstrainedStr < str. - if isinstance(e, FieldHasUnwantedTypeException): - return e.message - elif isinstance(e, MissingStrictInConstrainedTypeException): - frame_summary = traceback.extract_tb(e.__traceback__)[-2] - return ( - f"Missing `strict=True` from {e.factory_name}() call \n" - + traceback.format_list([frame_summary])[0].lstrip() - ) - else: - raise ValueError(f"Unknown exception {e}") from e - - -def lint() -> int: - """Try to import all of Synapse and see if we spot any Pydantic type coercions. 
- - Print any problems, then return a status code suitable for sys.exit.""" - failures = do_lint() - if failures: - print(f"Found {len(failures)} problem(s)") - for failure in sorted(failures): - print(failure) - return os.EX_DATAERR if failures else os.EX_OK - - -def do_lint() -> set[str]: - """Try to import all of Synapse and see if we spot any Pydantic type coercions.""" - failures = set() - - with monkeypatch_pydantic(): - logger.debug("Importing synapse") - try: - # TODO: make "synapse" an argument so we can target this script at - # a subpackage - module = importlib.import_module("synapse") - except ModelCheckerException as e: - logger.warning("Bad annotation found when importing synapse") - failures.add(format_model_checker_exception(e)) - return failures - - try: - logger.debug("Fetching subpackages") - module_infos = list( - pkgutil.walk_packages(module.__path__, f"{module.__name__}.") - ) - except ModelCheckerException as e: - logger.warning("Bad annotation found when looking for modules to import") - failures.add(format_model_checker_exception(e)) - return failures - - for module_info in module_infos: - logger.debug("Importing %s", module_info.name) - try: - importlib.import_module(module_info.name) - except ModelCheckerException as e: - logger.warning( - "Bad annotation found when importing %s", module_info.name - ) - failures.add(format_model_checker_exception(e)) - - return failures - - -def run_test_snippet(source: str) -> None: - """Exec a snippet of source code in an isolated environment.""" - # To emulate `source` being called at the top level of the module, - # the globals and locals we provide apparently have to be the same mapping. - # - # > Remember that at the module level, globals and locals are the same dictionary. - # > If exec gets two separate objects as globals and locals, the code will be - # > executed as if it were embedded in a class definition. 
- globals_: dict[str, object] - locals_: dict[str, object] - globals_ = locals_ = {} - exec(textwrap.dedent(source), globals_, locals_) - - -class TestConstrainedTypesPatch(unittest.TestCase): - def test_expression_without_strict_raises(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - try: - from pydantic.v1 import constr - except ImportError: - from pydantic import constr - constr() - """ - ) - - def test_called_as_module_attribute_raises(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - import pydantic - pydantic.constr() - """ - ) - - def test_wildcard_import_raises(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - try: - from pydantic.v1 import * - except ImportError: - from pydantic import * - constr() - """ - ) - - def test_alternative_import_raises(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - try: - from pydantic.v1.types import constr - except ImportError: - from pydantic.types import constr - constr() - """ - ) - - def test_alternative_import_attribute_raises(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - try: - from pydantic.v1 import types as pydantic_types - except ImportError: - from pydantic import types as pydantic_types - pydantic_types.constr() - """ - ) - - def test_kwarg_but_no_strict_raises(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - try: - from pydantic.v1 import constr - except ImportError: - from pydantic import constr - constr(min_length=10) - """ - ) - - def test_kwarg_strict_False_raises(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - try: - from pydantic.v1 import constr - except ImportError: - from pydantic import constr - constr(strict=False) - """ - ) - - def test_kwarg_strict_True_doesnt_raise(self) -> None: - with monkeypatch_pydantic(): - run_test_snippet( - """ - try: - from pydantic.v1 import constr - except ImportError: - from pydantic import constr - constr(strict=True) - """ - ) - - def test_annotation_without_strict_raises(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - try: - from pydantic.v1 import constr - except ImportError: - from pydantic import constr - x: constr() - """ - ) - - def test_field_annotation_without_strict_raises(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - try: - from pydantic.v1 import BaseModel, conint - except ImportError: - from pydantic import BaseModel, conint - class C: - x: conint() - """ - ) - - -class TestFieldTypeInspection(unittest.TestCase): - @parameterized.expand( - [ - ("str",), - ("bytes"), - ("int",), - ("float",), - ("bool"), - ("Optional[str]",), - ("Union[None, str]",), - ("list[str]",), - ("list[list[str]]",), - ("dict[StrictStr, str]",), - ("dict[str, StrictStr]",), - ("TypedDict('D', x=int)",), - ] - ) - def test_field_holding_unwanted_type_raises(self, annotation: str) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - f""" - from typing import * - try: - from pydantic.v1 import * - except ImportError: - from pydantic import * - class C(BaseModel): 
- f: {annotation} - """ - ) - - @parameterized.expand( - [ - ("StrictStr",), - ("StrictBytes"), - ("StrictInt",), - ("StrictFloat",), - ("StrictBool"), - ("constr(strict=True, min_length=10)",), - ("Optional[StrictStr]",), - ("Union[None, StrictStr]",), - ("list[StrictStr]",), - ("list[list[StrictStr]]",), - ("dict[StrictStr, StrictStr]",), - ("TypedDict('D', x=StrictInt)",), - ] - ) - def test_field_holding_accepted_type_doesnt_raise(self, annotation: str) -> None: - with monkeypatch_pydantic(): - run_test_snippet( - f""" - from typing import * - try: - from pydantic.v1 import * - except ImportError: - from pydantic import * - class C(BaseModel): - f: {annotation} - """ - ) - - def test_field_holding_str_raises_with_alternative_import(self) -> None: - with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): - run_test_snippet( - """ - try: - from pydantic.v1.main import BaseModel - except ImportError: - from pydantic.main import BaseModel - class C(BaseModel): - f: str - """ - ) - - -parser = argparse.ArgumentParser() -parser.add_argument("mode", choices=["lint", "test"], default="lint", nargs="?") -parser.add_argument("-v", "--verbose", action="store_true") - - -if __name__ == "__main__": - args = parser.parse_args(sys.argv[1:]) - logging.basicConfig( - format="%(asctime)s %(name)s:%(lineno)d %(levelname)s %(message)s", - level=logging.DEBUG if args.verbose else logging.INFO, - ) - # suppress logs we don't care about - logging.getLogger("xmlschema").setLevel(logging.WARNING) - if args.mode == "lint": - sys.exit(lint()) - elif args.mode == "test": - unittest.main(argv=sys.argv[:1]) diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 7096100a3e..d5e10d4292 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -134,9 +134,6 @@ fi # Ensure the formatting of Rust code. cargo-fmt -# Ensure all Pydantic models use strict types. -./scripts-dev/check_pydantic_models.py lint - # Ensure type hints are correct. mypy diff --git a/synapse/_pydantic_compat.py b/synapse/_pydantic_compat.py deleted file mode 100644 index a520c0e897..0000000000 --- a/synapse/_pydantic_compat.py +++ /dev/null @@ -1,104 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2023 Maxwell G -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# . -# -# Originally licensed under the Apache License, Version 2.0: -# . 
-# -# [This file includes modifications made by New Vector Limited] -# -# - -from typing import TYPE_CHECKING - -from packaging.version import Version - -try: - from pydantic import __version__ as pydantic_version -except ImportError: - import importlib.metadata - - pydantic_version = importlib.metadata.version("pydantic") - -HAS_PYDANTIC_V2: bool = Version(pydantic_version).major == 2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import ( - AnyHttpUrl, - BaseModel, - Extra, - Field, - FilePath, - MissingError, - PydanticValueError, - StrictBool, - StrictInt, - StrictStr, - ValidationError, - conbytes, - confloat, - conint, - constr, - parse_obj_as, - root_validator, - validator, - ) - from pydantic.v1.error_wrappers import ErrorWrapper - from pydantic.v1.typing import get_args -else: - from pydantic import ( - AnyHttpUrl, - BaseModel, - Extra, - Field, - FilePath, - MissingError, - PydanticValueError, - StrictBool, - StrictInt, - StrictStr, - ValidationError, - conbytes, - confloat, - conint, - constr, - parse_obj_as, - root_validator, - validator, - ) - from pydantic.error_wrappers import ErrorWrapper - from pydantic.typing import get_args - -__all__ = ( - "HAS_PYDANTIC_V2", - "AnyHttpUrl", - "BaseModel", - "constr", - "conbytes", - "conint", - "confloat", - "ErrorWrapper", - "Extra", - "Field", - "FilePath", - "get_args", - "MissingError", - "parse_obj_as", - "PydanticValueError", - "StrictBool", - "StrictInt", - "StrictStr", - "ValidationError", - "validator", - "root_validator", -) diff --git a/synapse/api/auth/mas.py b/synapse/api/auth/mas.py index 325d264161..f2b218e34f 100644 --- a/synapse/api/auth/mas.py +++ b/synapse/api/auth/mas.py @@ -16,14 +16,16 @@ import logging from typing import TYPE_CHECKING, Optional from urllib.parse import urlencode -from synapse._pydantic_compat import ( +from pydantic import ( + AnyHttpUrl, BaseModel, - Extra, + ConfigDict, StrictBool, StrictInt, StrictStr, ValidationError, ) + from synapse.api.auth.base import BaseAuth from synapse.api.errors import ( AuthError, @@ -63,8 +65,7 @@ STABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:client:device:" class ServerMetadata(BaseModel): - class Config: - extra = Extra.allow + model_config = ConfigDict(extra="allow") issuer: StrictStr account_management_uri: StrictStr @@ -73,14 +74,12 @@ class ServerMetadata(BaseModel): class IntrospectionResponse(BaseModel): retrieved_at_ms: StrictInt active: StrictBool - scope: Optional[StrictStr] - username: Optional[StrictStr] - sub: Optional[StrictStr] - device_id: Optional[StrictStr] - expires_in: Optional[StrictInt] - - class Config: - extra = Extra.allow + scope: Optional[StrictStr] = None + username: Optional[StrictStr] = None + sub: Optional[StrictStr] = None + device_id: Optional[StrictStr] = None + expires_in: Optional[StrictInt] = None + model_config = ConfigDict(extra="allow") def get_scope_set(self) -> set[str]: if not self.scope: @@ -148,11 +147,33 @@ class MasDelegatedAuth(BaseAuth): @property def _metadata_url(self) -> str: - return f"{self._config.endpoint.rstrip('/')}/.well-known/openid-configuration" + return str( + AnyHttpUrl.build( + scheme=self._config.endpoint.scheme, + username=self._config.endpoint.username, + password=self._config.endpoint.password, + host=self._config.endpoint.host or "", + port=self._config.endpoint.port, + path=".well-known/openid-configuration", + query=None, + fragment=None, + ) + ) @property def _introspection_endpoint(self) -> str: - return f"{self._config.endpoint.rstrip('/')}/oauth2/introspect" + return str( + 
AnyHttpUrl.build( + scheme=self._config.endpoint.scheme, + username=self._config.endpoint.username, + password=self._config.endpoint.password, + host=self._config.endpoint.host or "", + port=self._config.endpoint.port, + path="oauth2/introspect", + query=None, + fragment=None, + ) + ) async def _load_metadata(self) -> ServerMetadata: response = await self._http_client.get_json(self._metadata_url) diff --git a/synapse/config/_util.py b/synapse/config/_util.py index 3e239c525e..e09c68ebd4 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -21,8 +21,8 @@ from typing import Any, TypeVar import jsonschema +from pydantic import BaseModel, TypeAdapter, ValidationError -from synapse._pydantic_compat import BaseModel, ValidationError, parse_obj_as from synapse.config._base import ConfigError from synapse.types import JsonDict, StrSequence @@ -93,7 +93,7 @@ def parse_and_validate_mapping( try: # type-ignore: mypy doesn't like constructing `Dict[str, model_type]` because # `model_type` is a runtime variable. Pydantic is fine with this. - instances = parse_obj_as(dict[str, model_type], config) # type: ignore[valid-type] + instances = TypeAdapter(dict[str, model_type]).validate_python(config) # type: ignore[valid-type] except ValidationError as e: raise ConfigError(str(e)) from e return instances diff --git a/synapse/config/mas.py b/synapse/config/mas.py index fe0d326f7a..53cf500e95 100644 --- a/synapse/config/mas.py +++ b/synapse/config/mas.py @@ -15,15 +15,17 @@ from typing import Any, Optional -from synapse._pydantic_compat import ( +from pydantic import ( AnyHttpUrl, Field, FilePath, StrictBool, StrictStr, ValidationError, - validator, + model_validator, ) +from typing_extensions import Self + from synapse.config.experimental import read_secret_from_file_once from synapse.types import JsonDict from synapse.util.pydantic_models import ParseModel @@ -33,27 +35,24 @@ from ._base import Config, ConfigError, RootConfig class MasConfigModel(ParseModel): enabled: StrictBool = False - endpoint: AnyHttpUrl = Field(default="http://localhost:8080") + endpoint: AnyHttpUrl = AnyHttpUrl("http://localhost:8080") secret: Optional[StrictStr] = Field(default=None) secret_path: Optional[FilePath] = Field(default=None) - @validator("secret") - def validate_secret_is_set_if_enabled(cls, v: Any, values: dict) -> Any: - if values.get("enabled", False) and not values.get("secret_path") and not v: + @model_validator(mode="after") + def verify_secret(self) -> Self: + if not self.enabled: + return self + if not self.secret and not self.secret_path: raise ValueError( - "You must set a `secret` or `secret_path` when enabling Matrix Authentication Service integration." + "You must set a `secret` or `secret_path` when enabling the Matrix " + "Authentication Service integration." ) - - return v - - @validator("secret_path") - def validate_secret_path_is_set_if_enabled(cls, v: Any, values: dict) -> Any: - if values.get("secret"): + if self.secret and self.secret_path: raise ValueError( "`secret` and `secret_path` cannot be set at the same time." 
) - - return v + return self class MasConfig(Config): diff --git a/synapse/config/matrixrtc.py b/synapse/config/matrixrtc.py index 7844d8f398..74fd7cad81 100644 --- a/synapse/config/matrixrtc.py +++ b/synapse/config/matrixrtc.py @@ -17,9 +17,9 @@ from typing import Any, Optional -from pydantic import ValidationError +from pydantic import Field, StrictStr, ValidationError, model_validator +from typing_extensions import Self -from synapse._pydantic_compat import Field, StrictStr, validator from synapse.types import JsonDict from synapse.util.pydantic_models import ParseModel @@ -32,14 +32,13 @@ class TransportConfigModel(ParseModel): livekit_service_url: Optional[StrictStr] = Field(default=None) """An optional livekit service URL. Only required if type is "livekit".""" - @validator("livekit_service_url", always=True) - def validate_livekit_service_url(cls, v: Any, values: dict) -> Any: - if values.get("type") == "livekit" and not v: + @model_validator(mode="after") + def validate_livekit_service_url(self) -> Self: + if self.type == "livekit" and not self.livekit_service_url: raise ValueError( "You must set a `livekit_service_url` when using the 'livekit' transport." ) - - return v + return self class MatrixRtcConfigModel(ParseModel): diff --git a/synapse/config/workers.py b/synapse/config/workers.py index da7148b3a1..90f8c72412 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -25,12 +25,12 @@ import logging from typing import Any, Optional, Union import attr - -from synapse._pydantic_compat import ( +from pydantic import ( StrictBool, StrictInt, StrictStr, ) + from synapse.config._base import ( Config, ConfigError, diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 6fb52f82c1..c2cecd0fcb 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -22,8 +22,8 @@ import collections.abc from typing import Union, cast import jsonschema +from pydantic import Field, StrictBool, StrictStr -from synapse._pydantic_compat import Field, StrictBool, StrictStr from synapse.api.constants import ( MAX_ALIAS_LENGTH, EventContentFields, diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 66694e0607..bca93fb036 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -35,15 +35,10 @@ from typing import ( overload, ) +from pydantic import BaseModel, ValidationError + from twisted.web.server import Request -from synapse._pydantic_compat import ( - BaseModel, - ErrorWrapper, - MissingError, - PydanticValueError, - ValidationError, -) from synapse.api.errors import Codes, SynapseError from synapse.http import redact_uri from synapse.http.server import HttpServer @@ -897,20 +892,20 @@ def validate_json_object(content: JsonDict, model_type: type[Model]) -> Model: if it wasn't a JSON object. """ try: - instance = model_type.parse_obj(content) + instance = model_type.model_validate(content) except ValidationError as e: + err_type = e.errors()[0]["type"] + # Choose a matrix error code. The catch-all is BAD_JSON, but we try to find a # more specific error if possible (which occasionally helps us to be spec- # compliant) This is a bit awkward because the spec's error codes aren't very # clear-cut: BAD_JSON arguably overlaps with MISSING_PARAM and INVALID_PARAM. 
errcode = Codes.BAD_JSON - raw_errors = e.raw_errors - if len(raw_errors) == 1 and isinstance(raw_errors[0], ErrorWrapper): - raw_error = raw_errors[0].exc - if isinstance(raw_error, MissingError): + if e.error_count() == 1: + if err_type == "missing": errcode = Codes.MISSING_PARAM - elif isinstance(raw_error, PydanticValueError): + elif err_type == "value_error": errcode = Codes.INVALID_PARAM raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=errcode) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index e29b0d36e0..3eab53e5a2 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -26,8 +26,8 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Optional, Union import attr +from pydantic import StrictBool, StrictInt, StrictStr -from synapse._pydantic_compat import StrictBool, StrictInt, StrictStr from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( @@ -1476,9 +1476,9 @@ class RedactUser(RestServlet): class PostBody(RequestBodyModel): rooms: list[StrictStr] - reason: Optional[StrictStr] - limit: Optional[StrictInt] - use_admin: Optional[StrictBool] + reason: Optional[StrictStr] = None + limit: Optional[StrictInt] = None + use_admin: Optional[StrictBool] = None async def on_POST( self, request: SynapseRequest, user_id: str diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 8f2f54f750..f928a8a3f4 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -25,10 +25,11 @@ from typing import TYPE_CHECKING, Literal, Optional from urllib.parse import urlparse import attr +from pydantic import StrictBool, StrictStr, StringConstraints +from typing_extensions import Annotated from twisted.web.server import Request -from synapse._pydantic_compat import StrictBool, StrictStr, constr from synapse.api.constants import LoginType from synapse.api.errors import ( Codes, @@ -162,11 +163,9 @@ class PasswordRestServlet(RestServlet): class PostBody(RequestBodyModel): auth: Optional[AuthenticationData] = None logout_devices: StrictBool = True - if TYPE_CHECKING: - # workaround for https://github.com/samuelcolvin/pydantic/issues/156 - new_password: Optional[StrictStr] = None - else: - new_password: Optional[constr(max_length=512, strict=True)] = None + new_password: Optional[ + Annotated[str, StringConstraints(max_length=512, strict=True)] + ] = None @interactive_auth_handler async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 092406b994..e20e49d48b 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -24,7 +24,8 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, Optional -from synapse._pydantic_compat import Extra, StrictStr +from pydantic import ConfigDict, StrictStr + from synapse.api import errors from synapse.api.errors import NotFoundError, SynapseError, UnrecognizedRequestError from synapse.http.server import HttpServer @@ -94,7 +95,7 @@ class DeleteDevicesRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() class PostBody(RequestBodyModel): - auth: Optional[AuthenticationData] + auth: Optional[AuthenticationData] = None devices: list[StrictStr] @interactive_auth_handler @@ -108,7 +109,7 @@ class DeleteDevicesRestServlet(RestServlet): # TODO: Can/should we remove this fallback now? 
# deal with older clients which didn't pass a JSON dict # the same as those that pass an empty dict - body = self.PostBody.parse_obj({}) + body = self.PostBody.model_validate({}) else: raise e @@ -172,7 +173,7 @@ class DeviceRestServlet(RestServlet): return 200, device class DeleteBody(RequestBodyModel): - auth: Optional[AuthenticationData] + auth: Optional[AuthenticationData] = None @interactive_auth_handler async def on_DELETE( @@ -188,7 +189,7 @@ class DeviceRestServlet(RestServlet): # TODO: can/should we remove this fallback now? # deal with older clients which didn't pass a JSON dict # the same as those that pass an empty dict - body = self.DeleteBody.parse_obj({}) + body = self.DeleteBody.model_validate({}) else: raise @@ -217,7 +218,7 @@ class DeviceRestServlet(RestServlet): return 200, {} class PutBody(RequestBodyModel): - display_name: Optional[StrictStr] + display_name: Optional[StrictStr] = None async def on_PUT( self, request: SynapseRequest, device_id: str @@ -247,8 +248,7 @@ class DehydratedDeviceDataModel(RequestBodyModel): Expects other freeform fields. Use .dict() to access them. """ - class Config: - extra = Extra.allow + model_config = ConfigDict(extra="allow") algorithm: StrictStr @@ -316,7 +316,7 @@ class DehydratedDeviceServlet(RestServlet): class PutBody(RequestBodyModel): device_data: DehydratedDeviceDataModel - initial_device_display_name: Optional[StrictStr] + initial_device_display_name: Optional[StrictStr] = None async def on_PUT(self, request: SynapseRequest) -> tuple[int, JsonDict]: submission = parse_and_validate_json_object_from_request(request, self.PutBody) @@ -391,7 +391,7 @@ class DehydratedDeviceEventsServlet(RestServlet): self.store = hs.get_datastores().main class PostBody(RequestBodyModel): - next_batch: Optional[StrictStr] + next_batch: Optional[StrictStr] = None async def on_POST( self, request: SynapseRequest, device_id: str @@ -539,9 +539,7 @@ class DehydratedDeviceV2Servlet(RestServlet): device_data: DehydratedDeviceDataModel device_id: StrictStr initial_device_display_name: Optional[StrictStr] - - class Config: - extra = Extra.allow + model_config = ConfigDict(extra="allow") async def on_PUT(self, request: SynapseRequest) -> tuple[int, JsonDict]: submission = parse_and_validate_json_object_from_request(request, self.PutBody) diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index eccada67be..943674bbb1 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -22,9 +22,10 @@ import logging from typing import TYPE_CHECKING, Literal, Optional +from pydantic import StrictStr + from twisted.web.server import Request -from synapse._pydantic_compat import StrictStr from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import ( diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 1f71359d55..b87b9bd68a 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -26,13 +26,8 @@ from collections import Counter from http import HTTPStatus from typing import TYPE_CHECKING, Any, Mapping, Optional, Union -from typing_extensions import Self +from pydantic import StrictBool, StrictStr, field_validator -from synapse._pydantic_compat import ( - StrictBool, - StrictStr, - validator, -) from synapse.api.auth.mas import MasDelegatedAuth from synapse.api.errors import ( Codes, @@ -164,7 +159,7 @@ class KeyUploadServlet(RestServlet): device_keys: Optional[DeviceKeys] = None 
"""Identity keys for the device. May be absent if no new identity keys are required.""" - fallback_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] + fallback_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] = None """ The public key which should be used if the device's one-time keys are exhausted. The fallback key is not deleted once used, but should be @@ -180,8 +175,9 @@ class KeyUploadServlet(RestServlet): May be absent if a new fallback key is not required. """ - @validator("fallback_keys", pre=True) - def validate_fallback_keys(cls: Self, v: Any) -> Any: + @field_validator("fallback_keys", mode="before") + @classmethod + def validate_fallback_keys(cls, v: Any) -> Any: if v is None: return v if not isinstance(v, dict): @@ -206,8 +202,9 @@ class KeyUploadServlet(RestServlet): https://spec.matrix.org/v1.16/client-server-api/#key-algorithms. """ - @validator("one_time_keys", pre=True) - def validate_one_time_keys(cls: Self, v: Any) -> Any: + @field_validator("one_time_keys", mode="before") + @classmethod + def validate_one_time_keys(cls, v: Any) -> Any: if v is None: return v if not isinstance(v, dict): diff --git a/synapse/rest/client/reporting.py b/synapse/rest/client/reporting.py index f11f6b7b77..0c594b9f3f 100644 --- a/synapse/rest/client/reporting.py +++ b/synapse/rest/client/reporting.py @@ -23,7 +23,8 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING -from synapse._pydantic_compat import StrictStr +from pydantic import StrictStr + from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import ( diff --git a/synapse/rest/client/thread_subscriptions.py b/synapse/rest/client/thread_subscriptions.py index f879c7589c..d02f2cb48a 100644 --- a/synapse/rest/client/thread_subscriptions.py +++ b/synapse/rest/client/thread_subscriptions.py @@ -50,7 +50,7 @@ class ThreadSubscriptionsRestServlet(RestServlet): self.handler = hs.get_thread_subscriptions_handler() class PutBody(RequestBodyModel): - automatic: Optional[AnyEventId] + automatic: Optional[AnyEventId] = None """ If supplied, the event ID of an event giving rise to this automatic subscription. 
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 51cb077496..e8b0b31210 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -23,11 +23,11 @@ import logging import re from typing import TYPE_CHECKING, Mapping, Optional +from pydantic import ConfigDict, StrictInt, StrictStr from signedjson.sign import sign_json from twisted.web.server import Request -from synapse._pydantic_compat import Extra, StrictInt, StrictStr from synapse.crypto.keyring import ServerKeyFetcher from synapse.http.server import HttpServer from synapse.http.servlet import ( @@ -48,8 +48,7 @@ logger = logging.getLogger(__name__) class _KeyQueryCriteriaDataModel(RequestBodyModel): - class Config: - extra = Extra.allow + model_config = ConfigDict(extra="allow") minimum_valid_until_ts: Optional[StrictInt] diff --git a/synapse/rest/synapse/mas/devices.py b/synapse/rest/synapse/mas/devices.py index 654fed8c03..eac51de44c 100644 --- a/synapse/rest/synapse/mas/devices.py +++ b/synapse/rest/synapse/mas/devices.py @@ -17,7 +17,8 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, Optional -from synapse._pydantic_compat import StrictStr +from pydantic import StrictStr + from synapse.api.errors import NotFoundError from synapse.http.servlet import parse_and_validate_json_object_from_request from synapse.types import JsonDict, UserID @@ -52,7 +53,7 @@ class MasUpsertDeviceResource(MasBaseResource): class PostBody(RequestBodyModel): localpart: StrictStr device_id: StrictStr - display_name: Optional[StrictStr] + display_name: Optional[StrictStr] = None async def _async_render_POST( self, request: "SynapseRequest" @@ -176,7 +177,7 @@ class MasSyncDevicesResource(MasBaseResource): class PostBody(RequestBodyModel): localpart: StrictStr - devices: set[StrictStr] + devices: list[str] async def _async_render_POST( self, request: "SynapseRequest" diff --git a/synapse/rest/synapse/mas/users.py b/synapse/rest/synapse/mas/users.py index a802887270..f52c4bb167 100644 --- a/synapse/rest/synapse/mas/users.py +++ b/synapse/rest/synapse/mas/users.py @@ -17,7 +17,8 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, Any, Optional, TypedDict -from synapse._pydantic_compat import StrictBool, StrictStr, root_validator +from pydantic import StrictBool, StrictStr, model_validator + from synapse.api.errors import NotFoundError, SynapseError from synapse.http.servlet import ( parse_and_validate_json_object_from_request, @@ -111,7 +112,8 @@ class MasProvisionUserResource(MasBaseResource): unset_emails: StrictBool = False set_emails: Optional[list[StrictStr]] = None - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_exclusive(cls, values: Any) -> Any: if "unset_displayname" in values and "set_displayname" in values: raise ValueError( diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index ce213050a9..1c17d4d609 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -35,8 +35,8 @@ from typing import ( ) import attr +from pydantic import BaseModel -from synapse._pydantic_compat import BaseModel from synapse.storage.engines import PostgresEngine from synapse.storage.types import Connection, Cursor from synapse.types import JsonDict, StrCollection @@ -954,7 +954,7 @@ class BackgroundUpdater: # match the constraint. # 3. We try re-validating the constraint. 
- parsed_progress = ValidateConstraintProgress.parse_obj(progress) + parsed_progress = ValidateConstraintProgress.model_validate(progress) if parsed_progress.state == ValidateConstraintProgress.State.check: return_columns = ", ".join(unique_columns) diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index aef7db8e98..c83b534e00 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -32,8 +32,8 @@ from typing import ( ) import attr +from pydantic import ConfigDict -from synapse._pydantic_compat import Extra from synapse.api.constants import EventTypes from synapse.events import EventBase from synapse.types import ( @@ -65,15 +65,12 @@ class SlidingSyncConfig(SlidingSyncBody): user: UserID requester: Requester - - # Pydantic config - class Config: - # By default, ignore fields that we don't recognise. - extra = Extra.ignore - # By default, don't allow fields to be reassigned after parsing. - allow_mutation = False - # Allow custom types like `UserID` to be used in the model - arbitrary_types_allowed = True + model_config = ConfigDict( + extra="ignore", + frozen=True, + # Allow custom types like `UserID` to be used in the model. + arbitrary_types_allowed=True, + ) class OperationType(Enum): diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 4940fabd12..865c2ba532 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -18,18 +18,21 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Optional, Union +from typing import Optional, Union -from synapse._pydantic_compat import ( - Extra, +from pydantic import ( + ConfigDict, Field, StrictBool, StrictInt, StrictStr, - conint, - constr, - validator, + StringConstraints, + field_validator, + model_validator, ) +from pydantic_core import PydanticCustomError +from typing_extensions import Annotated, Self + from synapse.types.rest import RequestBodyModel from synapse.util.threepids import validate_email @@ -44,39 +47,36 @@ class AuthenticationData(RequestBodyModel): `.dict(exclude_unset=True)` to access them. 
""" - class Config: - extra = Extra.allow + model_config = ConfigDict(extra="allow") session: Optional[StrictStr] = None type: Optional[StrictStr] = None -if TYPE_CHECKING: - ClientSecretStr = StrictStr -else: - # See also assert_valid_client_secret() - ClientSecretStr = constr( - regex="[0-9a-zA-Z.=_-]", # noqa: F722 +# See also assert_valid_client_secret() +ClientSecretStr = Annotated[ + str, + StringConstraints( + pattern="[0-9a-zA-Z.=_-]", min_length=1, max_length=255, strict=True, - ) + ), +] class ThreepidRequestTokenBody(RequestBodyModel): client_secret: ClientSecretStr - id_server: Optional[StrictStr] - id_access_token: Optional[StrictStr] - next_link: Optional[StrictStr] + id_server: Optional[StrictStr] = None + id_access_token: Optional[StrictStr] = None + next_link: Optional[StrictStr] = None send_attempt: StrictInt - @validator("id_access_token", always=True) - def token_required_for_identity_server( - cls, token: Optional[str], values: dict[str, object] - ) -> Optional[str]: - if values.get("id_server") is not None and token is None: + @model_validator(mode="after") + def token_required_for_identity_server(self) -> Self: + if self.id_server is not None and self.id_access_token is None: raise ValueError("id_access_token is required if an id_server is supplied.") - return token + return self class EmailRequestTokenBody(ThreepidRequestTokenBody): @@ -87,14 +87,21 @@ class EmailRequestTokenBody(ThreepidRequestTokenBody): # know the exact spelling (eg. upper and lower case) of address in the database. # Without this, an email stored in the database as "foo@bar.com" would cause # user requests for "FOO@bar.com" to raise a Not Found error. - _email_validator = validator("email", allow_reuse=True)(validate_email) + @field_validator("email") + @classmethod + def _email_validator(cls, email: StrictStr) -> StrictStr: + try: + return validate_email(email) + except ValueError as e: + # To ensure backward compatibility of HTTP error codes, we return a + # Pydantic error with the custom, unrecognized error type + # "email_custom_err_type" instead of the default error type + # "value_error". This results in the more generic BAD_JSON HTTP + # error instead of the more specific INVALID_PARAM one. 
+ raise PydanticCustomError("email_custom_err_type", str(e), None) from e -if TYPE_CHECKING: - ISO3116_1_Alpha_2 = StrictStr -else: - # Per spec: two-letter uppercase ISO-3166-1-alpha-2 - ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True) +ISO3116_1_Alpha_2 = Annotated[str, StringConstraints(pattern="[A-Z]{2}", strict=True)] class MsisdnRequestTokenBody(ThreepidRequestTokenBody): @@ -144,12 +151,10 @@ class SlidingSyncBody(RequestBodyModel): (Max 1000 messages) """ - required_state: list[tuple[StrictStr, StrictStr]] - # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 - if TYPE_CHECKING: - timeline_limit: int - else: - timeline_limit: conint(le=1000, strict=True) # type: ignore[valid-type] + required_state: list[ + Annotated[tuple[StrictStr, StrictStr], Field(strict=False)] + ] + timeline_limit: Annotated[int, Field(le=1000, strict=True)] class SlidingSyncList(CommonRoomParameters): """ @@ -251,13 +256,17 @@ class SlidingSyncBody(RequestBodyModel): tags: Optional[list[StrictStr]] = None not_tags: Optional[list[StrictStr]] = None - # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 - if TYPE_CHECKING: - ranges: Optional[list[tuple[int, int]]] = None - else: - ranges: Optional[ - list[tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]] - ] = None # type: ignore[valid-type] + ranges: Optional[ + list[ + Annotated[ + tuple[ + Annotated[int, Field(ge=0, strict=True)], + Annotated[int, Field(ge=0, strict=True)], + ], + Field(strict=False), + ] + ] + ] = None slow_get_all_rooms: Optional[StrictBool] = False filters: Optional[Filters] = None @@ -286,7 +295,8 @@ class SlidingSyncBody(RequestBodyModel): limit: StrictInt = 100 since: Optional[StrictStr] = None - @validator("since") + @field_validator("since") + @classmethod def since_token_check( cls, value: Optional[StrictStr] ) -> Optional[StrictStr]: @@ -382,22 +392,21 @@ class SlidingSyncBody(RequestBodyModel): receipts: Optional[ReceiptsExtension] = None typing: Optional[TypingExtension] = None thread_subscriptions: Optional[ThreadSubscriptionsExtension] = Field( - alias="io.element.msc4308.thread_subscriptions" + None, alias="io.element.msc4308.thread_subscriptions" ) - conn_id: Optional[StrictStr] - - # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 - if TYPE_CHECKING: - lists: Optional[dict[str, SlidingSyncList]] = None - else: - lists: Optional[dict[constr(max_length=64, strict=True), SlidingSyncList]] = ( - None # type: ignore[valid-type] - ) + conn_id: Optional[StrictStr] = None + lists: Optional[ + dict[ + Annotated[str, StringConstraints(max_length=64, strict=True)], + SlidingSyncList, + ] + ] = None room_subscriptions: Optional[dict[StrictStr, RoomSubscription]] = None extensions: Optional[Extensions] = None - @validator("lists") + @field_validator("lists") + @classmethod def lists_length_check( cls, value: Optional[dict[str, SlidingSyncList]] ) -> Optional[dict[str, SlidingSyncList]]: diff --git a/synapse/util/events.py b/synapse/util/events.py index e41799b1f7..4a1aa28ce4 100644 --- a/synapse/util/events.py +++ b/synapse/util/events.py @@ -15,7 +15,8 @@ from typing import Any, Optional -from synapse._pydantic_compat import Field, StrictStr, ValidationError, validator +from pydantic import Field, StrictStr, ValidationError, field_validator + from synapse.types import JsonDict from synapse.util.pydantic_models import ParseModel from synapse.util.stringutils import random_string @@ -40,7 +41,7 
@@ class MTextRepresentation(ParseModel): """ body: StrictStr - mimetype: Optional[StrictStr] + mimetype: Optional[StrictStr] = None class MTopic(ParseModel): @@ -52,7 +53,7 @@ class MTopic(ParseModel): See `TopicContentBlock` in the Matrix specification. """ - m_text: Optional[list[MTextRepresentation]] = Field(alias="m.text") + m_text: Optional[list[MTextRepresentation]] = Field(None, alias="m.text") """ An ordered array of textual representations in different mimetypes. """ @@ -60,16 +61,17 @@ class MTopic(ParseModel): # Because "Receivers SHOULD use the first representation in the array that they # understand.", we ignore invalid representations in the `m.text` field and use # what we can. - @validator("m_text", pre=True) + @field_validator("m_text", mode="before") + @classmethod def ignore_invalid_representations( cls, m_text: Any ) -> Optional[list[MTextRepresentation]]: - if not isinstance(m_text, list): - raise ValueError("m.text must be a list") + if not isinstance(m_text, (list, tuple)): + raise ValueError("m.text must be a list or a tuple") representations = [] for element in m_text: try: - representations.append(MTextRepresentation.parse_obj(element)) + representations.append(MTextRepresentation.model_validate(element)) except ValidationError: continue return representations @@ -85,17 +87,18 @@ class TopicContent(ParseModel): The topic in plain text. """ - m_topic: Optional[MTopic] = Field(alias="m.topic") + m_topic: Optional[MTopic] = Field(None, alias="m.topic") """ Textual representation of the room topic in different mimetypes. """ # We ignore invalid `m.topic` fields as we can always fall back to the plain-text # `topic` field. - @validator("m_topic", pre=True) + @field_validator("m_topic", mode="before") + @classmethod def ignore_invalid_m_topic(cls, m_topic: Any) -> Optional[MTopic]: try: - return MTopic.parse_obj(m_topic) + return MTopic.model_validate(m_topic) except ValidationError: return None @@ -114,7 +117,7 @@ def get_plain_text_topic_from_event_content(content: JsonDict) -> Optional[str]: """ try: - topic_content = TopicContent.parse_obj(content) + topic_content = TopicContent.model_validate(content, strict=False) except ValidationError: return None diff --git a/synapse/util/pydantic_models.py b/synapse/util/pydantic_models.py index 4880709501..e1e2d8b99f 100644 --- a/synapse/util/pydantic_models.py +++ b/synapse/util/pydantic_models.py @@ -13,18 +13,20 @@ # # -import re -from typing import Any, Callable, Generator +from typing import Annotated, Union -from synapse._pydantic_compat import BaseModel, Extra, StrictStr +from pydantic import AfterValidator, BaseModel, ConfigDict, StrictStr, StringConstraints + +from synapse.api.errors import SynapseError from synapse.types import EventID class ParseModel(BaseModel): """A custom version of Pydantic's BaseModel which - - ignores unknown fields and - - does not allow fields to be overwritten after construction, + - ignores unknown fields, + - does not allow fields to be overwritten after construction and + - enables strict mode, but otherwise uses Pydantic's default behaviour. @@ -36,48 +38,19 @@ class ParseModel(BaseModel): https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally """ - class Config: - # By default, ignore fields that we don't recognise. - extra = Extra.ignore - # By default, don't allow fields to be reassigned after parsing. 
- allow_mutation = False + model_config = ConfigDict(extra="ignore", frozen=True, strict=True) -class AnyEventId(StrictStr): - """ - A validator for strings that need to be an Event ID. +def validate_event_id_v1_and_2(value: str) -> str: + try: + EventID.from_string(value) + except SynapseError as e: + raise ValueError from e + return value - Accepts any valid grammar of Event ID from any room version. - """ - EVENT_ID_HASH_ROOM_VERSION_3_PLUS = re.compile( - r"^([a-zA-Z0-9-_]{43}|[a-zA-Z0-9+/]{43})$" - ) - - @classmethod - def __get_validators__(cls) -> Generator[Callable[..., Any], Any, Any]: - yield from super().__get_validators__() # type: ignore - yield cls.validate_event_id - - @classmethod - def validate_event_id(cls, value: str) -> str: - if not value.startswith("$"): - raise ValueError("Event ID must start with `$`") - - if ":" in value: - # Room versions 1 and 2 - EventID.from_string(value) # throws on fail - else: - # Room versions 3+: event ID is $ + a base64 sha256 hash - # Room version 3 is base64, 4+ are base64Url - # In both cases, the base64 is unpadded. - # refs: - # - https://spec.matrix.org/v1.15/rooms/v3/ e.g. $acR1l0raoZnm60CBwAVgqbZqoO/mYU81xysh1u7XcJk - # - https://spec.matrix.org/v1.15/rooms/v4/ e.g. $Rqnc-F-dvnEYJTyHq_iKxU2bZ1CI92-kuZq3a5lr5Zg - b64_hash = value[1:] - if cls.EVENT_ID_HASH_ROOM_VERSION_3_PLUS.fullmatch(b64_hash) is None: - raise ValueError( - "Event ID must either have a domain part or be a valid hash" - ) - - return value +EventIdV1And2 = Annotated[StrictStr, AfterValidator(validate_event_id_v1_and_2)] +EventIdV3Plus = Annotated[ + StrictStr, StringConstraints(pattern=r"^\$([a-zA-Z0-9-_]{43}|[a-zA-Z0-9+/]{43})$") +] +AnyEventId = Union[EventIdV1And2, EventIdV3Plus] diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py index 85e0a3b6b6..6105ca2b04 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py @@ -21,6 +21,7 @@ import os import tempfile +from pathlib import Path from unittest.mock import Mock from synapse.config import ConfigError @@ -309,7 +310,9 @@ class MasAuthDelegation(TestCase): def test_secret_and_secret_path_are_mutually_exclusive(self) -> None: with tempfile.NamedTemporaryFile() as f: self.config_dict["matrix_authentication_service"]["secret"] = "verysecret" - self.config_dict["matrix_authentication_service"]["secret_path"] = f.name + self.config_dict["matrix_authentication_service"]["secret_path"] = Path( + f.name + ) with self.assertRaises(ConfigError): self.parse_config() @@ -317,13 +320,15 @@ class MasAuthDelegation(TestCase): with tempfile.NamedTemporaryFile(buffering=0) as f: f.write(b"53C237") del self.config_dict["matrix_authentication_service"]["secret"] - self.config_dict["matrix_authentication_service"]["secret_path"] = f.name + self.config_dict["matrix_authentication_service"]["secret_path"] = Path( + f.name + ) config = self.parse_config() self.assertEqual(config.mas.secret(), "53C237") def test_secret_path_must_exist(self) -> None: del self.config_dict["matrix_authentication_service"]["secret"] - self.config_dict["matrix_authentication_service"]["secret_path"] = ( + self.config_dict["matrix_authentication_service"]["secret_path"] = Path( "/not/a/valid/file" ) with self.assertRaises(ConfigError): diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index c4c62c7800..03474d7400 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -1201,7 +1201,9 @@ class 
ThreepidEmailRestTestCase(unittest.HomeserverTestCase): self.assertEqual( HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] ) - self.assertEqual(expected_errcode, channel.json_body["errcode"]) + self.assertEqual( + expected_errcode, channel.json_body["errcode"], msg=channel.result["body"] + ) self.assertIn(expected_error, channel.json_body["error"]) def _validate_token(self, link: str) -> None: diff --git a/tests/rest/client/test_models.py b/tests/rest/client/test_models.py index 75479e6235..f297856830 100644 --- a/tests/rest/client/test_models.py +++ b/tests/rest/client/test_models.py @@ -21,7 +21,8 @@ import unittest as stdlib_unittest from typing import Literal -from synapse._pydantic_compat import BaseModel, ValidationError +from pydantic import BaseModel, ValidationError + from synapse.types.rest.client import EmailRequestTokenBody @@ -35,16 +36,16 @@ class ThreepidMediumEnumTestCase(stdlib_unittest.TestCase): This is arguably more of a test of a class that inherits from str and Enum simultaneously. """ - model = self.Model.parse_obj({"medium": "email"}) + model = self.Model.model_validate({"medium": "email"}) self.assertEqual(model.medium, "email") def test_rejects_invalid_medium_value(self) -> None: with self.assertRaises(ValidationError): - self.Model.parse_obj({"medium": "interpretive_dance"}) + self.Model.model_validate({"medium": "interpretive_dance"}) def test_rejects_invalid_medium_type(self) -> None: with self.assertRaises(ValidationError): - self.Model.parse_obj({"medium": 123}) + self.Model.model_validate({"medium": 123}) class EmailRequestTokenBodyTestCase(stdlib_unittest.TestCase): @@ -56,14 +57,14 @@ class EmailRequestTokenBodyTestCase(stdlib_unittest.TestCase): def test_token_required_if_id_server_provided(self) -> None: with self.assertRaises(ValidationError): - EmailRequestTokenBody.parse_obj( + EmailRequestTokenBody.model_validate( { **self.base_request, "id_server": "identity.wonderland.com", } ) with self.assertRaises(ValidationError): - EmailRequestTokenBody.parse_obj( + EmailRequestTokenBody.model_validate( { **self.base_request, "id_server": "identity.wonderland.com", @@ -73,7 +74,7 @@ class EmailRequestTokenBodyTestCase(stdlib_unittest.TestCase): def test_token_typechecked_when_id_server_provided(self) -> None: with self.assertRaises(ValidationError): - EmailRequestTokenBody.parse_obj( + EmailRequestTokenBody.model_validate( { **self.base_request, "id_server": "identity.wonderland.com", diff --git a/tests/rest/client/test_thread_subscriptions.py b/tests/rest/client/test_thread_subscriptions.py index 5aae07ef50..87c477cbb5 100644 --- a/tests/rest/client/test_thread_subscriptions.py +++ b/tests/rest/client/test_thread_subscriptions.py @@ -111,7 +111,7 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): {}, access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # Assert the subscription was saved channel = self.make_request( @@ -119,8 +119,8 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): f"{PREFIX}/{self.room_id}/thread/{self.root_event_id}/subscription", access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) - self.assertEqual(channel.json_body, {"automatic": False}) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + self.assertEqual(channel.json_body, {"automatic": False}, channel.json_body) # Now also register an automatic subscription; it should not # override the manual subscription @@ 
-130,7 +130,7 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): {"automatic": self.threaded_events[0]}, access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # Assert the manual subscription was not overridden channel = self.make_request( @@ -138,8 +138,8 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): f"{PREFIX}/{self.room_id}/thread/{self.root_event_id}/subscription", access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) - self.assertEqual(channel.json_body, {"automatic": False}) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + self.assertEqual(channel.json_body, {"automatic": False}, channel.json_body) def test_subscribe_automatic_then_manual(self) -> None: """Test subscribing to a thread, first an automatic subscription then a manual subscription. @@ -160,8 +160,8 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): f"{PREFIX}/{self.room_id}/thread/{self.root_event_id}/subscription", access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) - self.assertEqual(channel.json_body, {"automatic": True}) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + self.assertEqual(channel.json_body, {"automatic": True}, channel.json_body) # Now also register a manual subscription channel = self.make_request( @@ -170,7 +170,7 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): {}, access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # Assert the manual subscription was not overridden channel = self.make_request( @@ -178,8 +178,8 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): f"{PREFIX}/{self.room_id}/thread/{self.root_event_id}/subscription", access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) - self.assertEqual(channel.json_body, {"automatic": False}) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + self.assertEqual(channel.json_body, {"automatic": False}, channel.json_body) def test_unsubscribe(self) -> None: """Test subscribing to a thread, then unsubscribing.""" @@ -191,7 +191,7 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): }, access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # Assert the subscription was saved channel = self.make_request( @@ -199,23 +199,23 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): f"{PREFIX}/{self.room_id}/thread/{self.root_event_id}/subscription", access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) - self.assertEqual(channel.json_body, {"automatic": True}) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + self.assertEqual(channel.json_body, {"automatic": True}, channel.json_body) channel = self.make_request( "DELETE", f"{PREFIX}/{self.room_id}/thread/{self.root_event_id}/subscription", access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) channel = self.make_request( "GET", f"{PREFIX}/{self.room_id}/thread/{self.root_event_id}/subscription", access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.NOT_FOUND) - self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND") + self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, 
channel.json_body) + self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND", channel.json_body) def test_set_thread_subscription_nonexistent_thread(self) -> None: """Test setting subscription settings for a nonexistent thread.""" @@ -225,8 +225,8 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): {}, access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.NOT_FOUND) - self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND") + self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.json_body) + self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND", channel.json_body) def test_set_thread_subscription_no_access(self) -> None: """Test that a user can't set thread subscription for a thread they can't access.""" @@ -239,8 +239,8 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): {}, access_token=no_access_token, ) - self.assertEqual(channel.code, HTTPStatus.NOT_FOUND) - self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND") + self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.json_body) + self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND", channel.json_body) def test_invalid_body(self) -> None: """Test that sending invalid subscription settings is rejected.""" @@ -251,7 +251,7 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): {"automatic": True}, access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.json_body) channel = self.make_request( "PUT", @@ -260,7 +260,7 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): {"automatic": "$malformedEventId"}, access_token=self.token, ) - self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.json_body) def test_auto_subscribe_cause_event_not_in_thread(self) -> None: """ From 07e79805725880e88cb58e3e5565f9b48d0770ed Mon Sep 17 00:00:00 2001 From: V02460 Date: Fri, 31 Oct 2025 13:09:13 +0100 Subject: [PATCH 111/149] =?UTF-8?q?Fix=20Rust=E2=80=99s=20confusing=20life?= =?UTF-8?q?time=20lint=20(#19118)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/19118.misc | 1 + rust/src/http_client.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19118.misc diff --git a/changelog.d/19118.misc b/changelog.d/19118.misc new file mode 100644 index 0000000000..672ed45573 --- /dev/null +++ b/changelog.d/19118.misc @@ -0,0 +1 @@ +Fix a lint error related to lifetimes in Rust 1.90. \ No newline at end of file diff --git a/rust/src/http_client.rs b/rust/src/http_client.rs index e67dae169f..ca4bf1590b 100644 --- a/rust/src/http_client.rs +++ b/rust/src/http_client.rs @@ -137,7 +137,7 @@ fn get_runtime<'a>(reactor: &Bound<'a, PyAny>) -> PyResult = OnceCell::new(); /// Access to the `twisted.internet.defer` module. -fn defer(py: Python<'_>) -> PyResult<&Bound> { +fn defer(py: Python<'_>) -> PyResult<&Bound<'_, PyAny>> { Ok(DEFER .get_or_try_init(|| py.import("twisted.internet.defer").map(Into::into))? 
.bind(py)) From 3ccc5184e0fe1f00dec69293c097d513da54a410 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 31 Oct 2025 13:16:47 +0000 Subject: [PATCH 112/149] Fix schema lint script to understand `CREATE TABLE IF NOT EXISTS` (#19020) The schema lint tries to make sure we don't add or remove indices in schema files (rather than as background updates), *unless* the table was created in the same schema file. The regex to pull out the `CREATE TABLE` SQL incorrectly didn't recognise `IF NOT EXISTS`. There is a test delta file that shows that we accept different types of `CREATE TABLE` and `CREATE INDEX` statements, as well as an index creation that doesn't have a matching create table (to show that we do still catch it). The test delta should be removed before merge. --- changelog.d/19020.misc | 1 + scripts-dev/check_schema_delta.py | 20 +++++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 changelog.d/19020.misc diff --git a/changelog.d/19020.misc b/changelog.d/19020.misc new file mode 100644 index 0000000000..f5775ff194 --- /dev/null +++ b/changelog.d/19020.misc @@ -0,0 +1 @@ +Fix CI linter for schema delta files to correctly handle all types of `CREATE TABLE` syntax. diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py index 7b2dec25d4..dd96c904bb 100755 --- a/scripts-dev/check_schema_delta.py +++ b/scripts-dev/check_schema_delta.py @@ -11,9 +11,13 @@ import click import git SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$") -INDEX_CREATION_REGEX = re.compile(r"CREATE .*INDEX .*ON ([a-z_]+)", flags=re.IGNORECASE) -INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_]+)", flags=re.IGNORECASE) -TABLE_CREATION_REGEX = re.compile(r"CREATE .*TABLE ([a-z_]+)", flags=re.IGNORECASE) +INDEX_CREATION_REGEX = re.compile( + r"CREATE .*INDEX .*ON ([a-z_0-9]+)", flags=re.IGNORECASE +) +INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_0-9]+)", flags=re.IGNORECASE) +TABLE_CREATION_REGEX = re.compile( + r"CREATE .*TABLE.* ([a-z_0-9]+)\s*\(", flags=re.IGNORECASE +) # The base branch we want to check against. We use the main development branch # on the assumption that is what we are developing against. @@ -173,11 +177,14 @@ def main(force_colors: bool) -> None: clause = match.group() click.secho( - f"Found delta with index deletion: '{clause}' in {delta_file}\nThese should be in background updates.", + f"Found delta with index deletion: '{clause}' in {delta_file}", fg="red", bold=True, color=force_colors, ) + click.secho( + " ↪ These should be in background updates.", + ) return_code = 1 # Check for index creation, which is only allowed for tables we've @@ -188,11 +195,14 @@ def main(force_colors: bool) -> None: table_name = match.group(1) if table_name not in created_tables: click.secho( - f"Found delta with index creation: '{clause}' in {delta_file}\nThese should be in background updates.", + f"Found delta with index creation for existing table: '{clause}' in {delta_file}", fg="red", bold=True, color=force_colors, ) + click.secho( + " ↪ These should be in background updates (or the table should be created in the same delta).", + ) return_code = 1 click.get_current_context().exit(return_code) From 41a2762e588013887682cdd5a997f6d5b8aa6c2b Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 31 Oct 2025 10:12:05 -0500 Subject: [PATCH 113/149] Be mindful of other logging context filters in 3rd-party code (#19068) Be mindful that Synapse can be run alongside other code in the same Python process. 
We shouldn't overwrite fields on given log record unless we know it's relevant to Synapse. (no clobber) ### Background As part of Element's plan to support a light form of vhosting (virtual host) (multiple instances of Synapse in the same Python process), we're currently diving into the details and implications of running multiple instances of Synapse in the same Python process. "Per-tenant logging" tracked internally by https://github.com/element-hq/synapse-small-hosts/issues/48 --- changelog.d/19068.misc | 1 + synapse/logging/context.py | 40 ++++++++++++++++++++++++++++++++++---- 2 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 changelog.d/19068.misc diff --git a/changelog.d/19068.misc b/changelog.d/19068.misc new file mode 100644 index 0000000000..9e5c34b608 --- /dev/null +++ b/changelog.d/19068.misc @@ -0,0 +1 @@ +Be mindful of other logging context filters in 3rd-party code and avoid overwriting log record fields unless we know the log record is relevant to Synapse. diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 86e994cbb4..919493d1a3 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -604,25 +604,57 @@ class LoggingContextFilter(logging.Filter): self._default_request = request def filter(self, record: logging.LogRecord) -> Literal[True]: - """Add each fields from the logging contexts to the record. + """ + Add each field from the logging context to the record. + + Please be mindful of 3rd-party code outside of Synapse (like in the case of + Synapse Pro for small hosts) as this is running as a global log record filter. + Other code may have set their own attributes on the record and the log record + may not be relevant to Synapse at all so we should not mangle it. + + We can have some defaults but we should avoid overwriting existing attributes on + any log record unless we actually have a Synapse logcontext (not just the + default sentinel logcontext). + Returns: True to include the record in the log output. """ context = current_context() record.request = self._default_request - record.server_name = "unknown_server_from_no_context" + + # Avoid overwriting an existing `server_name` on the record. This is running in + # the context of a global log record filter so there may be 3rd-party code that + # adds their own `server_name` and we don't want to interfere with that + # (clobber). + if not hasattr(record, "server_name"): + record.server_name = "unknown_server_from_no_logcontext" # context should never be None, but if it somehow ends up being, then # we end up in a death spiral of infinite loops, so let's check, for # robustness' sake. if context is not None: - record.server_name = context.server_name + + def safe_set(attr: str, value: Any) -> None: + """ + Only write the attribute if it hasn't already been set or we actually have + a Synapse logcontext (indicating that this log record is relevant to + Synapse). + """ + if context is not SENTINEL_CONTEXT or not hasattr(record, attr): + setattr(record, attr, value) + + safe_set("server_name", context.server_name) + # Logging is interested in the request ID. Note that for backwards # compatibility this is stored as the "request" on the record. - record.request = str(context) + safe_set("request", str(context)) # Add some data from the HTTP request. request = context.request + # The sentinel logcontext has no request so if we get past this point, we + # know we have some actual Synapse logcontext and don't need to worry about + # using `safe_set`. 
We'll consider this an optimization since this is a + # pretty hot-path. if request is None: return True From 69bab78b440fc2f89c513a8e556bfcca1ee7a8d8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 3 Nov 2025 12:53:59 +0100 Subject: [PATCH 114/149] Python 3.14 support (#19055) Co-authored-by: Eric Eastwood --- .ci/scripts/calculate_jobs.py | 4 +- .github/workflows/tests.yml | 2 +- Cargo.lock | 29 ++- changelog.d/19055.misc | 1 + poetry.lock | 313 +++++++++++++-------------- pyproject.toml | 4 + rust/Cargo.toml | 6 +- rust/src/events/internal_metadata.rs | 4 +- rust/src/http_client.rs | 8 +- rust/src/rendezvous/mod.rs | 4 +- 10 files changed, 184 insertions(+), 191 deletions(-) create mode 100644 changelog.d/19055.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 2971b3c5c8..87fbc7a266 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -53,7 +53,7 @@ if not IS_PR: "database": "sqlite", "extras": "all", } - for version in ("3.11", "3.12", "3.13") + for version in ("3.11", "3.12", "3.13", "3.14") ) trial_postgres_tests = [ @@ -68,7 +68,7 @@ trial_postgres_tests = [ if not IS_PR: trial_postgres_tests.append( { - "python-version": "3.13", + "python-version": "3.14", "database": "postgres", "postgres-version": "17", "extras": "all", diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 494543e4b9..4f38ab0690 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -619,7 +619,7 @@ jobs: - python-version: "3.10" postgres-version: "13" - - python-version: "3.13" + - python-version: "3.14" postgres-version: "17" services: diff --git a/Cargo.lock b/Cargo.lock index 35f62fe4e9..a057c812af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -814,9 +814,9 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8970a78afe0628a3e3430376fc5fd76b6b45c4d43360ffd6cdd40bdde72b682a" +checksum = "7ba0117f4212101ee6544044dae45abe1083d30ce7b29c4b5cbdfa2354e07383" dependencies = [ "anyhow", "indoc", @@ -832,19 +832,18 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458eb0c55e7ece017adeba38f2248ff3ac615e53660d7c71a238d7d2a01c7598" +checksum = "4fc6ddaf24947d12a9aa31ac65431fb1b851b8f4365426e182901eabfb87df5f" dependencies = [ - "once_cell", "target-lexicon", ] [[package]] name = "pyo3-ffi" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7114fe5457c61b276ab77c5055f206295b812608083644a5c5b2640c3102565c" +checksum = "025474d3928738efb38ac36d4744a74a400c901c7596199e20e45d98eb194105" dependencies = [ "libc", "pyo3-build-config", @@ -852,9 +851,9 @@ dependencies = [ [[package]] name = "pyo3-log" -version = "0.12.4" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45192e5e4a4d2505587e27806c7b710c231c40c56f3bfc19535d0bb25df52264" +checksum = "d359e20231345f21a3b5b6aea7e73f4dc97e1712ef3bfe2d88997ac6a308d784" dependencies = [ "arc-swap", "log", @@ -863,9 +862,9 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8725c0a622b374d6cb051d11a0983786448f7785336139c3c94f5aa6bef7e50" +checksum = 
"2e64eb489f22fe1c95911b77c44cc41e7c19f3082fc81cce90f657cdc42ffded" dependencies = [ "proc-macro2", "pyo3-macros-backend", @@ -875,9 +874,9 @@ dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4109984c22491085343c05b0dbc54ddc405c3cf7b4374fc533f5c3313a572ccc" +checksum = "100246c0ecf400b475341b8455a9213344569af29a3c841d29270e53102e0fcf" dependencies = [ "heck", "proc-macro2", @@ -888,9 +887,9 @@ dependencies = [ [[package]] name = "pythonize" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597907139a488b22573158793aa7539df36ae863eba300c75f3a0d65fc475e27" +checksum = "11e06e4cff9be2bbf2bddf28a486ae619172ea57e79787f856572878c62dcfe2" dependencies = [ "pyo3", "serde", diff --git a/changelog.d/19055.misc b/changelog.d/19055.misc new file mode 100644 index 0000000000..61e626cc9b --- /dev/null +++ b/changelog.d/19055.misc @@ -0,0 +1 @@ +Add support for Python 3.14. \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index a1f133e164..ce8b9ef6ee 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1594,8 +1594,6 @@ groups = ["main"] files = [ {file = "pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860"}, {file = "pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad"}, - {file = "pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0"}, - {file = "pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b"}, {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50"}, {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae"}, {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9"}, @@ -1605,8 +1603,6 @@ files = [ {file = "pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f"}, {file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"}, {file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"}, - {file = "pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d"}, - {file = "pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494"}, {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"}, {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"}, {file = 
"pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"}, @@ -1616,8 +1612,6 @@ files = [ {file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"}, {file = "pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4"}, {file = "pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69"}, - {file = "pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d"}, - {file = "pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6"}, {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7"}, {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024"}, {file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809"}, @@ -1630,8 +1624,6 @@ files = [ {file = "pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f"}, {file = "pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c"}, {file = "pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd"}, - {file = "pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e"}, - {file = "pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1"}, {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805"}, {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8"}, {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2"}, @@ -1641,8 +1633,6 @@ files = [ {file = "pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580"}, {file = "pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e"}, {file = "pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d"}, - {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced"}, - {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c"}, {file = 
"pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8"}, {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59"}, {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe"}, @@ -1652,8 +1642,6 @@ files = [ {file = "pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e"}, {file = "pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12"}, {file = "pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a"}, - {file = "pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632"}, - {file = "pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673"}, {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027"}, {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77"}, {file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874"}, @@ -1663,8 +1651,6 @@ files = [ {file = "pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6"}, {file = "pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae"}, {file = "pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653"}, - {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6"}, - {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36"}, {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b"}, {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477"}, {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50"}, @@ -1674,8 +1660,6 @@ files = [ {file = "pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa"}, {file = "pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f"}, {file = "pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081"}, - {file = 
"pillow-11.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4"}, - {file = "pillow-11.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc"}, {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06"}, {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a"}, {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978"}, @@ -1685,15 +1669,11 @@ files = [ {file = "pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe"}, - {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c"}, - {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"}, - {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3"}, - {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"}, @@ -2369,109 +2349,127 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" -version = "0.8.10" +version = "0.28.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false -python-versions = ">=3.8" +python-versions = ">=3.10" groups = 
["main", "dev"] files = [ - {file = "rpds_py-0.8.10-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:93d06cccae15b3836247319eee7b6f1fdcd6c10dabb4e6d350d27bd0bdca2711"}, - {file = "rpds_py-0.8.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3816a890a6a9e9f1de250afa12ca71c9a7a62f2b715a29af6aaee3aea112c181"}, - {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7c6304b894546b5a6bdc0fe15761fa53fe87d28527a7142dae8de3c663853e1"}, - {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad3bfb44c8840fb4be719dc58e229f435e227fbfbe133dc33f34981ff622a8f8"}, - {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14f1c356712f66653b777ecd8819804781b23dbbac4eade4366b94944c9e78ad"}, - {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82bb361cae4d0a627006dadd69dc2f36b7ad5dc1367af9d02e296ec565248b5b"}, - {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2e3c4f2a8e3da47f850d7ea0d7d56720f0f091d66add889056098c4b2fd576c"}, - {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15a90d0ac11b4499171067ae40a220d1ca3cb685ec0acc356d8f3800e07e4cb8"}, - {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:70bb9c8004b97b4ef7ae56a2aa56dfaa74734a0987c78e7e85f00004ab9bf2d0"}, - {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d64f9f88d5203274a002b54442cafc9c7a1abff2a238f3e767b70aadf919b451"}, - {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ccbbd276642788c4376fbe8d4e6c50f0fb4972ce09ecb051509062915891cbf0"}, - {file = "rpds_py-0.8.10-cp310-none-win32.whl", hash = "sha256:fafc0049add8043ad07ab5382ee80d80ed7e3699847f26c9a5cf4d3714d96a84"}, - {file = "rpds_py-0.8.10-cp310-none-win_amd64.whl", hash = "sha256:915031002c86a5add7c6fd4beb601b2415e8a1c956590a5f91d825858e92fe6e"}, - {file = "rpds_py-0.8.10-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:84eb541a44f7a18f07a6bfc48b95240739e93defe1fdfb4f2a295f37837945d7"}, - {file = "rpds_py-0.8.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f59996d0550894affaad8743e97b9b9c98f638b221fac12909210ec3d9294786"}, - {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9adb5664b78fcfcd830000416c8cc69853ef43cb084d645b3f1f0296edd9bae"}, - {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f96f3f98fbff7af29e9edf9a6584f3c1382e7788783d07ba3721790625caa43e"}, - {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:376b8de737401050bd12810003d207e824380be58810c031f10ec563ff6aef3d"}, - {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d1c2bc319428d50b3e0fa6b673ab8cc7fa2755a92898db3a594cbc4eeb6d1f7"}, - {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73a1e48430f418f0ac3dfd87860e4cc0d33ad6c0f589099a298cb53724db1169"}, - {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134ec8f14ca7dbc6d9ae34dac632cdd60939fe3734b5d287a69683c037c51acb"}, - {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4b519bac7c09444dd85280fd60f28c6dde4389c88dddf4279ba9b630aca3bbbe"}, - {file = 
"rpds_py-0.8.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9cd57981d9fab04fc74438d82460f057a2419974d69a96b06a440822d693b3c0"}, - {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69d089c026f6a8b9d64a06ff67dc3be196707b699d7f6ca930c25f00cf5e30d8"}, - {file = "rpds_py-0.8.10-cp311-none-win32.whl", hash = "sha256:220bdcad2d2936f674650d304e20ac480a3ce88a40fe56cd084b5780f1d104d9"}, - {file = "rpds_py-0.8.10-cp311-none-win_amd64.whl", hash = "sha256:6c6a0225b8501d881b32ebf3f5807a08ad3685b5eb5f0a6bfffd3a6e039b2055"}, - {file = "rpds_py-0.8.10-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e3d0cd3dff0e7638a7b5390f3a53057c4e347f4ef122ee84ed93fc2fb7ea4aa2"}, - {file = "rpds_py-0.8.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d77dff3a5aa5eedcc3da0ebd10ff8e4969bc9541aa3333a8d41715b429e99f47"}, - {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41c89a366eae49ad9e65ed443a8f94aee762931a1e3723749d72aeac80f5ef2f"}, - {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3793c21494bad1373da517001d0849eea322e9a049a0e4789e50d8d1329df8e7"}, - {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:805a5f3f05d186c5d50de2e26f765ba7896d0cc1ac5b14ffc36fae36df5d2f10"}, - {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b01b39ad5411563031ea3977bbbc7324d82b088e802339e6296f082f78f6115c"}, - {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f1e860be21f3e83011116a65e7310486300e08d9a3028e73e8d13bb6c77292"}, - {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a13c8e56c46474cd5958d525ce6a9996727a83d9335684e41f5192c83deb6c58"}, - {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:93d99f957a300d7a4ced41615c45aeb0343bb8f067c42b770b505de67a132346"}, - {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:148b0b38d719c0760e31ce9285a9872972bdd7774969a4154f40c980e5beaca7"}, - {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3cc5e5b5514796f45f03a568981971b12a3570f3de2e76114f7dc18d4b60a3c4"}, - {file = "rpds_py-0.8.10-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:e8e24b210a4deb5a7744971f8f77393005bae7f873568e37dfd9effe808be7f7"}, - {file = "rpds_py-0.8.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b41941583adce4242af003d2a8337b066ba6148ca435f295f31ac6d9e4ea2722"}, - {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c490204e16bca4f835dba8467869fe7295cdeaa096e4c5a7af97f3454a97991"}, - {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ee45cd1d84beed6cbebc839fd85c2e70a3a1325c8cfd16b62c96e2ffb565eca"}, - {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a8ca409f1252e1220bf09c57290b76cae2f14723746215a1e0506472ebd7bdf"}, - {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96b293c0498c70162effb13100624c5863797d99df75f2f647438bd10cbf73e4"}, - {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4627520a02fccbd324b33c7a83e5d7906ec746e1083a9ac93c41ac7d15548c7"}, - {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:e39d7ab0c18ac99955b36cd19f43926450baba21e3250f053e0704d6ffd76873"}, - {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ba9f1d1ebe4b63801977cec7401f2d41e888128ae40b5441270d43140efcad52"}, - {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:802f42200d8caf7f25bbb2a6464cbd83e69d600151b7e3b49f49a47fa56b0a38"}, - {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d19db6ba816e7f59fc806c690918da80a7d186f00247048cd833acdab9b4847b"}, - {file = "rpds_py-0.8.10-cp38-none-win32.whl", hash = "sha256:7947e6e2c2ad68b1c12ee797d15e5f8d0db36331200b0346871492784083b0c6"}, - {file = "rpds_py-0.8.10-cp38-none-win_amd64.whl", hash = "sha256:fa326b3505d5784436d9433b7980171ab2375535d93dd63fbcd20af2b5ca1bb6"}, - {file = "rpds_py-0.8.10-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7b38a9ac96eeb6613e7f312cd0014de64c3f07000e8bf0004ad6ec153bac46f8"}, - {file = "rpds_py-0.8.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c4d42e83ddbf3445e6514f0aff96dca511421ed0392d9977d3990d9f1ba6753c"}, - {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b21575031478609db6dbd1f0465e739fe0e7f424a8e7e87610a6c7f68b4eb16"}, - {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:574868858a7ff6011192c023a5289158ed20e3f3b94b54f97210a773f2f22921"}, - {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae40f4a70a1f40939d66ecbaf8e7edc144fded190c4a45898a8cfe19d8fc85ea"}, - {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37f7ee4dc86db7af3bac6d2a2cedbecb8e57ce4ed081f6464510e537589f8b1e"}, - {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:695f642a3a5dbd4ad2ffbbacf784716ecd87f1b7a460843b9ddf965ccaeafff4"}, - {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f43ab4cb04bde6109eb2555528a64dfd8a265cc6a9920a67dcbde13ef53a46c8"}, - {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a11ab0d97be374efd04f640c04fe5c2d3dabc6dfb998954ea946ee3aec97056d"}, - {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:92cf5b3ee60eef41f41e1a2cabca466846fb22f37fc580ffbcb934d1bcab225a"}, - {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ceaac0c603bf5ac2f505a78b2dcab78d3e6b706be6596c8364b64cc613d208d2"}, - {file = "rpds_py-0.8.10-cp39-none-win32.whl", hash = "sha256:dd4f16e57c12c0ae17606c53d1b57d8d1c8792efe3f065a37cb3341340599d49"}, - {file = "rpds_py-0.8.10-cp39-none-win_amd64.whl", hash = "sha256:c03a435d26c3999c2a8642cecad5d1c4d10c961817536af52035f6f4ee2f5dd0"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0da53292edafecba5e1d8c1218f99babf2ed0bf1c791d83c0ab5c29b57223068"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d20a8ed227683401cc508e7be58cba90cc97f784ea8b039c8cd01111e6043e0"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97cab733d303252f7c2f7052bf021a3469d764fc2b65e6dbef5af3cbf89d4892"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8c398fda6df361a30935ab4c4bccb7f7a3daef2964ca237f607c90e9f3fdf66f"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2eb4b08c45f8f8d8254cdbfacd3fc5d6b415d64487fb30d7380b0d0569837bf1"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7dfb1cbb895810fa2b892b68153c17716c6abaa22c7dc2b2f6dcf3364932a1c"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89c92b74e8bf6f53a6f4995fd52f4bd510c12f103ee62c99e22bc9e05d45583c"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9c0683cb35a9b5881b41bc01d5568ffc667910d9dbc632a1fba4e7d59e98773"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:0eeb2731708207d0fe2619afe6c4dc8cb9798f7de052da891de5f19c0006c315"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:7495010b658ec5b52835f21d8c8b1a7e52e194c50f095d4223c0b96c3da704b1"}, - {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c72ebc22e70e04126158c46ba56b85372bc4d54d00d296be060b0db1671638a4"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2cd3045e7f6375dda64ed7db1c5136826facb0159ea982f77d9cf6125025bd34"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2418cf17d653d24ffb8b75e81f9f60b7ba1b009a23298a433a4720b2a0a17017"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a2edf8173ac0c7a19da21bc68818be1321998528b5e3f748d6ee90c0ba2a1fd"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f29b8c55fd3a2bc48e485e37c4e2df3317f43b5cc6c4b6631c33726f52ffbb3"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a7d20c1cf8d7b3960c5072c265ec47b3f72a0c608a9a6ee0103189b4f28d531"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:521fc8861a86ae54359edf53a15a05fabc10593cea7b3357574132f8427a5e5a"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5c191713e98e7c28800233f039a32a42c1a4f9a001a8a0f2448b07391881036"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:083df0fafe199371206111583c686c985dddaf95ab3ee8e7b24f1fda54515d09"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ed41f3f49507936a6fe7003985ea2574daccfef999775525d79eb67344e23767"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:2614c2732bf45de5c7f9e9e54e18bc78693fa2f635ae58d2895b7965e470378c"}, - {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c60528671d9d467009a6ec284582179f6b88651e83367d0ab54cb739021cd7de"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ee744fca8d1ea822480a2a4e7c5f2e1950745477143668f0b523769426060f29"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a38b9f526d0d6cbdaa37808c400e3d9f9473ac4ff64d33d9163fd05d243dbd9b"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60e0e86e870350e03b3e25f9b1dd2c6cc72d2b5f24e070249418320a6f9097b7"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f53f55a8852f0e49b0fc76f2412045d6ad9d5772251dea8f55ea45021616e7d5"}, - {file = 
"rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c493365d3fad241d52f096e4995475a60a80f4eba4d3ff89b713bc65c2ca9615"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:300eb606e6b94a7a26f11c8cc8ee59e295c6649bd927f91e1dbd37a4c89430b6"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a665f6f1a87614d1c3039baf44109094926dedf785e346d8b0a728e9cabd27a"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:927d784648211447201d4c6f1babddb7971abad922b32257ab74de2f2750fad0"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:c200b30dd573afa83847bed7e3041aa36a8145221bf0cfdfaa62d974d720805c"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:08166467258fd0240a1256fce272f689f2360227ee41c72aeea103e9e4f63d2b"}, - {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:996cc95830de9bc22b183661d95559ec6b3cd900ad7bc9154c4cbf5be0c9b734"}, - {file = "rpds_py-0.8.10.tar.gz", hash = "sha256:13e643ce8ad502a0263397362fb887594b49cf84bf518d6038c16f235f2bcea4"}, + {file = "rpds_py-0.28.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7b6013db815417eeb56b2d9d7324e64fcd4fa289caeee6e7a78b2e11fc9b438a"}, + {file = "rpds_py-0.28.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a4c6b05c685c0c03f80dabaeb73e74218c49deea965ca63f76a752807397207"}, + {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4794c6c3fbe8f9ac87699b131a1f26e7b4abcf6d828da46a3a52648c7930eba"}, + {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2e8456b6ee5527112ff2354dd9087b030e3429e43a74f480d4a5ca79d269fd85"}, + {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:beb880a9ca0a117415f241f66d56025c02037f7c4efc6fe59b5b8454f1eaa50d"}, + {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6897bebb118c44b38c9cb62a178e09f1593c949391b9a1a6fe777ccab5934ee7"}, + {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b553dd06e875249fd43efd727785efb57a53180e0fde321468222eabbeaafa"}, + {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:f0b2044fdddeea5b05df832e50d2a06fe61023acb44d76978e1b060206a8a476"}, + {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05cf1e74900e8da73fa08cc76c74a03345e5a3e37691d07cfe2092d7d8e27b04"}, + {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:efd489fec7c311dae25e94fe7eeda4b3d06be71c68f2cf2e8ef990ffcd2cd7e8"}, + {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ada7754a10faacd4f26067e62de52d6af93b6d9542f0df73c57b9771eb3ba9c4"}, + {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c2a34fd26588949e1e7977cfcbb17a9a42c948c100cab890c6d8d823f0586457"}, + {file = "rpds_py-0.28.0-cp310-cp310-win32.whl", hash = "sha256:f9174471d6920cbc5e82a7822de8dfd4dcea86eb828b04fc8c6519a77b0ee51e"}, + {file = "rpds_py-0.28.0-cp310-cp310-win_amd64.whl", hash = "sha256:6e32dd207e2c4f8475257a3540ab8a93eff997abfa0a3fdb287cae0d6cd874b8"}, + {file = "rpds_py-0.28.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:03065002fd2e287725d95fbc69688e0c6daf6c6314ba38bdbaa3895418e09296"}, + {file = "rpds_py-0.28.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28ea02215f262b6d078daec0b45344c89e161eab9526b0d898221d96fdda5f27"}, + {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25dbade8fbf30bcc551cb352376c0ad64b067e4fc56f90e22ba70c3ce205988c"}, + {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c03002f54cc855860bfdc3442928ffdca9081e73b5b382ed0b9e8efe6e5e205"}, + {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9699fa7990368b22032baf2b2dce1f634388e4ffc03dfefaaac79f4695edc95"}, + {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9b06fe1a75e05e0713f06ea0c89ecb6452210fd60e2f1b6ddc1067b990e08d9"}, + {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9f83e7b326a3f9ec3ef84cda98fb0a74c7159f33e692032233046e7fd15da2"}, + {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:0d3259ea9ad8743a75a43eb7819324cdab393263c91be86e2d1901ee65c314e0"}, + {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a7548b345f66f6695943b4ef6afe33ccd3f1b638bd9afd0f730dd255c249c9e"}, + {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9a40040aa388b037eb39416710fbcce9443498d2eaab0b9b45ae988b53f5c67"}, + {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8f60c7ea34e78c199acd0d3cda37a99be2c861dd2b8cf67399784f70c9f8e57d"}, + {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1571ae4292649100d743b26d5f9c63503bb1fedf538a8f29a98dce2d5ba6b4e6"}, + {file = "rpds_py-0.28.0-cp311-cp311-win32.whl", hash = "sha256:5cfa9af45e7c1140af7321fa0bef25b386ee9faa8928c80dc3a5360971a29e8c"}, + {file = "rpds_py-0.28.0-cp311-cp311-win_amd64.whl", hash = "sha256:dd8d86b5d29d1b74100982424ba53e56033dc47720a6de9ba0259cf81d7cecaa"}, + {file = "rpds_py-0.28.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e27d3a5709cc2b3e013bf93679a849213c79ae0573f9b894b284b55e729e120"}, + {file = "rpds_py-0.28.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6b4f28583a4f247ff60cd7bdda83db8c3f5b05a7a82ff20dd4b078571747708f"}, + {file = "rpds_py-0.28.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d678e91b610c29c4b3d52a2c148b641df2b4676ffe47c59f6388d58b99cdc424"}, + {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e819e0e37a44a78e1383bf1970076e2ccc4dc8c2bbaa2f9bd1dc987e9afff628"}, + {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5ee514e0f0523db5d3fb171f397c54875dbbd69760a414dccf9d4d7ad628b5bd"}, + {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f3fa06d27fdcee47f07a39e02862da0100cb4982508f5ead53ec533cd5fe55e"}, + {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46959ef2e64f9e4a41fc89aa20dbca2b85531f9a72c21099a3360f35d10b0d5a"}, + {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8455933b4bcd6e83fde3fefc987a023389c4b13f9a58c8d23e4b3f6d13f78c84"}, + {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:ad50614a02c8c2962feebe6012b52f9802deec4263946cddea37aaf28dd25a66"}, + {file = 
"rpds_py-0.28.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5deca01b271492553fdb6c7fd974659dce736a15bae5dad7ab8b93555bceb28"}, + {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:735f8495a13159ce6a0d533f01e8674cec0c57038c920495f87dcb20b3ddb48a"}, + {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:961ca621ff10d198bbe6ba4957decca61aa2a0c56695384c1d6b79bf61436df5"}, + {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2374e16cc9131022e7d9a8f8d65d261d9ba55048c78f3b6e017971a4f5e6353c"}, + {file = "rpds_py-0.28.0-cp312-cp312-win32.whl", hash = "sha256:d15431e334fba488b081d47f30f091e5d03c18527c325386091f31718952fe08"}, + {file = "rpds_py-0.28.0-cp312-cp312-win_amd64.whl", hash = "sha256:a410542d61fc54710f750d3764380b53bf09e8c4edbf2f9141a82aa774a04f7c"}, + {file = "rpds_py-0.28.0-cp312-cp312-win_arm64.whl", hash = "sha256:1f0cfd1c69e2d14f8c892b893997fa9a60d890a0c8a603e88dca4955f26d1edd"}, + {file = "rpds_py-0.28.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e9e184408a0297086f880556b6168fa927d677716f83d3472ea333b42171ee3b"}, + {file = "rpds_py-0.28.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:edd267266a9b0448f33dc465a97cfc5d467594b600fe28e7fa2f36450e03053a"}, + {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85beb8b3f45e4e32f6802fb6cd6b17f615ef6c6a52f265371fb916fae02814aa"}, + {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d2412be8d00a1b895f8ad827cc2116455196e20ed994bb704bf138fe91a42724"}, + {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cf128350d384b777da0e68796afdcebc2e9f63f0e9f242217754e647f6d32491"}, + {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2036d09b363aa36695d1cc1a97b36865597f4478470b0697b5ee9403f4fe399"}, + {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8e1e9be4fa6305a16be628959188e4fd5cd6f1b0e724d63c6d8b2a8adf74ea6"}, + {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:0a403460c9dd91a7f23fc3188de6d8977f1d9603a351d5db6cf20aaea95b538d"}, + {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d7366b6553cdc805abcc512b849a519167db8f5e5c3472010cd1228b224265cb"}, + {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b43c6a3726efd50f18d8120ec0551241c38785b68952d240c45ea553912ac41"}, + {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0cb7203c7bc69d7c1585ebb33a2e6074492d2fc21ad28a7b9d40457ac2a51ab7"}, + {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7a52a5169c664dfb495882adc75c304ae1d50df552fbd68e100fdc719dee4ff9"}, + {file = "rpds_py-0.28.0-cp313-cp313-win32.whl", hash = "sha256:2e42456917b6687215b3e606ab46aa6bca040c77af7df9a08a6dcfe8a4d10ca5"}, + {file = "rpds_py-0.28.0-cp313-cp313-win_amd64.whl", hash = "sha256:e0a0311caedc8069d68fc2bf4c9019b58a2d5ce3cd7cb656c845f1615b577e1e"}, + {file = "rpds_py-0.28.0-cp313-cp313-win_arm64.whl", hash = "sha256:04c1b207ab8b581108801528d59ad80aa83bb170b35b0ddffb29c20e411acdc1"}, + {file = "rpds_py-0.28.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f296ea3054e11fc58ad42e850e8b75c62d9a93a9f981ad04b2e5ae7d2186ff9c"}, + {file = "rpds_py-0.28.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:5a7306c19b19005ad98468fcefeb7100b19c79fc23a5f24a12e06d91181193fa"}, + {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5d9b86aa501fed9862a443c5c3116f6ead8bc9296185f369277c42542bd646b"}, + {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e5bbc701eff140ba0e872691d573b3d5d30059ea26e5785acba9132d10c8c31d"}, + {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a5690671cd672a45aa8616d7374fdf334a1b9c04a0cac3c854b1136e92374fe"}, + {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9f1d92ecea4fa12f978a367c32a5375a1982834649cdb96539dcdc12e609ab1a"}, + {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d252db6b1a78d0a3928b6190156042d54c93660ce4d98290d7b16b5296fb7cc"}, + {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d61b355c3275acb825f8777d6c4505f42b5007e357af500939d4a35b19177259"}, + {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:acbe5e8b1026c0c580d0321c8aae4b0a1e1676861d48d6e8c6586625055b606a"}, + {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8aa23b6f0fc59b85b4c7d89ba2965af274346f738e8d9fc2455763602e62fd5f"}, + {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7b14b0c680286958817c22d76fcbca4800ddacef6f678f3a7c79a1fe7067fe37"}, + {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bcf1d210dfee61a6c86551d67ee1031899c0fdbae88b2d44a569995d43797712"}, + {file = "rpds_py-0.28.0-cp313-cp313t-win32.whl", hash = "sha256:3aa4dc0fdab4a7029ac63959a3ccf4ed605fee048ba67ce89ca3168da34a1342"}, + {file = "rpds_py-0.28.0-cp313-cp313t-win_amd64.whl", hash = "sha256:7b7d9d83c942855e4fdcfa75d4f96f6b9e272d42fffcb72cd4bb2577db2e2907"}, + {file = "rpds_py-0.28.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:dcdcb890b3ada98a03f9f2bb108489cdc7580176cb73b4f2d789e9a1dac1d472"}, + {file = "rpds_py-0.28.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f274f56a926ba2dc02976ca5b11c32855cbd5925534e57cfe1fda64e04d1add2"}, + {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fe0438ac4a29a520ea94c8c7f1754cdd8feb1bc490dfda1bfd990072363d527"}, + {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a358a32dd3ae50e933347889b6af9a1bdf207ba5d1a3f34e1a38cd3540e6733"}, + {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e80848a71c78aa328fefaba9c244d588a342c8e03bda518447b624ea64d1ff56"}, + {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f586db2e209d54fe177e58e0bc4946bea5fb0102f150b1b2f13de03e1f0976f8"}, + {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ae8ee156d6b586e4292491e885d41483136ab994e719a13458055bec14cf370"}, + {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:a805e9b3973f7e27f7cab63a6b4f61d90f2e5557cff73b6e97cd5b8540276d3d"}, + {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5d3fd16b6dc89c73a4da0b4ac8b12a7ecc75b2864b95c9e5afed8003cb50a728"}, + {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = 
"sha256:6796079e5d24fdaba6d49bda28e2c47347e89834678f2bc2c1b4fc1489c0fb01"}, + {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:76500820c2af232435cbe215e3324c75b950a027134e044423f59f5b9a1ba515"}, + {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bbdc5640900a7dbf9dd707fe6388972f5bbd883633eb68b76591044cfe346f7e"}, + {file = "rpds_py-0.28.0-cp314-cp314-win32.whl", hash = "sha256:adc8aa88486857d2b35d75f0640b949759f79dc105f50aa2c27816b2e0dd749f"}, + {file = "rpds_py-0.28.0-cp314-cp314-win_amd64.whl", hash = "sha256:66e6fa8e075b58946e76a78e69e1a124a21d9a48a5b4766d15ba5b06869d1fa1"}, + {file = "rpds_py-0.28.0-cp314-cp314-win_arm64.whl", hash = "sha256:a6fe887c2c5c59413353b7c0caff25d0e566623501ccfff88957fa438a69377d"}, + {file = "rpds_py-0.28.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7a69df082db13c7070f7b8b1f155fa9e687f1d6aefb7b0e3f7231653b79a067b"}, + {file = "rpds_py-0.28.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b1cde22f2c30ebb049a9e74c5374994157b9b70a16147d332f89c99c5960737a"}, + {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5338742f6ba7a51012ea470bd4dc600a8c713c0c72adaa0977a1b1f4327d6592"}, + {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1460ebde1bcf6d496d80b191d854adedcc619f84ff17dc1c6d550f58c9efbba"}, + {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e3eb248f2feba84c692579257a043a7699e28a77d86c77b032c1d9fbb3f0219c"}, + {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3bbba5def70b16cd1c1d7255666aad3b290fbf8d0fe7f9f91abafb73611a91"}, + {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3114f4db69ac5a1f32e7e4d1cbbe7c8f9cf8217f78e6e002cedf2d54c2a548ed"}, + {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:4b0cb8a906b1a0196b863d460c0222fb8ad0f34041568da5620f9799b83ccf0b"}, + {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf681ac76a60b667106141e11a92a3330890257e6f559ca995fbb5265160b56e"}, + {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1e8ee6413cfc677ce8898d9cde18cc3a60fc2ba756b0dec5b71eb6eb21c49fa1"}, + {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b3072b16904d0b5572a15eb9d31c1954e0d3227a585fc1351aa9878729099d6c"}, + {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b670c30fd87a6aec281c3c9896d3bae4b205fd75d79d06dc87c2503717e46092"}, + {file = "rpds_py-0.28.0-cp314-cp314t-win32.whl", hash = "sha256:8014045a15b4d2b3476f0a287fcc93d4f823472d7d1308d47884ecac9e612be3"}, + {file = "rpds_py-0.28.0-cp314-cp314t-win_amd64.whl", hash = "sha256:7a4e59c90d9c27c561eb3160323634a9ff50b04e4f7820600a2beb0ac90db578"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f5e7101145427087e493b9c9b959da68d357c28c562792300dd21a095118ed16"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:31eb671150b9c62409a888850aaa8e6533635704fe2b78335f9aaf7ff81eec4d"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48b55c1f64482f7d8bd39942f376bfdf2f6aec637ee8c805b5041e14eeb771db"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:24743a7b372e9a76171f6b69c01aedf927e8ac3e16c474d9fe20d552a8cb45c7"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:389c29045ee8bbb1627ea190b4976a310a295559eaf9f1464a1a6f2bf84dde78"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23690b5827e643150cf7b49569679ec13fe9a610a15949ed48b85eb7f98f34ec"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f0c9266c26580e7243ad0d72fc3e01d6b33866cfab5084a6da7576bcf1c4f72"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:4c6c4db5d73d179746951486df97fd25e92396be07fc29ee8ff9a8f5afbdfb27"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3b695a8fa799dd2cfdb4804b37096c5f6dba1ac7f48a7fbf6d0485bcd060316"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:6aa1bfce3f83baf00d9c5fcdbba93a3ab79958b4c7d7d1f55e7fe68c20e63912"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:7b0f9dceb221792b3ee6acb5438eb1f02b0cb2c247796a72b016dcc92c6de829"}, + {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5d0145edba8abd3db0ab22b5300c99dc152f5c9021fab861be0f0544dc3cbc5f"}, + {file = "rpds_py-0.28.0.tar.gz", hash = "sha256:abd4df20485a0983e2ca334a216249b6186d6e3c1627e106651943dbdb791aea"}, ] [[package]] @@ -3242,54 +3240,45 @@ test = ["zope.testrunner"] [[package]] name = "zope-interface" -version = "7.1.0" +version = "8.0.1" description = "Interfaces for Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main", "dev"] files = [ - {file = "zope.interface-7.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2bd9e9f366a5df08ebbdc159f8224904c1c5ce63893984abb76954e6fbe4381a"}, - {file = "zope.interface-7.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:661d5df403cd3c5b8699ac480fa7f58047a3253b029db690efa0c3cf209993ef"}, - {file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91b6c30689cfd87c8f264acb2fc16ad6b3c72caba2aec1bf189314cf1a84ca33"}, - {file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b6a4924f5bad9fe21d99f66a07da60d75696a136162427951ec3cb223a5570d"}, - {file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a3c00b35f6170be5454b45abe2719ea65919a2f09e8a6e7b1362312a872cd3"}, - {file = "zope.interface-7.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b936d61dbe29572fd2cfe13e30b925e5383bed1aba867692670f5a2a2eb7b4e9"}, - {file = "zope.interface-7.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ac20581fc6cd7c754f6dff0ae06fedb060fa0e9ea6309d8be8b2701d9ea51c4"}, - {file = "zope.interface-7.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:848b6fa92d7c8143646e64124ed46818a0049a24ecc517958c520081fd147685"}, - {file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1ef1fdb6f014d5886b97e52b16d0f852364f447d2ab0f0c6027765777b6667"}, - {file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bcff5c09d0215f42ba64b49205a278e44413d9bf9fa688fd9e42bfe472b5f4f"}, - {file = 
"zope.interface-7.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07add15de0cc7e69917f7d286b64d54125c950aeb43efed7a5ea7172f000fbc1"}, - {file = "zope.interface-7.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:9940d5bc441f887c5f375ec62bcf7e7e495a2d5b1da97de1184a88fb567f06af"}, - {file = "zope.interface-7.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f245d039f72e6f802902375755846f5de1ee1e14c3e8736c078565599bcab621"}, - {file = "zope.interface-7.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6159e767d224d8f18deff634a1d3722e68d27488c357f62ebeb5f3e2f5288b1f"}, - {file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e956b1fd7f3448dd5e00f273072e73e50dfafcb35e4227e6d5af208075593c9"}, - {file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff115ef91c0eeac69cd92daeba36a9d8e14daee445b504eeea2b1c0b55821984"}, - {file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec001798ab62c3fc5447162bf48496ae9fba02edc295a9e10a0b0c639a6452e"}, - {file = "zope.interface-7.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:124149e2d42067b9c6597f4dafdc7a0983d0163868f897b7bb5dc850b14f9a87"}, - {file = "zope.interface-7.1.0-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:9733a9a0f94ef53d7aa64661811b20875b5bc6039034c6e42fb9732170130573"}, - {file = "zope.interface-7.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5fcf379b875c610b5a41bc8a891841533f98de0520287d7f85e25386cd10d3e9"}, - {file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0a45b5af9f72c805ee668d1479480ca85169312211bed6ed18c343e39307d5f"}, - {file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af4a12b459a273b0b34679a5c3dc5e34c1847c3dd14a628aa0668e19e638ea2"}, - {file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a735f82d2e3ed47ca01a20dfc4c779b966b16352650a8036ab3955aad151ed8a"}, - {file = "zope.interface-7.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:5501e772aff595e3c54266bc1bfc5858e8f38974ce413a8f1044aae0f32a83a3"}, - {file = "zope.interface-7.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec59fe53db7d32abb96c6d4efeed84aab4a7c38c62d7a901a9b20c09dd936e7a"}, - {file = "zope.interface-7.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e53c291debef523b09e1fe3dffe5f35dde164f1c603d77f770b88a1da34b7ed6"}, - {file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:711eebc77f2092c6a8b304bad0b81a6ce3cf5490b25574e7309fbc07d881e3af"}, - {file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a00ead2e24c76436e1b457a5132d87f83858330f6c923640b7ef82d668525d1"}, - {file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e28ea0bc4b084fc93a483877653a033062435317082cdc6388dec3438309faf"}, - {file = "zope.interface-7.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:27cfb5205d68b12682b6e55ab8424662d96e8ead19550aad0796b08dd2c9a45e"}, - {file = "zope.interface-7.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9e3e48f3dea21c147e1b10c132016cb79af1159facca9736d231694ef5a740a8"}, - {file = "zope.interface-7.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a99240b1d02dc469f6afbe7da1bf617645e60290c272968f4e53feec18d7dce8"}, - {file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc8a318162123eddbdf22fcc7b751288ce52e4ad096d3766ff1799244352449d"}, - {file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7b25db127db3e6b597c5f74af60309c4ad65acd826f89609662f0dc33a54728"}, - {file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a29ac607e970b5576547f0e3589ec156e04de17af42839eedcf478450687317"}, - {file = "zope.interface-7.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:a14c9decf0eb61e0892631271d500c1e306c7b6901c998c7035e194d9150fdd1"}, - {file = "zope_interface-7.1.0.tar.gz", hash = "sha256:3f005869a1a05e368965adb2075f97f8ee9a26c61898a9e52a9764d93774f237"}, + {file = "zope_interface-8.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fd7195081b8637eeed8d73e4d183b07199a1dc738fb28b3de6666b1b55662570"}, + {file = "zope_interface-8.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f7c4bc4021108847bce763673ce70d0716b08dfc2ba9889e7bad46ac2b3bb924"}, + {file = "zope_interface-8.0.1-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:758803806b962f32c87b31bb18c298b022965ba34fe532163831cc39118c24ab"}, + {file = "zope_interface-8.0.1-cp310-cp310-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f8e88f35f86bbe8243cad4b2972deef0fdfca0a0723455abbebdc83bbab96b69"}, + {file = "zope_interface-8.0.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7844765695937d9b0d83211220b72e2cf6ac81a08608ad2b58f2c094af498d83"}, + {file = "zope_interface-8.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:64fa7b206dd9669f29d5c1241a768bebe8ab1e8a4b63ee16491f041e058c09d0"}, + {file = "zope_interface-8.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4bd01022d2e1bce4a4a4ed9549edb25393c92e607d7daa6deff843f1f68b479d"}, + {file = "zope_interface-8.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:29be8db8b712d94f1c05e24ea230a879271d787205ba1c9a6100d1d81f06c69a"}, + {file = "zope_interface-8.0.1-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:51ae1b856565b30455b7879fdf0a56a88763b401d3f814fa9f9542d7410dbd7e"}, + {file = "zope_interface-8.0.1-cp311-cp311-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d2e7596149cb1acd1d4d41b9f8fe2ffc0e9e29e2e91d026311814181d0d9efaf"}, + {file = "zope_interface-8.0.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b2737c11c34fb9128816759864752d007ec4f987b571c934c30723ed881a7a4f"}, + {file = "zope_interface-8.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:cf66e4bf731aa7e0ced855bb3670e8cda772f6515a475c6a107bad5cb6604103"}, + {file = "zope_interface-8.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:115f27c1cc95ce7a517d960ef381beedb0a7ce9489645e80b9ab3cbf8a78799c"}, + {file = "zope_interface-8.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:af655c573b84e3cb6a4f6fd3fbe04e4dc91c63c6b6f99019b3713ef964e589bc"}, + {file = 
"zope_interface-8.0.1-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:23f82ef9b2d5370750cc1bf883c3b94c33d098ce08557922a3fbc7ff3b63dfe1"}, + {file = "zope_interface-8.0.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:35a1565d5244997f2e629c5c68715b3d9d9036e8df23c4068b08d9316dcb2822"}, + {file = "zope_interface-8.0.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:029ea1db7e855a475bf88d9910baab4e94d007a054810e9007ac037a91c67c6f"}, + {file = "zope_interface-8.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0beb3e7f7dc153944076fcaf717a935f68d39efa9fce96ec97bafcc0c2ea6cab"}, + {file = "zope_interface-8.0.1-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:c7cc027fc5c61c5d69e5080c30b66382f454f43dc379c463a38e78a9c6bab71a"}, + {file = "zope_interface-8.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:fcf9097ff3003b7662299f1c25145e15260ec2a27f9a9e69461a585d79ca8552"}, + {file = "zope_interface-8.0.1-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:6d965347dd1fb9e9a53aa852d4ded46b41ca670d517fd54e733a6b6a4d0561c2"}, + {file = "zope_interface-8.0.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9a3b8bb77a4b89427a87d1e9eb969ab05e38e6b4a338a9de10f6df23c33ec3c2"}, + {file = "zope_interface-8.0.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:87e6b089002c43231fb9afec89268391bcc7a3b66e76e269ffde19a8112fb8d5"}, + {file = "zope_interface-8.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:64a43f5280aa770cbafd0307cb3d1ff430e2a1001774e8ceb40787abe4bb6658"}, + {file = "zope_interface-8.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b84464a9fcf801289fa8b15bfc0829e7855d47fb4a8059555effc6f2d1d9a613"}, + {file = "zope_interface-8.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7b915cf7e747b5356d741be79a153aa9107e8923bc93bcd65fc873caf0fb5c50"}, + {file = "zope_interface-8.0.1-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:110c73ddf974b369ef3c6e7b0d87d44673cf4914eba3fe8a33bfb21c6c606ad8"}, + {file = "zope_interface-8.0.1-cp39-cp39-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9e9bdca901c1bcc34e438001718512c65b3b8924aabcd732b6e7a7f0cd715f17"}, + {file = "zope_interface-8.0.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bbd22d4801ad3e8ec704ba9e3e6a4ac2e875e4d77e363051ccb76153d24c5519"}, + {file = "zope_interface-8.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:a0016ca85f93b938824e2f9a43534446e95134a2945b084944786e1ace2020bc"}, + {file = "zope_interface-8.0.1.tar.gz", hash = "sha256:eba5610d042c3704a48222f7f7c6ab5b243ed26f917e2bc69379456b115e02d1"}, ] -[package.dependencies] -setuptools = "*" - [package.extras] docs = ["Sphinx", "furo", "repoze.sphinx.autointerface"] test = ["coverage[toml]", "zope.event", "zope.testing"] @@ -3334,4 +3323,4 @@ url-preview = ["lxml"] [metadata] lock-version = "2.1" python-versions = "^3.10.0" -content-hash = "363f8059c998566788b0465c338a3a8aaa56d1e61cc347f2473b687ff34f2a8d" +content-hash = "262051340e8b5daac02d0bb61a145a609984d76732423131bdbbeb052329f168" diff --git a/pyproject.toml b/pyproject.toml index 5fb0c88b4f..01fbfd8efb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -171,6 +171,10 @@ python = "^3.10.0" # 
---------------------- # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0 jsonschema = ">=3.0.0" +# 0.25.0 is the first version to support Python 3.14. +# We can remove this once https://github.com/python-jsonschema/jsonschema/issues/1426 is fixed +# and included in a release. +rpds-py = ">=0.25.0" # We choose 2.0 as a lower bound: the most recent backwards incompatible release. # It seems generally available, judging by https://pkgs.org/search/?q=immutabledict immutabledict = ">=2.0" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 4f0319a7f5..e8321d159b 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -30,14 +30,14 @@ http = "1.1.0" lazy_static = "1.4.0" log = "0.4.17" mime = "0.3.17" -pyo3 = { version = "0.25.1", features = [ +pyo3 = { version = "0.26.0", features = [ "macros", "anyhow", "abi3", "abi3-py310", ] } -pyo3-log = "0.12.4" -pythonize = "0.25.0" +pyo3-log = "0.13.1" +pythonize = "0.26.0" regex = "1.6.0" sha2 = "0.10.8" serde = { version = "1.0.144", features = ["derive"] } diff --git a/rust/src/events/internal_metadata.rs b/rust/src/events/internal_metadata.rs index 4711fc540f..fa40fdcfad 100644 --- a/rust/src/events/internal_metadata.rs +++ b/rust/src/events/internal_metadata.rs @@ -41,7 +41,7 @@ use pyo3::{ pybacked::PyBackedStr, pyclass, pymethods, types::{PyAnyMethods, PyDict, PyDictMethods, PyString}, - Bound, IntoPyObject, PyAny, PyObject, PyResult, Python, + Bound, IntoPyObject, Py, PyAny, PyResult, Python, }; use crate::UnwrapInfallible; @@ -289,7 +289,7 @@ impl EventInternalMetadata { /// Get a dict holding the data stored in the `internal_metadata` column in the database. /// /// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here. - fn get_dict(&self, py: Python<'_>) -> PyResult { + fn get_dict(&self, py: Python<'_>) -> PyResult> { let dict = PyDict::new(py); for entry in &self.data { diff --git a/rust/src/http_client.rs b/rust/src/http_client.rs index ca4bf1590b..4bd80c8e04 100644 --- a/rust/src/http_client.rs +++ b/rust/src/http_client.rs @@ -134,7 +134,7 @@ fn get_runtime<'a>(reactor: &Bound<'a, PyAny>) -> PyResult = OnceCell::new(); +static DEFER: OnceCell> = OnceCell::new(); /// Access to the `twisted.internet.defer` module. 
fn defer(py: Python<'_>) -> PyResult<&Bound<'_, PyAny>> { @@ -165,7 +165,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> #[pyclass] struct HttpClient { client: reqwest::Client, - reactor: PyObject, + reactor: Py, } #[pymethods] @@ -237,7 +237,7 @@ impl HttpClient { return Err(HttpResponseException::new(status, buffer)); } - let r = Python::with_gil(|py| buffer.into_pyobject(py).map(|o| o.unbind()))?; + let r = Python::attach(|py| buffer.into_pyobject(py).map(|o| o.unbind()))?; Ok(r) }) @@ -270,7 +270,7 @@ where handle.spawn(async move { let res = task.await; - Python::with_gil(move |py| { + Python::attach(move |py| { // Flatten the panic into standard python error let res = match res { Ok(r) => r, diff --git a/rust/src/rendezvous/mod.rs b/rust/src/rendezvous/mod.rs index 3148e0f67a..848b5035bb 100644 --- a/rust/src/rendezvous/mod.rs +++ b/rust/src/rendezvous/mod.rs @@ -29,7 +29,7 @@ use pyo3::{ exceptions::PyValueError, pyclass, pymethods, types::{PyAnyMethods, PyModule, PyModuleMethods}, - Bound, IntoPyObject, Py, PyAny, PyObject, PyResult, Python, + Bound, IntoPyObject, Py, PyAny, PyResult, Python, }; use ulid::Ulid; @@ -56,7 +56,7 @@ fn prepare_headers(headers: &mut HeaderMap, session: &Session) { #[pyclass] struct RendezvousHandler { base: Uri, - clock: PyObject, + clock: Py, sessions: BTreeMap, capacity: usize, max_content_length: u64, From bc926bd99eaf4882625b8262d07ea55a89675647 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Nov 2025 16:38:19 +0000 Subject: [PATCH 115/149] Bump ruff from 0.12.10 to 0.14.3 (#19124) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 107 +++++++++++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 65 insertions(+), 44 deletions(-) diff --git a/poetry.lock b/poetry.lock index ce8b9ef6ee..4996517afc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -39,7 +39,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\"" files = [ {file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"}, {file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"}, @@ -444,7 +444,7 @@ description = "XML bomb protection for Python stdlib modules" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -469,7 +469,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"}, {file = "elementpath-4.1.5.tar.gz", hash = 
"sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"}, @@ -519,7 +519,7 @@ description = "Python wrapper for hiredis" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"redis\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"redis\"" files = [ {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"}, {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"}, @@ -842,7 +842,7 @@ description = "Jaeger Python OpenTracing Tracer implementation" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, ] @@ -980,7 +980,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" files = [ {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, @@ -996,7 +996,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"url-preview\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"url-preview\"" files = [ {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"}, {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"}, @@ -1283,7 +1283,7 @@ description = "An LDAP3 auth provider for Synapse" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" files = [ {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, @@ -1525,7 +1525,7 @@ description = "OpenTracing API for Python. 
See documentation at http://opentraci optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, ] @@ -1594,6 +1594,8 @@ groups = ["main"] files = [ {file = "pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860"}, {file = "pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b"}, {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50"}, {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae"}, {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9"}, @@ -1603,6 +1605,8 @@ files = [ {file = "pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f"}, {file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"}, {file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494"}, {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"}, {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"}, {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"}, @@ -1612,6 +1616,8 @@ files = [ {file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"}, {file = "pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4"}, {file = "pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6"}, {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7"}, {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024"}, {file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809"}, @@ -1624,6 +1630,8 @@ files = [ {file = "pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f"}, {file = "pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c"}, {file = "pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1"}, {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805"}, {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8"}, {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2"}, @@ -1633,6 +1641,8 @@ files = [ {file = "pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580"}, {file = "pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e"}, {file = "pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c"}, {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8"}, {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59"}, {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe"}, @@ -1642,6 +1652,8 @@ files = [ {file = "pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e"}, {file = "pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12"}, {file = "pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673"}, {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027"}, {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77"}, {file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874"}, @@ -1651,6 +1663,8 @@ files = [ {file = "pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6"}, {file = "pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae"}, {file = "pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36"}, {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b"}, {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477"}, {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50"}, @@ -1660,6 +1674,8 @@ files = [ {file = "pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa"}, {file = "pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f"}, {file = "pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc"}, {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06"}, {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a"}, {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978"}, @@ -1669,11 +1685,15 @@ files = [ {file = "pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = 
"sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a"}, {file = "pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"}, {file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"}, @@ -1711,7 +1731,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"postgres\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"postgres\"" files = [ {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, @@ -1719,6 +1739,7 @@ files = [ {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, + {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"}, {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = 
"sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, @@ -1731,7 +1752,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" files = [ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, ] @@ -1747,7 +1768,7 @@ description = "A Simple library to enable psycopg2 compatability" optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" files = [ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, ] @@ -2024,7 +2045,7 @@ description = "A development tool to measure, monitor and analyze the memory beh optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"cache-memory\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"cache-memory\"" files = [ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, @@ -2084,7 +2105,7 @@ description = "Python implementation of SAML Version 2 Standard" optional = true python-versions = ">=3.9,<4.0" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"}, {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"}, @@ -2109,7 +2130,7 @@ description = "Extensions to the standard Python datetime module" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, @@ -2137,7 +2158,7 @@ description = "World timezone definitions, modern and historical" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"}, {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, @@ -2474,31 +2495,31 @@ files = [ [[package]] name = "ruff" -version = "0.12.10" +version = "0.14.3" description = "An extremely fast Python linter and 
code formatter, written in Rust." optional = false python-versions = ">=3.7" groups = ["dev"] files = [ - {file = "ruff-0.12.10-py3-none-linux_armv6l.whl", hash = "sha256:8b593cb0fb55cc8692dac7b06deb29afda78c721c7ccfed22db941201b7b8f7b"}, - {file = "ruff-0.12.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ebb7333a45d56efc7c110a46a69a1b32365d5c5161e7244aaf3aa20ce62399c1"}, - {file = "ruff-0.12.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d59e58586829f8e4a9920788f6efba97a13d1fa320b047814e8afede381c6839"}, - {file = "ruff-0.12.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:822d9677b560f1fdeab69b89d1f444bf5459da4aa04e06e766cf0121771ab844"}, - {file = "ruff-0.12.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b4a64f4062a50c75019c61c7017ff598cb444984b638511f48539d3a1c98db"}, - {file = "ruff-0.12.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c6f4064c69d2542029b2a61d39920c85240c39837599d7f2e32e80d36401d6e"}, - {file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:059e863ea3a9ade41407ad71c1de2badfbe01539117f38f763ba42a1206f7559"}, - {file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bef6161e297c68908b7218fa6e0e93e99a286e5ed9653d4be71e687dff101cf"}, - {file = "ruff-0.12.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4f1345fbf8fb0531cd722285b5f15af49b2932742fc96b633e883da8d841896b"}, - {file = "ruff-0.12.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f68433c4fbc63efbfa3ba5db31727db229fa4e61000f452c540474b03de52a9"}, - {file = "ruff-0.12.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:141ce3d88803c625257b8a6debf4a0473eb6eed9643a6189b68838b43e78165a"}, - {file = "ruff-0.12.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f3fc21178cd44c98142ae7590f42ddcb587b8e09a3b849cbc84edb62ee95de60"}, - {file = "ruff-0.12.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7d1a4e0bdfafcd2e3e235ecf50bf0176f74dd37902f241588ae1f6c827a36c56"}, - {file = "ruff-0.12.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:e67d96827854f50b9e3e8327b031647e7bcc090dbe7bb11101a81a3a2cbf1cc9"}, - {file = "ruff-0.12.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ae479e1a18b439c59138f066ae79cc0f3ee250712a873d00dbafadaad9481e5b"}, - {file = "ruff-0.12.10-py3-none-win32.whl", hash = "sha256:9de785e95dc2f09846c5e6e1d3a3d32ecd0b283a979898ad427a9be7be22b266"}, - {file = "ruff-0.12.10-py3-none-win_amd64.whl", hash = "sha256:7837eca8787f076f67aba2ca559cefd9c5cbc3a9852fd66186f4201b87c1563e"}, - {file = "ruff-0.12.10-py3-none-win_arm64.whl", hash = "sha256:cc138cc06ed9d4bfa9d667a65af7172b47840e1a98b02ce7011c391e54635ffc"}, - {file = "ruff-0.12.10.tar.gz", hash = "sha256:189ab65149d11ea69a2d775343adf5f49bb2426fc4780f65ee33b423ad2e47f9"}, + {file = "ruff-0.14.3-py3-none-linux_armv6l.whl", hash = "sha256:876b21e6c824f519446715c1342b8e60f97f93264012de9d8d10314f8a79c371"}, + {file = "ruff-0.14.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b6fd8c79b457bedd2abf2702b9b472147cd860ed7855c73a5247fa55c9117654"}, + {file = "ruff-0.14.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:71ff6edca490c308f083156938c0c1a66907151263c4abdcb588602c6e696a14"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:786ee3ce6139772ff9272aaf43296d975c0217ee1b97538a98171bf0d21f87ed"}, + {file = 
"ruff-0.14.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cd6291d0061811c52b8e392f946889916757610d45d004e41140d81fb6cd5ddc"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a497ec0c3d2c88561b6d90f9c29f5ae68221ac00d471f306fa21fa4264ce5fcd"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e231e1be58fc568950a04fbe6887c8e4b85310e7889727e2b81db205c45059eb"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:469e35872a09c0e45fecf48dd960bfbce056b5db2d5e6b50eca329b4f853ae20"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d6bc90307c469cb9d28b7cfad90aaa600b10d67c6e22026869f585e1e8a2db0"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2f8a0bbcffcfd895df39c9a4ecd59bb80dca03dc43f7fb63e647ed176b741e"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:678fdd7c7d2d94851597c23ee6336d25f9930b460b55f8598e011b57c74fd8c5"}, + {file = "ruff-0.14.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1ec1ac071e7e37e0221d2f2dbaf90897a988c531a8592a6a5959f0603a1ecf5e"}, + {file = "ruff-0.14.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:afcdc4b5335ef440d19e7df9e8ae2ad9f749352190e96d481dc501b753f0733e"}, + {file = "ruff-0.14.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:7bfc42f81862749a7136267a343990f865e71fe2f99cf8d2958f684d23ce3dfa"}, + {file = "ruff-0.14.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a65e448cfd7e9c59fae8cf37f9221585d3354febaad9a07f29158af1528e165f"}, + {file = "ruff-0.14.3-py3-none-win32.whl", hash = "sha256:f3d91857d023ba93e14ed2d462ab62c3428f9bbf2b4fbac50a03ca66d31991f7"}, + {file = "ruff-0.14.3-py3-none-win_amd64.whl", hash = "sha256:d7b7006ac0756306db212fd37116cce2bd307e1e109375e1c6c106002df0ae5f"}, + {file = "ruff-0.14.3-py3-none-win_arm64.whl", hash = "sha256:26eb477ede6d399d898791d01961e16b86f02bc2486d0d1a7a9bb2379d055dc1"}, + {file = "ruff-0.14.3.tar.gz", hash = "sha256:4ff876d2ab2b161b6de0aa1f5bd714e8e9b4033dc122ee006925fbacc4f62153"}, ] [[package]] @@ -2541,7 +2562,7 @@ description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"sentry\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"sentry\"" files = [ {file = "sentry_sdk-2.34.1-py2.py3-none-any.whl", hash = "sha256:b7a072e1cdc5abc48101d5146e1ae680fa81fe886d8d95aaa25a0b450c818d32"}, {file = "sentry_sdk-2.34.1.tar.gz", hash = "sha256:69274eb8c5c38562a544c3e9f68b5be0a43be4b697f5fd385bf98e4fbe672687"}, @@ -2729,7 +2750,7 @@ description = "Tornado IOLoop Backed Concurrent Futures" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, @@ -2745,7 +2766,7 @@ description = "Python bindings for the Apache Thrift RPC system" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "thrift-0.16.0.tar.gz", hash = 
"sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, ] @@ -2808,7 +2829,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"}, {file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"}, @@ -2942,7 +2963,7 @@ description = "non-blocking redis client for python" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"redis\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"redis\"" files = [ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"}, {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"}, @@ -3188,7 +3209,7 @@ description = "An XML Schema validator and decoder" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"}, {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"}, @@ -3323,4 +3344,4 @@ url-preview = ["lxml"] [metadata] lock-version = "2.1" python-versions = "^3.10.0" -content-hash = "262051340e8b5daac02d0bb61a145a609984d76732423131bdbbeb052329f168" +content-hash = "2a891bc466355554d5c5873e7f8592e4f693de4d0f734ddb55f8a55bb4e529df" diff --git a/pyproject.toml b/pyproject.toml index 01fbfd8efb..f530666e45 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -339,7 +339,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. -ruff = "0.12.10" +ruff = "0.14.3" # Typechecking lxml-stubs = ">=0.4.0" From e00a41183724f62661423720aa271ef74fee6ea0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 3 Nov 2025 11:18:56 -0600 Subject: [PATCH 116/149] Move exception handling up the stack (avoid `exit(1)` in our composable functions) (#19116) Move exception handling up the stack (avoid `exit(1)` in our composable functions) Relevant to Synapse Pro for small hosts as we don't want to exit the entire Python process and affect all homeserver tenants. ### Background As part of Element's plan to support a light form of vhosting (virtual host) (multiple instances of Synapse in the same Python process) (c.f Synapse Pro for small hosts), we're currently diving into the details and implications of running multiple instances of Synapse in the same Python process. 
"Clean tenant provisioning" tracked internally by https://github.com/element-hq/synapse-small-hosts/issues/48 --- changelog.d/19116.misc | 1 + synapse/app/generic_worker.py | 16 ++++++++-------- synapse/app/homeserver.py | 10 +++++----- 3 files changed, 14 insertions(+), 13 deletions(-) create mode 100644 changelog.d/19116.misc diff --git a/changelog.d/19116.misc b/changelog.d/19116.misc new file mode 100644 index 0000000000..2291d0781a --- /dev/null +++ b/changelog.d/19116.misc @@ -0,0 +1 @@ +Move exception handling up the stack (avoid `exit(1)` in our composable functions). diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 8f512c1577..1a7bedaac5 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -364,14 +364,11 @@ def start(config: HomeServerConfig) -> None: # Start the tracer init_tracer(hs) # noqa - try: - hs.setup() + hs.setup() - # Ensure the replication streamer is always started in case we write to any - # streams. Will no-op if no streams can be written to by this worker. - hs.get_replication_streamer() - except Exception as e: - handle_startup_exception(e) + # Ensure the replication streamer is always started in case we write to any + # streams. Will no-op if no streams can be written to by this worker. + hs.get_replication_streamer() async def start() -> None: await _base.start(hs) @@ -388,7 +385,10 @@ def start(config: HomeServerConfig) -> None: def main() -> None: homeserver_config = load_config(sys.argv[1:]) with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + try: + start(homeserver_config) + except Exception as e: + handle_startup_exception(e) if __name__ == "__main__": diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 023a0d877f..9fd65b2718 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -414,10 +414,7 @@ def setup( # Start the tracer init_tracer(hs) # noqa - try: - hs.setup() - except Exception as e: - handle_startup_exception(e) + hs.setup() async def _start_when_reactor_running() -> None: # TODO: Feels like this should be moved somewhere else. @@ -464,7 +461,10 @@ def main() -> None: # check base requirements check_requirements() hs = create_homeserver(homeserver_config) - setup(hs) + try: + setup(hs) + except Exception as e: + handle_startup_exception(e) # redirect stdio to the logs, if configured. if not hs.config.logging.no_redirect_stdio: From f02ac5a4d5ca5297a14a01ff88f450dd8e757b43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Nov 2025 17:19:20 +0000 Subject: [PATCH 117/149] Bump markdown-it-py from 3.0.0 to 4.0.0 (#19123) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/poetry.lock b/poetry.lock index 4996517afc..72f784110f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1163,14 +1163,14 @@ test = ["coverage[toml] (>=7.2.5)", "mypy (>=1.2.0)", "pytest (>=7.3.0)", "pytes [[package]] name = "markdown-it-py" -version = "3.0.0" +version = "4.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.10" groups = ["dev"] files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, + {file = "markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147"}, + {file = "markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3"}, ] [package.dependencies] @@ -1178,13 +1178,12 @@ mdurl = ">=0.1,<1.0" [package.extras] benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "markdown-it-pyrs", "mistletoe (>=1.0,<2.0)", "mistune (>=3.0,<4.0)", "panflute (>=2.3,<3.0)"] linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] +plugins = ["mdit-py-plugins (>=0.5.0)"] profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] +rtd = ["ipykernel", "jupyter_sphinx", "mdit-py-plugins (>=0.5.0)", "myst-parser", "pyyaml", "sphinx", "sphinx-book-theme (>=1.0,<2.0)", "sphinx-copybutton", "sphinx-design"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions", "requests"] [[package]] name = "markupsafe" From a7107458c6c7bc6de7c1056c21196c259c4a2ea4 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 3 Nov 2025 12:04:43 -0600 Subject: [PATCH 118/149] Refactor app entrypoints (avoid `exit(1)` in our composable functions) (#19121) - Move `register_start` (calls `os._exit(1)`) out of `setup` (our composable function) - We want to avoid `exit(...)` because we use these composable functions in Synapse Pro for small hosts where we have multiple Synapse instances running in the same process. We don't want a problem from one homeserver tenant causing the entire Python process to exit and affect all of the other homeserver tenants. - Continuation of https://github.com/element-hq/synapse/pull/19116 - Align our app entrypoints: `homeserver` (main), `generic_worker` (worker), and `admin_cmd` ### Background As part of Element's plan to support a light form of vhosting (virtual host) (multiple instances of Synapse in the same Python process) (c.f Synapse Pro for small hosts), we're currently diving into the details and implications of running multiple instances of Synapse in the same Python process. 
"Clean tenant provisioning" tracked internally by https://github.com/element-hq/synapse-small-hosts/issues/48 --- changelog.d/19121.misc | 1 + synapse/app/_base.py | 2 +- synapse/app/admin_cmd.py | 85 +++++++++++++++++++++------- synapse/app/appservice.py | 9 +-- synapse/app/client_reader.py | 9 +-- synapse/app/event_creator.py | 9 +-- synapse/app/federation_reader.py | 9 +-- synapse/app/federation_sender.py | 9 +-- synapse/app/frontend_proxy.py | 9 +-- synapse/app/generic_worker.py | 95 ++++++++++++++++++++++++++++---- synapse/app/homeserver.py | 90 ++++++++++++++++++++---------- synapse/app/media_repository.py | 9 +-- synapse/app/pusher.py | 9 +-- synapse/app/synchrotron.py | 9 +-- synapse/app/user_dir.py | 8 +-- 15 files changed, 231 insertions(+), 131 deletions(-) create mode 100644 changelog.d/19121.misc diff --git a/changelog.d/19121.misc b/changelog.d/19121.misc new file mode 100644 index 0000000000..cb1fb8f024 --- /dev/null +++ b/changelog.d/19121.misc @@ -0,0 +1 @@ +Refactor and align app entrypoints (avoid `exit(1)` in our composable functions). diff --git a/synapse/app/_base.py b/synapse/app/_base.py index c0fcf8ca29..e5f4cfb0e6 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -602,7 +602,7 @@ def setup_sighup_handling() -> None: _already_setup_sighup_handling = True -async def start(hs: "HomeServer", freeze: bool = True) -> None: +async def start(hs: "HomeServer", *, freeze: bool = True) -> None: """ Start a Synapse server or worker. diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index b5b1edac0a..dac603de88 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -64,7 +64,7 @@ from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore -from synapse.types import JsonMapping, StateMap +from synapse.types import ISynapseReactor, JsonMapping, StateMap from synapse.util.logcontext import LoggingContext logger = logging.getLogger("synapse.app.admin_cmd") @@ -289,7 +289,21 @@ def load_config(argv_options: list[str]) -> tuple[HomeServerConfig, argparse.Nam return config, args -def start(config: HomeServerConfig, args: argparse.Namespace) -> None: +def create_homeserver( + config: HomeServerConfig, + reactor: Optional[ISynapseReactor] = None, +) -> AdminCmdServer: + """ + Create a homeserver instance for the Synapse admin command process. + + Args: + config: The configuration for the homeserver. + reactor: Optionally provide a reactor to use. Can be useful in different + scenarios that you want control over the reactor, such as tests. + + Returns: + A homeserver instance. + """ if config.worker.worker_app is not None: assert config.worker.worker_app == "synapse.app.admin_cmd" @@ -312,33 +326,62 @@ def start(config: HomeServerConfig, args: argparse.Namespace) -> None: synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts - ss = AdminCmdServer( + admin_command_server = AdminCmdServer( config.server.server_name, config=config, + reactor=reactor, ) - setup_logging(ss, config, use_worker_options=True) + return admin_command_server - ss.setup() - # We use task.react as the basic run command as it correctly handles tearing - # down the reactor when the deferreds resolve and setting the return value. - # We also make sure that `_base.start` gets run before we actually run the - # command. 
+def setup(admin_command_server: AdminCmdServer) -> None: + """ + Setup a `AdminCmdServer` instance. - async def run() -> None: - with LoggingContext(name="command", server_name=config.server.server_name): - await _base.start(ss) - await args.func(ss, args) - - _base.start_worker_reactor( - "synapse-admin-cmd", - config, - run_command=lambda: task.react(lambda _reactor: defer.ensureDeferred(run())), + Args: + admin_command_server: The homeserver to setup. + """ + setup_logging( + admin_command_server, admin_command_server.config, use_worker_options=True ) + admin_command_server.setup() + + +async def start(admin_command_server: AdminCmdServer, args: argparse.Namespace) -> None: + """ + Should be called once the reactor is running. + + Args: + admin_command_server: The homeserver to setup. + args: Command line arguments. + """ + # This needs a logcontext unlike other entrypoints because we're not using + # `register_start(...)` to run this function. + with LoggingContext(name="start", server_name=admin_command_server.hostname): + # We make sure that `_base.start` gets run before we actually run the command. + await _base.start(admin_command_server) + # Run the command + await args.func(admin_command_server, args) + + +def main() -> None: + homeserver_config, args = load_config(sys.argv[1:]) + with LoggingContext(name="main", server_name=homeserver_config.server.server_name): + admin_command_server = create_homeserver(homeserver_config) + setup(admin_command_server) + + _base.start_worker_reactor( + "synapse-admin-cmd", + admin_command_server.config, + # We use task.react as the basic run command as it correctly handles tearing + # down the reactor when the deferreds resolve and setting the return value. + run_command=lambda: task.react( + lambda _reactor: defer.ensureDeferred(start(admin_command_server, args)) + ), + ) + if __name__ == "__main__": - homeserver_config, args = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config, args) + main() diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 823e1908b5..d251d0ab64 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -19,16 +19,11 @@ # # -import sys - -from synapse.app.generic_worker import load_config, start -from synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 823e1908b5..d251d0ab64 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -19,16 +19,11 @@ # # -import sys - -from synapse.app.generic_worker import load_config, start -from synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 1a9b0ad155..5b18578bd0 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -18,16 +18,11 @@ # # -import sys - -from synapse.app.generic_worker import load_config, start -from 
synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 823e1908b5..d251d0ab64 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -19,16 +19,11 @@ # # -import sys - -from synapse.app.generic_worker import load_config, start -from synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 823e1908b5..d251d0ab64 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -19,16 +19,11 @@ # # -import sys - -from synapse.app.generic_worker import load_config, start -from synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 823e1908b5..d251d0ab64 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -19,16 +19,11 @@ # # -import sys - -from synapse.app.generic_worker import load_config, start -from synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 1a7bedaac5..a1dde368d4 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -21,6 +21,7 @@ # import logging import sys +from typing import Optional from twisted.web.resource import Resource @@ -111,6 +112,7 @@ from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore +from synapse.types import ISynapseReactor from synapse.util.httpresourcetree import create_resource_tree logger = logging.getLogger("synapse.app.generic_worker") @@ -332,7 +334,30 @@ def load_config(argv_options: list[str]) -> HomeServerConfig: return config -def start(config: HomeServerConfig) -> None: +def create_homeserver( + config: HomeServerConfig, + reactor: Optional[ISynapseReactor] = None, +) -> GenericWorkerServer: + """ + Create a homeserver instance for the Synapse worker process. + + Our composable functions (`create_homeserver`, `setup`, `start`) should not exit the + Python process (call `exit(...)`) and instead raise exceptions which can be handled + by the caller as desired. 
This doesn't matter for the normal case of one Synapse + instance running in the Python process (as we're only affecting ourselves), but is + important when we have multiple Synapse homeserver tenants running in the same + Python process (c.f. Synapse Pro for small hosts) as we don't want some problem from + one tenant stopping the rest of the tenants. + + Args: + config: The configuration for the homeserver. + reactor: Optionally provide a reactor to use. Can be useful in different + scenarios that you want control over the reactor, such as tests. + + Returns: + A homeserver instance. + """ + # For backwards compatibility let any of the old app names. assert config.worker.worker_app in ( "synapse.app.appservice", @@ -357,9 +382,29 @@ def start(config: HomeServerConfig) -> None: hs = GenericWorkerServer( config.server.server_name, config=config, + reactor=reactor, ) - setup_logging(hs, config, use_worker_options=True) + return hs + + +def setup(hs: GenericWorkerServer) -> None: + """ + Setup a `GenericWorkerServer` (worker) instance. + + Our composable functions (`create_homeserver`, `setup`, `start`) should not exit the + Python process (call `exit(...)`) and instead raise exceptions which can be handled + by the caller as desired. This doesn't matter for the normal case of one Synapse + instance running in the Python process (as we're only affecting ourselves), but is + important when we have multiple Synapse homeserver tenants running in the same + Python process (c.f. Synapse Pro for small hosts) as we don't want some problem from + one tenant stopping the rest of the tenants. + + Args: + hs: The homeserver to setup. + """ + + setup_logging(hs, hs.config, use_worker_options=True) # Start the tracer init_tracer(hs) # noqa @@ -370,26 +415,56 @@ def start(config: HomeServerConfig) -> None: # streams. Will no-op if no streams can be written to by this worker. hs.get_replication_streamer() - async def start() -> None: - await _base.start(hs) - register_start(hs, start) +async def start( + hs: GenericWorkerServer, + *, + freeze: bool = True, +) -> None: + """ + Should be called once the reactor is running. - # redirect stdio to the logs, if configured. - if not hs.config.logging.no_redirect_stdio: - redirect_stdio_to_logs() + Our composable functions (`create_homeserver`, `setup`, `start`) should not exit the + Python process (call `exit(...)`) and instead raise exceptions which can be handled + by the caller as desired. This doesn't matter for the normal case of one Synapse + instance running in the Python process (as we're only affecting ourselves), but is + important when we have multiple Synapse homeserver tenants running in the same + Python process (c.f. Synapse Pro for small hosts) as we don't want some problem from + one tenant stopping the rest of the tenants. - _base.start_worker_reactor("synapse-generic-worker", config) + Args: + hs: The homeserver to setup. + freeze: whether to freeze the homeserver base objects in the garbage collector. + May improve garbage collection performance by marking objects with an effectively + static lifetime as frozen so they don't need to be considered for cleanup. + If you ever want to `shutdown` the homeserver, this needs to be + False otherwise the homeserver cannot be garbage collected after `shutdown`. + """ + + await _base.start(hs, freeze=freeze) def main() -> None: homeserver_config = load_config(sys.argv[1:]) + + # Create a logging context as soon as possible so we can start associating + # everything with this homeserver. 
with LoggingContext(name="main", server_name=homeserver_config.server.server_name): + # redirect stdio to the logs, if configured. + if not homeserver_config.logging.no_redirect_stdio: + redirect_stdio_to_logs() + + hs = create_homeserver(homeserver_config) try: - start(homeserver_config) + setup(hs) except Exception as e: handle_startup_exception(e) + # Register a callback to be invoked once the reactor is running + register_start(hs, start, hs) + + _base.start_worker_reactor("synapse-generic-worker", homeserver_config) + if __name__ == "__main__": main() diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 9fd65b2718..3807a18ab7 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -71,7 +71,6 @@ from synapse.rest.well_known import well_known_resource from synapse.server import HomeServer from synapse.storage import DataStore from synapse.types import ISynapseReactor -from synapse.util.check_dependencies import check_requirements from synapse.util.httpresourcetree import create_resource_tree from synapse.util.module_loader import load_module @@ -356,6 +355,14 @@ def create_homeserver( """ Create a homeserver instance for the Synapse main process. + Our composable functions (`create_homeserver`, `setup`, `start`) should not exit the + Python process (call `exit(...)`) and instead raise exceptions which can be handled + by the caller as desired. This doesn't matter for the normal case of one Synapse + instance running in the Python process (as we're only affecting ourselves), but is + important when we have multiple Synapse homeserver tenants running in the same + Python process (c.f. Synapse Pro for small hosts) as we don't want some problem from + one tenant stopping the rest of the tenants. + Args: config: The configuration for the homeserver. reactor: Optionally provide a reactor to use. Can be useful in different @@ -388,22 +395,20 @@ def create_homeserver( def setup( hs: SynapseHomeServer, - *, - freeze: bool = True, ) -> None: """ - Setup a Synapse homeserver instance given a configuration. + Setup a `SynapseHomeServer` (main) instance. + + Our composable functions (`create_homeserver`, `setup`, `start`) should not exit the + Python process (call `exit(...)`) and instead raise exceptions which can be handled + by the caller as desired. This doesn't matter for the normal case of one Synapse + instance running in the Python process (as we're only affecting ourselves), but is + important when we have multiple Synapse homeserver tenants running in the same + Python process (c.f. Synapse Pro for small hosts) as we don't want some problem from + one tenant stopping the rest of the tenants. Args: hs: The homeserver to setup. - freeze: whether to freeze the homeserver base objects in the garbage collector. - May improve garbage collection performance by marking objects with an effectively - static lifetime as frozen so they don't need to be considered for cleanup. - If you ever want to `shutdown` the homeserver, this needs to be - False otherwise the homeserver cannot be garbage collected after `shutdown`. - - Returns: - A homeserver instance. """ setup_logging(hs, hs.config, use_worker_options=False) @@ -416,22 +421,44 @@ def setup( hs.setup() - async def _start_when_reactor_running() -> None: - # TODO: Feels like this should be moved somewhere else. - # - # Load the OIDC provider metadatas, if OIDC is enabled. - if hs.config.oidc.oidc_enabled: - oidc = hs.get_oidc_handler() - # Loading the provider metadata also ensures the provider config is valid. 
- await oidc.load_metadata() - await _base.start(hs, freeze) +async def start( + hs: SynapseHomeServer, + *, + freeze: bool = True, +) -> None: + """ + Should be called once the reactor is running. - # TODO: Feels like this should be moved somewhere else. - hs.get_datastores().main.db_pool.updates.start_doing_background_updates() + Our composable functions (`create_homeserver`, `setup`, `start`) should not exit the + Python process (call `exit(...)`) and instead raise exceptions which can be handled + by the caller as desired. This doesn't matter for the normal case of one Synapse + instance running in the Python process (as we're only affecting ourselves), but is + important when we have multiple Synapse homeserver tenants running in the same + Python process (c.f. Synapse Pro for small hosts) as we don't want some problem from + one tenant stopping the rest of the tenants. - # Register a callback to be invoked once the reactor is running - register_start(hs, _start_when_reactor_running) + Args: + hs: The homeserver to setup. + freeze: whether to freeze the homeserver base objects in the garbage collector. + May improve garbage collection performance by marking objects with an effectively + static lifetime as frozen so they don't need to be considered for cleanup. + If you ever want to `shutdown` the homeserver, this needs to be + False otherwise the homeserver cannot be garbage collected after `shutdown`. + """ + + # TODO: Feels like this should be moved somewhere else. + # + # Load the OIDC provider metadatas, if OIDC is enabled. + if hs.config.oidc.oidc_enabled: + oidc = hs.get_oidc_handler() + # Loading the provider metadata also ensures the provider config is valid. + await oidc.load_metadata() + + await _base.start(hs, freeze=freeze) + + # TODO: Feels like this should be moved somewhere else. + hs.get_datastores().main.db_pool.updates.start_doing_background_updates() def start_reactor( @@ -457,18 +484,21 @@ def start_reactor( def main() -> None: homeserver_config = load_or_generate_config(sys.argv[1:]) + # Create a logging context as soon as possible so we can start associating + # everything with this homeserver. with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - # check base requirements - check_requirements() + # redirect stdio to the logs, if configured. + if not homeserver_config.logging.no_redirect_stdio: + redirect_stdio_to_logs() + hs = create_homeserver(homeserver_config) try: setup(hs) except Exception as e: handle_startup_exception(e) - # redirect stdio to the logs, if configured. 
- if not hs.config.logging.no_redirect_stdio: - redirect_stdio_to_logs() + # Register a callback to be invoked once the reactor is running + register_start(hs, start, hs) start_reactor(homeserver_config) diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 823e1908b5..d251d0ab64 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -19,16 +19,11 @@ # # -import sys - -from synapse.app.generic_worker import load_config, start -from synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 823e1908b5..d251d0ab64 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -19,16 +19,11 @@ # # -import sys - -from synapse.app.generic_worker import load_config, start -from synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 823e1908b5..d251d0ab64 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -19,16 +19,11 @@ # # -import sys - -from synapse.app.generic_worker import load_config, start -from synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index f64d82e41f..2c47d9f4fd 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -19,16 +19,12 @@ # # -import sys -from synapse.app.generic_worker import load_config, start -from synapse.util.logcontext import LoggingContext +from synapse.app.generic_worker import main as worker_main def main() -> None: - homeserver_config = load_config(sys.argv[1:]) - with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - start(homeserver_config) + worker_main() if __name__ == "__main__": From 2c5deb800ec2dba7fa1e8559811b698a72201807 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Nov 2025 19:18:16 +0000 Subject: [PATCH 119/149] Bump icu_segmenter from 2.0.0 to 2.0.1 (#19126) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a057c812af..3ff8f2c477 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -589,9 +589,9 @@ dependencies = [ [[package]] name = "icu_segmenter" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e185fc13b6401c138cf40db12b863b35f5edf31b88192a545857b41aeaf7d3d3" +checksum = "38e30e593cf9c3ca2f51aa312eb347cd1ba95715e91a842ec3fc9058eab2af4b" dependencies = [ "core_maths", "displaydoc", From 4f9dc3b6134f2073111e29d105ff1b4736b88ad8 Mon 
Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Nov 2025 19:19:06 +0000 Subject: [PATCH 120/149] Bump psycopg2 from 2.9.10 to 2.9.11 (#19125) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/poetry.lock b/poetry.lock index 72f784110f..1de977f15c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1725,23 +1725,20 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.10" +version = "2.9.11" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] markers = "extra == \"all\" or extra == \"postgres\"" files = [ - {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, - {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, - {file = "psycopg2-2.9.10-cp311-cp311-win32.whl", hash = "sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2"}, - {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, - {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, - {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, - {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"}, - {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, - {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, - {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, + {file = "psycopg2-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:103e857f46bb76908768ead4e2d0ba1d1a130e7b8ed77d3ae91e8b33481813e8"}, + {file = "psycopg2-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:210daed32e18f35e3140a1ebe059ac29209dd96468f2f7559aa59f75ee82a5cb"}, + {file = "psycopg2-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:e03e4a6dbe87ff81540b434f2e5dc2bddad10296db5eea7bdc995bf5f4162938"}, + {file = "psycopg2-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:8dc379166b5b7d5ea66dcebf433011dfc51a7bb8a5fc12367fa05668e5fc53c8"}, + {file = "psycopg2-2.9.11-cp314-cp314-win_amd64.whl", hash = "sha256:f10a48acba5fe6e312b891f290b4d2ca595fc9a06850fe53320beac353575578"}, + {file = "psycopg2-2.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:6ecddcf573777536bddfefaea8079ce959287798c8f5804bee6933635d538924"}, + {file = "psycopg2-2.9.11.tar.gz", hash = "sha256:964d31caf728e217c697ff77ea69c2ba0865fa41ec20bb00f0977e62fdcc52e3"}, ] [[package]] From e02a6f5e5d354e0862b3e018c0ef69007647f8d3 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 3 Nov 2025 14:07:10 -0600 Subject: [PATCH 121/149] Fix lost logcontext on `HomeServer.shutdown()` (#19108) Same fix as https://github.com/element-hq/synapse/pull/19090 Spawning from working on clean tenant deprovisioning in the Synapse Pro for small hosts project (https://github.com/element-hq/synapse-small-hosts/pull/204). 
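For context on the pattern this one-line fix applies, below is a simplified, hypothetical sketch (the import mirrors the real `synapse.logging.context` helper shown in the diff, but the surrounding function is invented for illustration and is not the code in this patch): cancelling a background-process deferred can run its callbacks synchronously, and without `PreserveLoggingContext()` those callbacks can leave a different logcontext installed when they return, "losing" the caller's context.

```python
from synapse.logging.context import PreserveLoggingContext


def cancel_background_processes(background_processes: set) -> None:
    """Simplified sketch: cancel a set of background-process deferreds
    without clobbering the caller's logcontext.

    Deferred.cancel() may run callbacks synchronously; wrapping it in
    PreserveLoggingContext() saves the current logcontext before those
    callbacks run and restores it once the block exits.
    """
    for background_process in list(background_processes):
        try:
            with PreserveLoggingContext():
                background_process.cancel()
        except Exception:
            # Shutdown is best-effort; ignore failures to cancel.
            pass
    background_processes.clear()
```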
--- changelog.d/19108.bugfix | 1 + synapse/server.py | 4 +++- tests/app/test_homeserver_shutdown.py | 23 ++++++++++++++++++----- 3 files changed, 22 insertions(+), 6 deletions(-) create mode 100644 changelog.d/19108.bugfix diff --git a/changelog.d/19108.bugfix b/changelog.d/19108.bugfix new file mode 100644 index 0000000000..a2afe19f41 --- /dev/null +++ b/changelog.d/19108.bugfix @@ -0,0 +1 @@ +Fix lost logcontext when using `HomeServer.shutdown()`. diff --git a/synapse/server.py b/synapse/server.py index 2c252ce86f..766515c930 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -143,6 +143,7 @@ from synapse.http.client import ( SimpleHttpClient, ) from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.logging.context import PreserveLoggingContext from synapse.media.media_repository import MediaRepository from synapse.metrics import ( all_later_gauges_to_clean_up_on_shutdown, @@ -507,7 +508,8 @@ class HomeServer(metaclass=abc.ABCMeta): for background_process in list(self._background_processes): try: - background_process.cancel() + with PreserveLoggingContext(): + background_process.cancel() except Exception: pass self._background_processes.clear() diff --git a/tests/app/test_homeserver_shutdown.py b/tests/app/test_homeserver_shutdown.py index d8119ba310..f127e5571d 100644 --- a/tests/app/test_homeserver_shutdown.py +++ b/tests/app/test_homeserver_shutdown.py @@ -22,6 +22,7 @@ import gc import weakref from synapse.app.homeserver import SynapseHomeServer +from synapse.logging.context import LoggingContext from synapse.storage.background_updates import UpdaterStatus from tests.server import ( @@ -29,7 +30,7 @@ from tests.server import ( get_clock, setup_test_homeserver, ) -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, logcontext_clean class HomeserverCleanShutdownTestCase(HomeserverTestCase): @@ -44,6 +45,7 @@ class HomeserverCleanShutdownTestCase(HomeserverTestCase): # closed in a timely manner during shutdown. Simulating this behaviour in a unit test # won't be as good as a proper integration test in complement. + @logcontext_clean def test_clean_homeserver_shutdown(self) -> None: """Ensure the `SynapseHomeServer` can be fully shutdown and garbage collected""" self.reactor, self.clock = get_clock() @@ -63,8 +65,13 @@ class HomeserverCleanShutdownTestCase(HomeserverTestCase): # we use in tests doesn't handle this properly (see doc comment) cleanup_test_reactor_system_event_triggers(self.reactor) - # Cleanup the homeserver. - self.get_success(self.hs.shutdown()) + async def shutdown() -> None: + # Use a logcontext just to double-check that we don't mangle the logcontext + # during shutdown. + with LoggingContext(name="hs_shutdown", server_name=self.hs.hostname): + await self.hs.shutdown() + + self.get_success(shutdown()) # Cleanup the internal reference in our test case del self.hs @@ -114,6 +121,7 @@ class HomeserverCleanShutdownTestCase(HomeserverTestCase): # # to generate the result. # objgraph.show_backrefs(synapse_hs, max_depth=10, too_many=10) + @logcontext_clean def test_clean_homeserver_shutdown_mid_background_updates(self) -> None: """Ensure the `SynapseHomeServer` can be fully shutdown and garbage collected before background updates have completed""" @@ -141,8 +149,13 @@ class HomeserverCleanShutdownTestCase(HomeserverTestCase): # Ensure the background updates are not complete. self.assertNotEqual(store.db_pool.updates.get_status(), UpdaterStatus.COMPLETE) - # Cleanup the homeserver. 
- self.get_success(self.hs.shutdown()) + async def shutdown() -> None: + # Use a logcontext just to double-check that we don't mangle the logcontext + # during shutdown. + with LoggingContext(name="hs_shutdown", server_name=self.hs.hostname): + await self.hs.shutdown() + + self.get_success(shutdown()) # Cleanup the internal reference in our test case del self.hs From 891acfd502b5abd60fffdc161d332244e2e1462d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 3 Nov 2025 15:23:22 -0600 Subject: [PATCH 122/149] Move `oidc.load_metadata()` startup into `_base.start()` (#19056) Slightly related to ["clean-tenant provisioning"](https://github.com/element-hq/synapse-small-hosts/issues/221) as making startup cleaner, makes it more clear how to handle clean provisioning. --- changelog.d/19056.misc | 1 + synapse/app/_base.py | 10 ++++++++++ synapse/app/homeserver.py | 8 -------- 3 files changed, 11 insertions(+), 8 deletions(-) create mode 100644 changelog.d/19056.misc diff --git a/changelog.d/19056.misc b/changelog.d/19056.misc new file mode 100644 index 0000000000..f3a1b4e66e --- /dev/null +++ b/changelog.d/19056.misc @@ -0,0 +1 @@ +Move `oidc.load_metadata()` startup into `_base.start()`. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index e5f4cfb0e6..2de5bdb51e 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -648,6 +648,16 @@ async def start(hs: "HomeServer", *, freeze: bool = True) -> None: # Apply the cache config. hs.config.caches.resize_all_caches() + # Load the OIDC provider metadatas, if OIDC is enabled. + if hs.config.oidc.oidc_enabled: + oidc = hs.get_oidc_handler() + # Loading the provider metadata also ensures the provider config is valid. + # + # FIXME: It feels a bit strange to validate and block on startup as one of these + # OIDC providers could be temporarily unavailable and cause Synapse to be unable + # to start. + await oidc.load_metadata() + # Load the certificate from disk. refresh_certificate(hs) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3807a18ab7..fb937c63c1 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -447,14 +447,6 @@ async def start( False otherwise the homeserver cannot be garbage collected after `shutdown`. """ - # TODO: Feels like this should be moved somewhere else. - # - # Load the OIDC provider metadatas, if OIDC is enabled. - if hs.config.oidc.oidc_enabled: - oidc = hs.get_oidc_handler() - # Loading the provider metadata also ensures the provider config is valid. - await oidc.load_metadata() - await _base.start(hs, freeze=freeze) # TODO: Feels like this should be moved somewhere else. From db00925ae77f134872b5e7cb26a1cacb31281334 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 3 Nov 2025 16:16:23 -0600 Subject: [PATCH 123/149] Redirect `stdout`/`stderr` to logs after initialization (#19131) This regressed in https://github.com/element-hq/synapse/pull/19121. I moved things in https://github.com/element-hq/synapse/pull/19121 because I thought that it made sense to redirect anything printed to `stdout`/`stderr` to the logs as early as possible. But we actually want to log any immediately apparent problems during initialization to `stderr` in the terminal so that they are obvious and visible to the operator. Now, I've moved `redirect_stdio_to_logs()` back to where it was previously along with some proper comment context for why we have it there. 
--- changelog.d/19131.misc | 1 + synapse/app/admin_cmd.py | 1 + synapse/app/generic_worker.py | 13 +++++++++---- synapse/app/homeserver.py | 13 +++++++++---- 4 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 changelog.d/19131.misc diff --git a/changelog.d/19131.misc b/changelog.d/19131.misc new file mode 100644 index 0000000000..cb1fb8f024 --- /dev/null +++ b/changelog.d/19131.misc @@ -0,0 +1 @@ +Refactor and align app entrypoints (avoid `exit(1)` in our composable functions). diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index dac603de88..193482b7fc 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -369,6 +369,7 @@ async def start(admin_command_server: AdminCmdServer, args: argparse.Namespace) def main() -> None: homeserver_config, args = load_config(sys.argv[1:]) with LoggingContext(name="main", server_name=homeserver_config.server.server_name): + # Initialize and setup the homeserver admin_command_server = create_homeserver(homeserver_config) setup(admin_command_server) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index a1dde368d4..0a4abd1839 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -450,16 +450,21 @@ def main() -> None: # Create a logging context as soon as possible so we can start associating # everything with this homeserver. with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - # redirect stdio to the logs, if configured. - if not homeserver_config.logging.no_redirect_stdio: - redirect_stdio_to_logs() - + # Initialize and setup the homeserver hs = create_homeserver(homeserver_config) try: setup(hs) except Exception as e: handle_startup_exception(e) + # For problems immediately apparent during initialization, we want to log to + # stderr in the terminal so that they are obvious and visible to the operator. + # + # Now that we're past the initialization stage, we can redirect anything printed + # to stdio to the logs, if configured. + if not homeserver_config.logging.no_redirect_stdio: + redirect_stdio_to_logs() + # Register a callback to be invoked once the reactor is running register_start(hs, start, hs) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index fb937c63c1..bd51aad9ab 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -479,16 +479,21 @@ def main() -> None: # Create a logging context as soon as possible so we can start associating # everything with this homeserver. with LoggingContext(name="main", server_name=homeserver_config.server.server_name): - # redirect stdio to the logs, if configured. - if not homeserver_config.logging.no_redirect_stdio: - redirect_stdio_to_logs() - + # Initialize and setup the homeserver hs = create_homeserver(homeserver_config) try: setup(hs) except Exception as e: handle_startup_exception(e) + # For problems immediately apparent during initialization, we want to log to + # stderr in the terminal so that they are obvious and visible to the operator. + # + # Now that we're past the initialization stage, we can redirect anything printed + # to stdio to the logs, if configured. 
+ if not homeserver_config.logging.no_redirect_stdio: + redirect_stdio_to_logs() + # Register a callback to be invoked once the reactor is running register_start(hs, start, hs) From 08f570f5f5668a9f4f3fad4669da3a34e9704566 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 4 Nov 2025 13:32:49 +0100 Subject: [PATCH 124/149] Fix "There is no current event loop in thread" error in tests (#19134) --- changelog.d/19134.bugfix | 1 + tests/events/test_auto_accept_invites.py | 19 +++++-------------- 2 files changed, 6 insertions(+), 14 deletions(-) create mode 100644 changelog.d/19134.bugfix diff --git a/changelog.d/19134.bugfix b/changelog.d/19134.bugfix new file mode 100644 index 0000000000..61e626cc9b --- /dev/null +++ b/changelog.d/19134.bugfix @@ -0,0 +1 @@ +Add support for Python 3.14. \ No newline at end of file diff --git a/tests/events/test_auto_accept_invites.py b/tests/events/test_auto_accept_invites.py index d3842e72d7..623ec67ed6 100644 --- a/tests/events/test_auto_accept_invites.py +++ b/tests/events/test_auto_accept_invites.py @@ -19,9 +19,8 @@ # # import asyncio -from asyncio import Future from http import HTTPStatus -from typing import Any, Awaitable, Optional, TypeVar, cast +from typing import Any, Optional, TypeVar, cast from unittest.mock import Mock import attr @@ -787,20 +786,12 @@ TV = TypeVar("TV") async def make_awaitable(value: T) -> T: + """ + Makes a fresh awaitable, suitable for mocking an `async` function. + """ return value -def make_multiple_awaitable(result: TV) -> Awaitable[TV]: - """ - Makes an awaitable, suitable for mocking an `async` function. - This uses Futures as they can be awaited multiple times so can be returned - to multiple callers. - """ - future: Future[TV] = Future() - future.set_result(result) - return future - - def create_module( config_override: Optional[dict[str, Any]] = None, worker_name: Optional[str] = None ) -> InviteAutoAccepter: @@ -809,7 +800,7 @@ def create_module( module_api = Mock(spec=ModuleApi) module_api.is_mine.side_effect = lambda a: a.split(":")[1] == "test" module_api.worker_name = worker_name - module_api.sleep.return_value = make_multiple_awaitable(None) + module_api.sleep.return_value = lambda *_args, **_kwargs: make_awaitable(None) module_api.get_userinfo_by_id.return_value = UserInfo( user_id=UserID.from_string("@user:test"), is_admin=False, From 5408101d21a08c42359737643a6cdab5021c1eb4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 4 Nov 2025 12:44:57 +0000 Subject: [PATCH 125/149] Speed up pruning of ratelimiter (#19129) I noticed this in some profiling. Basically, we prune the ratelimiters by copying and iterating over every entry every 60 seconds. Instead, let's use a wheel timer to track when we should potentially prune a given key, and then we a) check fewer keys, and b) can run more frequently. Hopefully this should mean we don't have a large pause everytime we prune a ratelimiter with lots of keys. Also fixes a bug where we didn't prune entries that were added via `record_action` and never subsequently updated. This affected the media and joins-per-room ratelimiter. 
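
As a rough illustration of the scheduling idea this commit describes, the sketch below uses a plain heap as a stand-in for Synapse's `WheelTimer` and a simplified leaky-bucket state. `SketchRatelimiter` and its fields are invented for this example and do not exist in the codebase; the real change keeps `(count, start, rate)` tuples per key and re-checks each candidate before deleting it, as shown in the diff that follows.

```python
import heapq
import itertools
from typing import Hashable


class SketchRatelimiter:
    """Toy leaky-bucket limiter that only prunes keys scheduled for a check."""

    def __init__(self, rate_hz: float) -> None:
        self.rate_hz = rate_hz
        # key -> (action_count, time_start_s)
        self.actions: dict[Hashable, tuple[int, float]] = {}
        # (prune_time_s, tiebreaker, key): a cheap stand-in for the wheel timer.
        self._prune_schedule: list[tuple[float, int, Hashable]] = []
        self._seq = itertools.count()

    def record_action(self, key: Hashable, now_s: float) -> None:
        count, start = self.actions.get(key, (0, now_s))
        count += 1
        self.actions[key] = (count, start)
        # The bucket will have fully drained once `count` actions have leaked
        # out at `rate_hz`; schedule a prune check slightly after that point.
        prune_time_s = start + count / self.rate_hz + 0.1
        heapq.heappush(self._prune_schedule, (prune_time_s, next(self._seq), key))

    def prune(self, now_s: float) -> None:
        # Only inspect keys whose scheduled check time has passed, instead of
        # copying and iterating over every entry in `self.actions`.
        while self._prune_schedule and self._prune_schedule[0][0] <= now_s:
            _, _, key = heapq.heappop(self._prune_schedule)
            entry = self.actions.get(key)
            if entry is None:
                continue
            count, start = entry
            # Re-check against the live state: the key may have been updated
            # (and rescheduled) since this check was queued.
            if count - (now_s - start) * self.rate_hz <= 0:
                del self.actions[key]
```

The key property is that a sweep only touches keys whose scheduled drain time has passed, and it re-reads the live bucket state because the key may have been updated again since it was scheduled.
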
--- changelog.d/19129.misc | 1 + synapse/api/ratelimiting.py | 71 ++++++++++++++++++---- tests/api/test_ratelimiting.py | 15 +++++ tests/federation/test_federation_server.py | 4 +- tests/handlers/test_room_member.py | 8 +-- 5 files changed, 80 insertions(+), 19 deletions(-) create mode 100644 changelog.d/19129.misc diff --git a/changelog.d/19129.misc b/changelog.d/19129.misc new file mode 100644 index 0000000000..117dbfadea --- /dev/null +++ b/changelog.d/19129.misc @@ -0,0 +1 @@ +Speed up pruning of ratelimiters. diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 1a43bdff23..ee0e9181ce 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -27,6 +27,7 @@ from synapse.config.ratelimiting import RatelimitSettings from synapse.storage.databases.main import DataStore from synapse.types import Requester from synapse.util.clock import Clock +from synapse.util.wheel_timer import WheelTimer if TYPE_CHECKING: # To avoid circular imports: @@ -92,9 +93,14 @@ class Ratelimiter: # * The number of tokens currently in the bucket, # * The time point when the bucket was last completely empty, and # * The rate_hz (leak rate) of this particular bucket. - self.actions: dict[Hashable, tuple[float, float, float]] = {} + self.actions: dict[Hashable, tuple[int, float, float]] = {} - self.clock.looping_call(self._prune_message_counts, 60 * 1000) + # Records when actions should potentially be pruned. Note that we don't + # need to be accurate here, as this is just a cleanup job of `actions` + # and doesn't affect correctness. + self._timer: WheelTimer[Hashable] = WheelTimer() + + self.clock.looping_call(self._prune_message_counts, 15 * 1000) def _get_key( self, requester: Optional[Requester], key: Optional[Hashable] @@ -109,9 +115,9 @@ class Ratelimiter: def _get_action_counts( self, key: Hashable, time_now_s: float - ) -> tuple[float, float, float]: + ) -> tuple[int, float, float]: """Retrieve the action counts, with a fallback representing an empty bucket.""" - return self.actions.get(key, (0.0, time_now_s, 0.0)) + return self.actions.get(key, (0, time_now_s, self.rate_hz)) async def can_do_action( self, @@ -217,8 +223,11 @@ class Ratelimiter: allowed = True action_count = action_count + n_actions - if update: - self.actions[key] = (action_count, time_start, rate_hz) + # Only record the action if we're allowed to perform it. + if allowed and update: + self._record_action_inner( + key, action_count, time_start, rate_hz, time_now_s + ) if rate_hz > 0: # Find out when the count of existing actions expires @@ -264,7 +273,37 @@ class Ratelimiter: key = self._get_key(requester, key) time_now_s = _time_now_s if _time_now_s is not None else self.clock.time() action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s) - self.actions[key] = (action_count + n_actions, time_start, rate_hz) + self._record_action_inner( + key, action_count + n_actions, time_start, rate_hz, time_now_s + ) + + def _record_action_inner( + self, + key: Hashable, + action_count: int, + time_start: float, + rate_hz: float, + time_now_s: float, + ) -> None: + """Helper to atomically update the action count for a given key.""" + prune_time_s = time_start + action_count / rate_hz + + # If the prune time is in the past, we can just remove the entry rather + # than inserting and immediately pruning. 
+ if prune_time_s <= time_now_s: + self.actions.pop(key, None) + return + + self.actions[key] = (action_count, time_start, rate_hz) + + # We need to make sure that we only call prune *after* the entry + # expires, otherwise the scheduled prune may not actually prune it. This + # is just a cleanup job, so it doesn't matter if entries aren't pruned + # immediately after they expire. Hence we schedule the prune a little + # after the entry is due to expire. + prune_time_s += 0.1 + + self._timer.insert(int(time_now_s * 1000), key, int(prune_time_s * 1000)) def _prune_message_counts(self) -> None: """Remove message count entries that have not exceeded their defined @@ -272,18 +311,24 @@ class Ratelimiter: """ time_now_s = self.clock.time() - # We create a copy of the key list here as the dictionary is modified during - # the loop - for key in list(self.actions.keys()): - action_count, time_start, rate_hz = self.actions[key] + # Pull out all the keys that *might* need pruning. We still need to + # verify they haven't since been updated. + to_prune = self._timer.fetch(int(time_now_s * 1000)) + + for key in to_prune: + value = self.actions.get(key) + if value is None: + continue + + action_count, time_start, rate_hz = value # Rate limit = "seconds since we started limiting this action" * rate_hz # If this limit has not been exceeded, wipe our record of this action time_delta = time_now_s - time_start if action_count - time_delta * rate_hz > 0: continue - else: - del self.actions[key] + + del self.actions[key] async def ratelimit( self, diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index 2e45d4e4d2..34369a8746 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -228,6 +228,21 @@ class TestRatelimiter(unittest.HomeserverTestCase): self.assertNotIn("test_id_1", limiter.actions) + def test_pruning_record_action(self) -> None: + """Test that entries added by record_action also get pruned.""" + limiter = Ratelimiter( + store=self.hs.get_datastores().main, + clock=self.clock, + cfg=RatelimitSettings(key="", per_second=0.1, burst_count=1), + ) + limiter.record_action(None, key="test_id_1", n_actions=1, _time_now_s=0) + + self.assertIn("test_id_1", limiter.actions) + + self.reactor.advance(60) + + self.assertNotIn("test_id_1", limiter.actions) + def test_db_user_override(self) -> None: """Test that users that have ratelimiting disabled in the DB aren't ratelimited. diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 509f1f1e82..b1371d0ac7 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -462,7 +462,7 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): ) self.assertEqual(r[("m.room.member", joining_user)].membership, "join") - @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 3}}) + @override_config({"rc_joins_per_room": {"per_second": 0.1, "burst_count": 3}}) def test_make_join_respects_room_join_rate_limit(self) -> None: # In the test setup, two users join the room. Since the rate limiter burst # count is 3, a new make_join request to the room should be accepted. 
@@ -484,7 +484,7 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): ) self.assertEqual(channel.code, HTTPStatus.TOO_MANY_REQUESTS, channel.json_body) - @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 3}}) + @override_config({"rc_joins_per_room": {"per_second": 0.1, "burst_count": 3}}) def test_send_join_contributes_to_room_join_rate_limit_and_is_limited(self) -> None: # Make two make_join requests up front. (These are rate limited, but do not # contribute to the rate limit.) diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index 92c7c36602..8f9e27603e 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -50,7 +50,7 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase): self.intially_unjoined_room_id = f"!example:{self.OTHER_SERVER_NAME}" - @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}}) + @override_config({"rc_joins_per_room": {"per_second": 0.1, "burst_count": 2}}) def test_local_user_local_joins_contribute_to_limit_and_are_limited(self) -> None: # The rate limiter has accumulated one token from Alice's join after the create # event. @@ -76,7 +76,7 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase): by=0.5, ) - @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}}) + @override_config({"rc_joins_per_room": {"per_second": 0.1, "burst_count": 2}}) def test_local_user_profile_edits_dont_contribute_to_limit(self) -> None: # The rate limiter has accumulated one token from Alice's join after the create # event. Alice should still be able to change her displayname. @@ -100,7 +100,7 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase): ) ) - @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 1}}) + @override_config({"rc_joins_per_room": {"per_second": 0.1, "burst_count": 1}}) def test_remote_joins_contribute_to_rate_limit(self) -> None: # Join once, to fill the rate limiter bucket. 
# @@ -248,7 +248,7 @@ class TestReplicatedJoinsLimitedByPerRoomRateLimiter(BaseMultiWorkerStreamTestCa self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) self.intially_unjoined_room_id = "!example:otherhs" - @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}}) + @override_config({"rc_joins_per_room": {"per_second": 0.01, "burst_count": 2}}) def test_local_users_joining_on_another_worker_contribute_to_rate_limit( self, ) -> None: From 2760d153488109c0c694a2dc5151ef7c260c9b36 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 4 Nov 2025 13:34:46 +0000 Subject: [PATCH 126/149] 1.142.0rc1 --- CHANGES.md | 97 +++++++++++++++++++++++++++++++ changelog.d/19020.misc | 1 - changelog.d/19021.feature | 2 - changelog.d/19046.misc | 1 - changelog.d/19047.doc | 1 - changelog.d/19047.misc | 1 - changelog.d/19047.removal | 1 - changelog.d/19055.misc | 1 - changelog.d/19056.misc | 1 - changelog.d/19058.misc | 1 - changelog.d/19062.bugfix | 1 - changelog.d/19067.misc | 1 - changelog.d/19068.misc | 1 - changelog.d/19071.misc | 1 - changelog.d/19073.doc | 1 - changelog.d/19079.bugfix | 1 - changelog.d/19080.misc | 1 - changelog.d/19081.misc | 1 - changelog.d/19085.misc | 1 - changelog.d/19088.misc | 1 - changelog.d/19089.misc | 1 - changelog.d/19090.bugfix | 1 - changelog.d/19092.misc | 1 - changelog.d/19094.misc | 1 - changelog.d/19095.misc | 1 - changelog.d/19096.misc | 1 - changelog.d/19098.misc | 1 - changelog.d/19099.removal | 1 - changelog.d/19100.doc | 1 - changelog.d/19107.misc | 1 - changelog.d/19108.bugfix | 1 - changelog.d/19109.doc | 1 - changelog.d/19110.misc | 1 - changelog.d/19116.misc | 1 - changelog.d/19118.misc | 1 - changelog.d/19121.misc | 1 - changelog.d/19129.misc | 1 - changelog.d/19131.misc | 1 - changelog.d/19134.bugfix | 1 - debian/changelog | 6 ++ pyproject.toml | 2 +- schema/synapse-config.schema.yaml | 2 +- 42 files changed, 105 insertions(+), 41 deletions(-) delete mode 100644 changelog.d/19020.misc delete mode 100644 changelog.d/19021.feature delete mode 100644 changelog.d/19046.misc delete mode 100644 changelog.d/19047.doc delete mode 100644 changelog.d/19047.misc delete mode 100644 changelog.d/19047.removal delete mode 100644 changelog.d/19055.misc delete mode 100644 changelog.d/19056.misc delete mode 100644 changelog.d/19058.misc delete mode 100644 changelog.d/19062.bugfix delete mode 100644 changelog.d/19067.misc delete mode 100644 changelog.d/19068.misc delete mode 100644 changelog.d/19071.misc delete mode 100644 changelog.d/19073.doc delete mode 100644 changelog.d/19079.bugfix delete mode 100644 changelog.d/19080.misc delete mode 100644 changelog.d/19081.misc delete mode 100644 changelog.d/19085.misc delete mode 100644 changelog.d/19088.misc delete mode 100644 changelog.d/19089.misc delete mode 100644 changelog.d/19090.bugfix delete mode 100644 changelog.d/19092.misc delete mode 100644 changelog.d/19094.misc delete mode 100644 changelog.d/19095.misc delete mode 100644 changelog.d/19096.misc delete mode 100644 changelog.d/19098.misc delete mode 100644 changelog.d/19099.removal delete mode 100644 changelog.d/19100.doc delete mode 100644 changelog.d/19107.misc delete mode 100644 changelog.d/19108.bugfix delete mode 100644 changelog.d/19109.doc delete mode 100644 changelog.d/19110.misc delete mode 100644 changelog.d/19116.misc delete mode 100644 changelog.d/19118.misc delete mode 100644 changelog.d/19121.misc delete mode 100644 changelog.d/19129.misc delete mode 100644 changelog.d/19131.misc delete mode 100644 
changelog.d/19134.bugfix diff --git a/CHANGES.md b/CHANGES.md index eead7e35cd..2578bcdbc3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,100 @@ +# Synapse 1.142.0rc1 (2025-11-04) + +## Dropped support for Python 3.9 + +This release drops support for Python 3.9, in line with our [dependency +deprecation +policy](https://element-hq.github.io/synapse/latest/deprecation_policy.html#platform-dependencies), +as it is now [end of life](https://endoflife.date/python). + + +## Deprecation of MacOS Python wheels + +The team has decided to deprecate and eventually stop publishing python wheels +for MacOS. This is a burden on the team, and we're not aware of any parties +that use them. Synapse docker images will continue to work on MacOS, as will +building Synapse from source (though note this requires a Rust compiler). + +At present, publishing MacOS Python wheels will continue for the next release +(1.143.0), but will not be available after that (1.144.0+). If you do make use +of these wheels downstream, please reach out to us in +[#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org). We'd +love to hear from you! + +## Features + +- Add support for Python 3.14. ([\#19055](https://github.com/element-hq/synapse/issues/19055), [\#19134](https://github.com/element-hq/synapse/issues/19134)) +- Add an [Admin API](https://element-hq.github.io/synapse/latest/usage/administration/admin_api/index.html) + to allow an admin to fetch the space/room hierarchy for a given space. ([\#19021](https://github.com/element-hq/synapse/issues/19021)) + +## Bugfixes + +- Fix a bug introduced in 1.111.0 where failed attempts to download authenticated remote media would not be handled correctly. ([\#19062](https://github.com/element-hq/synapse/issues/19062)) +- Update the `oidc_session_no_samesite` cookie to have the `Secure` attribute, so the only difference between it and the paired `oidc_session` cookie, is the configuration of the `SameSite` attribute as described in the comments / cookie names. Contributed by @kieranlane. ([\#19079](https://github.com/element-hq/synapse/issues/19079)) +- Fix a bug introduced in 1.140.0 where lost logcontext warnings would be emitted from timeouts in sync and requests made by Synapse itself. ([\#19090](https://github.com/element-hq/synapse/issues/19090)) +- Fix a bug introdued in 1.140.0 where lost logcontext warning were emitted when using `HomeServer.shutdown()`. ([\#19108](https://github.com/element-hq/synapse/issues/19108)) + +## Improved Documentation + +- Update the link to the Debian oldstable package for SQLite. ([\#19047](https://github.com/element-hq/synapse/issues/19047)) +- Point out additional Redis configuration options available in the worker docs. Contributed by @servisbryce. ([\#19073](https://github.com/element-hq/synapse/issues/19073)) +- Update the list of Debian releases that the downstream Debian package is maintained for. ([\#19100](https://github.com/element-hq/synapse/issues/19100)) +- Add [a page](https://element-hq.github.io/synapse/latest/development/internal_documentation/release_notes_review_checklist.html) to the documentation describing the steps the Synapse team takes to review the release notes before publishing them. ([\#19109](https://github.com/element-hq/synapse/issues/19109)) + +## Deprecations and Removals + +- Drop support for Python 3.9. ([\#19099](https://github.com/element-hq/synapse/issues/19099)) +- Remove support for SQLite < 3.37.2. 
([\#19047](https://github.com/element-hq/synapse/issues/19047)) + +## Internal Changes + +- Fix CI linter for schema delta files to correctly handle all types of `CREATE TABLE` syntax. ([\#19020](https://github.com/element-hq/synapse/issues/19020)) +- Use type hinting generics in standard collections, as per [PEP 585](https://peps.python.org/pep-0585/), added in Python 3.9. ([\#19046](https://github.com/element-hq/synapse/issues/19046)) +- Always treat `RETURNING` as supported by SQL engines, now that the minimum-supported versions of both SQLite and PostgreSQL support it. ([\#19047](https://github.com/element-hq/synapse/issues/19047)) +- Move `oidc.load_metadata()` startup into `_base.start()`. ([\#19056](https://github.com/element-hq/synapse/issues/19056)) +- Remove logcontext problems caused by awaiting raw `deferLater(...)`. ([\#19058](https://github.com/element-hq/synapse/issues/19058)) +- Prevent duplicate logging setup when running multiple Synapse instances. ([\#19067](https://github.com/element-hq/synapse/issues/19067)) +- Be mindful of other logging context filters in 3rd-party code and avoid overwriting log record fields unless we know the log record is relevant to Synapse. ([\#19068](https://github.com/element-hq/synapse/issues/19068)) +- Update pydantic to v2. ([\#19071](https://github.com/element-hq/synapse/issues/19071)) +- Update deprecated code in the release script to prevent a warning message from being printed. ([\#19080](https://github.com/element-hq/synapse/issues/19080)) +- Update the deprecated poetry development dependencies group name in `pyproject.toml`. ([\#19081](https://github.com/element-hq/synapse/issues/19081)) +- Remove `pp38*` skip selector from cibuildwheel to silence warning. ([\#19085](https://github.com/element-hq/synapse/issues/19085)) +- Don't immediately exit the release script if the checkout is dirty. Instead, allow the user to clear the dirty changes and retry. ([\#19088](https://github.com/element-hq/synapse/issues/19088)) +- Update the release script's generated announcement text to include a title and extra text for RC's. ([\#19089](https://github.com/element-hq/synapse/issues/19089)) +- Fix lints on main branch. ([\#19092](https://github.com/element-hq/synapse/issues/19092)) +- Use cheaper random string function in logcontext utilities. ([\#19094](https://github.com/element-hq/synapse/issues/19094)) +- Avoid clobbering other `SIGHUP` handlers in 3rd-party code. ([\#19095](https://github.com/element-hq/synapse/issues/19095)) +- Prevent duplicate GitHub draft releases being created during the Synapse release process. ([\#19096](https://github.com/element-hq/synapse/issues/19096)) +- Use Pillow's `Image.getexif` method instead of the experimental `Image._getexif`. ([\#19098](https://github.com/element-hq/synapse/issues/19098)) +- Prevent uv `/usr/local/.lock` file from appearing in built Synapse docker images. ([\#19107](https://github.com/element-hq/synapse/issues/19107)) +- Allow Synapse's runtime dependency checking code to take packaging markers (i.e. `python <= 3.14`) into account when checking dependencies. ([\#19110](https://github.com/element-hq/synapse/issues/19110)) +- Move exception handling up the stack (avoid `exit(1)` in our composable functions). ([\#19116](https://github.com/element-hq/synapse/issues/19116)) +- Fix a lint error related to lifetimes in Rust 1.90. ([\#19118](https://github.com/element-hq/synapse/issues/19118)) +- Refactor and align app entrypoints (avoid `exit(1)` in our composable functions). 
([\#19121](https://github.com/element-hq/synapse/issues/19121), [\#19131](https://github.com/element-hq/synapse/issues/19131)) +- Speed up pruning of ratelimiters. ([\#19129](https://github.com/element-hq/synapse/issues/19129)) + + + +### Updates to locked dependencies + +* Bump actions/download-artifact from 5.0.0 to 6.0.0. ([\#19102](https://github.com/element-hq/synapse/issues/19102)) +* Bump actions/upload-artifact from 4 to 5. ([\#19106](https://github.com/element-hq/synapse/issues/19106)) +* Bump hiredis from 3.2.1 to 3.3.0. ([\#19103](https://github.com/element-hq/synapse/issues/19103)) +* Bump icu_segmenter from 2.0.0 to 2.0.1. ([\#19126](https://github.com/element-hq/synapse/issues/19126)) +* Bump idna from 3.10 to 3.11. ([\#19053](https://github.com/element-hq/synapse/issues/19053)) +* Bump ijson from 3.4.0 to 3.4.0.post0. ([\#19051](https://github.com/element-hq/synapse/issues/19051)) +* Bump markdown-it-py from 3.0.0 to 4.0.0. ([\#19123](https://github.com/element-hq/synapse/issues/19123)) +* Bump msgpack from 1.1.1 to 1.1.2. ([\#19050](https://github.com/element-hq/synapse/issues/19050)) +* Bump psycopg2 from 2.9.10 to 2.9.11. ([\#19125](https://github.com/element-hq/synapse/issues/19125)) +* Bump pyyaml from 6.0.2 to 6.0.3. ([\#19105](https://github.com/element-hq/synapse/issues/19105)) +* Bump regex from 1.11.3 to 1.12.2. ([\#19074](https://github.com/element-hq/synapse/issues/19074)) +* Bump reqwest from 0.12.23 to 0.12.24. ([\#19077](https://github.com/element-hq/synapse/issues/19077)) +* Bump ruff from 0.12.10 to 0.14.3. ([\#19124](https://github.com/element-hq/synapse/issues/19124)) +* Bump sigstore/cosign-installer from 3.10.0 to 4.0.0. ([\#19075](https://github.com/element-hq/synapse/issues/19075)) +* Bump stefanzweifel/git-auto-commit-action from 6.0.1 to 7.0.0. ([\#19052](https://github.com/element-hq/synapse/issues/19052)) +* Bump tokio from 1.47.1 to 1.48.0. ([\#19076](https://github.com/element-hq/synapse/issues/19076)) +* Bump types-psycopg2 from 2.9.21.20250915 to 2.9.21.20251012. ([\#19054](https://github.com/element-hq/synapse/issues/19054)) + # Synapse 1.141.0 (2025-10-29) ## Deprecation of MacOS Python wheels diff --git a/changelog.d/19020.misc b/changelog.d/19020.misc deleted file mode 100644 index f5775ff194..0000000000 --- a/changelog.d/19020.misc +++ /dev/null @@ -1 +0,0 @@ -Fix CI linter for schema delta files to correctly handle all types of `CREATE TABLE` syntax. diff --git a/changelog.d/19021.feature b/changelog.d/19021.feature deleted file mode 100644 index dea4748769..0000000000 --- a/changelog.d/19021.feature +++ /dev/null @@ -1,2 +0,0 @@ -Add an [Admin API](https://element-hq.github.io/synapse/latest/usage/administration/admin_api/index.html) -to allow an admin to fetch the space/room hierarchy for a given space. \ No newline at end of file diff --git a/changelog.d/19046.misc b/changelog.d/19046.misc deleted file mode 100644 index 4013804f7f..0000000000 --- a/changelog.d/19046.misc +++ /dev/null @@ -1 +0,0 @@ -Use type hinting generics in standard collections, as per PEP 585, added in Python 3.9. diff --git a/changelog.d/19047.doc b/changelog.d/19047.doc deleted file mode 100644 index fee241f2a5..0000000000 --- a/changelog.d/19047.doc +++ /dev/null @@ -1 +0,0 @@ -Update the link to the Debian oldstable package for SQLite. 
diff --git a/changelog.d/19047.misc b/changelog.d/19047.misc deleted file mode 100644 index 47f686a158..0000000000 --- a/changelog.d/19047.misc +++ /dev/null @@ -1 +0,0 @@ -Always treat `RETURNING` as supported by SQL engines, now that the minimum-supported versions of both SQLite and PostgreSQL support it. diff --git a/changelog.d/19047.removal b/changelog.d/19047.removal deleted file mode 100644 index da7a161868..0000000000 --- a/changelog.d/19047.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for SQLite < 3.37.2. diff --git a/changelog.d/19055.misc b/changelog.d/19055.misc deleted file mode 100644 index 61e626cc9b..0000000000 --- a/changelog.d/19055.misc +++ /dev/null @@ -1 +0,0 @@ -Add support for Python 3.14. \ No newline at end of file diff --git a/changelog.d/19056.misc b/changelog.d/19056.misc deleted file mode 100644 index f3a1b4e66e..0000000000 --- a/changelog.d/19056.misc +++ /dev/null @@ -1 +0,0 @@ -Move `oidc.load_metadata()` startup into `_base.start()`. diff --git a/changelog.d/19058.misc b/changelog.d/19058.misc deleted file mode 100644 index 15bc4b39bd..0000000000 --- a/changelog.d/19058.misc +++ /dev/null @@ -1 +0,0 @@ -Remove logcontext problems caused by awaiting raw `deferLater(...)`. diff --git a/changelog.d/19062.bugfix b/changelog.d/19062.bugfix deleted file mode 100644 index c5231cbbc8..0000000000 --- a/changelog.d/19062.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.111.0 where failed attempts to download authenticated remote media would not be handled correctly. \ No newline at end of file diff --git a/changelog.d/19067.misc b/changelog.d/19067.misc deleted file mode 100644 index 560fbfc668..0000000000 --- a/changelog.d/19067.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent duplicate logging setup when running multiple Synapse instances. diff --git a/changelog.d/19068.misc b/changelog.d/19068.misc deleted file mode 100644 index 9e5c34b608..0000000000 --- a/changelog.d/19068.misc +++ /dev/null @@ -1 +0,0 @@ -Be mindful of other logging context filters in 3rd-party code and avoid overwriting log record fields unless we know the log record is relevant to Synapse. diff --git a/changelog.d/19071.misc b/changelog.d/19071.misc deleted file mode 100644 index d0930f339b..0000000000 --- a/changelog.d/19071.misc +++ /dev/null @@ -1 +0,0 @@ -Update pydantic to v2. \ No newline at end of file diff --git a/changelog.d/19073.doc b/changelog.d/19073.doc deleted file mode 100644 index 6bbaaba99e..0000000000 --- a/changelog.d/19073.doc +++ /dev/null @@ -1 +0,0 @@ -Point out additional Redis configuration options available in the worker docs. Contributed by @servisbryce. diff --git a/changelog.d/19079.bugfix b/changelog.d/19079.bugfix deleted file mode 100644 index a7d9800d1d..0000000000 --- a/changelog.d/19079.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix the `oidc_session_no_samesite` cookie to have the `Secure` attribute, so the only difference between it and the paired `oidc_session` cookie, is the configuration of the `SameSite` attribute as described in the comments / cookie names. Contributed by @kieranlane. \ No newline at end of file diff --git a/changelog.d/19080.misc b/changelog.d/19080.misc deleted file mode 100644 index c738be3fe9..0000000000 --- a/changelog.d/19080.misc +++ /dev/null @@ -1 +0,0 @@ -Update deprecated code in the release script to prevent a warning message from being printed. 
\ No newline at end of file diff --git a/changelog.d/19081.misc b/changelog.d/19081.misc deleted file mode 100644 index 8518840fb6..0000000000 --- a/changelog.d/19081.misc +++ /dev/null @@ -1 +0,0 @@ -Update the deprecated poetry development dependencies group name in `pyproject.toml`. \ No newline at end of file diff --git a/changelog.d/19085.misc b/changelog.d/19085.misc deleted file mode 100644 index d48fad9d5d..0000000000 --- a/changelog.d/19085.misc +++ /dev/null @@ -1 +0,0 @@ -Remove `pp38*` skip selector from cibuildwheel to silence warning. \ No newline at end of file diff --git a/changelog.d/19088.misc b/changelog.d/19088.misc deleted file mode 100644 index 3224b3697d..0000000000 --- a/changelog.d/19088.misc +++ /dev/null @@ -1 +0,0 @@ -Don't immediately exit the release script if the checkout is dirty. Instead, allow the user to clear the dirty changes and retry. \ No newline at end of file diff --git a/changelog.d/19089.misc b/changelog.d/19089.misc deleted file mode 100644 index 81c8775fd0..0000000000 --- a/changelog.d/19089.misc +++ /dev/null @@ -1 +0,0 @@ -Update the release script's generated announcement text to include a title and extra text for RC's. \ No newline at end of file diff --git a/changelog.d/19090.bugfix b/changelog.d/19090.bugfix deleted file mode 100644 index 077dafcbf8..0000000000 --- a/changelog.d/19090.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix lost logcontext warnings from timeouts in sync and requests made by Synapse itself. diff --git a/changelog.d/19092.misc b/changelog.d/19092.misc deleted file mode 100644 index c5060c1c8b..0000000000 --- a/changelog.d/19092.misc +++ /dev/null @@ -1 +0,0 @@ -Fix lints on main branch. diff --git a/changelog.d/19094.misc b/changelog.d/19094.misc deleted file mode 100644 index 0d38d17483..0000000000 --- a/changelog.d/19094.misc +++ /dev/null @@ -1 +0,0 @@ -Use cheaper random string function in logcontext utilities. diff --git a/changelog.d/19095.misc b/changelog.d/19095.misc deleted file mode 100644 index c9949c9cb5..0000000000 --- a/changelog.d/19095.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid clobbering other `SIGHUP` handlers in 3rd-party code. diff --git a/changelog.d/19096.misc b/changelog.d/19096.misc deleted file mode 100644 index 0b7bdf0967..0000000000 --- a/changelog.d/19096.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent duplicate GitHub draft releases being created during the Synapse release process. \ No newline at end of file diff --git a/changelog.d/19098.misc b/changelog.d/19098.misc deleted file mode 100644 index a6933348a3..0000000000 --- a/changelog.d/19098.misc +++ /dev/null @@ -1 +0,0 @@ -Use Pillow's `Image.getexif` method instead of the experimental `Image._getexif`. diff --git a/changelog.d/19099.removal b/changelog.d/19099.removal deleted file mode 100644 index 8279a1c7f9..0000000000 --- a/changelog.d/19099.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for Python 3.9. diff --git a/changelog.d/19100.doc b/changelog.d/19100.doc deleted file mode 100644 index a723f34c4f..0000000000 --- a/changelog.d/19100.doc +++ /dev/null @@ -1 +0,0 @@ -Update the list of Debian releases that the downstream Debian package is maintained for. diff --git a/changelog.d/19107.misc b/changelog.d/19107.misc deleted file mode 100644 index 38cb9a9b3b..0000000000 --- a/changelog.d/19107.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent uv `/usr/local/.lock` file from appearing in built Synapse docker images. 
\ No newline at end of file diff --git a/changelog.d/19108.bugfix b/changelog.d/19108.bugfix deleted file mode 100644 index a2afe19f41..0000000000 --- a/changelog.d/19108.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix lost logcontext when using `HomeServer.shutdown()`. diff --git a/changelog.d/19109.doc b/changelog.d/19109.doc deleted file mode 100644 index 4cce54b486..0000000000 --- a/changelog.d/19109.doc +++ /dev/null @@ -1 +0,0 @@ -Add [a page](https://element-hq.github.io/synapse/latest/development/internal_documentation/release_notes_review_checklist.html) to the documentation describing the steps the Synapse team takes to review the release notes before publishing them. \ No newline at end of file diff --git a/changelog.d/19110.misc b/changelog.d/19110.misc deleted file mode 100644 index dc45eef17c..0000000000 --- a/changelog.d/19110.misc +++ /dev/null @@ -1 +0,0 @@ -Allow Synapse's runtime dependency checking code to take packaging markers (i.e. `python <= 3.14`) into account when checking dependencies. \ No newline at end of file diff --git a/changelog.d/19116.misc b/changelog.d/19116.misc deleted file mode 100644 index 2291d0781a..0000000000 --- a/changelog.d/19116.misc +++ /dev/null @@ -1 +0,0 @@ -Move exception handling up the stack (avoid `exit(1)` in our composable functions). diff --git a/changelog.d/19118.misc b/changelog.d/19118.misc deleted file mode 100644 index 672ed45573..0000000000 --- a/changelog.d/19118.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a lint error related to lifetimes in Rust 1.90. \ No newline at end of file diff --git a/changelog.d/19121.misc b/changelog.d/19121.misc deleted file mode 100644 index cb1fb8f024..0000000000 --- a/changelog.d/19121.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor and align app entrypoints (avoid `exit(1)` in our composable functions). diff --git a/changelog.d/19129.misc b/changelog.d/19129.misc deleted file mode 100644 index 117dbfadea..0000000000 --- a/changelog.d/19129.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up pruning of ratelimiters. diff --git a/changelog.d/19131.misc b/changelog.d/19131.misc deleted file mode 100644 index cb1fb8f024..0000000000 --- a/changelog.d/19131.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor and align app entrypoints (avoid `exit(1)` in our composable functions). diff --git a/changelog.d/19134.bugfix b/changelog.d/19134.bugfix deleted file mode 100644 index 61e626cc9b..0000000000 --- a/changelog.d/19134.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add support for Python 3.14. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 14278968a8..78c3a9e54c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.142.0~rc1) stable; urgency=medium + + * New Synapse release 1.142.0rc1. + + -- Synapse Packaging team Tue, 04 Nov 2025 13:20:15 +0000 + matrix-synapse-py3 (1.141.0) stable; urgency=medium * New Synapse release 1.141.0. 
diff --git a/pyproject.toml b/pyproject.toml index f530666e45..25a9bfb746 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -107,7 +107,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.141.0" +version = "1.142.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial" diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml index 75a9a0aac5..98204a724c 100644 --- a/schema/synapse-config.schema.yaml +++ b/schema/synapse-config.schema.yaml @@ -1,5 +1,5 @@ $schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json -$id: https://element-hq.github.io/synapse/schema/synapse/v1.141/synapse-config.schema.json +$id: https://element-hq.github.io/synapse/schema/synapse/v1.142/synapse-config.schema.json type: object properties: modules: From b2237ff4f1b12b813d872252670b02e076d357ae Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 4 Nov 2025 13:40:58 +0000 Subject: [PATCH 127/149] Add sqlite deprecation to changelog and upgrade notes --- CHANGES.md | 8 ++++++++ docs/upgrade.md | 10 +++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 2578bcdbc3..a7369d0159 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,14 @@ deprecation policy](https://element-hq.github.io/synapse/latest/deprecation_policy.html#platform-dependencies), as it is now [end of life](https://endoflife.date/python). +## SQLite 3.40.0+ is now required. + +The minimum supported SQLite version has been increased from 3.27.0 to 3.40.0. + +If you use current versions of the +[matrixorg/synapse](setup/installation.html#docker-images-and-ansible-playbooks) +Docker images, no action is required. + ## Deprecation of MacOS Python wheels diff --git a/docs/upgrade.md b/docs/upgrade.md index faf6cbf8dc..d38d07ca81 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -119,7 +119,7 @@ stacking them up. You can monitor the currently running background updates with # Upgrading to v1.142.0 -## Minimum supported Python version +## Python 3.10+ is now required The minimum supported Python version has been increased from v3.9 to v3.10. You will need Python 3.10+ to run Synapse v1.142.0. @@ -128,6 +128,14 @@ If you use current versions of the [matrixorg/synapse](setup/installation.html#docker-images-and-ansible-playbooks) Docker images, no action is required. +## SQLite 3.40.0+ is now required. + +The minimum supported SQLite version has been increased from 3.27.0 to 3.40.0. + +If you use current versions of the +[matrixorg/synapse](setup/installation.html#docker-images-and-ansible-playbooks) +Docker images, no action is required. + # Upgrading to v1.141.0 From d888126372ed171e5795a753fc6b5ba99bd5005e Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 4 Nov 2025 14:05:51 +0000 Subject: [PATCH 128/149] Drop period from title --- CHANGES.md | 2 +- docs/upgrade.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index a7369d0159..298b134014 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,7 +7,7 @@ deprecation policy](https://element-hq.github.io/synapse/latest/deprecation_policy.html#platform-dependencies), as it is now [end of life](https://endoflife.date/python). -## SQLite 3.40.0+ is now required. +## SQLite 3.40.0+ is now required The minimum supported SQLite version has been increased from 3.27.0 to 3.40.0. 
diff --git a/docs/upgrade.md b/docs/upgrade.md index d38d07ca81..b3121a01a0 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -128,7 +128,7 @@ If you use current versions of the [matrixorg/synapse](setup/installation.html#docker-images-and-ansible-playbooks) Docker images, no action is required. -## SQLite 3.40.0+ is now required. +## SQLite 3.40.0+ is now required The minimum supported SQLite version has been increased from 3.27.0 to 3.40.0. From 4bbde142dc5837619748ae900f60c6b9d068de71 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 4 Nov 2025 17:20:01 +0100 Subject: [PATCH 129/149] Skip building Python 3.9 wheels with cibuildwheel (#19119) --- .github/workflows/release-artifacts.yml | 6 ++++-- changelog.d/19119.misc | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/19119.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 4e38c0f35b..7458d64726 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -141,7 +141,7 @@ jobs: python-version: "3.x" - name: Install cibuildwheel - run: python -m pip install cibuildwheel==3.0.0 + run: python -m pip install cibuildwheel==3.2.1 - name: Only build a single wheel on PR if: startsWith(github.ref, 'refs/pull/') @@ -152,7 +152,9 @@ jobs: env: # Skip testing for platforms which various libraries don't have wheels # for, and so need extra build deps. - CIBW_TEST_SKIP: pp3*-* *i686* *musl* + # + # cp39-*: Python 3.9 is EOL. + CIBW_TEST_SKIP: pp3*-* cp39-* *i686* *musl* - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: diff --git a/changelog.d/19119.misc b/changelog.d/19119.misc new file mode 100644 index 0000000000..93f512ae7e --- /dev/null +++ b/changelog.d/19119.misc @@ -0,0 +1 @@ +Manually skip building Python 3.9 wheels, to prevent errors in the release workflow. \ No newline at end of file From 5d71034f816a22c98b9af2c2c71f4ea6ee581190 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 4 Nov 2025 16:21:50 +0000 Subject: [PATCH 130/149] 1.142.0rc2 --- CHANGES.md | 12 +++++++++++- changelog.d/19119.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 18 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/19119.misc diff --git a/CHANGES.md b/CHANGES.md index 298b134014..3b78672fdc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,4 @@ -# Synapse 1.142.0rc1 (2025-11-04) +# Synapse 1.142.0rc2 (2025-11-04) ## Dropped support for Python 3.9 @@ -29,6 +29,16 @@ of these wheels downstream, please reach out to us in [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org). We'd love to hear from you! + +## Internal Changes + +- Manually skip building Python 3.9 wheels, to prevent errors in the release workflow. ([\#19119](https://github.com/element-hq/synapse/issues/19119)) + + + + +# Synapse 1.142.0rc1 (2025-11-04) + ## Features - Add support for Python 3.14. ([\#19055](https://github.com/element-hq/synapse/issues/19055), [\#19134](https://github.com/element-hq/synapse/issues/19134)) diff --git a/changelog.d/19119.misc b/changelog.d/19119.misc deleted file mode 100644 index 93f512ae7e..0000000000 --- a/changelog.d/19119.misc +++ /dev/null @@ -1 +0,0 @@ -Manually skip building Python 3.9 wheels, to prevent errors in the release workflow. 
\ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 78c3a9e54c..764315d66a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.142.0~rc2) stable; urgency=medium + + * New Synapse release 1.142.0rc2. + + -- Synapse Packaging team Tue, 04 Nov 2025 16:21:30 +0000 + matrix-synapse-py3 (1.142.0~rc1) stable; urgency=medium * New Synapse release 1.142.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 25a9bfb746..8b63fae048 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -107,7 +107,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.142.0rc1" +version = "1.142.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial" From 0cbb2a15e0dbaad0aa528b289fc7afa462854ae2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 4 Nov 2025 18:38:25 +0100 Subject: [PATCH 131/149] Don't build free-threaded wheels (#19140) Fixes https://github.com/element-hq/synapse/issues/19139. --- .github/workflows/release-artifacts.yml | 3 ++- changelog.d/19140.misc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19140.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 7458d64726..d346aeb597 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -154,7 +154,8 @@ jobs: # for, and so need extra build deps. # # cp39-*: Python 3.9 is EOL. - CIBW_TEST_SKIP: pp3*-* cp39-* *i686* *musl* + # cp3??t-*: Free-threaded builds are not currently supported. + CIBW_TEST_SKIP: pp3*-* cp39-* cp3??t-* *i686* *musl* - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: diff --git a/changelog.d/19140.misc b/changelog.d/19140.misc new file mode 100644 index 0000000000..b4ae41c457 --- /dev/null +++ b/changelog.d/19140.misc @@ -0,0 +1 @@ +Update release scripts to prevent building wheels for free-threaded Python, as Synapse does not currently support it. \ No newline at end of file From 2fd8d88b424bf003ce8cb2ec74ac5b48ebff0cd8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 4 Nov 2025 17:39:28 +0000 Subject: [PATCH 132/149] 1.142.0rc3 --- CHANGES.md | 10 +++++++++- changelog.d/19140.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/19140.misc diff --git a/CHANGES.md b/CHANGES.md index 3b78672fdc..ab9b72e2a8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,4 @@ -# Synapse 1.142.0rc2 (2025-11-04) +# Synapse 1.142.0rc3 (2025-11-04) ## Dropped support for Python 3.9 @@ -30,6 +30,14 @@ of these wheels downstream, please reach out to us in love to hear from you! +## Internal Changes + +- Update release scripts to prevent building wheels for free-threaded Python, as Synapse does not currently support it. ([\#19140](https://github.com/element-hq/synapse/issues/19140)) + + +# Synapse 1.142.0rc2 (2025-11-04) + + ## Internal Changes - Manually skip building Python 3.9 wheels, to prevent errors in the release workflow. 
([\#19119](https://github.com/element-hq/synapse/issues/19119)) diff --git a/changelog.d/19140.misc b/changelog.d/19140.misc deleted file mode 100644 index b4ae41c457..0000000000 --- a/changelog.d/19140.misc +++ /dev/null @@ -1 +0,0 @@ -Update release scripts to prevent building wheels for free-threaded Python, as Synapse does not currently support it. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 764315d66a..0dae012858 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.142.0~rc3) stable; urgency=medium + + * New Synapse release 1.142.0rc3. + + -- Synapse Packaging team Tue, 04 Nov 2025 17:39:11 +0000 + matrix-synapse-py3 (1.142.0~rc2) stable; urgency=medium * New Synapse release 1.142.0rc2. diff --git a/pyproject.toml b/pyproject.toml index 8b63fae048..991cb3e7f3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -107,7 +107,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.142.0rc2" +version = "1.142.0rc3" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial" From 4906771da1919fb69a37468317c12ebbef23fc8e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Nov 2025 13:42:04 +0000 Subject: [PATCH 133/149] Faster redis replication handling (#19138) Spawning a background process comes with a bunch of overhead, so let's try to reduce the number of background processes we need to spawn when handling inbound fed. Currently, we seem to be doing roughly one per command. Instead, lets keep the background process alive for a bit waiting for a new command to come in. --- changelog.d/19138.misc | 1 + synapse/replication/tcp/handler.py | 50 ++++------ synapse/util/bacckground_queue.py | 139 ++++++++++++++++++++++++++++ tests/util/test_background_queue.py | 105 +++++++++++++++++++++ 4 files changed, 262 insertions(+), 33 deletions(-) create mode 100644 changelog.d/19138.misc create mode 100644 synapse/util/bacckground_queue.py create mode 100644 tests/util/test_background_queue.py diff --git a/changelog.d/19138.misc b/changelog.d/19138.misc new file mode 100644 index 0000000000..1183361737 --- /dev/null +++ b/changelog.d/19138.misc @@ -0,0 +1 @@ +Minor speed up of processing of inbound replication. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index bd1ee5ff9d..ed7cff72b6 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -20,7 +20,6 @@ # # import logging -from collections import deque from typing import ( TYPE_CHECKING, Any, @@ -71,6 +70,7 @@ from synapse.replication.tcp.streams._base import ( DeviceListsStream, ThreadSubscriptionsStream, ) +from synapse.util.bacckground_queue import BackgroundQueue if TYPE_CHECKING: from synapse.server import HomeServer @@ -115,8 +115,8 @@ tcp_command_queue_gauge = LaterGauge( # the type of the entries in _command_queues_by_stream -_StreamCommandQueue = deque[ - tuple[Union[RdataCommand, PositionCommand], IReplicationConnection] +_StreamCommandQueueItem = tuple[ + Union[RdataCommand, PositionCommand], IReplicationConnection ] @@ -265,7 +265,12 @@ class ReplicationCommandHandler: # for each stream, a queue of commands that are awaiting processing, and the # connection that they arrived on. 
self._command_queues_by_stream = { - stream_name: _StreamCommandQueue() for stream_name in self._streams + stream_name: BackgroundQueue[_StreamCommandQueueItem]( + hs, + "process-replication-data", + self._unsafe_process_item, + ) + for stream_name in self._streams } # For each connection, the incoming stream names that have received a POSITION @@ -349,38 +354,17 @@ class ReplicationCommandHandler: logger.error("Got %s for unknown stream: %s", cmd.NAME, stream_name) return - queue.append((cmd, conn)) + queue.add((cmd, conn)) - # if we're already processing this stream, there's nothing more to do: - # the new entry on the queue will get picked up in due course - if stream_name in self._processing_streams: - return + async def _unsafe_process_item(self, item: _StreamCommandQueueItem) -> None: + """Process a single command from the stream queue. - # fire off a background process to start processing the queue. - self.hs.run_as_background_process( - "process-replication-data", - self._unsafe_process_queue, - stream_name, - ) - - async def _unsafe_process_queue(self, stream_name: str) -> None: - """Processes the command queue for the given stream, until it is empty - - Does not check if there is already a thread processing the queue, hence "unsafe" + This should only be called one at a time per stream, and is called from + the stream's BackgroundQueue. """ - assert stream_name not in self._processing_streams - - self._processing_streams.add(stream_name) - try: - queue = self._command_queues_by_stream.get(stream_name) - while queue: - cmd, conn = queue.popleft() - try: - await self._process_command(cmd, conn, stream_name) - except Exception: - logger.exception("Failed to handle command %s", cmd) - finally: - self._processing_streams.discard(stream_name) + cmd, conn = item + stream_name = cmd.stream_name + await self._process_command(cmd, conn, stream_name) async def _process_command( self, diff --git a/synapse/util/bacckground_queue.py b/synapse/util/bacckground_queue.py new file mode 100644 index 0000000000..daf6a94842 --- /dev/null +++ b/synapse/util/bacckground_queue.py @@ -0,0 +1,139 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 Element Creations Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +# +# + +import collections +import logging +from typing import ( + TYPE_CHECKING, + Awaitable, + Callable, + Generic, + Optional, + TypeVar, +) + +from synapse.util.async_helpers import DeferredEvent +from synapse.util.constants import MILLISECONDS_PER_SECOND + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + +T = TypeVar("T") + + +class BackgroundQueue(Generic[T]): + """A single-producer single-consumer async queue processing items in the + background. + + This is optimised for the case where we receive many items, but processing + each one takes a short amount of time. In this case we don't want to pay the + overhead of a new background process each time. Instead, we spawn a + background process that will wait for new items to arrive. + + If the background process has been idle for a while, it will exit, and a new + background process will be spawned when new items arrive. 
+ + Args: + hs: The homeserver. + name: The name of the background process. + callback: The async callback to process each item. + timeout_ms: The time in milliseconds to wait for new items before + exiting the background process. + """ + + def __init__( + self, + hs: "HomeServer", + name: str, + callback: Callable[[T], Awaitable[None]], + timeout_ms: int = 1000, + ) -> None: + self._hs = hs + self._name = name + self._callback = callback + self._timeout_ms = timeout_ms + + # The queue of items to process. + self._queue: collections.deque[T] = collections.deque() + + # Indicates if a background process is running, and if so whether there + # is new data in the queue. Used to signal to an existing background + # process that there is new data added to the queue. + self._wakeup_event: Optional[DeferredEvent] = None + + def add(self, item: T) -> None: + """Add an item into the queue.""" + + self._queue.append(item) + if self._wakeup_event is None: + self._hs.run_as_background_process(self._name, self._process_queue) + else: + self._wakeup_event.set() + + async def _process_queue(self) -> None: + """Process items in the queue until it is empty.""" + + # Make sure we're the only background process. + if self._wakeup_event is not None: + # If there is already a background process then we signal it to wake + # up and exit. We do not want multiple background processes running + # at a time. + self._wakeup_event.set() + return + + self._wakeup_event = DeferredEvent(self._hs.get_clock()) + + try: + while True: + # Clear the event before checking the queue. If we cleared after + # we run the risk of the wakeup signal racing with us checking + # the queue. (This can't really happen in Python due to the + # single threaded nature, but let's be a bit defensive anyway.) + self._wakeup_event.clear() + + while self._queue: + item = self._queue.popleft() + try: + await self._callback(item) + except Exception: + logger.exception("Error processing background queue item") + + # Wait for new data to arrive, timing out after a while to avoid + # keeping the background process alive forever. + # + # New data may have arrived and been processed while we were + # pulling from the queue, so this may return that there is new + # data immediately even though there isn't. That's fine, we'll + # just loop round, clear the event, recheck the queue, and then + # wait here again. + new_data = await self._wakeup_event.wait( + timeout_seconds=self._timeout_ms / MILLISECONDS_PER_SECOND + ) + if not new_data: + # Timed out waiting for new data, so exit the loop + break + finally: + # This background process is exiting, so clear the wakeup event to + # indicate that a new one should be started when new data arrives. + self._wakeup_event = None + + # The queue must be empty here. + assert not self._queue + + def __len__(self) -> int: + return len(self._queue) diff --git a/tests/util/test_background_queue.py b/tests/util/test_background_queue.py new file mode 100644 index 0000000000..d7eb4f4f02 --- /dev/null +++ b/tests/util/test_background_queue.py @@ -0,0 +1,105 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 Element Creations Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . 
+# + + +from unittest.mock import Mock + +from twisted.internet.defer import Deferred +from twisted.internet.testing import MemoryReactor + +from synapse.server import HomeServer +from synapse.util.bacckground_queue import BackgroundQueue +from synapse.util.clock import Clock + +from tests.unittest import HomeserverTestCase + + +class BackgroundQueueTests(HomeserverTestCase): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + self._process_item_mock = Mock(spec_set=[]) + + self.queue = BackgroundQueue[int]( + hs=homeserver, + name="test_queue", + callback=self._process_item_mock, + timeout_ms=1000, + ) + + def test_simple_call(self) -> None: + """Test that items added to the queue are processed.""" + # Register a deferred to be the return value of the callback. + callback_result_deferred: Deferred[None] = Deferred() + self._process_item_mock.side_effect = callback_result_deferred + + # Adding an item should cause the callback to be invoked. + self.queue.add(1) + + self._process_item_mock.assert_called_once_with(1) + self._process_item_mock.reset_mock() + + # Adding another item should not cause the callback to be invoked again + # until the previous one has completed. + self.queue.add(2) + self._process_item_mock.assert_not_called() + + # Once the first callback completes, the second item should be + # processed. + callback_result_deferred.callback(None) + self._process_item_mock.assert_called_once_with(2) + + def test_timeout(self) -> None: + """Test that the background process wakes up if its idle, and that it + times out after being idle.""" + + # Register a deferred to be the return value of the callback. + callback_result_deferred: Deferred[None] = Deferred() + self._process_item_mock.side_effect = callback_result_deferred + + # Adding an item should cause the callback to be invoked. + self.queue.add(1) + + self._process_item_mock.assert_called_once_with(1) + self._process_item_mock.reset_mock() + + # Let the callback complete. + callback_result_deferred.callback(None) + + # Advance the clock by less than the timeout, and add another item. + self.reactor.advance(0.5) + self.assertIsNotNone(self.queue._wakeup_event) + self.queue.add(2) + + # The callback should be invoked again. + callback_result_deferred = Deferred() + self._process_item_mock.side_effect = callback_result_deferred + self._process_item_mock.assert_called_once_with(2) + self._process_item_mock.reset_mock() + + # Let the callback complete. + callback_result_deferred.callback(None) + + # Advance the clock by more than the timeout. + self.reactor.advance(1.5) + + # The background process should have exited, we check this by checking + # the internal wakeup event has been removed. + self.assertIsNone(self.queue._wakeup_event) + + # Add another item. This should cause a new background process to be + # started. 
+ self.queue.add(3) + + self._process_item_mock.assert_called_once_with(3) From d3ffd04f66b423153c52bc65b3c6a2e9424486fa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Nov 2025 14:00:59 +0000 Subject: [PATCH 134/149] Fix spelling (#19145) Fixes up #19138 --- changelog.d/19145.misc | 1 + synapse/replication/tcp/handler.py | 2 +- synapse/util/{bacckground_queue.py => background_queue.py} | 0 tests/util/test_background_queue.py | 2 +- 4 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/19145.misc rename synapse/util/{bacckground_queue.py => background_queue.py} (100%) diff --git a/changelog.d/19145.misc b/changelog.d/19145.misc new file mode 100644 index 0000000000..1183361737 --- /dev/null +++ b/changelog.d/19145.misc @@ -0,0 +1 @@ +Minor speed up of processing of inbound replication. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index ed7cff72b6..8cf7f4b805 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -70,7 +70,7 @@ from synapse.replication.tcp.streams._base import ( DeviceListsStream, ThreadSubscriptionsStream, ) -from synapse.util.bacckground_queue import BackgroundQueue +from synapse.util.background_queue import BackgroundQueue if TYPE_CHECKING: from synapse.server import HomeServer diff --git a/synapse/util/bacckground_queue.py b/synapse/util/background_queue.py similarity index 100% rename from synapse/util/bacckground_queue.py rename to synapse/util/background_queue.py diff --git a/tests/util/test_background_queue.py b/tests/util/test_background_queue.py index d7eb4f4f02..56fa121285 100644 --- a/tests/util/test_background_queue.py +++ b/tests/util/test_background_queue.py @@ -19,7 +19,7 @@ from twisted.internet.defer import Deferred from twisted.internet.testing import MemoryReactor from synapse.server import HomeServer -from synapse.util.bacckground_queue import BackgroundQueue +from synapse.util.background_queue import BackgroundQueue from synapse.util.clock import Clock from tests.unittest import HomeserverTestCase From 67903128316b6ad4e8aebb42b1e318091b873f49 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Nov 2025 15:38:14 +0000 Subject: [PATCH 135/149] Fixup logcontexts after replication PR. (#19146) Fixes logcontext leaks introduced in #19138. --- changelog.d/19146.misc | 1 + synapse/util/async_helpers.py | 26 ++++++++------------------ synapse/util/background_queue.py | 4 ++++ tests/util/test_background_queue.py | 26 +++++++++++++++++++------- 4 files changed, 32 insertions(+), 25 deletions(-) create mode 100644 changelog.d/19146.misc diff --git a/changelog.d/19146.misc b/changelog.d/19146.misc new file mode 100644 index 0000000000..1183361737 --- /dev/null +++ b/changelog.d/19146.misc @@ -0,0 +1 @@ +Minor speed up of processing of inbound replication. 
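
For readers following the queue work in this patch and the two before it, here is a minimal sketch — not part of the series; the queue name, handler, and `hs` wiring are illustrative — of how a `BackgroundQueue` consumer is meant to be used, relying only on the constructor and `add()` defined above:

```python
import logging

from synapse.server import HomeServer
from synapse.util.background_queue import BackgroundQueue

logger = logging.getLogger(__name__)


async def _handle_item(item: str) -> None:
    # Runs inside the queue's background process, one item at a time.
    # Exceptions (other than cancellation) are caught and logged by the queue.
    logger.info("processing %s", item)


def build_queue(hs: HomeServer) -> BackgroundQueue[str]:
    # "demo-queue" is an arbitrary background-process name for this sketch.
    # The first add() spawns the process; it exits after ~500ms with no new
    # items, and a later add() simply starts a fresh one.
    return BackgroundQueue[str](hs, "demo-queue", _handle_item, timeout_ms=500)


# Producer side: add() never blocks; it enqueues the item and either spawns
# the consumer or sets its wakeup event.
#     build_queue(hs).add("first item")
```

The `PreserveLoggingContext()` wrappers added below follow the usual Synapse logcontext convention: `DeferredEvent.set()` completes a deferred that other code is waiting on, so it switches to the sentinel context first rather than letting the waiter's callbacks run under (and log against) the producer's logcontext.
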
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 99e899d1ef..8322a1bb33 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -1029,7 +1029,8 @@ class DeferredEvent: def set(self) -> None: if not self._deferred.called: - self._deferred.callback(None) + with PreserveLoggingContext(): + self._deferred.callback(None) def clear(self) -> None: if self._deferred.called: @@ -1042,26 +1043,15 @@ class DeferredEvent: if self.is_set(): return True - # Create a deferred that gets called in N seconds - sleep_deferred: "defer.Deferred[None]" = defer.Deferred() - call = self._clock.call_later( - timeout_seconds, - sleep_deferred.callback, - None, - ) - try: await make_deferred_yieldable( - defer.DeferredList( - [sleep_deferred, self._deferred], - fireOnOneCallback=True, - fireOnOneErrback=True, - consumeErrors=True, + timeout_deferred( + deferred=stop_cancellation(self._deferred), + timeout=timeout_seconds, + clock=self._clock, ) ) - finally: - # Cancel the sleep if we were woken up - if call.active(): - call.cancel() + except defer.TimeoutError: + pass return self.is_set() diff --git a/synapse/util/background_queue.py b/synapse/util/background_queue.py index daf6a94842..7e4c322662 100644 --- a/synapse/util/background_queue.py +++ b/synapse/util/background_queue.py @@ -25,6 +25,8 @@ from typing import ( TypeVar, ) +from twisted.internet import defer + from synapse.util.async_helpers import DeferredEvent from synapse.util.constants import MILLISECONDS_PER_SECOND @@ -110,6 +112,8 @@ class BackgroundQueue(Generic[T]): item = self._queue.popleft() try: await self._callback(item) + except defer.CancelledError: + raise except Exception: logger.exception("Error processing background queue item") diff --git a/tests/util/test_background_queue.py b/tests/util/test_background_queue.py index 56fa121285..901b014845 100644 --- a/tests/util/test_background_queue.py +++ b/tests/util/test_background_queue.py @@ -18,11 +18,12 @@ from unittest.mock import Mock from twisted.internet.defer import Deferred from twisted.internet.testing import MemoryReactor +from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.server import HomeServer from synapse.util.background_queue import BackgroundQueue from synapse.util.clock import Clock -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, logcontext_clean class BackgroundQueueTests(HomeserverTestCase): @@ -38,11 +39,14 @@ class BackgroundQueueTests(HomeserverTestCase): timeout_ms=1000, ) + @logcontext_clean def test_simple_call(self) -> None: """Test that items added to the queue are processed.""" # Register a deferred to be the return value of the callback. callback_result_deferred: Deferred[None] = Deferred() - self._process_item_mock.side_effect = callback_result_deferred + self._process_item_mock.side_effect = lambda _: make_deferred_yieldable( + callback_result_deferred + ) # Adding an item should cause the callback to be invoked. self.queue.add(1) @@ -57,16 +61,20 @@ class BackgroundQueueTests(HomeserverTestCase): # Once the first callback completes, the second item should be # processed. 
- callback_result_deferred.callback(None) + with PreserveLoggingContext(): + callback_result_deferred.callback(None) self._process_item_mock.assert_called_once_with(2) + @logcontext_clean def test_timeout(self) -> None: """Test that the background process wakes up if its idle, and that it times out after being idle.""" # Register a deferred to be the return value of the callback. callback_result_deferred: Deferred[None] = Deferred() - self._process_item_mock.side_effect = callback_result_deferred + self._process_item_mock.side_effect = lambda _: make_deferred_yieldable( + callback_result_deferred + ) # Adding an item should cause the callback to be invoked. self.queue.add(1) @@ -75,7 +83,8 @@ class BackgroundQueueTests(HomeserverTestCase): self._process_item_mock.reset_mock() # Let the callback complete. - callback_result_deferred.callback(None) + with PreserveLoggingContext(): + callback_result_deferred.callback(None) # Advance the clock by less than the timeout, and add another item. self.reactor.advance(0.5) @@ -84,12 +93,15 @@ class BackgroundQueueTests(HomeserverTestCase): # The callback should be invoked again. callback_result_deferred = Deferred() - self._process_item_mock.side_effect = callback_result_deferred + self._process_item_mock.side_effect = lambda _: make_deferred_yieldable( + callback_result_deferred + ) self._process_item_mock.assert_called_once_with(2) self._process_item_mock.reset_mock() # Let the callback complete. - callback_result_deferred.callback(None) + with PreserveLoggingContext(): + callback_result_deferred.callback(None) # Advance the clock by more than the timeout. self.reactor.advance(1.5) From fcac7e0282b074d4bd3414d1c9c181e9701875d9 Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Thu, 6 Nov 2025 15:02:33 -0500 Subject: [PATCH 136/149] Write union types as `X | Y` where possible (#19111) aka PEP 604, added in Python 3.10 --- .ci/scripts/auditwheel_wrapper.py | 3 +- changelog.d/19111.misc | 1 + contrib/cmdclient/console.py | 3 +- contrib/cmdclient/http.py | 9 +- docker/configure_workers_and_start.py | 5 +- docker/start.py | 6 +- .../synapse_architecture/cancellation.md | 6 +- docs/modules/account_data_callbacks.md | 4 +- docs/modules/account_validity_callbacks.md | 2 +- docs/modules/media_repository_callbacks.md | 4 +- .../password_auth_provider_callbacks.md | 34 +-- docs/modules/presence_router_callbacks.md | 10 +- docs/modules/ratelimit_callbacks.md | 2 +- docs/modules/spam_checker_callbacks.md | 20 +- docs/modules/third_party_rules_callbacks.md | 4 +- docs/presence_router_module.md | 4 +- pyproject.toml | 9 +- scripts-dev/build_debian_packages.py | 6 +- scripts-dev/federation_client.py | 14 +- scripts-dev/gen_config_documentation.py | 8 +- scripts-dev/mypy_synapse_plugin.py | 14 +- scripts-dev/release.py | 26 +- scripts-dev/schema_versions.py | 4 +- stubs/sortedcontainers/sorteddict.pyi | 18 +- stubs/sortedcontainers/sortedlist.pyi | 34 +-- stubs/sortedcontainers/sortedset.pyi | 68 ++--- stubs/txredisapi.pyi | 28 +- synapse/_scripts/export_signing_key.py | 4 +- synapse/_scripts/generate_workers_map.py | 12 +- synapse/_scripts/register_new_matrix_user.py | 14 +- synapse/_scripts/synapse_port_db.py | 26 +- synapse/_scripts/synctl.py | 4 +- synapse/api/auth/__init__.py | 6 +- synapse/api/auth/base.py | 8 +- synapse/api/auth/mas.py | 12 +- synapse/api/auth/msc3861_delegated.py | 14 +- synapse/api/auth_blocking.py | 10 +- synapse/api/errors.py | 38 +-- synapse/api/filtering.py | 6 +- synapse/api/presence.py | 10 +- synapse/api/ratelimiting.py | 36 ++- 
synapse/api/room_versions.py | 4 +- synapse/api/urls.py | 3 +- synapse/app/_base.py | 12 +- synapse/app/admin_cmd.py | 6 +- synapse/app/complement_fork_starter.py | 4 +- synapse/app/generic_worker.py | 3 +- synapse/app/homeserver.py | 4 +- synapse/appservice/__init__.py | 17 +- synapse/appservice/api.py | 12 +- synapse/appservice/scheduler.py | 25 +- synapse/config/_base.py | 24 +- synapse/config/_base.pyi | 22 +- synapse/config/api.py | 4 +- synapse/config/cache.py | 6 +- synapse/config/cas.py | 4 +- synapse/config/consent.py | 10 +- synapse/config/experimental.py | 26 +- synapse/config/federation.py | 4 +- synapse/config/key.py | 10 +- synapse/config/logger.py | 6 +- synapse/config/mas.py | 6 +- synapse/config/matrixrtc.py | 4 +- synapse/config/metrics.py | 4 +- synapse/config/oembed.py | 4 +- synapse/config/oidc.py | 24 +- synapse/config/ratelimiting.py | 4 +- synapse/config/registration.py | 6 +- synapse/config/retention.py | 6 +- synapse/config/server.py | 60 ++-- synapse/config/server_notices.py | 14 +- synapse/config/sso.py | 6 +- synapse/config/tls.py | 10 +- synapse/config/user_types.py | 6 +- synapse/config/workers.py | 12 +- synapse/crypto/keyring.py | 4 +- synapse/event_auth.py | 10 +- synapse/events/__init__.py | 63 ++-- synapse/events/builder.py | 20 +- synapse/events/presence_router.py | 16 +- synapse/events/snapshot.py | 42 +-- synapse/events/utils.py | 34 +-- synapse/events/validator.py | 6 +- synapse/federation/federation_base.py | 9 +- synapse/federation/federation_client.py | 48 ++- synapse/federation/federation_server.py | 26 +- synapse/federation/persistence.py | 3 +- synapse/federation/send_queue.py | 3 +- synapse/federation/sender/__init__.py | 15 +- .../sender/per_destination_queue.py | 18 +- synapse/federation/transport/client.py | 34 +-- .../federation/transport/server/__init__.py | 12 +- synapse/federation/transport/server/_base.py | 12 +- .../federation/transport/server/federation.py | 12 +- synapse/federation/units.py | 6 +- synapse/handlers/account_data.py | 16 +- synapse/handlers/account_validity.py | 10 +- synapse/handlers/admin.py | 15 +- synapse/handlers/appservice.py | 22 +- synapse/handlers/auth.py | 147 +++++----- synapse/handlers/cas.py | 24 +- synapse/handlers/deactivate_account.py | 4 +- synapse/handlers/delayed_events.py | 16 +- synapse/handlers/device.py | 43 ++- synapse/handlers/devicemessage.py | 4 +- synapse/handlers/directory.py | 22 +- synapse/handlers/e2e_keys.py | 24 +- synapse/handlers/e2e_room_keys.py | 16 +- synapse/handlers/event_auth.py | 8 +- synapse/handlers/events.py | 8 +- synapse/handlers/federation.py | 22 +- synapse/handlers/federation_event.py | 5 +- synapse/handlers/identity.py | 18 +- synapse/handlers/initial_sync.py | 6 +- synapse/handlers/jwt.py | 4 +- synapse/handlers/message.py | 74 ++--- synapse/handlers/oidc.py | 50 ++-- synapse/handlers/pagination.py | 20 +- synapse/handlers/presence.py | 75 +++-- synapse/handlers/profile.py | 22 +- synapse/handlers/push_rules.py | 8 +- synapse/handlers/receipts.py | 8 +- synapse/handlers/register.py | 65 ++--- synapse/handlers/relations.py | 13 +- synapse/handlers/room.py | 63 ++-- synapse/handlers/room_list.py | 48 +-- synapse/handlers/room_member.py | 84 +++--- synapse/handlers/room_member_worker.py | 6 +- synapse/handlers/room_summary.py | 42 +-- synapse/handlers/saml.py | 12 +- synapse/handlers/search.py | 26 +- synapse/handlers/send_email.py | 10 +- synapse/handlers/set_password.py | 4 +- synapse/handlers/sliding_sync/__init__.py | 38 +-- 
synapse/handlers/sliding_sync/extensions.py | 33 +-- synapse/handlers/sliding_sync/room_lists.py | 42 ++- synapse/handlers/sliding_sync/store.py | 6 +- synapse/handlers/sso.py | 51 ++-- synapse/handlers/state_deltas.py | 6 +- synapse/handlers/stats.py | 3 +- synapse/handlers/sync.py | 55 ++-- synapse/handlers/thread_subscriptions.py | 10 +- synapse/handlers/typing.py | 6 +- synapse/handlers/ui_auth/checkers.py | 2 +- synapse/handlers/user_directory.py | 16 +- synapse/handlers/worker_lock.py | 30 +- synapse/http/__init__.py | 3 +- synapse/http/additional_resource.py | 6 +- synapse/http/client.py | 102 ++++--- synapse/http/connectproxyclient.py | 10 +- .../federation/matrix_federation_agent.py | 26 +- .../http/federation/well_known_resolver.py | 14 +- synapse/http/matrixfederationclient.py | 102 ++++--- synapse/http/proxy.py | 6 +- synapse/http/proxyagent.py | 36 +-- synapse/http/replicationagent.py | 11 +- synapse/http/server.py | 24 +- synapse/http/servlet.py | 131 +++++---- synapse/http/site.py | 34 +-- synapse/http/types.py | 6 +- synapse/logging/_remote.py | 8 +- synapse/logging/context.py | 49 ++-- synapse/logging/formatter.py | 7 +- synapse/logging/handlers.py | 6 +- synapse/logging/opentracing.py | 52 ++-- synapse/logging/scopecontextmanager.py | 3 +- synapse/media/_base.py | 47 ++- synapse/media/filepath.py | 6 +- synapse/media/media_repository.py | 32 +- synapse/media/media_storage.py | 24 +- synapse/media/oembed.py | 10 +- synapse/media/preview_html.py | 29 +- synapse/media/storage_provider.py | 8 +- synapse/media/thumbnailer.py | 20 +- synapse/media/url_previewer.py | 14 +- synapse/metrics/__init__.py | 22 +- synapse/metrics/background_process_metrics.py | 31 +- synapse/metrics/jemalloc.py | 14 +- synapse/module_api/__init__.py | 225 +++++++------- .../callbacks/account_validity_callbacks.py | 24 +- .../callbacks/media_repository_callbacks.py | 31 +- .../callbacks/ratelimit_callbacks.py | 13 +- .../callbacks/spamchecker_callbacks.py | 276 +++++------------- .../third_party_event_rules_callbacks.py | 55 ++-- synapse/notifier.py | 42 ++- synapse/push/__init__.py | 16 +- synapse/push/bulk_push_rule_evaluator.py | 22 +- synapse/push/clientformat.py | 4 +- synapse/push/emailpusher.py | 6 +- synapse/push/httppusher.py | 16 +- synapse/push/mailer.py | 14 +- synapse/push/presentable_names.py | 4 +- synapse/push/push_types.py | 14 +- synapse/push/pusher.py | 4 +- synapse/push/pusherpool.py | 14 +- synapse/replication/http/delayed_events.py | 4 +- synapse/replication/http/devices.py | 4 +- synapse/replication/http/login.py | 10 +- synapse/replication/http/membership.py | 6 +- synapse/replication/http/presence.py | 6 +- synapse/replication/http/register.py | 14 +- synapse/replication/tcp/client.py | 6 +- synapse/replication/tcp/commands.py | 10 +- synapse/replication/tcp/external_cache.py | 6 +- synapse/replication/tcp/handler.py | 24 +- synapse/replication/tcp/protocol.py | 6 +- synapse/replication/tcp/redis.py | 22 +- synapse/replication/tcp/resource.py | 6 +- synapse/replication/tcp/streams/_base.py | 13 +- synapse/replication/tcp/streams/events.py | 16 +- synapse/rest/__init__.py | 6 +- synapse/rest/admin/__init__.py | 6 +- synapse/rest/admin/media.py | 4 +- synapse/rest/admin/rooms.py | 4 +- synapse/rest/admin/server_notice_servlet.py | 4 +- synapse/rest/admin/users.py | 20 +- synapse/rest/client/account.py | 22 +- synapse/rest/client/account_data.py | 10 +- synapse/rest/client/devices.py | 14 +- synapse/rest/client/directory.py | 4 +- synapse/rest/client/events.py | 4 +- 
synapse/rest/client/keys.py | 12 +- synapse/rest/client/knock.py | 2 +- synapse/rest/client/login.py | 26 +- synapse/rest/client/media.py | 3 +- synapse/rest/client/mutual_rooms.py | 2 +- synapse/rest/client/push_rule.py | 4 +- synapse/rest/client/register.py | 4 +- synapse/rest/client/relations.py | 6 +- synapse/rest/client/rendezvous.py | 4 +- synapse/rest/client/room.py | 28 +- synapse/rest/client/room_keys.py | 8 +- synapse/rest/client/sync.py | 6 +- synapse/rest/client/thread_subscriptions.py | 4 +- synapse/rest/key/v2/local_key_resource.py | 4 +- synapse/rest/key/v2/remote_key_resource.py | 10 +- synapse/rest/media/download_resource.py | 4 +- synapse/rest/media/upload_resource.py | 6 +- synapse/rest/synapse/mas/devices.py | 4 +- synapse/rest/synapse/mas/users.py | 16 +- synapse/rest/well_known.py | 4 +- synapse/server.py | 15 +- .../server_notices/server_notices_manager.py | 14 +- .../server_notices/server_notices_sender.py | 4 +- synapse/state/__init__.py | 34 +-- synapse/state/v1.py | 3 +- synapse/state/v2.py | 21 +- synapse/storage/_base.py | 6 +- synapse/storage/admin_client_config.py | 3 +- synapse/storage/background_updates.py | 39 ++- synapse/storage/controllers/persist_events.py | 8 +- synapse/storage/controllers/purge_events.py | 3 +- synapse/storage/controllers/state.py | 46 ++- synapse/storage/database.py | 87 +++--- synapse/storage/databases/__init__.py | 12 +- synapse/storage/databases/main/__init__.py | 36 +-- .../storage/databases/main/account_data.py | 11 +- synapse/storage/databases/main/appservice.py | 22 +- synapse/storage/databases/main/cache.py | 14 +- .../storage/databases/main/censor_events.py | 4 +- synapse/storage/databases/main/client_ips.py | 32 +- .../storage/databases/main/delayed_events.py | 36 +-- synapse/storage/databases/main/deviceinbox.py | 33 +-- synapse/storage/databases/main/devices.py | 61 ++-- synapse/storage/databases/main/directory.py | 12 +- .../storage/databases/main/e2e_room_keys.py | 21 +- .../storage/databases/main/end_to_end_keys.py | 58 ++-- .../databases/main/event_federation.py | 43 ++- .../databases/main/event_push_actions.py | 42 ++- synapse/storage/databases/main/events.py | 31 +- .../databases/main/events_bg_updates.py | 26 +- .../main/events_forward_extremities.py | 8 +- .../storage/databases/main/events_worker.py | 53 ++-- synapse/storage/databases/main/filtering.py | 8 +- synapse/storage/databases/main/keys.py | 10 +- synapse/storage/databases/main/lock.py | 10 +- .../databases/main/media_repository.py | 58 ++-- .../databases/main/monthly_active_users.py | 6 +- synapse/storage/databases/main/openid.py | 5 +- synapse/storage/databases/main/presence.py | 8 +- synapse/storage/databases/main/profile.py | 18 +- synapse/storage/databases/main/push_rule.py | 16 +- synapse/storage/databases/main/pusher.py | 17 +- synapse/storage/databases/main/receipts.py | 37 ++- .../storage/databases/main/registration.py | 146 +++++---- synapse/storage/databases/main/rejections.py | 3 +- synapse/storage/databases/main/relations.py | 36 ++- synapse/storage/databases/main/room.py | 94 +++--- synapse/storage/databases/main/roommember.py | 32 +- synapse/storage/databases/main/search.py | 12 +- .../storage/databases/main/sliding_sync.py | 10 +- synapse/storage/databases/main/state.py | 32 +- .../storage/databases/main/state_deltas.py | 14 +- synapse/storage/databases/main/stats.py | 24 +- synapse/storage/databases/main/stream.py | 111 ++++--- .../storage/databases/main/task_scheduler.py | 22 +- .../databases/main/thread_subscriptions.py | 18 +- 
.../storage/databases/main/transactions.py | 38 +-- synapse/storage/databases/main/ui_auth.py | 8 +- .../storage/databases/main/user_directory.py | 25 +- synapse/storage/databases/state/bg_updates.py | 14 +- synapse/storage/databases/state/deletion.py | 5 +- synapse/storage/databases/state/store.py | 15 +- synapse/storage/engines/_base.py | 4 +- synapse/storage/engines/postgres.py | 8 +- synapse/storage/engines/sqlite.py | 6 +- synapse/storage/invite_rule.py | 6 +- synapse/storage/prepare_database.py | 11 +- synapse/storage/roommember.py | 15 +- synapse/storage/types.py | 18 +- synapse/storage/util/id_generators.py | 22 +- synapse/storage/util/sequence.py | 18 +- synapse/streams/__init__.py | 4 +- synapse/streams/config.py | 5 +- synapse/synapse_rust/events.pyi | 8 +- synapse/synapse_rust/push.pyi | 20 +- synapse/types/__init__.py | 66 ++--- synapse/types/handlers/__init__.py | 12 +- synapse/types/handlers/sliding_sync.py | 49 ++-- synapse/types/rest/client/__init__.py | 96 +++--- synapse/types/state.py | 17 +- synapse/util/__init__.py | 3 +- synapse/util/async_helpers.py | 72 +++-- synapse/util/background_queue.py | 3 +- synapse/util/caches/__init__.py | 12 +- synapse/util/caches/cached_call.py | 10 +- synapse/util/caches/deferred_cache.py | 33 +-- synapse/util/caches/descriptors.py | 46 ++- synapse/util/caches/dictionary_cache.py | 16 +- synapse/util/caches/expiringcache.py | 10 +- synapse/util/caches/lrucache.py | 76 +++-- synapse/util/caches/response_cache.py | 9 +- synapse/util/caches/stream_change_cache.py | 10 +- synapse/util/caches/ttlcache.py | 6 +- synapse/util/check_dependencies.py | 12 +- synapse/util/daemonize.py | 6 +- synapse/util/distributor.py | 4 +- synapse/util/events.py | 14 +- synapse/util/file_consumer.py | 12 +- synapse/util/gai_resolver.py | 8 +- synapse/util/linked_list.py | 10 +- synapse/util/macaroons.py | 4 +- synapse/util/manhole.py | 4 +- synapse/util/metrics.py | 11 +- synapse/util/pydantic_models.py | 4 +- synapse/util/ratelimitutils.py | 5 +- synapse/util/retryutils.py | 8 +- synapse/util/rust.py | 3 +- synapse/util/stringutils.py | 10 +- synapse/util/task_scheduler.py | 32 +- synapse/util/templates.py | 6 +- synapse/visibility.py | 9 +- synmark/suites/logging.py | 3 +- tests/api/test_ratelimiting.py | 4 +- tests/appservice/test_api.py | 10 +- tests/appservice/test_scheduler.py | 24 +- tests/config/test_workers.py | 4 +- tests/crypto/test_keyring.py | 4 +- tests/events/test_auto_accept_invites.py | 8 +- tests/events/test_presence_router.py | 10 +- tests/events/test_utils.py | 4 +- tests/federation/test_federation_catch_up.py | 4 +- .../test_federation_out_of_band_membership.py | 36 +-- tests/federation/test_federation_sender.py | 10 +- tests/federation/test_federation_server.py | 9 +- tests/federation/transport/test_client.py | 3 +- tests/federation/transport/test_knocking.py | 4 +- tests/handlers/test_appservice.py | 15 +- tests/handlers/test_auth.py | 3 +- tests/handlers/test_device.py | 5 +- tests/handlers/test_federation.py | 6 +- tests/handlers/test_federation_event.py | 7 +- tests/handlers/test_oauth_delegation.py | 10 +- tests/handlers/test_oidc.py | 4 +- tests/handlers/test_password_providers.py | 6 +- tests/handlers/test_presence.py | 4 +- tests/handlers/test_register.py | 42 +-- tests/handlers/test_room_list.py | 3 +- tests/handlers/test_room_policy.py | 23 +- tests/handlers/test_room_summary.py | 22 +- tests/handlers/test_saml.py | 4 +- tests/handlers/test_send_email.py | 4 +- tests/handlers/test_sliding_sync.py | 12 +- tests/handlers/test_sso.py 
| 8 +- tests/handlers/test_stats.py | 8 +- tests/handlers/test_sync.py | 8 +- .../test_matrix_federation_agent.py | 14 +- tests/http/server/_base.py | 12 +- tests/http/test_client.py | 5 +- tests/http/test_proxyagent.py | 11 +- tests/http/test_servlet.py | 3 +- tests/logging/test_opentracing.py | 8 +- tests/media/test_media_retention.py | 12 +- tests/media/test_media_storage.py | 20 +- tests/module_api/test_api.py | 6 +- tests/module_api/test_spamchecker.py | 16 +- tests/push/test_bulk_push_rule_evaluator.py | 4 +- tests/push/test_presentable_names.py | 6 +- tests/push/test_push_rule_evaluator.py | 12 +- tests/replication/_base.py | 10 +- tests/replication/storage/_base.py | 6 +- tests/replication/storage/test_events.py | 18 +- tests/replication/tcp/streams/test_events.py | 10 +- tests/replication/test_multi_media_repo.py | 8 +- tests/rest/admin/test_federation.py | 13 +- tests/rest/admin/test_registration_tokens.py | 7 +- tests/rest/admin/test_room.py | 3 +- tests/rest/admin/test_scheduled_tasks.py | 8 +- tests/rest/admin/test_statistics.py | 3 +- tests/rest/admin/test_user.py | 21 +- tests/rest/admin/test_username_available.py | 5 +- .../test_extension_thread_subscriptions.py | 6 +- .../sliding_sync/test_rooms_timeline.py | 5 +- .../client/sliding_sync/test_sliding_sync.py | 8 +- tests/rest/client/test_account.py | 22 +- tests/rest/client/test_auth.py | 6 +- tests/rest/client/test_login.py | 36 +-- tests/rest/client/test_media.py | 20 +- tests/rest/client/test_notifications.py | 3 +- tests/rest/client/test_profile.py | 10 +- tests/rest/client/test_receipts.py | 3 +- tests/rest/client/test_redactions.py | 5 +- tests/rest/client/test_relations.py | 14 +- tests/rest/client/test_reporting.py | 3 +- tests/rest/client/test_rooms.py | 26 +- tests/rest/client/test_third_party_rules.py | 18 +- tests/rest/client/test_upgrade_room.py | 5 +- tests/rest/client/utils.py | 125 ++++---- tests/rest/key/v2/test_remote_key_resource.py | 8 +- tests/rest/media/test_url_preview.py | 4 +- tests/scripts/test_new_matrix_user.py | 11 +- tests/server.py | 56 ++-- tests/state/test_v2.py | 9 +- tests/state/test_v21.py | 14 +- .../databases/main/test_end_to_end_keys.py | 3 +- tests/storage/databases/main/test_receipts.py | 4 +- tests/storage/test_account_data.py | 6 +- tests/storage/test_client_ips.py | 16 +- tests/storage/test_event_federation.py | 3 +- tests/storage/test_event_push_actions.py | 11 +- tests/storage/test_events.py | 3 +- tests/storage/test_id_generators.py | 7 +- tests/storage/test_monthly_active_users.py | 2 +- tests/storage/test_receipts.py | 6 +- tests/storage/test_redaction.py | 8 +- tests/storage/test_roommember.py | 8 +- tests/storage/test_sliding_sync_tables.py | 24 +- tests/storage/test_thread_subscriptions.py | 15 +- tests/storage/test_user_directory.py | 4 +- tests/test_event_auth.py | 10 +- tests/test_mau.py | 4 +- tests/test_server.py | 4 +- tests/test_state.py | 21 +- tests/test_utils/event_injection.py | 14 +- tests/test_utils/html_parsers.py | 8 +- tests/test_utils/oidc.py | 20 +- tests/test_visibility.py | 7 +- tests/unittest.py | 46 ++- tests/util/caches/test_descriptors.py | 3 +- tests/util/test_async_helpers.py | 6 +- tests/util/test_check_dependencies.py | 6 +- tests/util/test_file_consumer.py | 6 +- tests/util/test_ratelimitutils.py | 3 +- tests/util/test_task_scheduler.py | 11 +- tests/utils.py | 14 +- 465 files changed, 4034 insertions(+), 4555 deletions(-) create mode 100644 changelog.d/19111.misc diff --git a/.ci/scripts/auditwheel_wrapper.py 
b/.ci/scripts/auditwheel_wrapper.py index 9599b79e50..9832821221 100755 --- a/.ci/scripts/auditwheel_wrapper.py +++ b/.ci/scripts/auditwheel_wrapper.py @@ -25,7 +25,6 @@ import argparse import os import subprocess -from typing import Optional from zipfile import ZipFile from packaging.tags import Tag @@ -80,7 +79,7 @@ def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str: return new_wheel_file -def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None: +def main(wheel_file: str, dest_dir: str, archs: str | None) -> None: """Entry point""" # Parse the wheel file name into its parts. Note that `parse_wheel_filename` diff --git a/changelog.d/19111.misc b/changelog.d/19111.misc new file mode 100644 index 0000000000..cb4ca85c47 --- /dev/null +++ b/changelog.d/19111.misc @@ -0,0 +1 @@ +Write union types as `X | Y` where possible, as per PEP 604, added in Python 3.10. diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py index 9b5d33d2b1..1c867d5336 100755 --- a/contrib/cmdclient/console.py +++ b/contrib/cmdclient/console.py @@ -33,7 +33,6 @@ import sys import time import urllib from http import TwistedHttpClient -from typing import Optional import urlparse from signedjson.key import NACL_ED25519, decode_verify_key_bytes @@ -726,7 +725,7 @@ class SynapseCmd(cmd.Cmd): method, path, data=None, - query_params: Optional[dict] = None, + query_params: dict | None = None, alt_text=None, ): """Runs an HTTP request and pretty prints the output. diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py index 54363e4259..b92ccdd932 100644 --- a/contrib/cmdclient/http.py +++ b/contrib/cmdclient/http.py @@ -22,7 +22,6 @@ import json import urllib from pprint import pformat -from typing import Optional from twisted.internet import defer, reactor from twisted.web.client import Agent, readBody @@ -90,7 +89,7 @@ class TwistedHttpClient(HttpClient): body = yield readBody(response) return json.loads(body) - def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None): + def _create_put_request(self, url, json_data, headers_dict: dict | None = None): """Wrapper of _create_request to issue a PUT request""" headers_dict = headers_dict or {} @@ -101,7 +100,7 @@ class TwistedHttpClient(HttpClient): "PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict ) - def _create_get_request(self, url, headers_dict: Optional[dict] = None): + def _create_get_request(self, url, headers_dict: dict | None = None): """Wrapper of _create_request to issue a GET request""" return self._create_request("GET", url, headers_dict=headers_dict or {}) @@ -113,7 +112,7 @@ class TwistedHttpClient(HttpClient): data=None, qparams=None, jsonreq=True, - headers: Optional[dict] = None, + headers: dict | None = None, ): headers = headers or {} @@ -138,7 +137,7 @@ class TwistedHttpClient(HttpClient): @defer.inlineCallbacks def _create_request( - self, method, url, producer=None, headers_dict: Optional[dict] = None + self, method, url, producer=None, headers_dict: dict | None = None ): """Creates and sends a request to the given url""" headers_dict = headers_dict or {} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 2451d1f300..e19b0a0039 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -68,7 +68,6 @@ from typing import ( Mapping, MutableMapping, NoReturn, - Optional, SupportsIndex, ) @@ -468,7 +467,7 @@ def add_worker_roles_to_shared_config( def 
merge_worker_template_configs( - existing_dict: Optional[dict[str, Any]], + existing_dict: dict[str, Any] | None, to_be_merged_dict: dict[str, Any], ) -> dict[str, Any]: """When given an existing dict of worker template configuration consisting with both @@ -1026,7 +1025,7 @@ def generate_worker_log_config( Returns: the path to the generated file """ # Check whether we should write worker logs to disk, in addition to the console - extra_log_template_args: dict[str, Optional[str]] = {} + extra_log_template_args: dict[str, str | None] = {} if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"): extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log" diff --git a/docker/start.py b/docker/start.py index daa041d463..c88d23695f 100755 --- a/docker/start.py +++ b/docker/start.py @@ -6,7 +6,7 @@ import os import platform import subprocess import sys -from typing import Any, Mapping, MutableMapping, NoReturn, Optional +from typing import Any, Mapping, MutableMapping, NoReturn import jinja2 @@ -50,7 +50,7 @@ def generate_config_from_template( config_dir: str, config_path: str, os_environ: Mapping[str, str], - ownership: Optional[str], + ownership: str | None, ) -> None: """Generate a homeserver.yaml from environment variables @@ -147,7 +147,7 @@ def generate_config_from_template( subprocess.run(args, check=True) -def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None: +def run_generate_config(environ: Mapping[str, str], ownership: str | None) -> None: """Run synapse with a --generate-config param to generate a template config file Args: diff --git a/docs/development/synapse_architecture/cancellation.md b/docs/development/synapse_architecture/cancellation.md index ef9e022635..a12f119fb5 100644 --- a/docs/development/synapse_architecture/cancellation.md +++ b/docs/development/synapse_architecture/cancellation.md @@ -299,7 +299,7 @@ logcontext is not finished before the `async` processing completes. 
**Bad**: ```python -cache: Optional[ObservableDeferred[None]] = None +cache: ObservableDeferred[None] | None = None async def do_something_else( to_resolve: Deferred[None] @@ -326,7 +326,7 @@ with LoggingContext("request-1"): **Good**: ```python -cache: Optional[ObservableDeferred[None]] = None +cache: ObservableDeferred[None] | None = None async def do_something_else( to_resolve: Deferred[None] @@ -358,7 +358,7 @@ with LoggingContext("request-1"): **OK**: ```python -cache: Optional[ObservableDeferred[None]] = None +cache: ObservableDeferred[None] | None = None async def do_something_else( to_resolve: Deferred[None] diff --git a/docs/modules/account_data_callbacks.md b/docs/modules/account_data_callbacks.md index 25de911627..02b8c18bbf 100644 --- a/docs/modules/account_data_callbacks.md +++ b/docs/modules/account_data_callbacks.md @@ -15,7 +15,7 @@ _First introduced in Synapse v1.57.0_ ```python async def on_account_data_updated( user_id: str, - room_id: Optional[str], + room_id: str | None, account_data_type: str, content: "synapse.module_api.JsonDict", ) -> None: @@ -82,7 +82,7 @@ class CustomAccountDataModule: async def log_new_account_data( self, user_id: str, - room_id: Optional[str], + room_id: str | None, account_data_type: str, content: JsonDict, ) -> None: diff --git a/docs/modules/account_validity_callbacks.md b/docs/modules/account_validity_callbacks.md index f5eefcd7d6..2deb43c1be 100644 --- a/docs/modules/account_validity_callbacks.md +++ b/docs/modules/account_validity_callbacks.md @@ -12,7 +12,7 @@ The available account validity callbacks are: _First introduced in Synapse v1.39.0_ ```python -async def is_user_expired(user: str) -> Optional[bool] +async def is_user_expired(user: str) -> bool | None ``` Called when processing any authenticated request (except for logout requests). The module diff --git a/docs/modules/media_repository_callbacks.md b/docs/modules/media_repository_callbacks.md index 7c724038a7..d7c9074bde 100644 --- a/docs/modules/media_repository_callbacks.md +++ b/docs/modules/media_repository_callbacks.md @@ -11,7 +11,7 @@ The available media repository callbacks are: _First introduced in Synapse v1.132.0_ ```python -async def get_media_config_for_user(user_id: str) -> Optional[JsonDict] +async def get_media_config_for_user(user_id: str) -> JsonDict | None ``` ** @@ -70,7 +70,7 @@ implementations of this callback. 
_First introduced in Synapse v1.139.0_ ```python -async def get_media_upload_limits_for_user(user_id: str, size: int) -> Optional[List[synapse.module_api.MediaUploadLimit]] +async def get_media_upload_limits_for_user(user_id: str, size: int) -> list[synapse.module_api.MediaUploadLimit] | None ``` ** diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md index d66ac7df31..88b22fdf21 100644 --- a/docs/modules/password_auth_provider_callbacks.md +++ b/docs/modules/password_auth_provider_callbacks.md @@ -23,12 +23,7 @@ async def check_auth( user: str, login_type: str, login_dict: "synapse.module_api.JsonDict", -) -> Optional[ - Tuple[ - str, - Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]] - ] -] +) -> tuple[str, Callable[["synapse.module_api.LoginResponse"], Awaitable[None]] | None] | None ``` The login type and field names should be provided by the user in the @@ -67,12 +62,7 @@ async def check_3pid_auth( medium: str, address: str, password: str, -) -> Optional[ - Tuple[ - str, - Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]] - ] -] +) -> tuple[str, Callable[["synapse.module_api.LoginResponse"], Awaitable[None]] | None] ``` Called when a user attempts to register or log in with a third party identifier, @@ -98,7 +88,7 @@ _First introduced in Synapse v1.46.0_ ```python async def on_logged_out( user_id: str, - device_id: Optional[str], + device_id: str | None, access_token: str ) -> None ``` @@ -119,7 +109,7 @@ _First introduced in Synapse v1.52.0_ async def get_username_for_registration( uia_results: Dict[str, Any], params: Dict[str, Any], -) -> Optional[str] +) -> str | None ``` Called when registering a new user. The module can return a username to set for the user @@ -180,7 +170,7 @@ _First introduced in Synapse v1.54.0_ async def get_displayname_for_registration( uia_results: Dict[str, Any], params: Dict[str, Any], -) -> Optional[str] +) -> str | None ``` Called when registering a new user. 
The module can return a display name to set for the @@ -259,12 +249,7 @@ class MyAuthProvider: username: str, login_type: str, login_dict: "synapse.module_api.JsonDict", - ) -> Optional[ - Tuple[ - str, - Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]], - ] - ]: + ) -> tuple[str, Callable[["synapse.module_api.LoginResponse"], Awaitable[None]] | None] | None: if login_type != "my.login_type": return None @@ -276,12 +261,7 @@ class MyAuthProvider: username: str, login_type: str, login_dict: "synapse.module_api.JsonDict", - ) -> Optional[ - Tuple[ - str, - Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]], - ] - ]: + ) -> tuple[str, Callable[["synapse.module_api.LoginResponse"], Awaitable[None]] | None] | None: if login_type != "m.login.password": return None diff --git a/docs/modules/presence_router_callbacks.md b/docs/modules/presence_router_callbacks.md index b210f0e3cd..f865e79f53 100644 --- a/docs/modules/presence_router_callbacks.md +++ b/docs/modules/presence_router_callbacks.md @@ -23,7 +23,7 @@ _First introduced in Synapse v1.42.0_ ```python async def get_users_for_states( state_updates: Iterable["synapse.api.UserPresenceState"], -) -> Dict[str, Set["synapse.api.UserPresenceState"]] +) -> dict[str, set["synapse.api.UserPresenceState"]] ``` **Requires** `get_interested_users` to also be registered @@ -45,7 +45,7 @@ _First introduced in Synapse v1.42.0_ ```python async def get_interested_users( user_id: str -) -> Union[Set[str], "synapse.module_api.PRESENCE_ALL_USERS"] +) -> set[str] | "synapse.module_api.PRESENCE_ALL_USERS" ``` **Requires** `get_users_for_states` to also be registered @@ -73,7 +73,7 @@ that `@alice:example.org` receives all presence updates from `@bob:example.com` `@charlie:somewhere.org`, regardless of whether Alice shares a room with any of them. ```python -from typing import Dict, Iterable, Set, Union +from typing import Iterable from synapse.module_api import ModuleApi @@ -90,7 +90,7 @@ class CustomPresenceRouter: async def get_users_for_states( self, state_updates: Iterable["synapse.api.UserPresenceState"], - ) -> Dict[str, Set["synapse.api.UserPresenceState"]]: + ) -> dict[str, set["synapse.api.UserPresenceState"]]: res = {} for update in state_updates: if ( @@ -104,7 +104,7 @@ class CustomPresenceRouter: async def get_interested_users( self, user_id: str, - ) -> Union[Set[str], "synapse.module_api.PRESENCE_ALL_USERS"]: + ) -> set[str] | "synapse.module_api.PRESENCE_ALL_USERS": if user_id == "@alice:example.com": return {"@bob:example.com", "@charlie:somewhere.org"} diff --git a/docs/modules/ratelimit_callbacks.md b/docs/modules/ratelimit_callbacks.md index 30d94024fa..048bdc6f9e 100644 --- a/docs/modules/ratelimit_callbacks.md +++ b/docs/modules/ratelimit_callbacks.md @@ -11,7 +11,7 @@ The available ratelimit callbacks are: _First introduced in Synapse v1.132.0_ ```python -async def get_ratelimit_override_for_user(user: str, limiter_name: str) -> Optional[synapse.module_api.RatelimitOverride] +async def get_ratelimit_override_for_user(user: str, limiter_name: str) -> synapse.module_api.RatelimitOverride | None ``` ** diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 0f15a9dcc5..0d261e844f 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -331,9 +331,9 @@ search results; otherwise return `False`. The profile is represented as a dictionary with the following keys: * `user_id: str`. The Matrix ID for this user. 
-* `display_name: Optional[str]`. The user's display name, or `None` if this user +* `display_name: str | None`. The user's display name, or `None` if this user has not set a display name. -* `avatar_url: Optional[str]`. The `mxc://` URL to the user's avatar, or `None` +* `avatar_url: str | None`. The `mxc://` URL to the user's avatar, or `None` if this user has not set an avatar. The module is given a copy of the original dictionary, so modifying it from within the @@ -352,10 +352,10 @@ _First introduced in Synapse v1.37.0_ ```python async def check_registration_for_spam( - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[Tuple[str, str]], - auth_provider_id: Optional[str] = None, + auth_provider_id: str | None = None, ) -> "synapse.spam_checker_api.RegistrationBehaviour" ``` @@ -438,10 +438,10 @@ _First introduced in Synapse v1.87.0_ ```python async def check_login_for_spam( user_id: str, - device_id: Optional[str], - initial_display_name: Optional[str], - request_info: Collection[Tuple[Optional[str], str]], - auth_provider_id: Optional[str] = None, + device_id: str | None, + initial_display_name: str | None, + request_info: Collection[tuple[str | None, str]], + auth_provider_id: str | None = None, ) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"] ``` @@ -509,7 +509,7 @@ class ListSpamChecker: resource=IsUserEvilResource(config), ) - async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[Literal["NOT_SPAM"], Codes]: + async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Literal["NOT_SPAM"] | Codes: if event.sender in self.evil_users: return Codes.FORBIDDEN else: diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md index b97e28db11..1474b2dfd5 100644 --- a/docs/modules/third_party_rules_callbacks.md +++ b/docs/modules/third_party_rules_callbacks.md @@ -16,7 +16,7 @@ _First introduced in Synapse v1.39.0_ async def check_event_allowed( event: "synapse.events.EventBase", state_events: "synapse.types.StateMap", -) -> Tuple[bool, Optional[dict]] +) -> tuple[bool, dict | None] ``` ** @@ -340,7 +340,7 @@ class EventCensorer: self, event: "synapse.events.EventBase", state_events: "synapse.types.StateMap", - ) -> Tuple[bool, Optional[dict]]: + ) -> Tuple[bool, dict | None]: event_dict = event.get_dict() new_event_content = await self.api.http_client.post_json_get_json( uri=self._endpoint, post_json=event_dict, diff --git a/docs/presence_router_module.md b/docs/presence_router_module.md index face54fe2b..092b566c5f 100644 --- a/docs/presence_router_module.md +++ b/docs/presence_router_module.md @@ -76,7 +76,7 @@ possible. #### `get_interested_users` ```python -async def get_interested_users(self, user_id: str) -> Union[Set[str], str] +async def get_interested_users(self, user_id: str) -> set[str] | str ``` **Required.** An asynchronous method that is passed a single Matrix User ID. This @@ -182,7 +182,7 @@ class ExamplePresenceRouter: async def get_interested_users( self, user_id: str, - ) -> Union[Set[str], PresenceRouter.ALL_USERS]: + ) -> set[str] | PresenceRouter.ALL_USERS: """ Retrieve a list of users that `user_id` is interested in receiving the presence of. This will be in addition to those they share a room with. 
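
The documentation and code hunks in this patch all apply the same mechanical rewrite, and the `pyproject.toml` change just below turns on the ruff rules (`UP007`, `UP045`) that enforce it for new code. A minimal before/after sketch of the pattern, with hypothetical function names, for anyone skimming the hunks:

```python
from typing import Optional, Union


# Before: PEP 484 spellings via the typing aliases.
def find_old(key: str, default: Optional[int] = None) -> Union[int, str]:
    return default if default is not None else key


# After: equivalent PEP 604 spellings (Python 3.10+); no typing import needed.
def find_new(key: str, default: int | None = None) -> int | str:
    return default if default is not None else key
```
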
diff --git a/pyproject.toml b/pyproject.toml index 991cb3e7f3..0eef197cf2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -80,10 +80,15 @@ select = [ "G", # pyupgrade "UP006", + "UP007", + "UP045", ] extend-safe-fixes = [ - # pyupgrade - "UP006" + # pyupgrade rules compatible with Python >= 3.9 + "UP006", + "UP007", + # pyupgrade rules compatible with Python >= 3.10 + "UP045", ] [tool.ruff.lint.isort] diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index 60aa8a5796..d462fe6c56 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -18,7 +18,7 @@ import sys import threading from concurrent.futures import ThreadPoolExecutor from types import FrameType -from typing import Collection, Optional, Sequence +from typing import Collection, Sequence # These are expanded inside the dockerfile to be a fully qualified image name. # e.g. docker.io/library/debian:bookworm @@ -49,7 +49,7 @@ class Builder: def __init__( self, redirect_stdout: bool = False, - docker_build_args: Optional[Sequence[str]] = None, + docker_build_args: Sequence[str] | None = None, ): self.redirect_stdout = redirect_stdout self._docker_build_args = tuple(docker_build_args or ()) @@ -167,7 +167,7 @@ class Builder: def run_builds( builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False ) -> None: - def sig(signum: int, _frame: Optional[FrameType]) -> None: + def sig(signum: int, _frame: FrameType | None) -> None: print("Caught SIGINT") builder.kill_containers() diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index db8655c1ce..0fefc23b22 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -43,7 +43,7 @@ import argparse import base64 import json import sys -from typing import Any, Mapping, Optional, Union +from typing import Any, Mapping from urllib import parse as urlparse import requests @@ -103,12 +103,12 @@ def sign_json( def request( - method: Optional[str], + method: str | None, origin_name: str, origin_key: signedjson.types.SigningKey, destination: str, path: str, - content: Optional[str], + content: str | None, verify_tls: bool, ) -> requests.Response: if method is None: @@ -301,9 +301,9 @@ class MatrixConnectionAdapter(HTTPAdapter): def get_connection_with_tls_context( self, request: PreparedRequest, - verify: Optional[Union[bool, str]], - proxies: Optional[Mapping[str, str]] = None, - cert: Optional[Union[tuple[str, str], str]] = None, + verify: bool | str | None, + proxies: Mapping[str, str] | None = None, + cert: tuple[str, str] | str | None = None, ) -> HTTPConnectionPool: # overrides the get_connection_with_tls_context() method in the base class parsed = urlparse.urlsplit(request.url) @@ -368,7 +368,7 @@ class MatrixConnectionAdapter(HTTPAdapter): return server_name, 8448, server_name @staticmethod - def _get_well_known(server_name: str) -> Optional[str]: + def _get_well_known(server_name: str) -> str | None: if ":" in server_name: # explicit port, or ipv6 literal. 
Either way, no .well-known return None diff --git a/scripts-dev/gen_config_documentation.py b/scripts-dev/gen_config_documentation.py index 9a49c07a34..aad25a4fc1 100755 --- a/scripts-dev/gen_config_documentation.py +++ b/scripts-dev/gen_config_documentation.py @@ -4,7 +4,7 @@ import json import re import sys -from typing import Any, Optional +from typing import Any import yaml @@ -259,17 +259,17 @@ def indent(text: str, first_line: bool = True) -> str: return text -def em(s: Optional[str]) -> str: +def em(s: str | None) -> str: """Add emphasis to text.""" return f"*{s}*" if s else "" -def a(s: Optional[str], suffix: str = " ") -> str: +def a(s: str | None, suffix: str = " ") -> str: """Appends a space if the given string is not empty.""" return s + suffix if s else "" -def p(s: Optional[str], prefix: str = " ") -> str: +def p(s: str | None, prefix: str = " ") -> str: """Prepend a space if the given string is not empty.""" return prefix + s if s else "" diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 830c4ac4ab..24794a1925 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -24,7 +24,7 @@ can crop up, e.g the cache descriptors. """ import enum -from typing import Callable, Mapping, Optional, Union +from typing import Callable, Mapping import attr import mypy.types @@ -123,7 +123,7 @@ class ArgLocation: """ -prometheus_metric_fullname_to_label_arg_map: Mapping[str, Optional[ArgLocation]] = { +prometheus_metric_fullname_to_label_arg_map: Mapping[str, ArgLocation | None] = { # `Collector` subclasses: "prometheus_client.metrics.MetricWrapperBase": ArgLocation("labelnames", 2), "prometheus_client.metrics.Counter": ArgLocation("labelnames", 2), @@ -211,7 +211,7 @@ class SynapsePlugin(Plugin): def get_base_class_hook( self, fullname: str - ) -> Optional[Callable[[ClassDefContext], None]]: + ) -> Callable[[ClassDefContext], None] | None: def _get_base_class_hook(ctx: ClassDefContext) -> None: # Run any `get_base_class_hook` checks from other plugins first. # @@ -232,7 +232,7 @@ class SynapsePlugin(Plugin): def get_function_signature_hook( self, fullname: str - ) -> Optional[Callable[[FunctionSigContext], FunctionLike]]: + ) -> Callable[[FunctionSigContext], FunctionLike] | None: # Strip off the unique identifier for classes that are dynamically created inside # functions. ex. `synapse.metrics.jemalloc.JemallocCollector@185` (this is the line # number) @@ -262,7 +262,7 @@ class SynapsePlugin(Plugin): def get_method_signature_hook( self, fullname: str - ) -> Optional[Callable[[MethodSigContext], CallableType]]: + ) -> Callable[[MethodSigContext], CallableType] | None: if fullname.startswith( ( "synapse.util.caches.descriptors.CachedFunction.__call__", @@ -721,7 +721,7 @@ def check_is_cacheable_wrapper(ctx: MethodSigContext) -> CallableType: def check_is_cacheable( signature: CallableType, - ctx: Union[MethodSigContext, FunctionSigContext], + ctx: MethodSigContext | FunctionSigContext, ) -> None: """ Check if a callable returns a type which can be cached. @@ -795,7 +795,7 @@ AT_CACHED_MUTABLE_RETURN = ErrorCode( def is_cacheable( rt: mypy.types.Type, signature: CallableType, verbose: bool -) -> tuple[bool, Optional[str]]: +) -> tuple[bool, str | None]: """ Check if a particular type is cachable. 
diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 262c1503c7..ba95a19382 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -32,7 +32,7 @@ import time import urllib.request from os import path from tempfile import TemporaryDirectory -from typing import Any, Match, Optional, Union +from typing import Any, Match import attr import click @@ -327,11 +327,11 @@ def _prepare() -> None: @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"]) -def tag(gh_token: Optional[str]) -> None: +def tag(gh_token: str | None) -> None: _tag(gh_token) -def _tag(gh_token: Optional[str]) -> None: +def _tag(gh_token: str | None) -> None: """Tags the release and generates a draft GitHub release""" # Test that the GH Token is valid before continuing. @@ -471,11 +471,11 @@ def _publish(gh_token: str) -> None: @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False) -def upload(gh_token: Optional[str]) -> None: +def upload(gh_token: str | None) -> None: _upload(gh_token) -def _upload(gh_token: Optional[str]) -> None: +def _upload(gh_token: str | None) -> None: """Upload release to pypi.""" # Test that the GH Token is valid before continuing. @@ -576,11 +576,11 @@ def _merge_into(repo: Repo, source: str, target: str) -> None: @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False) -def wait_for_actions(gh_token: Optional[str]) -> None: +def wait_for_actions(gh_token: str | None) -> None: _wait_for_actions(gh_token) -def _wait_for_actions(gh_token: Optional[str]) -> None: +def _wait_for_actions(gh_token: str | None) -> None: # Test that the GH Token is valid before continuing. check_valid_gh_token(gh_token) @@ -658,7 +658,7 @@ def _notify(message: str) -> None: envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False, ) -def merge_back(_gh_token: Optional[str]) -> None: +def merge_back(_gh_token: str | None) -> None: _merge_back() @@ -715,7 +715,7 @@ def _merge_back() -> None: envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False, ) -def announce(_gh_token: Optional[str]) -> None: +def announce(_gh_token: str | None) -> None: _announce() @@ -851,7 +851,7 @@ def get_repo_and_check_clean_checkout( return repo -def check_valid_gh_token(gh_token: Optional[str]) -> None: +def check_valid_gh_token(gh_token: str | None) -> None: """Check that a github token is valid, if supplied""" if not gh_token: @@ -867,7 +867,7 @@ def check_valid_gh_token(gh_token: Optional[str]) -> None: raise click.ClickException(f"Github credentials are bad: {e}") -def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]: +def find_ref(repo: git.Repo, ref_name: str) -> git.HEAD | None: """Find the branch/ref, looking first locally then in the remote.""" if ref_name in repo.references: return repo.references[ref_name] @@ -904,7 +904,7 @@ def get_changes_for_version(wanted_version: version.Version) -> str: # These are 0-based. 
start_line: int - end_line: Optional[int] = None # Is none if its the last entry + end_line: int | None = None # Is none if its the last entry headings: list[VersionSection] = [] for i, token in enumerate(tokens): @@ -991,7 +991,7 @@ def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> messages = [] for commit in reversed(commits): if commit.author.name == "dependabot[bot]": - message: Union[str, bytes] = commit.message + message: str | bytes = commit.message if isinstance(message, bytes): message = message.decode("utf-8") messages.append(message.split("\n", maxsplit=1)[0]) diff --git a/scripts-dev/schema_versions.py b/scripts-dev/schema_versions.py index cec58e177f..b3946ea7a1 100755 --- a/scripts-dev/schema_versions.py +++ b/scripts-dev/schema_versions.py @@ -38,7 +38,7 @@ import io import json import sys from collections import defaultdict -from typing import Any, Iterator, Optional +from typing import Any, Iterator import git from packaging import version @@ -57,7 +57,7 @@ SCHEMA_VERSION_FILES = ( OLDEST_SHOWN_VERSION = version.parse("v1.0") -def get_schema_versions(tag: git.Tag) -> tuple[Optional[int], Optional[int]]: +def get_schema_versions(tag: git.Tag) -> tuple[int | None, int | None]: """Get the schema and schema compat versions for a tag.""" schema_version = None schema_compat_version = None diff --git a/stubs/sortedcontainers/sorteddict.pyi b/stubs/sortedcontainers/sorteddict.pyi index a0be3e6349..8616f8d4f7 100644 --- a/stubs/sortedcontainers/sorteddict.pyi +++ b/stubs/sortedcontainers/sorteddict.pyi @@ -13,10 +13,8 @@ from typing import ( Iterator, KeysView, Mapping, - Optional, Sequence, TypeVar, - Union, ValuesView, overload, ) @@ -51,7 +49,7 @@ class SortedDict(dict[_KT, _VT]): self, __key: _Key[_KT], __iterable: Iterable[tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... @property - def key(self) -> Optional[_Key[_KT]]: ... + def key(self) -> _Key[_KT] | None: ... @property def iloc(self) -> SortedKeysView[_KT]: ... def clear(self) -> None: ... @@ -79,10 +77,10 @@ class SortedDict(dict[_KT, _VT]): @overload def pop(self, key: _KT) -> _VT: ... @overload - def pop(self, key: _KT, default: _T = ...) -> Union[_VT, _T]: ... + def pop(self, key: _KT, default: _T = ...) -> _VT | _T: ... def popitem(self, index: int = ...) -> tuple[_KT, _VT]: ... def peekitem(self, index: int = ...) -> tuple[_KT, _VT]: ... - def setdefault(self, key: _KT, default: Optional[_VT] = ...) -> _VT: ... + def setdefault(self, key: _KT, default: _VT | None = ...) -> _VT: ... # Mypy now reports the first overload as an error, because typeshed widened the type # of `__map` to its internal `_typeshed.SupportsKeysAndGetItem` type in # https://github.com/python/typeshed/pull/6653 @@ -106,8 +104,8 @@ class SortedDict(dict[_KT, _VT]): def _check(self) -> None: ... def islice( self, - start: Optional[int] = ..., - stop: Optional[int] = ..., + start: int | None = ..., + stop: int | None = ..., reverse: bool = ..., ) -> Iterator[_KT]: ... def bisect_left(self, value: _KT) -> int: ... @@ -118,7 +116,7 @@ class SortedKeysView(KeysView[_KT_co], Sequence[_KT_co]): def __getitem__(self, index: int) -> _KT_co: ... @overload def __getitem__(self, index: slice) -> list[_KT_co]: ... - def __delitem__(self, index: Union[int, slice]) -> None: ... + def __delitem__(self, index: int | slice) -> None: ... class SortedItemsView(ItemsView[_KT_co, _VT_co], Sequence[tuple[_KT_co, _VT_co]]): def __iter__(self) -> Iterator[tuple[_KT_co, _VT_co]]: ... 
@@ -126,11 +124,11 @@ class SortedItemsView(ItemsView[_KT_co, _VT_co], Sequence[tuple[_KT_co, _VT_co]] def __getitem__(self, index: int) -> tuple[_KT_co, _VT_co]: ... @overload def __getitem__(self, index: slice) -> list[tuple[_KT_co, _VT_co]]: ... - def __delitem__(self, index: Union[int, slice]) -> None: ... + def __delitem__(self, index: int | slice) -> None: ... class SortedValuesView(ValuesView[_VT_co], Sequence[_VT_co]): @overload def __getitem__(self, index: int) -> _VT_co: ... @overload def __getitem__(self, index: slice) -> list[_VT_co]: ... - def __delitem__(self, index: Union[int, slice]) -> None: ... + def __delitem__(self, index: int | slice) -> None: ... diff --git a/stubs/sortedcontainers/sortedlist.pyi b/stubs/sortedcontainers/sortedlist.pyi index 25ceb74cc9..f5e056111b 100644 --- a/stubs/sortedcontainers/sortedlist.pyi +++ b/stubs/sortedcontainers/sortedlist.pyi @@ -10,10 +10,8 @@ from typing import ( Iterable, Iterator, MutableSequence, - Optional, Sequence, TypeVar, - Union, overload, ) @@ -29,8 +27,8 @@ class SortedList(MutableSequence[_T]): DEFAULT_LOAD_FACTOR: int = ... def __init__( self, - iterable: Optional[Iterable[_T]] = ..., - key: Optional[_Key[_T]] = ..., + iterable: Iterable[_T] | None = ..., + key: _Key[_T] | None = ..., ): ... # NB: currently mypy does not honour return type, see mypy #3307 @overload @@ -42,7 +40,7 @@ class SortedList(MutableSequence[_T]): @overload def __new__(cls, iterable: Iterable[_T], key: _Key[_T]) -> SortedKeyList[_T]: ... @property - def key(self) -> Optional[Callable[[_T], Any]]: ... + def key(self) -> Callable[[_T], Any] | None: ... def _reset(self, load: int) -> None: ... def clear(self) -> None: ... def _clear(self) -> None: ... @@ -57,7 +55,7 @@ class SortedList(MutableSequence[_T]): def _pos(self, idx: int) -> int: ... def _build_index(self) -> None: ... def __contains__(self, value: Any) -> bool: ... - def __delitem__(self, index: Union[int, slice]) -> None: ... + def __delitem__(self, index: int | slice) -> None: ... @overload def __getitem__(self, index: int) -> _T: ... @overload @@ -76,8 +74,8 @@ class SortedList(MutableSequence[_T]): def reverse(self) -> None: ... def islice( self, - start: Optional[int] = ..., - stop: Optional[int] = ..., + start: int | None = ..., + stop: int | None = ..., reverse: bool = ..., ) -> Iterator[_T]: ... def _islice( @@ -90,8 +88,8 @@ class SortedList(MutableSequence[_T]): ) -> Iterator[_T]: ... def irange( self, - minimum: Optional[int] = ..., - maximum: Optional[int] = ..., + minimum: int | None = ..., + maximum: int | None = ..., inclusive: tuple[bool, bool] = ..., reverse: bool = ..., ) -> Iterator[_T]: ... @@ -107,7 +105,7 @@ class SortedList(MutableSequence[_T]): def insert(self, index: int, value: _T) -> None: ... def pop(self, index: int = ...) -> _T: ... def index( - self, value: _T, start: Optional[int] = ..., stop: Optional[int] = ... + self, value: _T, start: int | None = ..., stop: int | None = ... ) -> int: ... def __add__(self: _SL, other: Iterable[_T]) -> _SL: ... def __radd__(self: _SL, other: Iterable[_T]) -> _SL: ... @@ -126,10 +124,10 @@ class SortedList(MutableSequence[_T]): class SortedKeyList(SortedList[_T]): def __init__( - self, iterable: Optional[Iterable[_T]] = ..., key: _Key[_T] = ... + self, iterable: Iterable[_T] | None = ..., key: _Key[_T] = ... ) -> None: ... def __new__( - cls, iterable: Optional[Iterable[_T]] = ..., key: _Key[_T] = ... + cls, iterable: Iterable[_T] | None = ..., key: _Key[_T] = ... ) -> SortedKeyList[_T]: ... 
@property def key(self) -> Callable[[_T], Any]: ... @@ -146,15 +144,15 @@ class SortedKeyList(SortedList[_T]): def _delete(self, pos: int, idx: int) -> None: ... def irange( self, - minimum: Optional[int] = ..., - maximum: Optional[int] = ..., + minimum: int | None = ..., + maximum: int | None = ..., inclusive: tuple[bool, bool] = ..., reverse: bool = ..., ) -> Iterator[_T]: ... def irange_key( self, - min_key: Optional[Any] = ..., - max_key: Optional[Any] = ..., + min_key: Any | None = ..., + max_key: Any | None = ..., inclusive: tuple[bool, bool] = ..., reserve: bool = ..., ) -> Iterator[_T]: ... @@ -170,7 +168,7 @@ class SortedKeyList(SortedList[_T]): def copy(self: _SKL) -> _SKL: ... def __copy__(self: _SKL) -> _SKL: ... def index( - self, value: _T, start: Optional[int] = ..., stop: Optional[int] = ... + self, value: _T, start: int | None = ..., stop: int | None = ... ) -> int: ... def __add__(self: _SKL, other: Iterable[_T]) -> _SKL: ... def __radd__(self: _SKL, other: Iterable[_T]) -> _SKL: ... diff --git a/stubs/sortedcontainers/sortedset.pyi b/stubs/sortedcontainers/sortedset.pyi index a3593ca579..da2696b262 100644 --- a/stubs/sortedcontainers/sortedset.pyi +++ b/stubs/sortedcontainers/sortedset.pyi @@ -11,10 +11,8 @@ from typing import ( Iterable, Iterator, MutableSet, - Optional, Sequence, TypeVar, - Union, overload, ) @@ -28,21 +26,19 @@ _Key = Callable[[_T], Any] class SortedSet(MutableSet[_T], Sequence[_T]): def __init__( self, - iterable: Optional[Iterable[_T]] = ..., - key: Optional[_Key[_T]] = ..., + iterable: Iterable[_T] | None = ..., + key: _Key[_T] | None = ..., ) -> None: ... @classmethod - def _fromset( - cls, values: set[_T], key: Optional[_Key[_T]] = ... - ) -> SortedSet[_T]: ... + def _fromset(cls, values: set[_T], key: _Key[_T] | None = ...) -> SortedSet[_T]: ... @property - def key(self) -> Optional[_Key[_T]]: ... + def key(self) -> _Key[_T] | None: ... def __contains__(self, value: Any) -> bool: ... @overload def __getitem__(self, index: int) -> _T: ... @overload def __getitem__(self, index: slice) -> list[_T]: ... - def __delitem__(self, index: Union[int, slice]) -> None: ... + def __delitem__(self, index: int | slice) -> None: ... def __eq__(self, other: Any) -> bool: ... def __ne__(self, other: Any) -> bool: ... def __lt__(self, other: Iterable[_T]) -> bool: ... @@ -62,32 +58,28 @@ class SortedSet(MutableSet[_T], Sequence[_T]): def _discard(self, value: _T) -> None: ... def pop(self, index: int = ...) -> _T: ... def remove(self, value: _T) -> None: ... - def difference(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def __sub__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def difference_update( - self, *iterables: Iterable[_S] - ) -> SortedSet[Union[_T, _S]]: ... - def __isub__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def intersection(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def __and__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def __rand__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def intersection_update( - self, *iterables: Iterable[_S] - ) -> SortedSet[Union[_T, _S]]: ... - def __iand__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def symmetric_difference(self, other: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def __xor__(self, other: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def __rxor__(self, other: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... 
+ def difference(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __sub__(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def difference_update(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __isub__(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def intersection(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __and__(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __rand__(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def intersection_update(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __iand__(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def symmetric_difference(self, other: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __xor__(self, other: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __rxor__(self, other: Iterable[_S]) -> SortedSet[_T | _S]: ... def symmetric_difference_update( self, other: Iterable[_S] - ) -> SortedSet[Union[_T, _S]]: ... - def __ixor__(self, other: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def union(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def __or__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def __ror__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def update(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def __ior__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... - def _update(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... + ) -> SortedSet[_T | _S]: ... + def __ixor__(self, other: Iterable[_S]) -> SortedSet[_T | _S]: ... + def union(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __or__(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __ror__(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def update(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def __ior__(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... + def _update(self, *iterables: Iterable[_S]) -> SortedSet[_T | _S]: ... def __reduce__( self, ) -> tuple[type[SortedSet[_T]], set[_T], Callable[[_T], Any]]: ... @@ -97,18 +89,18 @@ class SortedSet(MutableSet[_T], Sequence[_T]): def bisect_right(self, value: _T) -> int: ... def islice( self, - start: Optional[int] = ..., - stop: Optional[int] = ..., + start: int | None = ..., + stop: int | None = ..., reverse: bool = ..., ) -> Iterator[_T]: ... def irange( self, - minimum: Optional[_T] = ..., - maximum: Optional[_T] = ..., + minimum: _T | None = ..., + maximum: _T | None = ..., inclusive: tuple[bool, bool] = ..., reverse: bool = ..., ) -> Iterator[_T]: ... def index( - self, value: _T, start: Optional[int] = ..., stop: Optional[int] = ... + self, value: _T, start: int | None = ..., stop: int | None = ... ) -> int: ... def _reset(self, load: int) -> None: ... diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index d2539aa37d..50ab54037a 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -15,7 +15,7 @@ """Contains *incomplete* type hints for txredisapi.""" -from typing import Any, Optional, Union +from typing import Any from twisted.internet import protocol from twisted.internet.defer import Deferred @@ -29,8 +29,8 @@ class RedisProtocol(protocol.Protocol): self, key: str, value: Any, - expire: Optional[int] = None, - pexpire: Optional[int] = None, + expire: int | None = None, + pexpire: int | None = None, only_if_not_exists: bool = False, only_if_exists: bool = False, ) -> "Deferred[None]": ... 
@@ -38,8 +38,8 @@ class RedisProtocol(protocol.Protocol): class SubscriberProtocol(RedisProtocol): def __init__(self, *args: object, **kwargs: object): ... - password: Optional[str] - def subscribe(self, channels: Union[str, list[str]]) -> "Deferred[None]": ... + password: str | None + def subscribe(self, channels: str | list[str]) -> "Deferred[None]": ... def connectionMade(self) -> None: ... # type-ignore: twisted.internet.protocol.Protocol provides a default argument for # `reason`. txredisapi's LineReceiver Protocol doesn't. But that's fine: it's what's @@ -49,12 +49,12 @@ class SubscriberProtocol(RedisProtocol): def lazyConnection( host: str = ..., port: int = ..., - dbid: Optional[int] = ..., + dbid: int | None = ..., reconnect: bool = ..., charset: str = ..., - password: Optional[str] = ..., - connectTimeout: Optional[int] = ..., - replyTimeout: Optional[int] = ..., + password: str | None = ..., + connectTimeout: int | None = ..., + replyTimeout: int | None = ..., convertNumbers: bool = ..., ) -> RedisProtocol: ... @@ -70,18 +70,18 @@ class RedisFactory(protocol.ReconnectingClientFactory): continueTrying: bool handler: ConnectionHandler pool: list[RedisProtocol] - replyTimeout: Optional[int] + replyTimeout: int | None def __init__( self, uuid: str, - dbid: Optional[int], + dbid: int | None, poolsize: int, isLazy: bool = False, handler: type = ConnectionHandler, charset: str = "utf-8", - password: Optional[str] = None, - replyTimeout: Optional[int] = None, - convertNumbers: Optional[int] = True, + password: str | None = None, + replyTimeout: int | None = None, + convertNumbers: int | None = True, ): ... def buildProtocol(self, addr: IAddress) -> RedisProtocol: ... diff --git a/synapse/_scripts/export_signing_key.py b/synapse/_scripts/export_signing_key.py index 690115aabe..bab5953802 100755 --- a/synapse/_scripts/export_signing_key.py +++ b/synapse/_scripts/export_signing_key.py @@ -22,13 +22,13 @@ import argparse import sys import time -from typing import NoReturn, Optional +from typing import NoReturn from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys from signedjson.types import VerifyKey -def exit(status: int = 0, message: Optional[str] = None) -> NoReturn: +def exit(status: int = 0, message: str | None = None) -> NoReturn: if message: print(message, file=sys.stderr) sys.exit(status) diff --git a/synapse/_scripts/generate_workers_map.py b/synapse/_scripts/generate_workers_map.py index e669f6902d..3fa27b4b2a 100755 --- a/synapse/_scripts/generate_workers_map.py +++ b/synapse/_scripts/generate_workers_map.py @@ -25,7 +25,7 @@ import logging import re from collections import defaultdict from dataclasses import dataclass -from typing import Iterable, Optional, Pattern +from typing import Iterable, Pattern import yaml @@ -46,7 +46,7 @@ logger = logging.getLogger("generate_workers_map") class MockHomeserver(HomeServer): DATASTORE_CLASS = DataStore - def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None: + def __init__(self, config: HomeServerConfig, worker_app: str | None) -> None: super().__init__(config.server.server_name, config=config) self.config.worker.worker_app = worker_app @@ -65,7 +65,7 @@ class EndpointDescription: # The category of this endpoint. Is read from the `CATEGORY` constant in the servlet # class. - category: Optional[str] + category: str | None # TODO: # - does it need to be routed based on a stream writer config? 
@@ -141,7 +141,7 @@ def get_registered_paths_for_hs( def get_registered_paths_for_default( - worker_app: Optional[str], base_config: HomeServerConfig + worker_app: str | None, base_config: HomeServerConfig ) -> dict[tuple[str, str], EndpointDescription]: """ Given the name of a worker application and a base homeserver configuration, @@ -271,7 +271,7 @@ def main() -> None: # TODO SSO endpoints (pick_idp etc) NOT REGISTERED BY THIS SCRIPT categories_to_methods_and_paths: dict[ - Optional[str], dict[tuple[str, str], EndpointDescription] + str | None, dict[tuple[str, str], EndpointDescription] ] = defaultdict(dict) for (method, path), desc in elided_worker_paths.items(): @@ -282,7 +282,7 @@ def main() -> None: def print_category( - category_name: Optional[str], + category_name: str | None, elided_worker_paths: dict[tuple[str, str], EndpointDescription], ) -> None: """ diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 3fe2f33e52..1ce8221218 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -26,7 +26,7 @@ import hashlib import hmac import logging import sys -from typing import Any, Callable, Optional +from typing import Any, Callable import requests import yaml @@ -54,7 +54,7 @@ def request_registration( server_location: str, shared_secret: str, admin: bool = False, - user_type: Optional[str] = None, + user_type: str | None = None, _print: Callable[[str], None] = print, exit: Callable[[int], None] = sys.exit, exists_ok: bool = False, @@ -123,13 +123,13 @@ def register_new_user( password: str, server_location: str, shared_secret: str, - admin: Optional[bool], - user_type: Optional[str], + admin: bool | None, + user_type: str | None, exists_ok: bool = False, ) -> None: if not user: try: - default_user: Optional[str] = getpass.getuser() + default_user: str | None = getpass.getuser() except Exception: default_user = None @@ -262,7 +262,7 @@ def main() -> None: args = parser.parse_args() - config: Optional[dict[str, Any]] = None + config: dict[str, Any] | None = None if "config" in args and args.config: config = yaml.safe_load(args.config) @@ -350,7 +350,7 @@ def _read_file(file_path: Any, config_path: str) -> str: sys.exit(1) -def _find_client_listener(config: dict[str, Any]) -> Optional[str]: +def _find_client_listener(config: dict[str, Any]) -> str | None: # try to find a listener in the config. Returns a host:port pair for listener in config.get("listeners", []): if listener.get("type") != "http" or listener.get("tls", False): diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index e83c0de5a4..1806e42d90 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -233,14 +233,14 @@ IGNORED_BACKGROUND_UPDATES = { # Error returned by the run function. Used at the top-level part of the script to # handle errors and return codes. -end_error: Optional[str] = None +end_error: str | None = None # The exec_info for the error, if any. If error is defined but not exec_info the script # will show only the error message without the stacktrace, if exec_info is defined but # not the error then the script will show nothing outside of what's printed in the run # function. If both are defined, the script will print both the error and the stacktrace. 
-end_error_exec_info: Optional[ - tuple[type[BaseException], BaseException, TracebackType] -] = None +end_error_exec_info: tuple[type[BaseException], BaseException, TracebackType] | None = ( + None +) R = TypeVar("R") @@ -485,7 +485,7 @@ class Porter: def r( txn: LoggingTransaction, - ) -> tuple[Optional[list[str]], list[tuple], list[tuple]]: + ) -> tuple[list[str] | None, list[tuple], list[tuple]]: forward_rows = [] backward_rows = [] if do_forward[0]: @@ -502,7 +502,7 @@ class Porter: if forward_rows or backward_rows: assert txn.description is not None - headers: Optional[list[str]] = [ + headers: list[str] | None = [ column[0] for column in txn.description ] else: @@ -1152,9 +1152,7 @@ class Porter: return done, remaining + done async def _setup_state_group_id_seq(self) -> None: - curr_id: Optional[ - int - ] = await self.sqlite_store.db_pool.simple_select_one_onecol( + curr_id: int | None = await self.sqlite_store.db_pool.simple_select_one_onecol( table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True ) @@ -1271,10 +1269,10 @@ class Porter: await self.postgres_store.db_pool.runInteraction("_setup_%s" % (seq_name,), r) - async def _pg_get_serial_sequence(self, table: str, column: str) -> Optional[str]: + async def _pg_get_serial_sequence(self, table: str, column: str) -> str | None: """Returns the name of the postgres sequence associated with a column, or NULL.""" - def r(txn: LoggingTransaction) -> Optional[str]: + def r(txn: LoggingTransaction) -> str | None: txn.execute("SELECT pg_get_serial_sequence('%s', '%s')" % (table, column)) result = txn.fetchone() if not result: @@ -1286,9 +1284,9 @@ class Porter: ) async def _setup_auth_chain_sequence(self) -> None: - curr_chain_id: Optional[ - int - ] = await self.sqlite_store.db_pool.simple_select_one_onecol( + curr_chain_id: ( + int | None + ) = await self.sqlite_store.db_pool.simple_select_one_onecol( table="event_auth_chains", keyvalues={}, retcol="MAX(chain_id)", diff --git a/synapse/_scripts/synctl.py b/synapse/_scripts/synctl.py index 2e2aa27a17..29ab955c45 100755 --- a/synapse/_scripts/synctl.py +++ b/synapse/_scripts/synctl.py @@ -30,7 +30,7 @@ import signal import subprocess import sys import time -from typing import Iterable, NoReturn, Optional, TextIO +from typing import Iterable, NoReturn, TextIO import yaml @@ -135,7 +135,7 @@ def start(pidfile: str, app: str, config_files: Iterable[str], daemonize: bool) return False -def stop(pidfile: str, app: str) -> Optional[int]: +def stop(pidfile: str, app: str) -> int | None: """Attempts to kill a synapse worker from the pidfile. Args: pidfile: path to file containing worker's pid diff --git a/synapse/api/auth/__init__.py b/synapse/api/auth/__init__.py index cc0c0d4601..201c295f06 100644 --- a/synapse/api/auth/__init__.py +++ b/synapse/api/auth/__init__.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Optional, Protocol +from typing import TYPE_CHECKING, Protocol from prometheus_client import Histogram @@ -51,7 +51,7 @@ class Auth(Protocol): room_id: str, requester: Requester, allow_departed_users: bool = False, - ) -> tuple[str, Optional[str]]: + ) -> tuple[str, str | None]: """Check if the user is in the room, or was at some point. Args: room_id: The room to check. 
@@ -190,7 +190,7 @@ class Auth(Protocol): async def check_user_in_room_or_world_readable( self, room_id: str, requester: Requester, allow_departed_users: bool = False - ) -> tuple[str, Optional[str]]: + ) -> tuple[str, str | None]: """Checks that the user is or was in the room or the room is world readable. If it isn't then an exception is raised. diff --git a/synapse/api/auth/base.py b/synapse/api/auth/base.py index d5635e588f..ff876b9d22 100644 --- a/synapse/api/auth/base.py +++ b/synapse/api/auth/base.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from netaddr import IPAddress @@ -64,7 +64,7 @@ class BaseAuth: room_id: str, requester: Requester, allow_departed_users: bool = False, - ) -> tuple[str, Optional[str]]: + ) -> tuple[str, str | None]: """Check if the user is in the room, or was at some point. Args: room_id: The room to check. @@ -114,7 +114,7 @@ class BaseAuth: @trace async def check_user_in_room_or_world_readable( self, room_id: str, requester: Requester, allow_departed_users: bool = False - ) -> tuple[str, Optional[str]]: + ) -> tuple[str, str | None]: """Checks that the user is or was in the room or the room is world readable. If it isn't then an exception is raised. @@ -294,7 +294,7 @@ class BaseAuth: @cancellable async def get_appservice_user( self, request: Request, access_token: str - ) -> Optional[Requester]: + ) -> Requester | None: """ Given a request, reads the request parameters to determine: - whether it's an application service that's making this request diff --git a/synapse/api/auth/mas.py b/synapse/api/auth/mas.py index f2b218e34f..e422a1e5c5 100644 --- a/synapse/api/auth/mas.py +++ b/synapse/api/auth/mas.py @@ -13,7 +13,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from urllib.parse import urlencode from pydantic import ( @@ -74,11 +74,11 @@ class ServerMetadata(BaseModel): class IntrospectionResponse(BaseModel): retrieved_at_ms: StrictInt active: StrictBool - scope: Optional[StrictStr] = None - username: Optional[StrictStr] = None - sub: Optional[StrictStr] = None - device_id: Optional[StrictStr] = None - expires_in: Optional[StrictInt] = None + scope: StrictStr | None = None + username: StrictStr | None = None + sub: StrictStr | None = None + device_id: StrictStr | None = None + expires_in: StrictInt | None = None model_config = ConfigDict(extra="allow") def get_scope_set(self) -> set[str]: diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 48b32aa04a..7999d6e459 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -20,7 +20,7 @@ # import logging from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Callable, Optional +from typing import TYPE_CHECKING, Any, Callable from urllib.parse import urlencode from authlib.oauth2 import ClientAuth @@ -102,25 +102,25 @@ class IntrospectionResult: return [] return scope_to_list(value) - def get_sub(self) -> Optional[str]: + def get_sub(self) -> str | None: value = self._inner.get("sub") if not isinstance(value, str): return None return value - def get_username(self) -> Optional[str]: + def get_username(self) -> str | None: value = self._inner.get("username") if not isinstance(value, str): return None return value - def get_name(self) -> Optional[str]: + def get_name(self) -> str | None: value = self._inner.get("name") if not isinstance(value, str): return None return value - def 
get_device_id(self) -> Optional[str]: + def get_device_id(self) -> str | None: value = self._inner.get("device_id") if value is not None and not isinstance(value, str): raise AuthError( @@ -174,7 +174,7 @@ class MSC3861DelegatedAuth(BaseAuth): self._clock = hs.get_clock() self._http_client = hs.get_proxied_http_client() self._hostname = hs.hostname - self._admin_token: Callable[[], Optional[str]] = self._config.admin_token + self._admin_token: Callable[[], str | None] = self._config.admin_token self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users self._rust_http_client = HttpClient( @@ -247,7 +247,7 @@ class MSC3861DelegatedAuth(BaseAuth): metadata = await self._issuer_metadata.get() return metadata.issuer or self._config.issuer - async def account_management_url(self) -> Optional[str]: + async def account_management_url(self) -> str | None: """ Get the configured account management URL diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py index 303c9ba03e..3ed47b20c4 100644 --- a/synapse/api/auth_blocking.py +++ b/synapse/api/auth_blocking.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.constants import LimitBlockingTypes, UserTypes from synapse.api.errors import Codes, ResourceLimitError @@ -51,10 +51,10 @@ class AuthBlocking: async def check_auth_blocking( self, - user_id: Optional[str] = None, - threepid: Optional[dict] = None, - user_type: Optional[str] = None, - requester: Optional[Requester] = None, + user_id: str | None = None, + threepid: dict | None = None, + user_type: str | None = None, + requester: Requester | None = None, ) -> None: """Checks if the user should be rejected for some external reason, such as monthly active user limiting or global disable flag diff --git a/synapse/api/errors.py b/synapse/api/errors.py index f75b34ef69..c4339ebef8 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -26,7 +26,7 @@ import math import typing from enum import Enum from http import HTTPStatus -from typing import Any, Optional, Union +from typing import Any, Optional from twisted.web import http @@ -164,9 +164,9 @@ class CodeMessageException(RuntimeError): def __init__( self, - code: Union[int, HTTPStatus], + code: int | HTTPStatus, msg: str, - headers: Optional[dict[str, str]] = None, + headers: dict[str, str] | None = None, ): super().__init__("%d: %s" % (code, msg)) @@ -223,8 +223,8 @@ class SynapseError(CodeMessageException): code: int, msg: str, errcode: str = Codes.UNKNOWN, - additional_fields: Optional[dict] = None, - headers: Optional[dict[str, str]] = None, + additional_fields: dict | None = None, + headers: dict[str, str] | None = None, ): """Constructs a synapse error. 
@@ -244,7 +244,7 @@ class SynapseError(CodeMessageException): return cs_error(self.msg, self.errcode, **self._additional_fields) @property - def debug_context(self) -> Optional[str]: + def debug_context(self) -> str | None: """Override this to add debugging context that shouldn't be sent to clients.""" return None @@ -276,7 +276,7 @@ class ProxiedRequestError(SynapseError): code: int, msg: str, errcode: str = Codes.UNKNOWN, - additional_fields: Optional[dict] = None, + additional_fields: dict | None = None, ): super().__init__(code, msg, errcode, additional_fields) @@ -340,7 +340,7 @@ class FederationDeniedError(SynapseError): destination: The destination which has been denied """ - def __init__(self, destination: Optional[str]): + def __init__(self, destination: str | None): """Raised by federation client or server to indicate that we are are deliberately not attempting to contact a given server because it is not on our federation whitelist. @@ -399,7 +399,7 @@ class AuthError(SynapseError): code: int, msg: str, errcode: str = Codes.FORBIDDEN, - additional_fields: Optional[dict] = None, + additional_fields: dict | None = None, ): super().__init__(code, msg, errcode, additional_fields) @@ -432,7 +432,7 @@ class UnstableSpecAuthError(AuthError): msg: str, errcode: str, previous_errcode: str = Codes.FORBIDDEN, - additional_fields: Optional[dict] = None, + additional_fields: dict | None = None, ): self.previous_errcode = previous_errcode super().__init__(code, msg, errcode, additional_fields) @@ -497,8 +497,8 @@ class ResourceLimitError(SynapseError): code: int, msg: str, errcode: str = Codes.RESOURCE_LIMIT_EXCEEDED, - admin_contact: Optional[str] = None, - limit_type: Optional[str] = None, + admin_contact: str | None = None, + limit_type: str | None = None, ): self.admin_contact = admin_contact self.limit_type = limit_type @@ -542,7 +542,7 @@ class InvalidCaptchaError(SynapseError): self, code: int = 400, msg: str = "Invalid captcha.", - error_url: Optional[str] = None, + error_url: str | None = None, errcode: str = Codes.CAPTCHA_INVALID, ): super().__init__(code, msg, errcode) @@ -563,9 +563,9 @@ class LimitExceededError(SynapseError): self, limiter_name: str, code: int = 429, - retry_after_ms: Optional[int] = None, + retry_after_ms: int | None = None, errcode: str = Codes.LIMIT_EXCEEDED, - pause: Optional[float] = None, + pause: float | None = None, ): # Use HTTP header Retry-After to enable library-assisted retry handling. 
headers = ( @@ -582,7 +582,7 @@ class LimitExceededError(SynapseError): return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms) @property - def debug_context(self) -> Optional[str]: + def debug_context(self) -> str | None: return self.limiter_name @@ -675,7 +675,7 @@ class RequestSendFailed(RuntimeError): class UnredactedContentDeletedError(SynapseError): - def __init__(self, content_keep_ms: Optional[int] = None): + def __init__(self, content_keep_ms: int | None = None): super().__init__( 404, "The content for that event has already been erased from the database", @@ -751,7 +751,7 @@ class FederationError(RuntimeError): code: int, reason: str, affected: str, - source: Optional[str] = None, + source: str | None = None, ): if level not in ["FATAL", "ERROR", "WARN"]: raise ValueError("Level is not valid: %s" % (level,)) @@ -786,7 +786,7 @@ class FederationPullAttemptBackoffError(RuntimeError): """ def __init__( - self, event_ids: "StrCollection", message: Optional[str], retry_after_ms: int + self, event_ids: "StrCollection", message: str | None, retry_after_ms: int ): event_ids = list(event_ids) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index e31bec1a00..9b47c20437 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -28,9 +28,7 @@ from typing import ( Collection, Iterable, Mapping, - Optional, TypeVar, - Union, ) import jsonschema @@ -155,7 +153,7 @@ class Filtering: self.DEFAULT_FILTER_COLLECTION = FilterCollection(hs, {}) async def get_user_filter( - self, user_id: UserID, filter_id: Union[int, str] + self, user_id: UserID, filter_id: int | str ) -> "FilterCollection": result = await self.store.get_user_filter(user_id, filter_id) return FilterCollection(self._hs, result) @@ -531,7 +529,7 @@ class Filter: return newFilter -def _matches_wildcard(actual_value: Optional[str], filter_value: str) -> bool: +def _matches_wildcard(actual_value: str | None, filter_value: str) -> bool: if filter_value.endswith("*") and isinstance(actual_value, str): type_prefix = filter_value[:-1] return actual_value.startswith(type_prefix) diff --git a/synapse/api/presence.py b/synapse/api/presence.py index 28c10403ce..0e2fe625c9 100644 --- a/synapse/api/presence.py +++ b/synapse/api/presence.py @@ -19,7 +19,7 @@ # # -from typing import Any, Optional +from typing import Any import attr @@ -41,15 +41,13 @@ class UserDevicePresenceState: """ user_id: str - device_id: Optional[str] + device_id: str | None state: str last_active_ts: int last_sync_ts: int @classmethod - def default( - cls, user_id: str, device_id: Optional[str] - ) -> "UserDevicePresenceState": + def default(cls, user_id: str, device_id: str | None) -> "UserDevicePresenceState": """Returns a default presence state.""" return cls( user_id=user_id, @@ -81,7 +79,7 @@ class UserPresenceState: last_active_ts: int last_federation_update_ts: int last_user_sync_ts: int - status_msg: Optional[str] + status_msg: str | None currently_active: bool def as_dict(self) -> JsonDict: diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index ee0e9181ce..df884d47d7 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -102,9 +102,7 @@ class Ratelimiter: self.clock.looping_call(self._prune_message_counts, 15 * 1000) - def _get_key( - self, requester: Optional[Requester], key: Optional[Hashable] - ) -> Hashable: + def _get_key(self, requester: Requester | None, key: Hashable | None) -> Hashable: """Use the requester's MXID as a fallback key if no key is provided.""" if 
key is None: if not requester: @@ -121,13 +119,13 @@ class Ratelimiter: async def can_do_action( self, - requester: Optional[Requester], - key: Optional[Hashable] = None, - rate_hz: Optional[float] = None, - burst_count: Optional[int] = None, + requester: Requester | None, + key: Hashable | None = None, + rate_hz: float | None = None, + burst_count: int | None = None, update: bool = True, n_actions: int = 1, - _time_now_s: Optional[float] = None, + _time_now_s: float | None = None, ) -> tuple[bool, float]: """Can the entity (e.g. user or IP address) perform the action? @@ -247,10 +245,10 @@ class Ratelimiter: def record_action( self, - requester: Optional[Requester], - key: Optional[Hashable] = None, + requester: Requester | None, + key: Hashable | None = None, n_actions: int = 1, - _time_now_s: Optional[float] = None, + _time_now_s: float | None = None, ) -> None: """Record that an action(s) took place, even if they violate the rate limit. @@ -332,14 +330,14 @@ class Ratelimiter: async def ratelimit( self, - requester: Optional[Requester], - key: Optional[Hashable] = None, - rate_hz: Optional[float] = None, - burst_count: Optional[int] = None, + requester: Requester | None, + key: Hashable | None = None, + rate_hz: float | None = None, + burst_count: int | None = None, update: bool = True, n_actions: int = 1, - _time_now_s: Optional[float] = None, - pause: Optional[float] = 0.5, + _time_now_s: float | None = None, + pause: float | None = 0.5, ) -> None: """Checks if an action can be performed. If not, raises a LimitExceededError @@ -396,7 +394,7 @@ class RequestRatelimiter: store: DataStore, clock: Clock, rc_message: RatelimitSettings, - rc_admin_redaction: Optional[RatelimitSettings], + rc_admin_redaction: RatelimitSettings | None, ): self.store = store self.clock = clock @@ -412,7 +410,7 @@ class RequestRatelimiter: # Check whether ratelimiting room admin message redaction is enabled # by the presence of rate limits in the config if rc_admin_redaction: - self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter( + self.admin_redaction_ratelimiter: Ratelimiter | None = Ratelimiter( store=self.store, clock=self.clock, cfg=rc_admin_redaction, diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index b6e76379f1..97dac661a3 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -18,7 +18,7 @@ # # -from typing import Callable, Optional +from typing import Callable import attr @@ -503,7 +503,7 @@ class RoomVersionCapability: """An object which describes the unique attributes of a room version.""" identifier: str # the identifier for this capability - preferred_version: Optional[RoomVersion] + preferred_version: RoomVersion | None support_check_lambda: Callable[[RoomVersion], bool] diff --git a/synapse/api/urls.py b/synapse/api/urls.py index baa6e2d390..b6147353d4 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -24,7 +24,6 @@ import hmac import urllib.parse from hashlib import sha256 -from typing import Optional from urllib.parse import urlencode, urljoin from synapse.config import ConfigError @@ -75,7 +74,7 @@ class LoginSSORedirectURIBuilder: self._public_baseurl = hs_config.server.public_baseurl def build_login_sso_redirect_uri( - self, *, idp_id: Optional[str], client_redirect_url: str + self, *, idp_id: str | None, client_redirect_url: str ) -> str: """Build a `/login/sso/redirect` URI for the given identity provider. 
diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 2de5bdb51e..52bdb9e0d7 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -36,8 +36,6 @@ from typing import ( Awaitable, Callable, NoReturn, - Optional, - Union, cast, ) from wsgiref.simple_server import WSGIServer @@ -180,8 +178,8 @@ def start_worker_reactor( def start_reactor( appname: str, soft_file_limit: int, - gc_thresholds: Optional[tuple[int, int, int]], - pid_file: Optional[str], + gc_thresholds: tuple[int, int, int] | None, + pid_file: str | None, daemonize: bool, print_pidfile: bool, logger: logging.Logger, @@ -421,7 +419,7 @@ def listen_http( root_resource: Resource, version_string: str, max_request_body_size: int, - context_factory: Optional[IOpenSSLContextFactory], + context_factory: IOpenSSLContextFactory | None, reactor: ISynapseReactor = reactor, ) -> list[Port]: """ @@ -564,9 +562,7 @@ def setup_sighup_handling() -> None: if _already_setup_sighup_handling: return - previous_sighup_handler: Union[ - Callable[[int, Optional[FrameType]], Any], int, None - ] = None + previous_sighup_handler: Callable[[int, FrameType | None], Any] | int | None = None # Set up the SIGHUP machinery. if hasattr(signal, "SIGHUP"): diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 193482b7fc..facc98164e 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -24,7 +24,7 @@ import logging import os import sys import tempfile -from typing import Mapping, Optional, Sequence +from typing import Mapping, Sequence from twisted.internet import defer, task @@ -136,7 +136,7 @@ class FileExfiltrationWriter(ExfiltrationWriter): to a temporary directory. """ - def __init__(self, user_id: str, directory: Optional[str] = None): + def __init__(self, user_id: str, directory: str | None = None): self.user_id = user_id if directory: @@ -291,7 +291,7 @@ def load_config(argv_options: list[str]) -> tuple[HomeServerConfig, argparse.Nam def create_homeserver( config: HomeServerConfig, - reactor: Optional[ISynapseReactor] = None, + reactor: ISynapseReactor | None = None, ) -> AdminCmdServer: """ Create a homeserver instance for the Synapse admin command process. diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py index 73e33d77a5..dcb45e234b 100644 --- a/synapse/app/complement_fork_starter.py +++ b/synapse/app/complement_fork_starter.py @@ -26,7 +26,7 @@ import os import signal import sys from types import FrameType -from typing import Any, Callable, Optional +from typing import Any, Callable from twisted.internet.main import installReactor @@ -172,7 +172,7 @@ def main() -> None: # Install signal handlers to propagate signals to all our children, so that they # shut down cleanly. This also inhibits our own exit, but that's good: we want to # wait until the children have exited. - def handle_signal(signum: int, frame: Optional[FrameType]) -> None: + def handle_signal(signum: int, frame: FrameType | None) -> None: print( f"complement_fork_starter: Caught signal {signum}. 
Stopping children.", file=sys.stderr, diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 0a4abd1839..9939c0fe7d 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -21,7 +21,6 @@ # import logging import sys -from typing import Optional from twisted.web.resource import Resource @@ -336,7 +335,7 @@ def load_config(argv_options: list[str]) -> HomeServerConfig: def create_homeserver( config: HomeServerConfig, - reactor: Optional[ISynapseReactor] = None, + reactor: ISynapseReactor | None = None, ) -> GenericWorkerServer: """ Create a homeserver instance for the Synapse worker process. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index bd51aad9ab..8fb906cdf7 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -22,7 +22,7 @@ import logging import os import sys -from typing import Iterable, Optional +from typing import Iterable from twisted.internet.tcp import Port from twisted.web.resource import EncodingResourceWrapper, Resource @@ -350,7 +350,7 @@ def load_or_generate_config(argv_options: list[str]) -> HomeServerConfig: def create_homeserver( config: HomeServerConfig, - reactor: Optional[ISynapseReactor] = None, + reactor: ISynapseReactor | None = None, ) -> SynapseHomeServer: """ Create a homeserver instance for the Synapse main process. diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index e91fa3a624..620aa29dfc 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -26,7 +26,6 @@ from enum import Enum from typing import ( TYPE_CHECKING, Iterable, - Optional, Pattern, Sequence, cast, @@ -95,12 +94,12 @@ class ApplicationService: token: str, id: str, sender: UserID, - url: Optional[str] = None, - namespaces: Optional[JsonDict] = None, - hs_token: Optional[str] = None, - protocols: Optional[Iterable[str]] = None, + url: str | None = None, + namespaces: JsonDict | None = None, + hs_token: str | None = None, + protocols: Iterable[str] | None = None, rate_limited: bool = True, - ip_range_whitelist: Optional[IPSet] = None, + ip_range_whitelist: IPSet | None = None, supports_ephemeral: bool = False, msc3202_transaction_extensions: bool = False, msc4190_device_management: bool = False, @@ -142,7 +141,7 @@ class ApplicationService: self.rate_limited = rate_limited def _check_namespaces( - self, namespaces: Optional[JsonDict] + self, namespaces: JsonDict | None ) -> dict[str, list[Namespace]]: # Sanity check that it is of the form: # { @@ -179,9 +178,7 @@ class ApplicationService: return result - def _matches_regex( - self, namespace_key: str, test_string: str - ) -> Optional[Namespace]: + def _matches_regex(self, namespace_key: str, test_string: str) -> Namespace | None: for namespace in self.namespaces[namespace_key]: if namespace.regex.match(test_string): return namespace diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index f08a921998..71094de9be 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -25,10 +25,8 @@ from typing import ( TYPE_CHECKING, Iterable, Mapping, - Optional, Sequence, TypeVar, - Union, ) from prometheus_client import Counter @@ -222,7 +220,7 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None try: - args: Mapping[bytes, Union[list[bytes], str]] = fields + args: Mapping[bytes, list[bytes] | str] = fields if self.config.use_appservice_legacy_authorization: args = { **fields, @@ -258,11 +256,11 @@ class ApplicationServiceApi(SimpleHttpClient): 
async def get_3pe_protocol( self, service: "ApplicationService", protocol: str - ) -> Optional[JsonDict]: + ) -> JsonDict | None: if service.url is None: return {} - async def _get() -> Optional[JsonDict]: + async def _get() -> JsonDict | None: # This is required by the configuration. assert service.hs_token is not None try: @@ -300,7 +298,7 @@ class ApplicationServiceApi(SimpleHttpClient): key = (service.id, protocol) return await self.protocol_meta_cache.wrap(key, _get) - async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> None: + async def ping(self, service: "ApplicationService", txn_id: str | None) -> None: # The caller should check that url is set assert service.url is not None, "ping called without URL being set" @@ -322,7 +320,7 @@ class ApplicationServiceApi(SimpleHttpClient): one_time_keys_count: TransactionOneTimeKeysCount, unused_fallback_keys: TransactionUnusedFallbackKeys, device_list_summary: DeviceListUpdates, - txn_id: Optional[int] = None, + txn_id: int | None = None, ) -> bool: """ Push data to an application service. diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index b5fab5f50d..30c22780bd 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -62,7 +62,6 @@ from typing import ( Callable, Collection, Iterable, - Optional, Sequence, ) @@ -123,10 +122,10 @@ class ApplicationServiceScheduler: def enqueue_for_appservice( self, appservice: ApplicationService, - events: Optional[Collection[EventBase]] = None, - ephemeral: Optional[Collection[JsonMapping]] = None, - to_device_messages: Optional[Collection[JsonMapping]] = None, - device_list_summary: Optional[DeviceListUpdates] = None, + events: Collection[EventBase] | None = None, + ephemeral: Collection[JsonMapping] | None = None, + to_device_messages: Collection[JsonMapping] | None = None, + device_list_summary: DeviceListUpdates | None = None, ) -> None: """ Enqueue some data to be sent off to an application service. 
@@ -260,8 +259,8 @@ class _ServiceQueuer: ): return - one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None - unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None + one_time_keys_count: TransactionOneTimeKeysCount | None = None + unused_fallback_keys: TransactionUnusedFallbackKeys | None = None if ( self._msc3202_transaction_extensions_enabled @@ -369,11 +368,11 @@ class _TransactionController: self, service: ApplicationService, events: Sequence[EventBase], - ephemeral: Optional[list[JsonMapping]] = None, - to_device_messages: Optional[list[JsonMapping]] = None, - one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None, - unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None, - device_list_summary: Optional[DeviceListUpdates] = None, + ephemeral: list[JsonMapping] | None = None, + to_device_messages: list[JsonMapping] | None = None, + one_time_keys_count: TransactionOneTimeKeysCount | None = None, + unused_fallback_keys: TransactionUnusedFallbackKeys | None = None, + device_list_summary: DeviceListUpdates | None = None, ) -> None: """ Create a transaction with the given data and send to the provided @@ -504,7 +503,7 @@ class _Recoverer: self.service = service self.callback = callback self.backoff_counter = 1 - self.scheduled_recovery: Optional[IDelayedCall] = None + self.scheduled_recovery: IDelayedCall | None = None def recover(self) -> None: delay = 2**self.backoff_counter diff --git a/synapse/config/_base.py b/synapse/config/_base.py index ce06905390..95a00c6718 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -36,9 +36,7 @@ from typing import ( Iterable, Iterator, MutableMapping, - Optional, TypeVar, - Union, ) import attr @@ -60,7 +58,7 @@ class ConfigError(Exception): the problem lies. """ - def __init__(self, msg: str, path: Optional[StrSequence] = None): + def __init__(self, msg: str, path: StrSequence | None = None): self.msg = msg self.path = path @@ -175,7 +173,7 @@ class Config: ) @staticmethod - def parse_size(value: Union[str, int]) -> int: + def parse_size(value: str | int) -> int: """Interpret `value` as a number of bytes. If an integer is provided it is treated as bytes and is unchanged. @@ -202,7 +200,7 @@ class Config: raise TypeError(f"Bad byte size {value!r}") @staticmethod - def parse_duration(value: Union[str, int]) -> int: + def parse_duration(value: str | int) -> int: """Convert a duration as a string or integer to a number of milliseconds. If an integer is provided it is treated as milliseconds and is unchanged. @@ -270,7 +268,7 @@ class Config: return path_exists(file_path) @classmethod - def check_file(cls, file_path: Optional[str], config_name: str) -> str: + def check_file(cls, file_path: str | None, config_name: str) -> str: if file_path is None: raise ConfigError("Missing config for %s." % (config_name,)) try: @@ -318,7 +316,7 @@ class Config: def read_templates( self, filenames: list[str], - custom_template_directories: Optional[Iterable[str]] = None, + custom_template_directories: Iterable[str] | None = None, ) -> list[jinja2.Template]: """Load a list of template files from disk using the given variables. 
@@ -465,11 +463,11 @@ class RootConfig: data_dir_path: str, server_name: str, generate_secrets: bool = False, - report_stats: Optional[bool] = None, + report_stats: bool | None = None, open_private_ports: bool = False, - listeners: Optional[list[dict]] = None, - tls_certificate_path: Optional[str] = None, - tls_private_key_path: Optional[str] = None, + listeners: list[dict] | None = None, + tls_certificate_path: str | None = None, + tls_private_key_path: str | None = None, ) -> str: """ Build a default configuration file @@ -655,7 +653,7 @@ class RootConfig: @classmethod def load_or_generate_config( cls: type[TRootConfig], description: str, argv_options: list[str] - ) -> Optional[TRootConfig]: + ) -> TRootConfig | None: """Parse the commandline and config files Supports generation of config files, so is used for the main homeserver app. @@ -898,7 +896,7 @@ class RootConfig: :returns: the previous config object, which no longer has a reference to this RootConfig. """ - existing_config: Optional[Config] = getattr(self, section_name, None) + existing_config: Config | None = getattr(self, section_name, None) if existing_config is None: raise ValueError(f"Unknown config section '{section_name}'") logger.info("Reloading config section '%s'", section_name) diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index 1a9cb7db47..fe9b3333c4 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -6,9 +6,7 @@ from typing import ( Iterator, Literal, MutableMapping, - Optional, TypeVar, - Union, overload, ) @@ -64,7 +62,7 @@ from synapse.config import ( # noqa: F401 from synapse.types import StrSequence class ConfigError(Exception): - def __init__(self, msg: str, path: Optional[StrSequence] = None): + def __init__(self, msg: str, path: StrSequence | None = None): self.msg = msg self.path = path @@ -146,16 +144,16 @@ class RootConfig: data_dir_path: str, server_name: str, generate_secrets: bool = ..., - report_stats: Optional[bool] = ..., + report_stats: bool | None = ..., open_private_ports: bool = ..., - listeners: Optional[Any] = ..., - tls_certificate_path: Optional[str] = ..., - tls_private_key_path: Optional[str] = ..., + listeners: Any | None = ..., + tls_certificate_path: str | None = ..., + tls_private_key_path: str | None = ..., ) -> str: ... @classmethod def load_or_generate_config( cls: type[TRootConfig], description: str, argv_options: list[str] - ) -> Optional[TRootConfig]: ... + ) -> TRootConfig | None: ... @classmethod def load_config( cls: type[TRootConfig], description: str, argv_options: list[str] @@ -183,11 +181,11 @@ class Config: default_template_dir: str def __init__(self, root_config: RootConfig = ...) -> None: ... @staticmethod - def parse_size(value: Union[str, int]) -> int: ... + def parse_size(value: str | int) -> int: ... @staticmethod - def parse_duration(value: Union[str, int]) -> int: ... + def parse_duration(value: str | int) -> int: ... @staticmethod - def abspath(file_path: Optional[str]) -> str: ... + def abspath(file_path: str | None) -> str: ... @classmethod def path_exists(cls, file_path: str) -> bool: ... @classmethod @@ -200,7 +198,7 @@ class Config: def read_templates( self, filenames: list[str], - custom_template_directories: Optional[Iterable[str]] = None, + custom_template_directories: Iterable[str] | None = None, ) -> list[jinja2.Template]: ... def read_config_files(config_files: Iterable[str]) -> dict[str, Any]: ... 
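The conversion applied across all of these files is the PEP 604 spelling of optional and union types: `Optional[X]` becomes `X | None` and `Union[X, Y]` becomes `X | Y`. A minimal before/after sketch of that pattern follows; the function and parameter names are made up for illustration and are not taken from Synapse.

    # Old spelling: requires Optional/Union imported from typing.
    from typing import Optional, Union

    def lookup_old(key: str, default: Optional[int] = None) -> Union[int, str, None]:
        return default

    # New spelling (PEP 604): the same signature written with the `|` operator.
    # The annotation evaluates natively on Python 3.10+ and needs no typing import.
    def lookup_new(key: str, default: int | None = None) -> int | str | None:
        return default
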
diff --git a/synapse/config/api.py b/synapse/config/api.py index e32e03e55e..03b92249a9 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -20,7 +20,7 @@ # import logging -from typing import Any, Iterable, Optional +from typing import Any, Iterable from synapse.api.constants import EventTypes from synapse.config._base import Config, ConfigError @@ -46,7 +46,7 @@ class ApiConfig(Config): def _get_prejoin_state_entries( self, config: JsonDict - ) -> Iterable[tuple[str, Optional[str]]]: + ) -> Iterable[tuple[str, str | None]]: """Get the event types and state keys to include in the prejoin state.""" room_prejoin_state_config = config.get("room_prejoin_state") or {} diff --git a/synapse/config/cache.py b/synapse/config/cache.py index e51efc3dbd..c9ce826e1a 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -23,7 +23,7 @@ import logging import os import re import threading -from typing import Any, Callable, Mapping, Optional +from typing import Any, Callable, Mapping import attr @@ -53,7 +53,7 @@ class CacheProperties: default_factor_size: float = float( os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE) ) - resize_all_caches_func: Optional[Callable[[], None]] = None + resize_all_caches_func: Callable[[], None] | None = None properties = CacheProperties() @@ -107,7 +107,7 @@ class CacheConfig(Config): cache_factors: dict[str, float] global_factor: float track_memory_usage: bool - expiry_time_msec: Optional[int] + expiry_time_msec: int | None sync_response_cache_duration: int @staticmethod diff --git a/synapse/config/cas.py b/synapse/config/cas.py index e6e869bb16..dc5be7ccf1 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -20,7 +20,7 @@ # # -from typing import Any, Optional +from typing import Any from synapse.config.sso import SsoAttributeRequirement from synapse.types import JsonDict @@ -49,7 +49,7 @@ class CasConfig(Config): # TODO Update this to a _synapse URL. 
public_baseurl = self.root.server.public_baseurl - self.cas_service_url: Optional[str] = ( + self.cas_service_url: str | None = ( public_baseurl + "_matrix/client/r0/login/cas/ticket" ) diff --git a/synapse/config/consent.py b/synapse/config/consent.py index 7dc80d4cf8..35484ee033 100644 --- a/synapse/config/consent.py +++ b/synapse/config/consent.py @@ -19,7 +19,7 @@ # from os import path -from typing import Any, Optional +from typing import Any from synapse.config import ConfigError from synapse.types import JsonDict @@ -33,11 +33,11 @@ class ConsentConfig(Config): def __init__(self, *args: Any): super().__init__(*args) - self.user_consent_version: Optional[str] = None - self.user_consent_template_dir: Optional[str] = None - self.user_consent_server_notice_content: Optional[JsonDict] = None + self.user_consent_version: str | None = None + self.user_consent_template_dir: str | None = None + self.user_consent_server_notice_content: JsonDict | None = None self.user_consent_server_notice_to_guests = False - self.block_events_without_consent_error: Optional[str] = None + self.block_events_without_consent_error: str | None = None self.user_consent_at_registration = False self.user_consent_policy_name = "Privacy Policy" diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index f82e8572f2..52c3ec0da2 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -59,7 +59,7 @@ class ClientAuthMethod(enum.Enum): PRIVATE_KEY_JWT = "private_key_jwt" -def _parse_jwks(jwks: Optional[JsonDict]) -> Optional["JsonWebKey"]: +def _parse_jwks(jwks: JsonDict | None) -> Optional["JsonWebKey"]: """A helper function to parse a JWK dict into a JsonWebKey.""" if jwks is None: @@ -71,7 +71,7 @@ def _parse_jwks(jwks: Optional[JsonDict]) -> Optional["JsonWebKey"]: def _check_client_secret( - instance: "MSC3861", _attribute: attr.Attribute, _value: Optional[str] + instance: "MSC3861", _attribute: attr.Attribute, _value: str | None ) -> None: if instance._client_secret and instance._client_secret_path: raise ConfigError( @@ -88,7 +88,7 @@ def _check_client_secret( def _check_admin_token( - instance: "MSC3861", _attribute: attr.Attribute, _value: Optional[str] + instance: "MSC3861", _attribute: attr.Attribute, _value: str | None ) -> None: if instance._admin_token and instance._admin_token_path: raise ConfigError( @@ -124,7 +124,7 @@ class MSC3861: issuer: str = attr.ib(default="", validator=attr.validators.instance_of(str)) """The URL of the OIDC Provider.""" - issuer_metadata: Optional[JsonDict] = attr.ib(default=None) + issuer_metadata: JsonDict | None = attr.ib(default=None) """The issuer metadata to use, otherwise discovered from /.well-known/openid-configuration as per MSC2965.""" client_id: str = attr.ib( @@ -138,7 +138,7 @@ class MSC3861: ) """The auth method used when calling the introspection endpoint.""" - _client_secret: Optional[str] = attr.ib( + _client_secret: str | None = attr.ib( default=None, validator=[ attr.validators.optional(attr.validators.instance_of(str)), @@ -150,7 +150,7 @@ class MSC3861: when using any of the client_secret_* client auth methods. 
""" - _client_secret_path: Optional[str] = attr.ib( + _client_secret_path: str | None = attr.ib( default=None, validator=[ attr.validators.optional(attr.validators.instance_of(str)), @@ -196,19 +196,19 @@ class MSC3861: ("experimental", "msc3861", "client_auth_method"), ) - introspection_endpoint: Optional[str] = attr.ib( + introspection_endpoint: str | None = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(str)), ) """The URL of the introspection endpoint used to validate access tokens.""" - account_management_url: Optional[str] = attr.ib( + account_management_url: str | None = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(str)), ) """The URL of the My Account page on the OIDC Provider as per MSC2965.""" - _admin_token: Optional[str] = attr.ib( + _admin_token: str | None = attr.ib( default=None, validator=[ attr.validators.optional(attr.validators.instance_of(str)), @@ -220,7 +220,7 @@ class MSC3861: This is used by the OIDC provider, to make admin calls to Synapse. """ - _admin_token_path: Optional[str] = attr.ib( + _admin_token_path: str | None = attr.ib( default=None, validator=[ attr.validators.optional(attr.validators.instance_of(str)), @@ -232,7 +232,7 @@ class MSC3861: external file. """ - def client_secret(self) -> Optional[str]: + def client_secret(self) -> str | None: """Returns the secret given via `client_secret` or `client_secret_path`.""" if self._client_secret_path: return read_secret_from_file_once( @@ -241,7 +241,7 @@ class MSC3861: ) return self._client_secret - def admin_token(self) -> Optional[str]: + def admin_token(self) -> str | None: """Returns the admin token given via `admin_token` or `admin_token_path`.""" if self._admin_token_path: return read_secret_from_file_once( @@ -526,7 +526,7 @@ class ExperimentalConfig(Config): # MSC4108: Mechanism to allow OIDC sign in and E2EE set up via QR code self.msc4108_enabled = experimental.get("msc4108_enabled", False) - self.msc4108_delegation_endpoint: Optional[str] = experimental.get( + self.msc4108_delegation_endpoint: str | None = experimental.get( "msc4108_delegation_endpoint", None ) diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 31f46e420d..ad0bd56a80 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Optional +from typing import Any from synapse.config._base import Config from synapse.config._util import validate_config @@ -32,7 +32,7 @@ class FederationConfig(Config): federation_config = config.setdefault("federation", {}) # FIXME: federation_domain_whitelist needs sytests - self.federation_domain_whitelist: Optional[dict] = None + self.federation_domain_whitelist: dict | None = None federation_domain_whitelist = config.get("federation_domain_whitelist", None) if federation_domain_whitelist is not None: diff --git a/synapse/config/key.py b/synapse/config/key.py index 3e832b4946..bfeeac5e30 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -23,7 +23,7 @@ import hashlib import logging import os -from typing import TYPE_CHECKING, Any, Iterator, Optional +from typing import TYPE_CHECKING, Any, Iterator import attr import jsonschema @@ -110,7 +110,7 @@ class TrustedKeyServer: server_name: str # map from key id to key object, or None to disable signature verification. 
- verify_keys: Optional[dict[str, VerifyKey]] = None + verify_keys: dict[str, VerifyKey] | None = None class KeyConfig(Config): @@ -219,7 +219,7 @@ class KeyConfig(Config): if form_secret_path: if form_secret: raise ConfigError(CONFLICTING_FORM_SECRET_OPTS_ERROR) - self.form_secret: Optional[str] = read_file( + self.form_secret: str | None = read_file( form_secret_path, ("form_secret_path",) ).strip() else: @@ -279,7 +279,7 @@ class KeyConfig(Config): raise ConfigError("Error reading %s: %s" % (name, str(e))) def read_old_signing_keys( - self, old_signing_keys: Optional[JsonDict] + self, old_signing_keys: JsonDict | None ) -> dict[str, "VerifyKeyWithExpiry"]: if old_signing_keys is None: return {} @@ -408,7 +408,7 @@ def _parse_key_servers( server_name = server["server_name"] result = TrustedKeyServer(server_name=server_name) - verify_keys: Optional[dict[str, str]] = server.get("verify_keys") + verify_keys: dict[str, str] | None = server.get("verify_keys") if verify_keys is not None: result.verify_keys = {} for key_id, key_base64 in verify_keys.items(): diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 1f5c6da3ae..4af73627be 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -26,7 +26,7 @@ import os import sys import threading from string import Template -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any import yaml from zope.interface import implementer @@ -280,7 +280,7 @@ def one_time_logging_setup(*, logBeginner: LogBeginner = globalLogBeginner) -> N def _setup_stdlib_logging( - config: "HomeServerConfig", log_config_path: Optional[str] + config: "HomeServerConfig", log_config_path: str | None ) -> None: """ Set up Python standard library logging. @@ -327,7 +327,7 @@ def _load_logging_config(log_config_path: str) -> None: reset_logging_config() -def _reload_logging_config(log_config_path: Optional[str]) -> None: +def _reload_logging_config(log_config_path: str | None) -> None: """ Reload the log configuration from the file and apply it. """ diff --git a/synapse/config/mas.py b/synapse/config/mas.py index 53cf500e95..dd982589a8 100644 --- a/synapse/config/mas.py +++ b/synapse/config/mas.py @@ -13,7 +13,7 @@ # # -from typing import Any, Optional +from typing import Any from pydantic import ( AnyHttpUrl, @@ -36,8 +36,8 @@ from ._base import Config, ConfigError, RootConfig class MasConfigModel(ParseModel): enabled: StrictBool = False endpoint: AnyHttpUrl = AnyHttpUrl("http://localhost:8080") - secret: Optional[StrictStr] = Field(default=None) - secret_path: Optional[FilePath] = Field(default=None) + secret: StrictStr | None = Field(default=None) + secret_path: FilePath | None = Field(default=None) @model_validator(mode="after") def verify_secret(self) -> Self: diff --git a/synapse/config/matrixrtc.py b/synapse/config/matrixrtc.py index 74fd7cad81..84c245e286 100644 --- a/synapse/config/matrixrtc.py +++ b/synapse/config/matrixrtc.py @@ -15,7 +15,7 @@ # # -from typing import Any, Optional +from typing import Any from pydantic import Field, StrictStr, ValidationError, model_validator from typing_extensions import Self @@ -29,7 +29,7 @@ from ._base import Config, ConfigError class TransportConfigModel(ParseModel): type: StrictStr - livekit_service_url: Optional[StrictStr] = Field(default=None) + livekit_service_url: StrictStr | None = Field(default=None) """An optional livekit service URL. 
Only required if type is "livekit".""" @model_validator(mode="after") diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index 8a4ded62ef..83dbee53b6 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -20,7 +20,7 @@ # # -from typing import Any, Optional +from typing import Any import attr @@ -75,7 +75,7 @@ class MetricsConfig(Config): ) def generate_config_section( - self, report_stats: Optional[bool] = None, **kwargs: Any + self, report_stats: bool | None = None, **kwargs: Any ) -> str: if report_stats is not None: res = "report_stats: %s\n" % ("true" if report_stats else "false") diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py index a4a192302c..208f86374b 100644 --- a/synapse/config/oembed.py +++ b/synapse/config/oembed.py @@ -21,7 +21,7 @@ import importlib.resources as importlib_resources import json import re -from typing import Any, Iterable, Optional, Pattern +from typing import Any, Iterable, Pattern from urllib import parse as urlparse import attr @@ -39,7 +39,7 @@ class OEmbedEndpointConfig: # The patterns to match. url_patterns: list[Pattern[str]] # The supported formats. - formats: Optional[list[str]] + formats: list[str] | None class OembedConfig(Config): diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index ada89bb8bc..73fe6891cd 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -21,7 +21,7 @@ # from collections import Counter -from typing import Any, Collection, Iterable, Mapping, Optional +from typing import Any, Collection, Iterable, Mapping import attr @@ -276,7 +276,7 @@ def _parse_oidc_config_dict( ) from e client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key") - client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] = None + client_secret_jwt_key: OidcProviderClientSecretJwtKey | None = None if client_secret_jwt_key_config is not None: keyfile = client_secret_jwt_key_config.get("key_file") if keyfile: @@ -384,10 +384,10 @@ class OidcProviderConfig: idp_name: str # Optional MXC URI for icon for this IdP. - idp_icon: Optional[str] + idp_icon: str | None # Optional brand identifier for this IdP. - idp_brand: Optional[str] + idp_brand: str | None # whether the OIDC discovery mechanism is used to discover endpoints discover: bool @@ -401,11 +401,11 @@ class OidcProviderConfig: # oauth2 client secret to use. if `None`, use client_secret_jwt_key to generate # a secret. - client_secret: Optional[str] + client_secret: str | None # key to use to construct a JWT to use as a client secret. May be `None` if # `client_secret` is set. - client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] + client_secret_jwt_key: OidcProviderClientSecretJwtKey | None # auth method to use when exchanging the token. # Valid values are 'client_secret_basic', 'client_secret_post' and @@ -416,7 +416,7 @@ class OidcProviderConfig: # Valid values are 'auto', 'always', and 'never'. pkce_method: str - id_token_signing_alg_values_supported: Optional[list[str]] + id_token_signing_alg_values_supported: list[str] | None """ List of the JWS signing algorithms (`alg` values) that are supported for signing the `id_token`. @@ -448,18 +448,18 @@ class OidcProviderConfig: scopes: Collection[str] # the oauth2 authorization endpoint. Required if discovery is disabled. - authorization_endpoint: Optional[str] + authorization_endpoint: str | None # the oauth2 token endpoint. Required if discovery is disabled. - token_endpoint: Optional[str] + token_endpoint: str | None # the OIDC userinfo endpoint. 
Required if discovery is disabled and the # "openid" scope is not requested. - userinfo_endpoint: Optional[str] + userinfo_endpoint: str | None # URI where to fetch the JWKS. Required if discovery is disabled and the # "openid" scope is used. - jwks_uri: Optional[str] + jwks_uri: str | None # Whether Synapse should react to backchannel logouts backchannel_logout_enabled: bool @@ -474,7 +474,7 @@ class OidcProviderConfig: # values are: "auto" or "userinfo_endpoint". user_profile_method: str - redirect_uri: Optional[str] + redirect_uri: str | None """ An optional replacement for Synapse's hardcoded `redirect_uri` URL (`/_synapse/client/oidc/callback`). This can be used to send diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index be2f49f87c..78d9d61d3c 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -19,7 +19,7 @@ # # -from typing import Any, Optional, cast +from typing import Any, cast import attr @@ -39,7 +39,7 @@ class RatelimitSettings: cls, config: dict[str, Any], key: str, - defaults: Optional[dict[str, float]] = None, + defaults: dict[str, float] | None = None, ) -> "RatelimitSettings": """Parse config[key] as a new-style rate limiter config. diff --git a/synapse/config/registration.py b/synapse/config/registration.py index c0e7316bc3..7f7a224e02 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -20,7 +20,7 @@ # # import argparse -from typing import Any, Optional +from typing import Any from synapse.api.constants import RoomCreationPreset from synapse.config._base import Config, ConfigError, read_file @@ -181,7 +181,7 @@ class RegistrationConfig(Config): refreshable_access_token_lifetime = self.parse_duration( refreshable_access_token_lifetime ) - self.refreshable_access_token_lifetime: Optional[int] = ( + self.refreshable_access_token_lifetime: int | None = ( refreshable_access_token_lifetime ) @@ -226,7 +226,7 @@ class RegistrationConfig(Config): refresh_token_lifetime = config.get("refresh_token_lifetime") if refresh_token_lifetime is not None: refresh_token_lifetime = self.parse_duration(refresh_token_lifetime) - self.refresh_token_lifetime: Optional[int] = refresh_token_lifetime + self.refresh_token_lifetime: int | None = refresh_token_lifetime if ( self.session_lifetime is not None diff --git a/synapse/config/retention.py b/synapse/config/retention.py index 9d34f1e241..ab80ac214d 100644 --- a/synapse/config/retention.py +++ b/synapse/config/retention.py @@ -20,7 +20,7 @@ # import logging -from typing import Any, Optional +from typing import Any import attr @@ -35,8 +35,8 @@ class RetentionPurgeJob: """Object describing the configuration of the manhole""" interval: int - shortest_max_lifetime: Optional[int] - longest_max_lifetime: Optional[int] + shortest_max_lifetime: int | None + longest_max_lifetime: int | None class RetentionConfig(Config): diff --git a/synapse/config/server.py b/synapse/config/server.py index 662ed24a13..495f289159 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -25,7 +25,7 @@ import logging import os.path import urllib.parse from textwrap import indent -from typing import Any, Iterable, Optional, TypedDict, Union +from typing import Any, Iterable, TypedDict from urllib.request import getproxies_environment import attr @@ -95,9 +95,9 @@ def _6to4(network: IPNetwork) -> IPNetwork: def generate_ip_set( - ip_addresses: Optional[Iterable[str]], - extra_addresses: Optional[Iterable[str]] = None, - config_path: Optional[StrSequence] = None, + 
ip_addresses: Iterable[str] | None, + extra_addresses: Iterable[str] | None = None, + config_path: StrSequence | None = None, ) -> IPSet: """ Generate an IPSet from a list of IP addresses or CIDRs. @@ -230,8 +230,8 @@ class HttpListenerConfig: x_forwarded: bool = False resources: list[HttpResourceConfig] = attr.Factory(list) additional_resources: dict[str, dict] = attr.Factory(dict) - tag: Optional[str] = None - request_id_header: Optional[str] = None + tag: str | None = None + request_id_header: str | None = None @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -244,7 +244,7 @@ class TCPListenerConfig: tls: bool = False # http_options is only populated if type=http - http_options: Optional[HttpListenerConfig] = None + http_options: HttpListenerConfig | None = None def get_site_tag(self) -> str: """Retrieves http_options.tag if it exists, otherwise the port number.""" @@ -269,7 +269,7 @@ class UnixListenerConfig: type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES)) # http_options is only populated if type=http - http_options: Optional[HttpListenerConfig] = None + http_options: HttpListenerConfig | None = None def get_site_tag(self) -> str: return "unix" @@ -279,7 +279,7 @@ class UnixListenerConfig: return False -ListenerConfig = Union[TCPListenerConfig, UnixListenerConfig] +ListenerConfig = TCPListenerConfig | UnixListenerConfig @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -288,14 +288,14 @@ class ManholeConfig: username: str = attr.ib(validator=attr.validators.instance_of(str)) password: str = attr.ib(validator=attr.validators.instance_of(str)) - priv_key: Optional[Key] - pub_key: Optional[Key] + priv_key: Key | None + pub_key: Key | None @attr.s(frozen=True) class LimitRemoteRoomsConfig: enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) - complexity: Union[float, int] = attr.ib( + complexity: float | int = attr.ib( validator=attr.validators.instance_of((float, int)), # noqa default=1.0, ) @@ -313,11 +313,11 @@ class ProxyConfigDictionary(TypedDict): Dictionary of proxy settings suitable for interacting with `urllib.request` API's """ - http: Optional[str] + http: str | None """ Proxy server to use for HTTP requests. """ - https: Optional[str] + https: str | None """ Proxy server to use for HTTPS requests. """ @@ -336,15 +336,15 @@ class ProxyConfig: Synapse configuration for HTTP proxy settings. """ - http_proxy: Optional[str] + http_proxy: str | None """ Proxy server to use for HTTP requests. """ - https_proxy: Optional[str] + https_proxy: str | None """ Proxy server to use for HTTPS requests. """ - no_proxy_hosts: Optional[list[str]] + no_proxy_hosts: list[str] | None """ List of hosts, IP addresses, or IP ranges in CIDR format which should not use the proxy. Synapse will directly connect to these hosts. @@ -607,7 +607,7 @@ class ServerConfig(Config): # before redacting them. 
redaction_retention_period = config.get("redaction_retention_period", "7d") if redaction_retention_period is not None: - self.redaction_retention_period: Optional[int] = self.parse_duration( + self.redaction_retention_period: int | None = self.parse_duration( redaction_retention_period ) else: @@ -618,7 +618,7 @@ class ServerConfig(Config): "forgotten_room_retention_period", None ) if forgotten_room_retention_period is not None: - self.forgotten_room_retention_period: Optional[int] = self.parse_duration( + self.forgotten_room_retention_period: int | None = self.parse_duration( forgotten_room_retention_period ) else: @@ -627,7 +627,7 @@ class ServerConfig(Config): # How long to keep entries in the `users_ips` table. user_ips_max_age = config.get("user_ips_max_age", "28d") if user_ips_max_age is not None: - self.user_ips_max_age: Optional[int] = self.parse_duration(user_ips_max_age) + self.user_ips_max_age: int | None = self.parse_duration(user_ips_max_age) else: self.user_ips_max_age = None @@ -864,11 +864,11 @@ class ServerConfig(Config): ) # Whitelist of domain names that given next_link parameters must have - next_link_domain_whitelist: Optional[list[str]] = config.get( + next_link_domain_whitelist: list[str] | None = config.get( "next_link_domain_whitelist" ) - self.next_link_domain_whitelist: Optional[set[str]] = None + self.next_link_domain_whitelist: set[str] | None = None if next_link_domain_whitelist is not None: if not isinstance(next_link_domain_whitelist, list): raise ConfigError("'next_link_domain_whitelist' must be a list") @@ -880,7 +880,7 @@ class ServerConfig(Config): if not isinstance(templates_config, dict): raise ConfigError("The 'templates' section must be a dictionary") - self.custom_template_directory: Optional[str] = templates_config.get( + self.custom_template_directory: str | None = templates_config.get( "custom_template_directory" ) if self.custom_template_directory is not None and not isinstance( @@ -896,12 +896,12 @@ class ServerConfig(Config): config.get("exclude_rooms_from_sync") or [] ) - delete_stale_devices_after: Optional[str] = ( + delete_stale_devices_after: str | None = ( config.get("delete_stale_devices_after") or None ) if delete_stale_devices_after is not None: - self.delete_stale_devices_after: Optional[int] = self.parse_duration( + self.delete_stale_devices_after: int | None = self.parse_duration( delete_stale_devices_after ) else: @@ -910,7 +910,7 @@ class ServerConfig(Config): # The maximum allowed delay duration for delayed events (MSC4140). 
max_event_delay_duration = config.get("max_event_delay_duration") if max_event_delay_duration is not None: - self.max_event_delay_ms: Optional[int] = self.parse_duration( + self.max_event_delay_ms: int | None = self.parse_duration( max_event_delay_duration ) if self.max_event_delay_ms <= 0: @@ -927,7 +927,7 @@ class ServerConfig(Config): data_dir_path: str, server_name: str, open_private_ports: bool, - listeners: Optional[list[dict]], + listeners: list[dict] | None, **kwargs: Any, ) -> str: _, bind_port = parse_and_validate_server_name(server_name) @@ -1028,7 +1028,7 @@ class ServerConfig(Config): help="Turn on the twisted telnet manhole service on the given port.", ) - def read_gc_intervals(self, durations: Any) -> Optional[tuple[float, float, float]]: + def read_gc_intervals(self, durations: Any) -> tuple[float, float, float] | None: """Reads the three durations for the GC min interval option, returning seconds.""" if durations is None: return None @@ -1066,8 +1066,8 @@ def is_threepid_reserved( def read_gc_thresholds( - thresholds: Optional[list[Any]], -) -> Optional[tuple[int, int, int]]: + thresholds: list[Any] | None, +) -> tuple[int, int, int] | None: """Reads the three integer thresholds for garbage collection. Ensures that the thresholds are integers if thresholds are supplied. """ diff --git a/synapse/config/server_notices.py b/synapse/config/server_notices.py index 4de2d62b54..d19e2569a1 100644 --- a/synapse/config/server_notices.py +++ b/synapse/config/server_notices.py @@ -18,7 +18,7 @@ # # -from typing import Any, Optional +from typing import Any from synapse.types import JsonDict, UserID @@ -58,12 +58,12 @@ class ServerNoticesConfig(Config): def __init__(self, *args: Any): super().__init__(*args) - self.server_notices_mxid: Optional[str] = None - self.server_notices_mxid_display_name: Optional[str] = None - self.server_notices_mxid_avatar_url: Optional[str] = None - self.server_notices_room_name: Optional[str] = None - self.server_notices_room_avatar_url: Optional[str] = None - self.server_notices_room_topic: Optional[str] = None + self.server_notices_mxid: str | None = None + self.server_notices_mxid_display_name: str | None = None + self.server_notices_mxid_avatar_url: str | None = None + self.server_notices_room_name: str | None = None + self.server_notices_room_avatar_url: str | None = None + self.server_notices_room_topic: str | None = None self.server_notices_auto_join: bool = False def read_config(self, config: JsonDict, **kwargs: Any) -> None: diff --git a/synapse/config/sso.py b/synapse/config/sso.py index facb418510..1d08bef868 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -19,7 +19,7 @@ # # import logging -from typing import Any, Optional +from typing import Any import attr @@ -44,8 +44,8 @@ class SsoAttributeRequirement: attribute: str # If neither `value` nor `one_of` is given, the attribute must simply exist. 
- value: Optional[str] = None - one_of: Optional[list[str]] = None + value: str | None = None + one_of: list[str] | None = None JSON_SCHEMA = { "type": "object", diff --git a/synapse/config/tls.py b/synapse/config/tls.py index d03a77d9d2..de4d676e08 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -20,7 +20,7 @@ # import logging -from typing import Any, Optional, Pattern +from typing import Any, Pattern from matrix_common.regex import glob_to_regex @@ -135,8 +135,8 @@ class TlsConfig(Config): "use_insecure_ssl_client_just_for_testing_do_not_use" ) - self.tls_certificate: Optional[crypto.X509] = None - self.tls_private_key: Optional[crypto.PKey] = None + self.tls_certificate: crypto.X509 | None = None + self.tls_private_key: crypto.PKey | None = None def read_certificate_from_disk(self) -> None: """ @@ -147,8 +147,8 @@ class TlsConfig(Config): def generate_config_section( self, - tls_certificate_path: Optional[str], - tls_private_key_path: Optional[str], + tls_certificate_path: str | None, + tls_private_key_path: str | None, **kwargs: Any, ) -> str: """If the TLS paths are not specified the default will be certs in the diff --git a/synapse/config/user_types.py b/synapse/config/user_types.py index dd64425d6c..e47713b7f4 100644 --- a/synapse/config/user_types.py +++ b/synapse/config/user_types.py @@ -12,7 +12,7 @@ # . # -from typing import Any, Optional +from typing import Any from synapse.api.constants import UserTypes from synapse.types import JsonDict @@ -26,9 +26,7 @@ class UserTypesConfig(Config): def read_config(self, config: JsonDict, **kwargs: Any) -> None: user_types: JsonDict = config.get("user_types", {}) - self.default_user_type: Optional[str] = user_types.get( - "default_user_type", None - ) + self.default_user_type: str | None = user_types.get("default_user_type", None) self.extra_user_types: list[str] = user_types.get("extra_user_types", []) all_user_types: list[str] = [] diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 90f8c72412..ec8ab9506b 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -22,7 +22,7 @@ import argparse import logging -from typing import Any, Optional, Union +from typing import Any import attr from pydantic import ( @@ -79,7 +79,7 @@ MAIN_PROCESS_INSTANCE_MAP_NAME = "main" logger = logging.getLogger(__name__) -def _instance_to_list_converter(obj: Union[str, list[str]]) -> list[str]: +def _instance_to_list_converter(obj: str | list[str]) -> list[str]: """Helper for allowing parsing a string or list of strings to a config option expecting a list of strings. """ @@ -119,7 +119,7 @@ class InstanceUnixLocationConfig(ParseModel): return f"{self.path}" -InstanceLocationConfig = Union[InstanceTcpLocationConfig, InstanceUnixLocationConfig] +InstanceLocationConfig = InstanceTcpLocationConfig | InstanceUnixLocationConfig @attr.s @@ -190,7 +190,7 @@ class OutboundFederationRestrictedTo: locations: list of instance locations to connect to proxy via. 
""" - instances: Optional[list[str]] + instances: list[str] | None locations: list[InstanceLocationConfig] = attr.Factory(list) def __contains__(self, instance: str) -> bool: @@ -246,7 +246,7 @@ class WorkerConfig(Config): if worker_replication_secret_path: if worker_replication_secret: raise ConfigError(CONFLICTING_WORKER_REPLICATION_SECRET_OPTS_ERROR) - self.worker_replication_secret: Optional[str] = read_file( + self.worker_replication_secret: str | None = read_file( worker_replication_secret_path, ("worker_replication_secret_path",) ).strip() else: @@ -341,7 +341,7 @@ class WorkerConfig(Config): % MAIN_PROCESS_INSTANCE_MAP_NAME ) - # type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently + # type-ignore: the expression `A | B` is not a `type[A | B]` currently self.instance_map: dict[str, InstanceLocationConfig] = ( parse_and_validate_mapping( instance_map, diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 24a693fdb1..3abb644df5 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -21,7 +21,7 @@ import abc import logging -from typing import TYPE_CHECKING, Callable, Iterable, Optional +from typing import TYPE_CHECKING, Callable, Iterable import attr from signedjson.key import ( @@ -150,7 +150,7 @@ class Keyring: """ def __init__( - self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None + self, hs: "HomeServer", key_fetchers: "Iterable[KeyFetcher] | None" = None ): self.server_name = hs.hostname diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 5d927a925a..66f50115e3 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -160,7 +160,7 @@ def validate_event_for_room_version(event: "EventBase") -> None: async def check_state_independent_auth_rules( store: _EventSourceStore, event: "EventBase", - batched_auth_events: Optional[Mapping[str, "EventBase"]] = None, + batched_auth_events: Mapping[str, "EventBase"] | None = None, ) -> None: """Check that an event complies with auth rules that are independent of room state @@ -788,7 +788,7 @@ def _check_joined_room( def get_send_level( - etype: str, state_key: Optional[str], power_levels_event: Optional["EventBase"] + etype: str, state_key: str | None, power_levels_event: Optional["EventBase"] ) -> int: """Get the power level required to send an event of a given type @@ -989,7 +989,7 @@ def _check_power_levels( user_level = get_user_power_level(event.user_id, auth_events) # Check other levels: - levels_to_check: list[tuple[str, Optional[str]]] = [ + levels_to_check: list[tuple[str, str | None]] = [ ("users_default", None), ("events_default", None), ("state_default", None), @@ -1027,12 +1027,12 @@ def _check_power_levels( new_loc = new_loc.get(dir, {}) if level_to_check in old_loc: - old_level: Optional[int] = int(old_loc[level_to_check]) + old_level: int | None = int(old_loc[level_to_check]) else: old_level = None if level_to_check in new_loc: - new_level: Optional[int] = int(new_loc[level_to_check]) + new_level: int | None = int(new_loc[level_to_check]) else: new_level = None diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index a353076e0d..5f78603782 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -28,7 +28,6 @@ from typing import ( Generic, Iterable, Literal, - Optional, TypeVar, Union, overload, @@ -90,21 +89,21 @@ class DictProperty(Generic[T]): def __get__( self, instance: Literal[None], - owner: Optional[type[_DictPropertyInstance]] = None, + owner: type[_DictPropertyInstance] | 
None = None, ) -> "DictProperty": ... @overload def __get__( self, instance: _DictPropertyInstance, - owner: Optional[type[_DictPropertyInstance]] = None, + owner: type[_DictPropertyInstance] | None = None, ) -> T: ... def __get__( self, - instance: Optional[_DictPropertyInstance], - owner: Optional[type[_DictPropertyInstance]] = None, - ) -> Union[T, "DictProperty"]: + instance: _DictPropertyInstance | None, + owner: type[_DictPropertyInstance] | None = None, + ) -> T | "DictProperty": # if the property is accessed as a class property rather than an instance # property, return the property itself rather than the value if instance is None: @@ -156,21 +155,21 @@ class DefaultDictProperty(DictProperty, Generic[T]): def __get__( self, instance: Literal[None], - owner: Optional[type[_DictPropertyInstance]] = None, + owner: type[_DictPropertyInstance] | None = None, ) -> "DefaultDictProperty": ... @overload def __get__( self, instance: _DictPropertyInstance, - owner: Optional[type[_DictPropertyInstance]] = None, + owner: type[_DictPropertyInstance] | None = None, ) -> T: ... def __get__( self, - instance: Optional[_DictPropertyInstance], - owner: Optional[type[_DictPropertyInstance]] = None, - ) -> Union[T, "DefaultDictProperty"]: + instance: _DictPropertyInstance | None, + owner: type[_DictPropertyInstance] | None = None, + ) -> T | "DefaultDictProperty": if instance is None: return self assert isinstance(instance, EventBase) @@ -191,7 +190,7 @@ class EventBase(metaclass=abc.ABCMeta): signatures: dict[str, dict[str, str]], unsigned: JsonDict, internal_metadata_dict: JsonDict, - rejected_reason: Optional[str], + rejected_reason: str | None, ): assert room_version.event_format == self.format_version @@ -209,7 +208,7 @@ class EventBase(metaclass=abc.ABCMeta): hashes: DictProperty[dict[str, str]] = DictProperty("hashes") origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts") sender: DictProperty[str] = DictProperty("sender") - # TODO state_key should be Optional[str]. This is generally asserted in Synapse + # TODO state_key should be str | None. This is generally asserted in Synapse # by calling is_state() first (which ensures it is not None), but it is hard (not possible?) # to properly annotate that calling is_state() asserts that state_key exists # and is non-None. 
It would be better to replace such direct references with @@ -231,7 +230,7 @@ class EventBase(metaclass=abc.ABCMeta): return self.content["membership"] @property - def redacts(self) -> Optional[str]: + def redacts(self) -> str | None: """MSC2176 moved the redacts field into the content.""" if self.room_version.updated_redaction_rules: return self.content.get("redacts") @@ -240,7 +239,7 @@ class EventBase(metaclass=abc.ABCMeta): def is_state(self) -> bool: return self.get_state_key() is not None - def get_state_key(self) -> Optional[str]: + def get_state_key(self) -> str | None: """Get the state key of this event, or None if it's not a state event""" return self._dict.get("state_key") @@ -250,13 +249,13 @@ class EventBase(metaclass=abc.ABCMeta): return d - def get(self, key: str, default: Optional[Any] = None) -> Any: + def get(self, key: str, default: Any | None = None) -> Any: return self._dict.get(key, default) def get_internal_metadata_dict(self) -> JsonDict: return self.internal_metadata.get_dict() - def get_pdu_json(self, time_now: Optional[int] = None) -> JsonDict: + def get_pdu_json(self, time_now: int | None = None) -> JsonDict: pdu_json = self.get_dict() if time_now is not None and "age_ts" in pdu_json["unsigned"]: @@ -283,13 +282,13 @@ class EventBase(metaclass=abc.ABCMeta): return template_json - def __getitem__(self, field: str) -> Optional[Any]: + def __getitem__(self, field: str) -> Any | None: return self._dict[field] def __contains__(self, field: str) -> bool: return field in self._dict - def items(self) -> list[tuple[str, Optional[Any]]]: + def items(self) -> list[tuple[str, Any | None]]: return list(self._dict.items()) def keys(self) -> Iterable[str]: @@ -348,8 +347,8 @@ class FrozenEvent(EventBase): self, event_dict: JsonDict, room_version: RoomVersion, - internal_metadata_dict: Optional[JsonDict] = None, - rejected_reason: Optional[str] = None, + internal_metadata_dict: JsonDict | None = None, + rejected_reason: str | None = None, ): internal_metadata_dict = internal_metadata_dict or {} @@ -400,8 +399,8 @@ class FrozenEventV2(EventBase): self, event_dict: JsonDict, room_version: RoomVersion, - internal_metadata_dict: Optional[JsonDict] = None, - rejected_reason: Optional[str] = None, + internal_metadata_dict: JsonDict | None = None, + rejected_reason: str | None = None, ): internal_metadata_dict = internal_metadata_dict or {} @@ -427,7 +426,7 @@ class FrozenEventV2(EventBase): else: frozen_dict = event_dict - self._event_id: Optional[str] = None + self._event_id: str | None = None super().__init__( frozen_dict, @@ -502,8 +501,8 @@ class FrozenEventV4(FrozenEventV3): self, event_dict: JsonDict, room_version: RoomVersion, - internal_metadata_dict: Optional[JsonDict] = None, - rejected_reason: Optional[str] = None, + internal_metadata_dict: JsonDict | None = None, + rejected_reason: str | None = None, ): super().__init__( event_dict=event_dict, @@ -511,7 +510,7 @@ class FrozenEventV4(FrozenEventV3): internal_metadata_dict=internal_metadata_dict, rejected_reason=rejected_reason, ) - self._room_id: Optional[str] = None + self._room_id: str | None = None @property def room_id(self) -> str: @@ -554,7 +553,7 @@ class FrozenEventV4(FrozenEventV3): def _event_type_from_format_version( format_version: int, -) -> type[Union[FrozenEvent, FrozenEventV2, FrozenEventV3]]: +) -> type[FrozenEvent | FrozenEventV2 | FrozenEventV3]: """Returns the python type to use to construct an Event object for the given event format version. 
@@ -580,8 +579,8 @@ def _event_type_from_format_version( def make_event_from_dict( event_dict: JsonDict, room_version: RoomVersion = RoomVersions.V1, - internal_metadata_dict: Optional[JsonDict] = None, - rejected_reason: Optional[str] = None, + internal_metadata_dict: JsonDict | None = None, + rejected_reason: str | None = None, ) -> EventBase: """Construct an EventBase from the given event dict""" event_type = _event_type_from_format_version(room_version.event_format) @@ -598,10 +597,10 @@ class _EventRelation: rel_type: str # The aggregation key. Will be None if the rel_type is not m.annotation or is # not a string. - aggregation_key: Optional[str] + aggregation_key: str | None -def relation_from_event(event: EventBase) -> Optional[_EventRelation]: +def relation_from_event(event: EventBase) -> _EventRelation | None: """ Attempt to parse relation information an event. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index a57303c999..6a2812109d 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Any, Optional, Union +from typing import TYPE_CHECKING, Any import attr from signedjson.types import SigningKey @@ -83,7 +83,7 @@ class EventBuilder: room_version: RoomVersion # MSC4291 makes the room ID == the create event ID. This means the create event has no room_id. - room_id: Optional[str] + room_id: str | None type: str sender: str @@ -92,9 +92,9 @@ class EventBuilder: # These only exist on a subset of events, so they raise AttributeError if # someone tries to get them when they don't exist. - _state_key: Optional[str] = None - _redacts: Optional[str] = None - _origin_server_ts: Optional[int] = None + _state_key: str | None = None + _redacts: str | None = None + _origin_server_ts: int | None = None internal_metadata: EventInternalMetadata = attr.Factory( lambda: EventInternalMetadata({}) @@ -126,8 +126,8 @@ class EventBuilder: async def build( self, prev_event_ids: list[str], - auth_event_ids: Optional[list[str]], - depth: Optional[int] = None, + auth_event_ids: list[str] | None, + depth: int | None = None, ) -> EventBase: """Transform into a fully signed and hashed event @@ -205,8 +205,8 @@ class EventBuilder: format_version = self.room_version.event_format # The types of auth/prev events changes between event versions. 
- prev_events: Union[StrCollection, list[tuple[str, dict[str, str]]]] - auth_events: Union[list[str], list[tuple[str, dict[str, str]]]] + prev_events: StrCollection | list[tuple[str, dict[str, str]]] + auth_events: list[str] | list[tuple[str, dict[str, str]]] if format_version == EventFormatVersions.ROOM_V1_V2: auth_events = await self._store.add_event_hashes(auth_event_ids) prev_events = await self._store.add_event_hashes(prev_event_ids) @@ -327,7 +327,7 @@ def create_local_event_from_event_dict( signing_key: SigningKey, room_version: RoomVersion, event_dict: JsonDict, - internal_metadata_dict: Optional[JsonDict] = None, + internal_metadata_dict: JsonDict | None = None, ) -> EventBase: """Takes a fully formed event dict, ensuring that fields like `origin_server_ts` have correct values for a locally produced event, diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py index 39dd7ee2b3..d71d3f8feb 100644 --- a/synapse/events/presence_router.py +++ b/synapse/events/presence_router.py @@ -25,9 +25,7 @@ from typing import ( Awaitable, Callable, Iterable, - Optional, TypeVar, - Union, ) from typing_extensions import ParamSpec @@ -44,7 +42,7 @@ GET_USERS_FOR_STATES_CALLBACK = Callable[ [Iterable[UserPresenceState]], Awaitable[dict[str, set[UserPresenceState]]] ] # This must either return a set of strings or the constant PresenceRouter.ALL_USERS. -GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[set[str], str]]] +GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[set[str] | str]] logger = logging.getLogger(__name__) @@ -77,8 +75,8 @@ def load_legacy_presence_router(hs: "HomeServer") -> None: # All methods that the module provides should be async, but this wasn't enforced # in the old module system, so we wrap them if needed def async_wrapper( - f: Optional[Callable[P, R]], - ) -> Optional[Callable[P, Awaitable[R]]]: + f: Callable[P, R] | None, + ) -> Callable[P, Awaitable[R]] | None: # f might be None if the callback isn't implemented by the module. In this # case we don't want to register a callback at all so we return None. if f is None: @@ -95,7 +93,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None: return run # Register the hooks through the module API. - hooks: dict[str, Optional[Callable[..., Any]]] = { + hooks: dict[str, Callable[..., Any] | None] = { hook: async_wrapper(getattr(presence_router, hook, None)) for hook in presence_router_methods } @@ -118,8 +116,8 @@ class PresenceRouter: def register_presence_router_callbacks( self, - get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None, - get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None, + get_users_for_states: GET_USERS_FOR_STATES_CALLBACK | None = None, + get_interested_users: GET_INTERESTED_USERS_CALLBACK | None = None, ) -> None: # PresenceRouter modules are required to implement both of these methods # or neither of them as they are assumed to act in a complementary manner @@ -191,7 +189,7 @@ class PresenceRouter: return users_for_states - async def get_interested_users(self, user_id: str) -> Union[set[str], str]: + async def get_interested_users(self, user_id: str) -> set[str] | str: """ Retrieve a list of users that `user_id` is interested in receiving the presence of. This will be in addition to those they share a room with. 
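[Editor's aside, not part of the patch] The hunks above and below mechanically replace typing.Optional[X] with X | None and typing.Union[A, B] with A | B, the PEP 604 spellings. A minimal illustration of the equivalence, assuming Python 3.10+; the variable names below are invented for the example:

    import types
    from typing import Optional, get_args

    # Both spellings describe the same type to a checker such as mypy.
    old_style: Optional[int] = None      # what the removed ("-") lines used
    new_style: int | None = None         # what the added ("+") lines use (PEP 604)
    assert get_args(Optional[int]) == get_args(int | None) == (int, type(None))

    # At runtime the PEP 604 form is a types.UnionType value, not a class, which is
    # presumably why the `type[A | B]` type-ignore comment in synapse/config/workers.py
    # is reworded rather than removed.
    assert isinstance(int | str, types.UnionType)
    assert not isinstance(int | str, type)

On interpreters older than 3.10 the `|` form is only usable inside annotations that are never evaluated (string annotations or `from __future__ import annotations`); evaluating it eagerly raises TypeError there.
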
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 764d31ee66..d7a987d52f 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -51,7 +51,7 @@ class UnpersistedEventContextBase(ABC): def __init__(self, storage_controller: "StorageControllers"): self._storage: "StorageControllers" = storage_controller - self.app_service: Optional[ApplicationService] = None + self.app_service: ApplicationService | None = None @abstractmethod async def persist( @@ -134,20 +134,20 @@ class EventContext(UnpersistedEventContextBase): _storage: "StorageControllers" state_group_deltas: dict[tuple[int, int], StateMap[str]] - rejected: Optional[str] = None - _state_group: Optional[int] = None - state_group_before_event: Optional[int] = None - _state_delta_due_to_event: Optional[StateMap[str]] = None - app_service: Optional[ApplicationService] = None + rejected: str | None = None + _state_group: int | None = None + state_group_before_event: int | None = None + _state_delta_due_to_event: StateMap[str] | None = None + app_service: ApplicationService | None = None partial_state: bool = False @staticmethod def with_state( storage: "StorageControllers", - state_group: Optional[int], - state_group_before_event: Optional[int], - state_delta_due_to_event: Optional[StateMap[str]], + state_group: int | None, + state_group_before_event: int | None, + state_delta_due_to_event: StateMap[str] | None, partial_state: bool, state_group_deltas: dict[tuple[int, int], StateMap[str]], ) -> "EventContext": @@ -227,7 +227,7 @@ class EventContext(UnpersistedEventContextBase): return context @property - def state_group(self) -> Optional[int]: + def state_group(self) -> int | None: """The ID of the state group for this event. Note that state events are persisted with a state group which includes the new @@ -354,13 +354,13 @@ class UnpersistedEventContext(UnpersistedEventContextBase): """ _storage: "StorageControllers" - state_group_before_event: Optional[int] - state_group_after_event: Optional[int] - state_delta_due_to_event: Optional[StateMap[str]] - prev_group_for_state_group_before_event: Optional[int] - delta_ids_to_state_group_before_event: Optional[StateMap[str]] + state_group_before_event: int | None + state_group_after_event: int | None + state_delta_due_to_event: StateMap[str] | None + prev_group_for_state_group_before_event: int | None + delta_ids_to_state_group_before_event: StateMap[str] | None partial_state: bool - state_map_before_event: Optional[StateMap[str]] = None + state_map_before_event: StateMap[str] | None = None @classmethod async def batch_persist_unpersisted_contexts( @@ -511,7 +511,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase): def _encode_state_group_delta( state_group_delta: dict[tuple[int, int], StateMap[str]], -) -> list[tuple[int, int, Optional[list[tuple[str, str, str]]]]]: +) -> list[tuple[int, int, list[tuple[str, str, str]] | None]]: if not state_group_delta: return [] @@ -538,8 +538,8 @@ def _decode_state_group_delta( def _encode_state_dict( - state_dict: Optional[StateMap[str]], -) -> Optional[list[tuple[str, str, str]]]: + state_dict: StateMap[str] | None, +) -> list[tuple[str, str, str]] | None: """Since dicts of (type, state_key) -> event_id cannot be serialized in JSON we need to convert them to a form that can. 
""" @@ -550,8 +550,8 @@ def _encode_state_dict( def _decode_state_dict( - input: Optional[list[tuple[str, str, str]]], -) -> Optional[StateMap[str]]: + input: list[tuple[str, str, str]] | None, +) -> StateMap[str] | None: """Decodes a state dict encoded using `_encode_state_dict` above""" if input is None: return None diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 9fa251abd8..b79a68f589 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -30,8 +30,6 @@ from typing import ( Mapping, Match, MutableMapping, - Optional, - Union, ) import attr @@ -415,9 +413,9 @@ class SerializeEventConfig: event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1 # The entity that requested the event. This is used to determine whether to include # the transaction_id in the unsigned section of the event. - requester: Optional[Requester] = None + requester: Requester | None = None # List of event fields to include. If empty, all fields will be returned. - only_event_fields: Optional[list[str]] = None + only_event_fields: list[str] | None = None # Some events can have stripped room state stored in the `unsigned` field. # This is required for invite and knock functionality. If this option is # False, that state will be removed from the event before it is returned. @@ -439,7 +437,7 @@ def make_config_for_admin(existing: SerializeEventConfig) -> SerializeEventConfi def serialize_event( - e: Union[JsonDict, EventBase], + e: JsonDict | EventBase, time_now_ms: int, *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, @@ -480,7 +478,7 @@ def serialize_event( # If we have a txn_id saved in the internal_metadata, we should include it in the # unsigned section of the event if it was sent by the same session as the one # requesting the event. - txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None) + txn_id: str | None = getattr(e.internal_metadata, "txn_id", None) if ( txn_id is not None and config.requester is not None @@ -490,7 +488,7 @@ def serialize_event( # this includes old events as well as those created by appservice, guests, # or with tokens minted with the admin API. For those events, fallback # to using the access token instead. - event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None) + event_device_id: str | None = getattr(e.internal_metadata, "device_id", None) if event_device_id is not None: if event_device_id == config.requester.device_id: d["unsigned"]["transaction_id"] = txn_id @@ -504,9 +502,7 @@ def serialize_event( # # For guests and appservice users, we can't check the access token ID # so assume it is the same session. - event_token_id: Optional[int] = getattr( - e.internal_metadata, "token_id", None - ) + event_token_id: int | None = getattr(e.internal_metadata, "token_id", None) if ( ( event_token_id is not None @@ -577,11 +573,11 @@ class EventClientSerializer: async def serialize_event( self, - event: Union[JsonDict, EventBase], + event: JsonDict | EventBase, time_now: int, *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, - bundle_aggregations: Optional[dict[str, "BundledAggregations"]] = None, + bundle_aggregations: dict[str, "BundledAggregations"] | None = None, ) -> JsonDict: """Serializes a single event. 
@@ -712,11 +708,11 @@ class EventClientSerializer: @trace async def serialize_events( self, - events: Collection[Union[JsonDict, EventBase]], + events: Collection[JsonDict | EventBase], time_now: int, *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, - bundle_aggregations: Optional[dict[str, "BundledAggregations"]] = None, + bundle_aggregations: dict[str, "BundledAggregations"] | None = None, ) -> list[JsonDict]: """Serializes multiple events. @@ -755,13 +751,13 @@ class EventClientSerializer: self._add_extra_fields_to_unsigned_client_event_callbacks.append(callback) -_PowerLevel = Union[str, int] -PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]] +_PowerLevel = str | int +PowerLevelsContent = Mapping[str, _PowerLevel | Mapping[str, _PowerLevel]] def copy_and_fixup_power_levels_contents( old_power_levels: PowerLevelsContent, -) -> dict[str, Union[int, dict[str, int]]]: +) -> dict[str, int | dict[str, int]]: """Copy the content of a power_levels event, unfreezing immutabledicts along the way. We accept as input power level values which are strings, provided they represent an @@ -777,7 +773,7 @@ def copy_and_fixup_power_levels_contents( if not isinstance(old_power_levels, collections.abc.Mapping): raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,)) - power_levels: dict[str, Union[int, dict[str, int]]] = {} + power_levels: dict[str, int | dict[str, int]] = {} for k, v in old_power_levels.items(): if isinstance(v, collections.abc.Mapping): @@ -901,7 +897,7 @@ def strip_event(event: EventBase) -> JsonDict: } -def parse_stripped_state_event(raw_stripped_event: Any) -> Optional[StrippedStateEvent]: +def parse_stripped_state_event(raw_stripped_event: Any) -> StrippedStateEvent | None: """ Given a raw value from an event's `unsigned` field, attempt to parse it into a `StrippedStateEvent`. diff --git a/synapse/events/validator.py b/synapse/events/validator.py index c2cecd0fcb..b27f8a942a 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -19,7 +19,7 @@ # # import collections.abc -from typing import Union, cast +from typing import cast import jsonschema from pydantic import Field, StrictBool, StrictStr @@ -177,7 +177,7 @@ class EventValidator: errcode=Codes.BAD_JSON, ) - def validate_builder(self, event: Union[EventBase, EventBuilder]) -> None: + def validate_builder(self, event: EventBase | EventBuilder) -> None: """Validates that the builder/event has roughly the right format. 
Only checks values that we expect a proto event to have, rather than all the fields an event would have @@ -249,7 +249,7 @@ class EventValidator: if not isinstance(d[s], str): raise SynapseError(400, "'%s' not a string type" % (s,)) - def _ensure_state_event(self, event: Union[EventBase, EventBuilder]) -> None: + def _ensure_state_event(self, event: EventBase | EventBuilder) -> None: if not event.is_state(): raise SynapseError(400, "'%s' must be state events" % (event.type,)) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 13e445456a..04ba5b86db 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Optional, Sequence +from typing import TYPE_CHECKING, Awaitable, Callable, Sequence from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership from synapse.api.errors import Codes, SynapseError @@ -67,7 +67,7 @@ class FederationBase: # We need to define this lazily otherwise we get a cyclic dependency. # self._policy_handler = hs.get_room_policy_handler() - self._policy_handler: Optional[RoomPolicyHandler] = None + self._policy_handler: RoomPolicyHandler | None = None def _lazily_get_policy_handler(self) -> RoomPolicyHandler: """Lazily get the room policy handler. @@ -88,9 +88,8 @@ class FederationBase: self, room_version: RoomVersion, pdu: EventBase, - record_failure_callback: Optional[ - Callable[[EventBase, str], Awaitable[None]] - ] = None, + record_failure_callback: Callable[[EventBase, str], Awaitable[None]] + | None = None, ) -> EventBase: """Checks that event is correctly signed by the sending server. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index cb2fa59f54..4110a90ed6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -37,7 +37,6 @@ from typing import ( Optional, Sequence, TypeVar, - Union, ) import attr @@ -263,7 +262,7 @@ class FederationClient(FederationBase): user: UserID, destination: str, query: dict[str, dict[str, dict[str, int]]], - timeout: Optional[int], + timeout: int | None, ) -> JsonDict: """Claims one-time keys for a device hosted on a remote server. @@ -334,7 +333,7 @@ class FederationClient(FederationBase): @tag_args async def backfill( self, dest: str, room_id: str, limit: int, extremities: Collection[str] - ) -> Optional[list[EventBase]]: + ) -> list[EventBase] | None: """Requests some more historic PDUs for the given room from the given destination server. @@ -381,8 +380,8 @@ class FederationClient(FederationBase): destination: str, event_id: str, room_version: RoomVersion, - timeout: Optional[int] = None, - ) -> Optional[EventBase]: + timeout: int | None = None, + ) -> EventBase | None: """Requests the PDU with given origin and ID from the remote home server. Does not have any caching or rate limiting! 
@@ -441,7 +440,7 @@ class FederationClient(FederationBase): @trace @tag_args async def get_pdu_policy_recommendation( - self, destination: str, pdu: EventBase, timeout: Optional[int] = None + self, destination: str, pdu: EventBase, timeout: int | None = None ) -> str: """Requests that the destination server (typically a policy server) check the event and return its recommendation on how to handle the @@ -497,8 +496,8 @@ class FederationClient(FederationBase): @trace @tag_args async def ask_policy_server_to_sign_event( - self, destination: str, pdu: EventBase, timeout: Optional[int] = None - ) -> Optional[JsonDict]: + self, destination: str, pdu: EventBase, timeout: int | None = None + ) -> JsonDict | None: """Requests that the destination server (typically a policy server) sign the event as not spam. @@ -538,8 +537,8 @@ class FederationClient(FederationBase): destinations: Collection[str], event_id: str, room_version: RoomVersion, - timeout: Optional[int] = None, - ) -> Optional[PulledPduInfo]: + timeout: int | None = None, + ) -> PulledPduInfo | None: """Requests the PDU with given origin and ID from the remote home servers. @@ -832,10 +831,9 @@ class FederationClient(FederationBase): pdu: EventBase, origin: str, room_version: RoomVersion, - record_failure_callback: Optional[ - Callable[[EventBase, str], Awaitable[None]] - ] = None, - ) -> Optional[EventBase]: + record_failure_callback: Callable[[EventBase, str], Awaitable[None]] + | None = None, + ) -> EventBase | None: """Takes a PDU and checks its signatures and hashes. If the PDU fails its signature check then we check if we have it in the @@ -931,7 +929,7 @@ class FederationClient(FederationBase): description: str, destinations: Iterable[str], callback: Callable[[str], Awaitable[T]], - failover_errcodes: Optional[Container[str]] = None, + failover_errcodes: Container[str] | None = None, failover_on_unknown_endpoint: bool = False, ) -> T: """Try an operation on a series of servers, until it succeeds @@ -1046,7 +1044,7 @@ class FederationClient(FederationBase): user_id: str, membership: str, content: dict, - params: Optional[Mapping[str, Union[str, Iterable[str]]]], + params: Mapping[str, str | Iterable[str]] | None, ) -> tuple[str, EventBase, RoomVersion]: """ Creates an m.room.member event, with context, without participating in the room. @@ -1563,11 +1561,11 @@ class FederationClient(FederationBase): async def get_public_rooms( self, remote_server: str, - limit: Optional[int] = None, - since_token: Optional[str] = None, - search_filter: Optional[dict] = None, + limit: int | None = None, + since_token: str | None = None, + search_filter: dict | None = None, include_all_networks: bool = False, - third_party_instance_id: Optional[str] = None, + third_party_instance_id: str | None = None, ) -> JsonDict: """Get the list of public rooms from a remote homeserver @@ -1676,7 +1674,7 @@ class FederationClient(FederationBase): async def get_room_complexity( self, destination: str, room_id: str - ) -> Optional[JsonDict]: + ) -> JsonDict | None: """ Fetch the complexity of a remote room from another server. 
@@ -1987,10 +1985,10 @@ class FederationClient(FederationBase): max_timeout_ms: int, download_ratelimiter: Ratelimiter, ip_address: str, - ) -> Union[ - tuple[int, dict[bytes, list[bytes]], bytes], - tuple[int, dict[bytes, list[bytes]]], - ]: + ) -> ( + tuple[int, dict[bytes, list[bytes]], bytes] + | tuple[int, dict[bytes, list[bytes]]] + ): try: return await self.transport_layer.federation_download_media( destination, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 6e14f4a049..34abac1cec 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -28,8 +28,6 @@ from typing import ( Callable, Collection, Mapping, - Optional, - Union, ) from prometheus_client import Counter, Gauge, Histogram @@ -176,13 +174,11 @@ class FederationServer(FederationBase): # We cache responses to state queries, as they take a while and often # come in waves. - self._state_resp_cache: ResponseCache[tuple[str, Optional[str]]] = ( - ResponseCache( - clock=hs.get_clock(), - name="state_resp", - server_name=self.server_name, - timeout_ms=30000, - ) + self._state_resp_cache: ResponseCache[tuple[str, str | None]] = ResponseCache( + clock=hs.get_clock(), + name="state_resp", + server_name=self.server_name, + timeout_ms=30000, ) self._state_ids_resp_cache: ResponseCache[tuple[str, str]] = ResponseCache( clock=hs.get_clock(), @@ -666,7 +662,7 @@ class FederationServer(FederationBase): async def on_pdu_request( self, origin: str, event_id: str - ) -> tuple[int, Union[JsonDict, str]]: + ) -> tuple[int, JsonDict | str]: pdu = await self.handler.get_persisted_pdu(origin, event_id) if pdu: @@ -763,7 +759,7 @@ class FederationServer(FederationBase): prev_state_ids = await context.get_prev_state_ids() state_event_ids: Collection[str] - servers_in_room: Optional[Collection[str]] + servers_in_room: Collection[str] | None if caller_supports_partial_state: summary = await self.store.get_room_summary(room_id) state_event_ids = _get_event_ids_for_partial_state_join( @@ -1126,7 +1122,7 @@ class FederationServer(FederationBase): return {"events": serialize_and_filter_pdus(missing_events, time_now)} - async def on_openid_userinfo(self, token: str) -> Optional[str]: + async def on_openid_userinfo(self, token: str) -> str | None: ts_now_ms = self._clock.time_msec() return await self.store.get_user_id_for_open_id_token(token, ts_now_ms) @@ -1205,7 +1201,7 @@ class FederationServer(FederationBase): async def _get_next_nonspam_staged_event_for_room( self, room_id: str, room_version: RoomVersion - ) -> Optional[tuple[str, EventBase]]: + ) -> tuple[str, EventBase] | None: """Fetch the first non-spam event from staging queue. Args: @@ -1246,8 +1242,8 @@ class FederationServer(FederationBase): room_id: str, room_version: RoomVersion, lock: Lock, - latest_origin: Optional[str] = None, - latest_event: Optional[EventBase] = None, + latest_origin: str | None = None, + latest_event: EventBase | None = None, ) -> None: """Process events in the staging area for the given room. diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 5628130429..dca13191fc 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -27,7 +27,6 @@ These actions are mostly only used by the :py:mod:`.replication` module. 
""" import logging -from typing import Optional from synapse.federation.units import Transaction from synapse.storage.databases.main import DataStore @@ -44,7 +43,7 @@ class TransactionActions: async def have_responded( self, origin: str, transaction: Transaction - ) -> Optional[tuple[int, JsonDict]]: + ) -> tuple[int, JsonDict] | None: """Have we already responded to a transaction with the same id and origin? diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 80f31798e8..cf70e10a58 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -42,7 +42,6 @@ from typing import ( TYPE_CHECKING, Hashable, Iterable, - Optional, Sized, ) @@ -217,7 +216,7 @@ class FederationRemoteSendQueue(AbstractFederationSender): destination: str, edu_type: str, content: JsonDict, - key: Optional[Hashable] = None, + key: Hashable | None = None, ) -> None: """As per FederationSender""" if self.is_mine_server_name(destination): diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 229ae647c0..0bd97c25df 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -138,7 +138,6 @@ from typing import ( Hashable, Iterable, Literal, - Optional, ) import attr @@ -266,7 +265,7 @@ class AbstractFederationSender(metaclass=abc.ABCMeta): destination: str, edu_type: str, content: JsonDict, - key: Optional[Hashable] = None, + key: Hashable | None = None, ) -> None: """Construct an Edu object, and queue it for sending @@ -410,7 +409,7 @@ class FederationSender(AbstractFederationSender): self.is_mine_id = hs.is_mine_id self.is_mine_server_name = hs.is_mine_server_name - self._presence_router: Optional["PresenceRouter"] = None + self._presence_router: "PresenceRouter" | None = None self._transaction_manager = TransactionManager(hs) self._instance_name = hs.get_instance_name() @@ -481,7 +480,7 @@ class FederationSender(AbstractFederationSender): def _get_per_destination_queue( self, destination: str - ) -> Optional[PerDestinationQueue]: + ) -> PerDestinationQueue | None: """Get or create a PerDestinationQueue for the given destination Args: @@ -605,7 +604,7 @@ class FederationSender(AbstractFederationSender): ) return - destinations: Optional[Collection[str]] = None + destinations: Collection[str] | None = None if not event.prev_event_ids(): # If there are no prev event IDs then the state is empty # and so no remote servers in the room @@ -1010,7 +1009,7 @@ class FederationSender(AbstractFederationSender): destination: str, edu_type: str, content: JsonDict, - key: Optional[Hashable] = None, + key: Hashable | None = None, ) -> None: """Construct an Edu object, and queue it for sending @@ -1038,7 +1037,7 @@ class FederationSender(AbstractFederationSender): self.send_edu(edu, key) - def send_edu(self, edu: Edu, key: Optional[Hashable]) -> None: + def send_edu(self, edu: Edu, key: Hashable | None) -> None: """Queue an EDU for sending Args: @@ -1134,7 +1133,7 @@ class FederationSender(AbstractFederationSender): In order to reduce load spikes, adds a delay between each destination. 
""" - last_processed: Optional[str] = None + last_processed: str | None = None while not self._is_shutdown: destinations_to_wake = ( diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index ecf4789d76..4a1b84aed7 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -23,7 +23,7 @@ import datetime import logging from collections import OrderedDict from types import TracebackType -from typing import TYPE_CHECKING, Hashable, Iterable, Optional +from typing import TYPE_CHECKING, Hashable, Iterable import attr from prometheus_client import Counter @@ -121,7 +121,7 @@ class PerDestinationQueue: self._destination = destination self.transmission_loop_running = False self._transmission_loop_enabled = True - self.active_transmission_loop: Optional[defer.Deferred] = None + self.active_transmission_loop: defer.Deferred | None = None # Flag to signal to any running transmission loop that there is new data # queued up to be sent. @@ -142,7 +142,7 @@ class PerDestinationQueue: # Cache of the last successfully-transmitted stream ordering for this # destination (we are the only updater so this is safe) - self._last_successful_stream_ordering: Optional[int] = None + self._last_successful_stream_ordering: int | None = None # a queue of pending PDUs self._pending_pdus: list[EventBase] = [] @@ -742,9 +742,9 @@ class _TransactionQueueManager: queue: PerDestinationQueue - _device_stream_id: Optional[int] = None - _device_list_id: Optional[int] = None - _last_stream_ordering: Optional[int] = None + _device_stream_id: int | None = None + _device_list_id: int | None = None + _last_stream_ordering: int | None = None _pdus: list[EventBase] = attr.Factory(list) async def __aenter__(self) -> tuple[list[EventBase], list[Edu]]: @@ -845,9 +845,9 @@ class _TransactionQueueManager: async def __aexit__( self, - exc_type: Optional[type[BaseException]], - exc: Optional[BaseException], - tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: TracebackType | None, ) -> None: if exc_type is not None: # Failed to send transaction, so we bail out. diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index ee15b4804e..35d3c30c69 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -31,8 +31,6 @@ from typing import ( Generator, Iterable, Mapping, - Optional, - Union, ) import attr @@ -122,7 +120,7 @@ class TransportLayerClient: ) async def get_event( - self, destination: str, event_id: str, timeout: Optional[int] = None + self, destination: str, event_id: str, timeout: int | None = None ) -> JsonDict: """Requests the pdu with give id and origin from the given server. @@ -144,7 +142,7 @@ class TransportLayerClient: ) async def get_policy_recommendation_for_pdu( - self, destination: str, event: EventBase, timeout: Optional[int] = None + self, destination: str, event: EventBase, timeout: int | None = None ) -> JsonDict: """Requests the policy recommendation for the given pdu from the given policy server. @@ -171,7 +169,7 @@ class TransportLayerClient: ) async def ask_policy_server_to_sign_event( - self, destination: str, event: EventBase, timeout: Optional[int] = None + self, destination: str, event: EventBase, timeout: int | None = None ) -> JsonDict: """Requests that the destination server (typically a policy server) sign the event as not spam. 
@@ -198,7 +196,7 @@ class TransportLayerClient: async def backfill( self, destination: str, room_id: str, event_tuples: Collection[str], limit: int - ) -> Optional[Union[JsonDict, list]]: + ) -> JsonDict | list | None: """Requests `limit` previous PDUs in a given context before list of PDUs. @@ -235,7 +233,7 @@ class TransportLayerClient: async def timestamp_to_event( self, destination: str, room_id: str, timestamp: int, direction: Direction - ) -> Union[JsonDict, list]: + ) -> JsonDict | list: """ Calls a remote federating server at `destination` asking for their closest event to the given timestamp in the given direction. @@ -270,7 +268,7 @@ class TransportLayerClient: async def send_transaction( self, transaction: Transaction, - json_data_callback: Optional[Callable[[], JsonDict]] = None, + json_data_callback: Callable[[], JsonDict] | None = None, ) -> JsonDict: """Sends the given Transaction to its destination @@ -343,7 +341,7 @@ class TransportLayerClient: room_id: str, user_id: str, membership: str, - params: Optional[Mapping[str, Union[str, Iterable[str]]]], + params: Mapping[str, str | Iterable[str]] | None, ) -> JsonDict: """Asks a remote server to build and sign us a membership event @@ -528,11 +526,11 @@ class TransportLayerClient: async def get_public_rooms( self, remote_server: str, - limit: Optional[int] = None, - since_token: Optional[str] = None, - search_filter: Optional[dict] = None, + limit: int | None = None, + since_token: str | None = None, + search_filter: dict | None = None, include_all_networks: bool = False, - third_party_instance_id: Optional[str] = None, + third_party_instance_id: str | None = None, ) -> JsonDict: """Get the list of public rooms from a remote homeserver @@ -567,7 +565,7 @@ class TransportLayerClient: ) raise else: - args: dict[str, Union[str, Iterable[str]]] = { + args: dict[str, str | Iterable[str]] = { "include_all_networks": "true" if include_all_networks else "false" } if third_party_instance_id: @@ -694,7 +692,7 @@ class TransportLayerClient: user: UserID, destination: str, query_content: JsonDict, - timeout: Optional[int], + timeout: int | None, ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. @@ -740,7 +738,7 @@ class TransportLayerClient: user: UserID, destination: str, query_content: JsonDict, - timeout: Optional[int], + timeout: int | None, ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. @@ -997,13 +995,13 @@ class SendJoinResponse: event_dict: JsonDict # The parsed join event from the /send_join response. This will be None if # "event" is not included in the response. 
- event: Optional[EventBase] = None + event: EventBase | None = None # The room state is incomplete members_omitted: bool = False # List of servers in the room - servers_in_room: Optional[list[str]] = None + servers_in_room: list[str] | None = None @attr.s(slots=True, auto_attribs=True) diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index d5f05f7290..6d92d00523 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Iterable, Literal, Optional +from typing import TYPE_CHECKING, Iterable, Literal from synapse.api.errors import FederationDeniedError, SynapseError from synapse.federation.transport.server._base import ( @@ -52,7 +52,7 @@ logger = logging.getLogger(__name__) class TransportLayerServer(JsonResource): """Handles incoming federation HTTP requests""" - def __init__(self, hs: "HomeServer", servlet_groups: Optional[list[str]] = None): + def __init__(self, hs: "HomeServer", servlet_groups: list[str] | None = None): """Initialize the TransportLayerServer Will by default register all servlets. For custom behaviour, pass in @@ -135,7 +135,7 @@ class PublicRoomList(BaseFederationServlet): if not self.allow_access: raise FederationDeniedError(origin) - limit: Optional[int] = parse_integer_from_args(query, "limit", 0) + limit: int | None = parse_integer_from_args(query, "limit", 0) since_token = parse_string_from_args(query, "since", None) include_all_networks = parse_boolean_from_args( query, "include_all_networks", default=False @@ -170,7 +170,7 @@ class PublicRoomList(BaseFederationServlet): if not self.allow_access: raise FederationDeniedError(origin) - limit: Optional[int] = int(content.get("limit", 100)) + limit: int | None = int(content.get("limit", 100)) since_token = content.get("since", None) search_filter = content.get("filter", None) @@ -240,7 +240,7 @@ class OpenIdUserInfo(BaseFederationServlet): async def on_GET( self, - origin: Optional[str], + origin: str | None, content: Literal[None], query: dict[bytes, list[bytes]], ) -> tuple[int, JsonDict]: @@ -281,7 +281,7 @@ def register_servlets( resource: HttpServer, authenticator: Authenticator, ratelimiter: FederationRateLimiter, - servlet_groups: Optional[Iterable[str]] = None, + servlet_groups: Iterable[str] | None = None, ) -> None: """Initialize and register servlet classes. 
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index 146cbebb27..52c0c96a3f 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -24,7 +24,7 @@ import logging import re import time from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, cast +from typing import TYPE_CHECKING, Any, Awaitable, Callable, cast from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.urls import FEDERATION_V1_PREFIX @@ -77,7 +77,7 @@ class Authenticator: # A method just so we can pass 'self' as the authenticator to the Servlets async def authenticate_request( - self, request: SynapseRequest, content: Optional[JsonDict] + self, request: SynapseRequest, content: JsonDict | None ) -> str: now = self._clock.time_msec() json_request: JsonDict = { @@ -165,7 +165,7 @@ class Authenticator: logger.exception("Error resetting retry timings on %s", origin) -def _parse_auth_header(header_bytes: bytes) -> tuple[str, str, str, Optional[str]]: +def _parse_auth_header(header_bytes: bytes) -> tuple[str, str, str, str | None]: """Parse an X-Matrix auth header Args: @@ -252,7 +252,7 @@ class BaseFederationServlet: components as specified in the path match regexp. Returns: - Optional[tuple[int, object]]: either (response code, response object) to + tuple[int, object] | None: either (response code, response object) to return a JSON response, or None if the request has already been handled. Raises: @@ -289,7 +289,7 @@ class BaseFederationServlet: @functools.wraps(func) async def new_func( request: SynapseRequest, *args: Any, **kwargs: str - ) -> Optional[tuple[int, Any]]: + ) -> tuple[int, Any] | None: """A callback which can be passed to HttpServer.RegisterPaths Args: @@ -309,7 +309,7 @@ class BaseFederationServlet: try: with start_active_span("authenticate_request"): - origin: Optional[str] = await authenticator.authenticate_request( + origin: str | None = await authenticator.authenticate_request( request, content ) except NoAuthenticationError: diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 54c7dac1b7..a7c297c0b7 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -24,9 +24,7 @@ from typing import ( TYPE_CHECKING, Literal, Mapping, - Optional, Sequence, - Union, ) from synapse.api.constants import Direction, EduTypes @@ -156,7 +154,7 @@ class FederationEventServlet(BaseFederationServerServlet): content: Literal[None], query: dict[bytes, list[bytes]], event_id: str, - ) -> tuple[int, Union[JsonDict, str]]: + ) -> tuple[int, JsonDict | str]: return await self.handler.on_pdu_request(origin, event_id) @@ -642,7 +640,7 @@ class On3pidBindServlet(BaseFederationServerServlet): REQUIRE_AUTH = False async def on_POST( - self, origin: Optional[str], content: JsonDict, query: dict[bytes, list[bytes]] + self, origin: str | None, content: JsonDict, query: dict[bytes, list[bytes]] ) -> tuple[int, JsonDict]: if "invites" in content: last_exception = None @@ -676,7 +674,7 @@ class FederationVersionServlet(BaseFederationServlet): async def on_GET( self, - origin: Optional[str], + origin: str | None, content: Literal[None], query: dict[bytes, list[bytes]], ) -> tuple[int, JsonDict]: @@ -812,7 +810,7 @@ class FederationMediaDownloadServlet(BaseFederationServerServlet): async def on_GET( self, - origin: Optional[str], 
+ origin: str | None, content: Literal[None], request: SynapseRequest, media_id: str, @@ -852,7 +850,7 @@ class FederationMediaThumbnailServlet(BaseFederationServerServlet): async def on_GET( self, - origin: Optional[str], + origin: str | None, content: Literal[None], request: SynapseRequest, media_id: str, diff --git a/synapse/federation/units.py b/synapse/federation/units.py index bff45bc2a9..547db9a394 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -24,7 +24,7 @@ server protocol. """ import logging -from typing import Optional, Sequence +from typing import Sequence import attr @@ -70,7 +70,7 @@ class Edu: getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}" -def _none_to_list(edus: Optional[list[JsonDict]]) -> list[JsonDict]: +def _none_to_list(edus: list[JsonDict] | None) -> list[JsonDict]: if edus is None: return [] return edus @@ -128,6 +128,6 @@ def filter_pdus_for_valid_depth(pdus: Sequence[JsonDict]) -> list[JsonDict]: def serialize_and_filter_pdus( - pdus: Sequence[EventBase], time_now: Optional[int] = None + pdus: Sequence[EventBase], time_now: int | None = None ) -> list[JsonDict]: return filter_pdus_for_valid_depth([pdu.get_pdu_json(time_now) for pdu in pdus]) diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 4492612859..c6168377ee 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -21,7 +21,7 @@ # import logging import random -from typing import TYPE_CHECKING, Awaitable, Callable, Optional +from typing import TYPE_CHECKING, Awaitable, Callable from synapse.api.constants import AccountDataTypes from synapse.replication.http.account_data import ( @@ -40,9 +40,7 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -ON_ACCOUNT_DATA_UPDATED_CALLBACK = Callable[ - [str, Optional[str], str, JsonDict], Awaitable -] +ON_ACCOUNT_DATA_UPDATED_CALLBACK = Callable[[str, str | None, str, JsonDict], Awaitable] class AccountDataHandler: @@ -72,7 +70,7 @@ class AccountDataHandler: ] = [] def register_module_callbacks( - self, on_account_data_updated: Optional[ON_ACCOUNT_DATA_UPDATED_CALLBACK] = None + self, on_account_data_updated: ON_ACCOUNT_DATA_UPDATED_CALLBACK | None = None ) -> None: """Register callbacks from modules.""" if on_account_data_updated is not None: @@ -81,7 +79,7 @@ class AccountDataHandler: async def _notify_modules( self, user_id: str, - room_id: Optional[str], + room_id: str | None, account_data_type: str, content: JsonDict, ) -> None: @@ -143,7 +141,7 @@ class AccountDataHandler: async def remove_account_data_for_room( self, user_id: str, room_id: str, account_data_type: str - ) -> Optional[int]: + ) -> int | None: """ Deletes the room account data for the given user and account data type. @@ -219,7 +217,7 @@ class AccountDataHandler: async def remove_account_data_for_user( self, user_id: str, account_data_type: str - ) -> Optional[int]: + ) -> int | None: """Removes a piece of global account_data for a user. 
Args: @@ -324,7 +322,7 @@ class AccountDataEventSource(EventSource[int, JsonDict]): limit: int, room_ids: StrCollection, is_guest: bool, - explicit_room_id: Optional[str] = None, + explicit_room_id: str | None = None, ) -> tuple[list[JsonDict], int]: user_id = user.to_string() last_stream_id = from_key diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index a805de1f35..bc50efa1a7 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -21,7 +21,7 @@ import email.mime.multipart import email.utils import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.errors import AuthError, StoreError, SynapseError from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -108,8 +108,8 @@ class AccountValidityHandler: async def on_user_login( self, user_id: str, - auth_provider_type: Optional[str], - auth_provider_id: Optional[str], + auth_provider_type: str | None, + auth_provider_id: str | None, ) -> None: """Tell third-party modules about a user logins. @@ -326,9 +326,9 @@ class AccountValidityHandler: async def renew_account_for_user( self, user_id: str, - expiration_ts: Optional[int] = None, + expiration_ts: int | None = None, email_sent: bool = False, - renewal_token: Optional[str] = None, + renewal_token: str | None = None, ) -> int: """Renews the account attached to a given user by pushing back the expiration date by the current validity period in the server's diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 3faaa4d2b3..c979752f7f 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -25,7 +25,6 @@ from typing import ( TYPE_CHECKING, Any, Mapping, - Optional, Sequence, ) @@ -71,7 +70,7 @@ class AdminHandler: self.hs = hs - async def get_redact_task(self, redact_id: str) -> Optional[ScheduledTask]: + async def get_redact_task(self, redact_id: str) -> ScheduledTask | None: """Get the current status of an active redaction process Args: @@ -99,11 +98,9 @@ class AdminHandler: return ret - async def get_user(self, user: UserID) -> Optional[JsonMapping]: + async def get_user(self, user: UserID) -> JsonMapping | None: """Function to get user details""" - user_info: Optional[UserInfo] = await self._store.get_user_by_id( - user.to_string() - ) + user_info: UserInfo | None = await self._store.get_user_by_id(user.to_string()) if user_info is None: return None @@ -355,8 +352,8 @@ class AdminHandler: rooms: list, requester: JsonMapping, use_admin: bool, - reason: Optional[str], - limit: Optional[int], + reason: str | None, + limit: int | None, ) -> str: """ Start a task redacting the events of the given user in the given rooms @@ -408,7 +405,7 @@ class AdminHandler: async def _redact_all_events( self, task: ScheduledTask - ) -> tuple[TaskStatus, Optional[Mapping[str, Any]], Optional[str]]: + ) -> tuple[TaskStatus, Mapping[str, Any] | None, str | None]: """ Task to redact all of a users events in the given rooms, tracking which, if any, events whose redaction failed diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 5240178d80..c91d2adbe1 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -24,8 +24,6 @@ from typing import ( Collection, Iterable, Mapping, - Optional, - Union, ) from prometheus_client import Counter @@ -240,8 +238,8 @@ class ApplicationServicesHandler: def notify_interested_services_ephemeral( self, stream_key: StreamKeyType, - 
new_token: Union[int, RoomStreamToken, MultiWriterStreamToken], - users: Collection[Union[str, UserID]], + new_token: int | RoomStreamToken | MultiWriterStreamToken, + users: Collection[str | UserID], ) -> None: """ This is called by the notifier in the background when an ephemeral event is handled @@ -340,8 +338,8 @@ class ApplicationServicesHandler: self, services: list[ApplicationService], stream_key: StreamKeyType, - new_token: Union[int, MultiWriterStreamToken], - users: Collection[Union[str, UserID]], + new_token: int | MultiWriterStreamToken, + users: Collection[str | UserID], ) -> None: logger.debug("Checking interested services for %s", stream_key) with Measure( @@ -498,8 +496,8 @@ class ApplicationServicesHandler: async def _handle_presence( self, service: ApplicationService, - users: Collection[Union[str, UserID]], - new_token: Optional[int], + users: Collection[str | UserID], + new_token: int | None, ) -> list[JsonMapping]: """ Return the latest presence updates that the given application service should receive. @@ -559,7 +557,7 @@ class ApplicationServicesHandler: self, service: ApplicationService, new_token: int, - users: Collection[Union[str, UserID]], + users: Collection[str | UserID], ) -> list[JsonDict]: """ Given an application service, determine which events it should receive @@ -733,7 +731,7 @@ class ApplicationServicesHandler: async def query_room_alias_exists( self, room_alias: RoomAlias - ) -> Optional[RoomAliasMapping]: + ) -> RoomAliasMapping | None: """Check if an application service knows this room alias exists. Args: @@ -782,7 +780,7 @@ class ApplicationServicesHandler: return ret async def get_3pe_protocols( - self, only_protocol: Optional[str] = None + self, only_protocol: str | None = None ) -> dict[str, JsonDict]: services = self.store.get_app_services() protocols: dict[str, list[JsonDict]] = {} @@ -935,7 +933,7 @@ class ApplicationServicesHandler: return claimed_keys, missing async def query_keys( - self, query: Mapping[str, Optional[list[str]]] + self, query: Mapping[str, list[str] | None] ) -> dict[str, dict[str, dict[str, JsonDict]]]: """Query application services for device keys. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index ed796cfe06..d9355d33da 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -33,8 +33,6 @@ from typing import ( Callable, Iterable, Mapping, - Optional, - Union, cast, ) @@ -289,7 +287,7 @@ class AuthHandler: request_body: dict[str, Any], description: str, can_skip_ui_auth: bool = False, - ) -> tuple[dict, Optional[str]]: + ) -> tuple[dict, str | None]: """ Checks that the user is who they claim to be, via a UI auth. @@ -440,7 +438,7 @@ class AuthHandler: request: SynapseRequest, clientdict: dict[str, Any], description: str, - get_new_session_data: Optional[Callable[[], JsonDict]] = None, + get_new_session_data: Callable[[], JsonDict] | None = None, ) -> tuple[dict, dict, str]: """ Takes a dictionary sent by the client in the login / registration @@ -487,7 +485,7 @@ class AuthHandler: all the stages in any of the permitted flows. 
""" - sid: Optional[str] = None + sid: str | None = None authdict = clientdict.pop("auth", {}) if "session" in authdict: sid = authdict["session"] @@ -637,7 +635,7 @@ class AuthHandler: authdict["session"], stagetype, result ) - def get_session_id(self, clientdict: dict[str, Any]) -> Optional[str]: + def get_session_id(self, clientdict: dict[str, Any]) -> str | None: """ Gets the session ID for a client given the client dictionary @@ -673,7 +671,7 @@ class AuthHandler: raise SynapseError(400, "Unknown session ID: %s" % (session_id,)) async def get_session_data( - self, session_id: str, key: str, default: Optional[Any] = None + self, session_id: str, key: str, default: Any | None = None ) -> Any: """ Retrieve data stored with set_session_data @@ -699,7 +697,7 @@ class AuthHandler: async def _check_auth_dict( self, authdict: dict[str, Any], clientip: str - ) -> Union[dict[str, Any], str]: + ) -> dict[str, Any] | str: """Attempt to validate the auth dict provided by a client Args: @@ -774,9 +772,9 @@ class AuthHandler: async def refresh_token( self, refresh_token: str, - access_token_valid_until_ms: Optional[int], - refresh_token_valid_until_ms: Optional[int], - ) -> tuple[str, str, Optional[int]]: + access_token_valid_until_ms: int | None, + refresh_token_valid_until_ms: int | None, + ) -> tuple[str, str, int | None]: """ Consumes a refresh token and generate both a new access token and a new refresh token from it. @@ -909,8 +907,8 @@ class AuthHandler: self, user_id: str, duration_ms: int = (2 * 60 * 1000), - auth_provider_id: Optional[str] = None, - auth_provider_session_id: Optional[str] = None, + auth_provider_id: str | None = None, + auth_provider_session_id: str | None = None, ) -> str: login_token = self.generate_login_token() now = self._clock.time_msec() @@ -928,8 +926,8 @@ class AuthHandler: self, user_id: str, device_id: str, - expiry_ts: Optional[int], - ultimate_session_expiry_ts: Optional[int], + expiry_ts: int | None, + ultimate_session_expiry_ts: int | None, ) -> tuple[str, int]: """ Creates a new refresh token for the user with the given user ID. @@ -961,11 +959,11 @@ class AuthHandler: async def create_access_token_for_user_id( self, user_id: str, - device_id: Optional[str], - valid_until_ms: Optional[int], - puppets_user_id: Optional[str] = None, + device_id: str | None, + valid_until_ms: int | None, + puppets_user_id: str | None = None, is_appservice_ghost: bool = False, - refresh_token_id: Optional[int] = None, + refresh_token_id: int | None = None, ) -> str: """ Creates a new access token for the user with the given user ID. @@ -1034,7 +1032,7 @@ class AuthHandler: return access_token - async def check_user_exists(self, user_id: str) -> Optional[str]: + async def check_user_exists(self, user_id: str) -> str | None: """ Checks to see if a user with the given id exists. Will check case insensitively, but return None if there are multiple inexact matches. @@ -1061,9 +1059,7 @@ class AuthHandler: """ return await self.store.is_user_approved(user_id) - async def _find_user_id_and_pwd_hash( - self, user_id: str - ) -> Optional[tuple[str, str]]: + async def _find_user_id_and_pwd_hash(self, user_id: str) -> tuple[str, str] | None: """Checks to see if a user with the given id exists. Will check case insensitively, but will return None if there are multiple inexact matches. 
@@ -1141,7 +1137,7 @@ class AuthHandler: login_submission: dict[str, Any], ratelimit: bool = False, is_reauth: bool = False, - ) -> tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]: + ) -> tuple[str, Callable[["LoginResponse"], Awaitable[None]] | None]: """Authenticates the user for the /login API Also used by the user-interactive auth flow to validate auth types which don't @@ -1297,7 +1293,7 @@ class AuthHandler: self, username: str, login_submission: dict[str, Any], - ) -> tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]: + ) -> tuple[str, Callable[["LoginResponse"], Awaitable[None]] | None]: """Helper for validate_login Handles login, once we've mapped 3pids onto userids @@ -1386,7 +1382,7 @@ class AuthHandler: async def check_password_provider_3pid( self, medium: str, address: str, password: str - ) -> tuple[Optional[str], Optional[Callable[["LoginResponse"], Awaitable[None]]]]: + ) -> tuple[str | None, Callable[["LoginResponse"], Awaitable[None]] | None]: """Check if a password provider is able to validate a thirdparty login Args: @@ -1413,7 +1409,7 @@ class AuthHandler: # if result is None then return (None, None) return None, None - async def _check_local_password(self, user_id: str, password: str) -> Optional[str]: + async def _check_local_password(self, user_id: str, password: str) -> str | None: """Authenticate a user against the local password database. user_id is checked case insensitively, but will return None if there are @@ -1528,8 +1524,8 @@ class AuthHandler: async def delete_access_tokens_for_user( self, user_id: str, - except_token_id: Optional[int] = None, - device_id: Optional[str] = None, + except_token_id: int | None = None, + device_id: str | None = None, ) -> None: """Invalidate access tokens belonging to a user @@ -1700,9 +1696,7 @@ class AuthHandler: return await defer_to_thread(self.hs.get_reactor(), _do_hash) - async def validate_hash( - self, password: str, stored_hash: Union[bytes, str] - ) -> bool: + async def validate_hash(self, password: str, stored_hash: bytes | str) -> bool: """Validates that self.hash(password) == stored_hash. Args: @@ -1799,9 +1793,9 @@ class AuthHandler: auth_provider_id: str, request: Request, client_redirect_url: str, - extra_attributes: Optional[JsonDict] = None, + extra_attributes: JsonDict | None = None, new_user: bool = False, - auth_provider_session_id: Optional[str] = None, + auth_provider_session_id: str | None = None, ) -> None: """Having figured out a mxid for this user, complete the HTTP request @@ -1960,7 +1954,7 @@ def load_single_legacy_password_auth_provider( # All methods that the module provides should be async, but this wasn't enforced # in the old module system, so we wrap them if needed - def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]: + def async_wrapper(f: Callable | None) -> Callable[..., Awaitable] | None: # f might be None if the callback isn't implemented by the module. In this # case we don't want to register a callback at all so we return None. if f is None: @@ -1973,7 +1967,7 @@ def load_single_legacy_password_auth_provider( async def wrapped_check_password( username: str, login_type: str, login_dict: JsonDict - ) -> Optional[tuple[str, Optional[Callable]]]: + ) -> tuple[str, Callable | None] | None: # We've already made sure f is not None above, but mypy doesn't do well # across function boundaries so we need to tell it f is definitely not # None. 
@@ -1992,12 +1986,12 @@ def load_single_legacy_password_auth_provider( return wrapped_check_password # We need to wrap check_auth as in the old form it could return - # just a str, but now it must return Optional[tuple[str, Optional[Callable]] + # just a str, but now it must return tuple[str, Callable | None] | None if f.__name__ == "check_auth": async def wrapped_check_auth( username: str, login_type: str, login_dict: JsonDict - ) -> Optional[tuple[str, Optional[Callable]]]: + ) -> tuple[str, Callable | None] | None: # We've already made sure f is not None above, but mypy doesn't do well # across function boundaries so we need to tell it f is definitely not # None. @@ -2013,12 +2007,12 @@ def load_single_legacy_password_auth_provider( return wrapped_check_auth # We need to wrap check_3pid_auth as in the old form it could return - # just a str, but now it must return Optional[tuple[str, Optional[Callable]] + # just a str, but now it must return tuple[str, Callable | None] | None if f.__name__ == "check_3pid_auth": async def wrapped_check_3pid_auth( medium: str, address: str, password: str - ) -> Optional[tuple[str, Optional[Callable]]]: + ) -> tuple[str, Callable | None] | None: # We've already made sure f is not None above, but mypy doesn't do well # across function boundaries so we need to tell it f is definitely not # None. @@ -2044,10 +2038,10 @@ def load_single_legacy_password_auth_provider( # If the module has these methods implemented, then we pull them out # and register them as hooks. - check_3pid_auth_hook: Optional[CHECK_3PID_AUTH_CALLBACK] = async_wrapper( + check_3pid_auth_hook: CHECK_3PID_AUTH_CALLBACK | None = async_wrapper( getattr(provider, "check_3pid_auth", None) ) - on_logged_out_hook: Optional[ON_LOGGED_OUT_CALLBACK] = async_wrapper( + on_logged_out_hook: ON_LOGGED_OUT_CALLBACK | None = async_wrapper( getattr(provider, "on_logged_out", None) ) @@ -2085,24 +2079,20 @@ def load_single_legacy_password_auth_provider( CHECK_3PID_AUTH_CALLBACK = Callable[ [str, str, str], - Awaitable[ - Optional[tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]] - ], + Awaitable[tuple[str, Callable[["LoginResponse"], Awaitable[None]] | None] | None], ] -ON_LOGGED_OUT_CALLBACK = Callable[[str, Optional[str], str], Awaitable] +ON_LOGGED_OUT_CALLBACK = Callable[[str, str | None, str], Awaitable] CHECK_AUTH_CALLBACK = Callable[ [str, str, JsonDict], - Awaitable[ - Optional[tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]] - ], + Awaitable[tuple[str, Callable[["LoginResponse"], Awaitable[None]] | None] | None], ] GET_USERNAME_FOR_REGISTRATION_CALLBACK = Callable[ [JsonDict, JsonDict], - Awaitable[Optional[str]], + Awaitable[str | None], ] GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK = Callable[ [JsonDict, JsonDict], - Awaitable[Optional[str]], + Awaitable[str | None], ] IS_3PID_ALLOWED_CALLBACK = Callable[[str, str, bool], Awaitable[bool]] @@ -2133,18 +2123,15 @@ class PasswordAuthProvider: def register_password_auth_provider_callbacks( self, - check_3pid_auth: Optional[CHECK_3PID_AUTH_CALLBACK] = None, - on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None, - is_3pid_allowed: Optional[IS_3PID_ALLOWED_CALLBACK] = None, - auth_checkers: Optional[ - dict[tuple[str, tuple[str, ...]], CHECK_AUTH_CALLBACK] - ] = None, - get_username_for_registration: Optional[ - GET_USERNAME_FOR_REGISTRATION_CALLBACK - ] = None, - get_displayname_for_registration: Optional[ - GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK - ] = None, + check_3pid_auth: CHECK_3PID_AUTH_CALLBACK | None = 
None, + on_logged_out: ON_LOGGED_OUT_CALLBACK | None = None, + is_3pid_allowed: IS_3PID_ALLOWED_CALLBACK | None = None, + auth_checkers: dict[tuple[str, tuple[str, ...]], CHECK_AUTH_CALLBACK] + | None = None, + get_username_for_registration: GET_USERNAME_FOR_REGISTRATION_CALLBACK + | None = None, + get_displayname_for_registration: GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK + | None = None, ) -> None: # Register check_3pid_auth callback if check_3pid_auth is not None: @@ -2214,7 +2201,7 @@ class PasswordAuthProvider: async def check_auth( self, username: str, login_type: str, login_dict: JsonDict - ) -> Optional[tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]: + ) -> tuple[str, Callable[["LoginResponse"], Awaitable[None]] | None] | None: """Check if the user has presented valid login credentials Args: @@ -2245,14 +2232,14 @@ class PasswordAuthProvider: continue if result is not None: - # Check that the callback returned a Tuple[str, Optional[Callable]] + # Check that the callback returned a tuple[str, Callable | None] # "type: ignore[unreachable]" is used after some isinstance checks because mypy thinks # result is always the right type, but as it is 3rd party code it might not be if not isinstance(result, tuple) or len(result) != 2: logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[tuple[str, Optional[Callable]]]", + " tuple[str, Callable | None] | None", callback, result, ) @@ -2265,24 +2252,24 @@ class PasswordAuthProvider: if not isinstance(str_result, str): logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[tuple[str, Optional[Callable]]]", + " tuple[str, Callable | None] | None", callback, result, ) continue - # the second should be Optional[Callable] + # the second should be Callable | None if callback_result is not None: if not callable(callback_result): logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[tuple[str, Optional[Callable]]]", + " tuple[str, Callable | None] | None", callback, result, ) continue - # The result is a (str, Optional[callback]) tuple so return the successful result + # The result is a (str, callback | None) tuple so return the successful result return result # If this point has been reached then none of the callbacks successfully authenticated @@ -2291,7 +2278,7 @@ class PasswordAuthProvider: async def check_3pid_auth( self, medium: str, address: str, password: str - ) -> Optional[tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]: + ) -> tuple[str, Callable[["LoginResponse"], Awaitable[None]] | None] | None: # This function is able to return a deferred that either # resolves None, meaning authentication failure, or upon # success, to a str (which is the user_id) or a tuple of @@ -2308,14 +2295,14 @@ class PasswordAuthProvider: continue if result is not None: - # Check that the callback returned a Tuple[str, Optional[Callable]] + # Check that the callback returned a tuple[str, Callable | None] # "type: ignore[unreachable]" is used after some isinstance checks because mypy thinks # result is always the right type, but as it is 3rd party code it might not be if not isinstance(result, tuple) or len(result) != 2: logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[tuple[str, Optional[Callable]]]", + " tuple[str, Callable | None] | None", callback, result, ) @@ 
-2328,24 +2315,24 @@ class PasswordAuthProvider: if not isinstance(str_result, str): logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[tuple[str, Optional[Callable]]]", + " tuple[str, Callable | None] | None", callback, result, ) continue - # the second should be Optional[Callable] + # the second should be Callable | None if callback_result is not None: if not callable(callback_result): logger.warning( # type: ignore[unreachable] "Wrong type returned by module API callback %s: %s, expected" - " Optional[tuple[str, Optional[Callable]]]", + " tuple[str, Callable | None] | None", callback, result, ) continue - # The result is a (str, Optional[callback]) tuple so return the successful result + # The result is a (str, callback | None) tuple so return the successful result return result # If this point has been reached then none of the callbacks successfully authenticated @@ -2353,7 +2340,7 @@ class PasswordAuthProvider: return None async def on_logged_out( - self, user_id: str, device_id: Optional[str], access_token: str + self, user_id: str, device_id: str | None, access_token: str ) -> None: # call all of the on_logged_out callbacks for callback in self.on_logged_out_callbacks: @@ -2367,7 +2354,7 @@ class PasswordAuthProvider: self, uia_results: JsonDict, params: JsonDict, - ) -> Optional[str]: + ) -> str | None: """Defines the username to use when registering the user, using the credentials and parameters provided during the UIA flow. @@ -2412,7 +2399,7 @@ class PasswordAuthProvider: self, uia_results: JsonDict, params: JsonDict, - ) -> Optional[str]: + ) -> str | None: """Defines the display name to use when registering the user, using the credentials and parameters provided during the UIA flow. diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py index 438dcf9f2c..dbcf074d2b 100644 --- a/synapse/handlers/cas.py +++ b/synapse/handlers/cas.py @@ -20,7 +20,7 @@ # import logging import urllib.parse -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from xml.etree import ElementTree as ET import attr @@ -41,7 +41,7 @@ logger = logging.getLogger(__name__) class CasError(Exception): """Used to catch errors when validating the CAS ticket.""" - def __init__(self, error: str, error_description: Optional[str] = None): + def __init__(self, error: str, error_description: str | None = None): self.error = error self.error_description = error_description @@ -54,7 +54,7 @@ class CasError(Exception): @attr.s(slots=True, frozen=True, auto_attribs=True) class CasResponse: username: str - attributes: dict[str, list[Optional[str]]] + attributes: dict[str, list[str | None]] class CasHandler: @@ -145,7 +145,7 @@ class CasHandler: except PartialDownloadError as pde: # Twisted raises this error if the connection is closed, # even if that's being used old-http style to signal end-of-data - # Assertion is for mypy's benefit. Error.response is Optional[bytes], + # Assertion is for mypy's benefit. Error.response is bytes | None, # but a PartialDownloadError should always have a non-None response. assert pde.response is not None body = pde.response @@ -186,7 +186,7 @@ class CasHandler: # Iterate through the nodes and pull out the user and any extra attributes. 
user = None - attributes: dict[str, list[Optional[str]]] = {} + attributes: dict[str, list[str | None]] = {} for child in root[0]: if child.tag.endswith("user"): user = child.text @@ -213,8 +213,8 @@ class CasHandler: async def handle_redirect_request( self, request: SynapseRequest, - client_redirect_url: Optional[bytes], - ui_auth_session_id: Optional[str] = None, + client_redirect_url: bytes | None, + ui_auth_session_id: str | None = None, ) -> str: """Generates a URL for the CAS server where the client should be redirected. @@ -245,8 +245,8 @@ class CasHandler: self, request: SynapseRequest, ticket: str, - client_redirect_url: Optional[str], - session: Optional[str], + client_redirect_url: str | None, + session: str | None, ) -> None: """ Called once the user has successfully authenticated with the SSO. @@ -292,8 +292,8 @@ class CasHandler: self, request: SynapseRequest, cas_response: CasResponse, - client_redirect_url: Optional[str], - session: Optional[str], + client_redirect_url: str | None, + session: str | None, ) -> None: """Handle a CAS response to a ticket request. @@ -384,7 +384,7 @@ class CasHandler: return UserAttributes(localpart=localpart, display_name=display_name) - async def grandfather_existing_users() -> Optional[str]: + async def grandfather_existing_users() -> str | None: # Since CAS did not always use the user_external_ids table, always # to attempt to map to existing users. user_id = UserID(localpart, self._hostname).to_string() diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 204dffd288..e4c646ce87 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -20,7 +20,7 @@ # import itertools import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.constants import Membership from synapse.api.errors import SynapseError @@ -76,7 +76,7 @@ class DeactivateAccountHandler: user_id: str, erase_data: bool, requester: Requester, - id_server: Optional[str] = None, + id_server: str | None = None, by_admin: bool = False, ) -> bool: """Deactivate a user's account diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py index b89b7416e6..3342420d7d 100644 --- a/synapse/handlers/delayed_events.py +++ b/synapse/handlers/delayed_events.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.internet.interfaces import IDelayedCall @@ -74,10 +74,10 @@ class DelayedEventsHandler: cfg=self._config.ratelimiting.rc_delayed_event_mgmt, ) - self._next_delayed_event_call: Optional[IDelayedCall] = None + self._next_delayed_event_call: IDelayedCall | None = None # The current position in the current_state_delta stream - self._event_pos: Optional[int] = None + self._event_pos: int | None = None # Guard to ensure we only process event deltas one at a time self._event_processing = False @@ -327,8 +327,8 @@ class DelayedEventsHandler: *, room_id: str, event_type: str, - state_key: Optional[str], - origin_server_ts: Optional[int], + state_key: str | None, + origin_server_ts: int | None, content: JsonDict, delay: int, ) -> str: @@ -526,7 +526,7 @@ class DelayedEventsHandler: state_key=state_key, ) - def _schedule_next_at_or_none(self, next_send_ts: Optional[Timestamp]) -> None: + def _schedule_next_at_or_none(self, next_send_ts: Timestamp | None) -> None: if next_send_ts is not None: self._schedule_next_at(next_send_ts) elif self._next_delayed_event_call is 
not None: @@ -560,7 +560,7 @@ class DelayedEventsHandler: async def _send_event( self, event: DelayedEventDetails, - txn_id: Optional[str] = None, + txn_id: str | None = None, ) -> None: user_id = UserID(event.user_localpart, self._config.server.server_name) user_id_str = user_id.to_string() @@ -622,7 +622,7 @@ class DelayedEventsHandler: def _get_current_ts(self) -> Timestamp: return Timestamp(self._clock.time_msec()) - def _next_send_ts_changed(self, next_send_ts: Optional[Timestamp]) -> bool: + def _next_send_ts_changed(self, next_send_ts: Timestamp | None) -> bool: # The DB alone knows if the next send time changed after adding/modifying # a delayed event, but if we were to ever miss updating our delayed call's # firing time, we may miss other updates. So, keep track of changes to the diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index f0558fc737..3f1a5fe6d6 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -27,7 +27,6 @@ from typing import ( AbstractSet, Iterable, Mapping, - Optional, cast, ) @@ -89,7 +88,7 @@ MAX_DEVICE_DISPLAY_NAME_LEN = 100 DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000 -def _check_device_name_length(name: Optional[str]) -> None: +def _check_device_name_length(name: str | None) -> None: """ Checks whether a device name is longer than the maximum allowed length. @@ -208,10 +207,10 @@ class DeviceHandler: async def check_device_registered( self, user_id: str, - device_id: Optional[str], - initial_device_display_name: Optional[str] = None, - auth_provider_id: Optional[str] = None, - auth_provider_session_id: Optional[str] = None, + device_id: str | None, + initial_device_display_name: str | None = None, + auth_provider_id: str | None = None, + auth_provider_session_id: str | None = None, ) -> str: """ If the given device has not been registered, register it with the @@ -269,7 +268,7 @@ class DeviceHandler: @trace async def delete_all_devices_for_user( - self, user_id: str, except_device_id: Optional[str] = None + self, user_id: str, except_device_id: str | None = None ) -> None: """Delete all of the user's devices @@ -344,7 +343,7 @@ class DeviceHandler: await self.notify_device_update(user_id, device_ids) async def upsert_device( - self, user_id: str, device_id: str, display_name: Optional[str] = None + self, user_id: str, device_id: str, display_name: str | None = None ) -> bool: """Create or update a device @@ -425,9 +424,7 @@ class DeviceHandler: log_kv(device_map) return devices - async def get_dehydrated_device( - self, user_id: str - ) -> Optional[tuple[str, JsonDict]]: + async def get_dehydrated_device(self, user_id: str) -> tuple[str, JsonDict] | None: """Retrieve the information for a dehydrated device. Args: @@ -441,10 +438,10 @@ class DeviceHandler: async def store_dehydrated_device( self, user_id: str, - device_id: Optional[str], + device_id: str | None, device_data: JsonDict, - initial_device_display_name: Optional[str] = None, - keys_for_device: Optional[JsonDict] = None, + initial_device_display_name: str | None = None, + keys_for_device: JsonDict | None = None, ) -> str: """Store a dehydrated device for a user, optionally storing the keys associated with it as well. If the user had a previous dehydrated device, it is removed. 
@@ -563,7 +560,7 @@ class DeviceHandler: user_id: str, room_ids: StrCollection, from_token: StreamToken, - now_token: Optional[StreamToken] = None, + now_token: StreamToken | None = None, ) -> set[str]: """Get the set of users whose devices have changed who share a room with the given user. @@ -677,7 +674,7 @@ class DeviceHandler: memberships_to_fetch.add(delta.prev_event_id) # Fetch all the memberships for the membership events - event_id_to_memberships: Mapping[str, Optional[EventIdMembership]] = {} + event_id_to_memberships: Mapping[str, EventIdMembership | None] = {} if memberships_to_fetch: event_id_to_memberships = await self.store.get_membership_from_event_ids( memberships_to_fetch @@ -834,7 +831,7 @@ class DeviceHandler: # Check if the application services have any results. if self._query_appservices_for_keys: # Query the appservice for all devices for this user. - query: dict[str, Optional[list[str]]] = {user_id: None} + query: dict[str, list[str] | None] = {user_id: None} # Query the appservices for any keys. appservice_results = await self._appservice_handler.query_keys(query) @@ -923,7 +920,7 @@ class DeviceHandler: async def _delete_device_messages( self, task: ScheduledTask, - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: """Scheduler task to delete device messages in batch of `DEVICE_MSGS_DELETE_BATCH_LIMIT`.""" assert task.params is not None user_id = task.params["user_id"] @@ -1335,7 +1332,7 @@ class DeviceListWorkerUpdater: async def multi_user_device_resync( self, user_ids: list[str], - ) -> dict[str, Optional[JsonMapping]]: + ) -> dict[str, JsonMapping | None]: """ Like `user_device_resync` but operates on multiple users **from the same origin** at once. @@ -1359,8 +1356,8 @@ class DeviceListWorkerUpdater: async def process_cross_signing_key_update( self, user_id: str, - master_key: Optional[JsonDict], - self_signing_key: Optional[JsonDict], + master_key: JsonDict | None, + self_signing_key: JsonDict | None, ) -> list[str]: """Process the given new master and self-signing key for the given remote user. @@ -1699,7 +1696,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): async def multi_user_device_resync( self, user_ids: list[str], mark_failed_as_stale: bool = True - ) -> dict[str, Optional[JsonMapping]]: + ) -> dict[str, JsonMapping | None]: """ Like `user_device_resync` but operates on multiple users **from the same origin** at once. @@ -1735,7 +1732,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): async def _user_device_resync_returning_failed( self, user_id: str - ) -> tuple[Optional[JsonMapping], bool]: + ) -> tuple[JsonMapping | None, bool]: """Fetches all devices for a user and updates the device cache with them. 
Args: diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 4dcdcc42fe..0ef14b31da 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -21,7 +21,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes from synapse.api.errors import Codes, SynapseError @@ -315,7 +315,7 @@ class DeviceMessageHandler: self, requester: Requester, device_id: str, - since_token: Optional[str], + since_token: str | None, limit: int, ) -> JsonDict: """Fetches up to `limit` events sent to `device_id` starting from `since_token` diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 865c32d19e..03b23fe0be 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -21,7 +21,7 @@ import logging import string -from typing import TYPE_CHECKING, Iterable, Literal, Optional, Sequence +from typing import TYPE_CHECKING, Iterable, Literal, Sequence from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes from synapse.api.errors import ( @@ -73,8 +73,8 @@ class DirectoryHandler: self, room_alias: RoomAlias, room_id: str, - servers: Optional[Iterable[str]] = None, - creator: Optional[str] = None, + servers: Iterable[str] | None = None, + creator: str | None = None, ) -> None: # general association creation for both human users and app services @@ -108,7 +108,7 @@ class DirectoryHandler: requester: Requester, room_alias: RoomAlias, room_id: str, - servers: Optional[list[str]] = None, + servers: list[str] | None = None, check_membership: bool = True, ) -> None: """Attempt to create a new alias @@ -252,7 +252,7 @@ class DirectoryHandler: ) await self._delete_association(room_alias) - async def _delete_association(self, room_alias: RoomAlias) -> Optional[str]: + async def _delete_association(self, room_alias: RoomAlias) -> str | None: if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room alias must be local") @@ -263,16 +263,16 @@ class DirectoryHandler: async def get_association(self, room_alias: RoomAlias) -> JsonDict: room_id = None if self.hs.is_mine(room_alias): - result: Optional[ - RoomAliasMapping - ] = await self.get_association_from_room_alias(room_alias) + result: ( + RoomAliasMapping | None + ) = await self.get_association_from_room_alias(room_alias) if result: room_id = result.room_id servers = result.servers else: try: - fed_result: Optional[JsonDict] = await self.federation.make_query( + fed_result: JsonDict | None = await self.federation.make_query( destination=room_alias.domain, query_type="directory", args={"room_alias": room_alias.to_string()}, @@ -387,7 +387,7 @@ class DirectoryHandler: async def get_association_from_room_alias( self, room_alias: RoomAlias - ) -> Optional[RoomAliasMapping]: + ) -> RoomAliasMapping | None: result = await self.store.get_association_from_room_alias(room_alias) if not result: # Query AS to see if it exists @@ -395,7 +395,7 @@ class DirectoryHandler: result = await as_handler.query_room_alias_exists(room_alias) return result - def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None) -> bool: + def can_modify_alias(self, alias: RoomAlias, user_id: str | None = None) -> bool: # Any application service "interested" in an alias they are regexing on # can modify the alias. 
# Users can only modify the alias if ALL the interested services have diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 85a150b71a..41d27d47da 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Iterable, Mapping, Optional +from typing import TYPE_CHECKING, Iterable, Mapping import attr from canonicaljson import encode_canonical_json @@ -132,7 +132,7 @@ class E2eKeysHandler: query_body: JsonDict, timeout: int, from_user_id: str, - from_device_id: Optional[str], + from_device_id: str | None, ) -> JsonDict: """Handle a device key query from a client @@ -479,7 +479,7 @@ class E2eKeysHandler: @cancellable async def get_cross_signing_keys_from_cache( - self, query: Iterable[str], from_user_id: Optional[str] + self, query: Iterable[str], from_user_id: str | None ) -> dict[str, dict[str, JsonMapping]]: """Get cross-signing keys for users from the database @@ -527,7 +527,7 @@ class E2eKeysHandler: @cancellable async def query_local_devices( self, - query: Mapping[str, Optional[list[str]]], + query: Mapping[str, list[str] | None], include_displaynames: bool = True, ) -> dict[str, dict[str, dict]]: """Get E2E device keys for local users @@ -542,7 +542,7 @@ class E2eKeysHandler: A map from user_id -> device_id -> device details """ set_tag("local_query", str(query)) - local_query: list[tuple[str, Optional[str]]] = [] + local_query: list[tuple[str, str | None]] = [] result_dict: dict[str, dict[str, dict]] = {} for user_id, device_ids in query.items(): @@ -594,7 +594,7 @@ class E2eKeysHandler: return result_dict async def on_federation_query_client_keys( - self, query_body: dict[str, dict[str, Optional[list[str]]]] + self, query_body: dict[str, dict[str, list[str] | None]] ) -> JsonDict: """Handle a device key query from a federated server: @@ -614,7 +614,7 @@ class E2eKeysHandler: - self_signing_key: An optional dictionary of user ID -> self-signing key info. """ - device_keys_query: dict[str, Optional[list[str]]] = query_body.get( + device_keys_query: dict[str, list[str] | None] = query_body.get( "device_keys", {} ) if any( @@ -737,7 +737,7 @@ class E2eKeysHandler: self, query: dict[str, dict[str, dict[str, int]]], user: UserID, - timeout: Optional[int], + timeout: int | None, always_include_fallback_keys: bool, ) -> JsonDict: """ @@ -1395,7 +1395,7 @@ class E2eKeysHandler: return signature_list, failures async def _get_e2e_cross_signing_verify_key( - self, user_id: str, key_type: str, from_user_id: Optional[str] = None + self, user_id: str, key_type: str, from_user_id: str | None = None ) -> tuple[JsonMapping, str, VerifyKey]: """Fetch locally or remotely query for a cross-signing public key. @@ -1451,7 +1451,7 @@ class E2eKeysHandler: self, user: UserID, desired_key_type: str, - ) -> Optional[tuple[JsonMapping, str, VerifyKey]]: + ) -> tuple[JsonMapping, str, VerifyKey] | None: """Queries cross-signing keys for a remote user and saves them to the database Only the key specified by `key_type` will be returned, while all retrieved keys @@ -1599,7 +1599,7 @@ class E2eKeysHandler: async def _delete_old_one_time_keys_task( self, task: ScheduledTask - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: """Scheduler task to delete old one time keys. 
Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility @@ -1638,7 +1638,7 @@ class E2eKeysHandler: def _check_cross_signing_key( - key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None + key: JsonDict, user_id: str, key_type: str, signing_key: VerifyKey | None = None ) -> None: """Check a cross-signing key uploaded by a user. Performs some basic sanity checking, and ensures that it is signed, if a signature is required. diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 094b4bc27c..017fbcf8b3 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Literal, Optional, cast +from typing import TYPE_CHECKING, Literal, cast from synapse.api.errors import ( Codes, @@ -63,8 +63,8 @@ class E2eRoomKeysHandler: self, user_id: str, version: str, - room_id: Optional[str] = None, - session_id: Optional[str] = None, + room_id: str | None = None, + session_id: str | None = None, ) -> dict[ Literal["rooms"], dict[str, dict[Literal["sessions"], dict[str, RoomKey]]] ]: @@ -109,8 +109,8 @@ class E2eRoomKeysHandler: self, user_id: str, version: str, - room_id: Optional[str] = None, - session_id: Optional[str] = None, + room_id: str | None = None, + session_id: str | None = None, ) -> JsonDict: """Bulk delete the E2E room keys for a given backup, optionally filtered to a given room or a given session. @@ -299,7 +299,7 @@ class E2eRoomKeysHandler: @staticmethod def _should_replace_room_key( - current_room_key: Optional[RoomKey], room_key: RoomKey + current_room_key: RoomKey | None, room_key: RoomKey ) -> bool: """ Determine whether to replace a given current_room_key (if any) @@ -360,7 +360,7 @@ class E2eRoomKeysHandler: return new_version async def get_version_info( - self, user_id: str, version: Optional[str] = None + self, user_id: str, version: str | None = None ) -> JsonDict: """Get the info about a given version of the user's backup @@ -394,7 +394,7 @@ class E2eRoomKeysHandler: return res @trace - async def delete_version(self, user_id: str, version: Optional[str] = None) -> None: + async def delete_version(self, user_id: str, version: str | None = None) -> None: """Deletes a given version of the user's e2e_room_keys backup Args: diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index b2caca8ce7..4f2657bba8 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Mapping, Optional, Union +from typing import TYPE_CHECKING, Mapping from synapse import event_auth from synapse.api.constants import ( @@ -61,7 +61,7 @@ class EventAuthHandler: async def check_auth_rules_from_context( self, event: EventBase, - batched_auth_events: Optional[Mapping[str, EventBase]] = None, + batched_auth_events: Mapping[str, EventBase] | None = None, ) -> None: """Check an event passes the auth rules at its own auth events Args: @@ -89,7 +89,7 @@ class EventAuthHandler: def compute_auth_events( self, - event: Union[EventBase, EventBuilder], + event: EventBase | EventBuilder, current_state_ids: StateMap[str], for_verification: bool = False, ) -> list[str]: @@ -236,7 +236,7 @@ class EventAuthHandler: state_ids: StateMap[str], room_version: RoomVersion, user_id: str, - prev_membership: Optional[str], + prev_membership: str | None, ) -> None: """ Check whether a user can join a room without an invite 
due to restricted join rules. diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 9522d5a696..ae17639206 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -21,7 +21,7 @@ import logging import random -from typing import TYPE_CHECKING, Iterable, Optional +from typing import TYPE_CHECKING, Iterable from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState from synapse.api.errors import AuthError, SynapseError @@ -58,7 +58,7 @@ class EventStreamHandler: timeout: int = 0, as_client_event: bool = True, affect_presence: bool = True, - room_id: Optional[str] = None, + room_id: str | None = None, ) -> JsonDict: """Fetches the events stream for a given user.""" @@ -152,10 +152,10 @@ class EventHandler: async def get_event( self, user: UserID, - room_id: Optional[str], + room_id: str | None, event_id: str, show_redacted: bool = False, - ) -> Optional[EventBase]: + ) -> EventBase | None: """Retrieve a single specified event. Args: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3eb1d166f8..1bba3fc758 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -31,8 +31,6 @@ from typing import ( TYPE_CHECKING, AbstractSet, Iterable, - Optional, - Union, ) import attr @@ -169,7 +167,7 @@ class FederationHandler: # A dictionary mapping room IDs to (initial destination, other destinations) # tuples. self._partial_state_syncs_maybe_needing_restart: dict[ - str, tuple[Optional[str], AbstractSet[str]] + str, tuple[str | None, AbstractSet[str]] ] = {} # A lock guarding the partial state flag for rooms. # When the lock is held for a given room, no other concurrent code may @@ -232,7 +230,7 @@ class FederationHandler: current_depth: int, limit: int, *, - processing_start_time: Optional[int], + processing_start_time: int | None, ) -> bool: """ Checks whether the `current_depth` is at or approaching any backfill @@ -1174,7 +1172,7 @@ class FederationHandler: user_id: str, membership: str, content: JsonDict, - params: Optional[dict[str, Union[str, Iterable[str]]]] = None, + params: dict[str, str | Iterable[str]] | None = None, ) -> tuple[str, EventBase, RoomVersion]: ( origin, @@ -1371,9 +1369,7 @@ class FederationHandler: return events - async def get_persisted_pdu( - self, origin: str, event_id: str - ) -> Optional[EventBase]: + async def get_persisted_pdu(self, origin: str, event_id: str) -> EventBase | None: """Get an event from the database for the given server. Args: @@ -1670,7 +1666,7 @@ class FederationHandler: logger.debug("Checking auth on event %r", event.content) - last_exception: Optional[Exception] = None + last_exception: Exception | None = None # for each public key in the 3pid invite event for public_key_object in event_auth.get_public_keys(invite_event): @@ -1755,7 +1751,7 @@ class FederationHandler: async def get_room_complexity( self, remote_room_hosts: list[str], room_id: str - ) -> Optional[dict]: + ) -> dict | None: """ Fetch the complexity of a remote room over federation. 
@@ -1793,7 +1789,7 @@ class FederationHandler: def _start_partial_state_room_sync( self, - initial_destination: Optional[str], + initial_destination: str | None, other_destinations: AbstractSet[str], room_id: str, ) -> None: @@ -1876,7 +1872,7 @@ class FederationHandler: async def _sync_partial_state_room( self, - initial_destination: Optional[str], + initial_destination: str | None, other_destinations: AbstractSet[str], room_id: str, ) -> None: @@ -2018,7 +2014,7 @@ class FederationHandler: def _prioritise_destinations_for_partial_state_resync( - initial_destination: Optional[str], + initial_destination: str | None, other_destinations: AbstractSet[str], room_id: str, ) -> StrCollection: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 32b603e947..01e98f60ad 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -28,7 +28,6 @@ from typing import ( Collection, Container, Iterable, - Optional, Sequence, ) @@ -1818,7 +1817,7 @@ class FederationEventHandler: @trace async def _check_event_auth( - self, origin: Optional[str], event: EventBase, context: EventContext + self, origin: str | None, event: EventBase, context: EventContext ) -> None: """ Checks whether an event should be rejected (for failing auth checks). @@ -2101,7 +2100,7 @@ class FederationEventHandler: event.internal_metadata.soft_failed = True async def _load_or_fetch_auth_events_for_event( - self, destination: Optional[str], event: EventBase + self, destination: str | None, event: EventBase ) -> Collection[EventBase]: """Fetch this event's auth_events, from database or remote diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 0f507b3317..1596c55570 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -24,7 +24,7 @@ import logging import urllib.parse -from typing import TYPE_CHECKING, Awaitable, Callable, Optional +from typing import TYPE_CHECKING, Awaitable, Callable import attr @@ -106,7 +106,7 @@ class IdentityHandler: async def threepid_from_creds( self, id_server: str, creds: dict[str, str] - ) -> Optional[JsonDict]: + ) -> JsonDict | None: """ Retrieve and validate a threepid identifier from a "credentials" dictionary against a given identity server @@ -227,7 +227,7 @@ class IdentityHandler: return data async def try_unbind_threepid( - self, mxid: str, medium: str, address: str, id_server: Optional[str] + self, mxid: str, medium: str, address: str, id_server: str | None ) -> bool: """Attempt to remove a 3PID from one or more identity servers. @@ -338,7 +338,7 @@ class IdentityHandler: client_secret: str, send_attempt: int, send_email_func: Callable[[str, str, str, str], Awaitable], - next_link: Optional[str] = None, + next_link: str | None = None, ) -> str: """Send a threepid validation email for password reset or registration purposes @@ -426,7 +426,7 @@ class IdentityHandler: phone_number: str, client_secret: str, send_attempt: int, - next_link: Optional[str] = None, + next_link: str | None = None, ) -> JsonDict: """ Request an external server send an SMS message on our behalf for the purposes of @@ -473,7 +473,7 @@ class IdentityHandler: async def validate_threepid_session( self, client_secret: str, sid: str - ) -> Optional[JsonDict]: + ) -> JsonDict | None: """Validates a threepid session with only the client secret and session ID Tries validating against any configured account_threepid_delegates as well as locally. 
@@ -541,7 +541,7 @@ class IdentityHandler: async def lookup_3pid( self, id_server: str, medium: str, address: str, id_access_token: str - ) -> Optional[str]: + ) -> str | None: """Looks up a 3pid in the passed identity server. Args: @@ -567,7 +567,7 @@ class IdentityHandler: async def _lookup_3pid_v2( self, id_server: str, id_access_token: str, medium: str, address: str - ) -> Optional[str]: + ) -> str | None: """Looks up a 3pid in the passed identity server using v2 lookup. Args: @@ -689,7 +689,7 @@ class IdentityHandler: room_avatar_url: str, room_join_rules: str, room_name: str, - room_type: Optional[str], + room_type: str | None, inviter_display_name: str, inviter_avatar_url: str, id_access_token: str, diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 1c6f8bf53b..611c4fa7b3 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.constants import ( AccountDataTypes, @@ -71,8 +71,8 @@ class InitialSyncHandler: self.snapshot_cache: ResponseCache[ tuple[ str, - Optional[StreamToken], - Optional[StreamToken], + StreamToken | None, + StreamToken | None, Direction, int, bool, diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py index f1715f6495..67b2b7c31d 100644 --- a/synapse/handlers/jwt.py +++ b/synapse/handlers/jwt.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from authlib.jose import JsonWebToken, JWTClaims from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError @@ -41,7 +41,7 @@ class JwtHandler: self.jwt_issuer = hs.config.jwt.jwt_issuer self.jwt_audiences = hs.config.jwt.jwt_audiences - def validate_login(self, login_submission: JsonDict) -> tuple[str, Optional[str]]: + def validate_login(self, login_submission: JsonDict) -> tuple[str, str | None]: """ Authenticates the user for the /login API diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 2ad1dbe73f..7679303a36 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -22,7 +22,7 @@ import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence +from typing import TYPE_CHECKING, Any, Mapping, Sequence from canonicaljson import encode_canonical_json @@ -110,7 +110,7 @@ class MessageHandler: # The scheduled call to self._expire_event. None if no call is currently # scheduled. - self._scheduled_expiry: Optional[IDelayedCall] = None + self._scheduled_expiry: IDelayedCall | None = None if not hs.config.worker.worker_app: self.hs.run_as_background_process( @@ -123,7 +123,7 @@ class MessageHandler: room_id: str, event_type: str, state_key: str, - ) -> Optional[EventBase]: + ) -> EventBase | None: """Get data from a room. Args: @@ -178,8 +178,8 @@ class MessageHandler: self, requester: Requester, room_id: str, - state_filter: Optional[StateFilter] = None, - at_token: Optional[StreamToken] = None, + state_filter: StateFilter | None = None, + at_token: StreamToken | None = None, ) -> list[dict]: """Retrieve all state events for a given room. If the user is joined to the room then return the current state. If the user has @@ -563,7 +563,7 @@ class EventCreationHandler: # Stores the state groups we've recently added to the joined hosts # external cache. 
Note that the timeout must be significantly less than # the TTL on the external cache. - self._external_cache_joined_hosts_updates: Optional[ExpiringCache] = None + self._external_cache_joined_hosts_updates: ExpiringCache | None = None if self._external_cache.is_enabled(): self._external_cache_joined_hosts_updates = ExpiringCache( cache_name="_external_cache_joined_hosts_updates", @@ -577,16 +577,16 @@ class EventCreationHandler: self, requester: Requester, event_dict: dict, - txn_id: Optional[str] = None, - prev_event_ids: Optional[list[str]] = None, - auth_event_ids: Optional[list[str]] = None, - state_event_ids: Optional[list[str]] = None, + txn_id: str | None = None, + prev_event_ids: list[str] | None = None, + auth_event_ids: list[str] | None = None, + state_event_ids: list[str] | None = None, require_consent: bool = True, outlier: bool = False, - depth: Optional[int] = None, - state_map: Optional[StateMap[str]] = None, + depth: int | None = None, + state_map: StateMap[str] | None = None, for_batch: bool = False, - current_state_group: Optional[int] = None, + current_state_group: int | None = None, ) -> tuple[EventBase, UnpersistedEventContextBase]: """ Given a dict from a client, create a new event. If bool for_batch is true, will @@ -865,7 +865,7 @@ class EventCreationHandler: async def deduplicate_state_event( self, event: EventBase, context: EventContext - ) -> Optional[EventBase]: + ) -> EventBase | None: """ Checks whether event is in the latest resolved state in context. @@ -903,7 +903,7 @@ class EventCreationHandler: requester: Requester, txn_id: str, room_id: str, - ) -> Optional[str]: + ) -> str | None: """For the given transaction ID and room ID, check if there is a matching event ID. Args: @@ -937,7 +937,7 @@ class EventCreationHandler: requester: Requester, txn_id: str, room_id: str, - ) -> Optional[EventBase]: + ) -> EventBase | None: """For the given transaction ID and room ID, check if there is a matching event. If so, fetch it and return it. @@ -961,13 +961,13 @@ class EventCreationHandler: self, requester: Requester, event_dict: dict, - prev_event_ids: Optional[list[str]] = None, - state_event_ids: Optional[list[str]] = None, + prev_event_ids: list[str] | None = None, + state_event_ids: list[str] | None = None, ratelimit: bool = True, - txn_id: Optional[str] = None, + txn_id: str | None = None, ignore_shadow_ban: bool = False, outlier: bool = False, - depth: Optional[int] = None, + depth: int | None = None, ) -> tuple[EventBase, int]: """ Creates an event, then sends it. 
@@ -1098,13 +1098,13 @@ class EventCreationHandler: self, requester: Requester, event_dict: dict, - prev_event_ids: Optional[list[str]] = None, - state_event_ids: Optional[list[str]] = None, + prev_event_ids: list[str] | None = None, + state_event_ids: list[str] | None = None, ratelimit: bool = True, - txn_id: Optional[str] = None, + txn_id: str | None = None, ignore_shadow_ban: bool = False, outlier: bool = False, - depth: Optional[int] = None, + depth: int | None = None, ) -> tuple[EventBase, int]: room_id = event_dict["room_id"] @@ -1219,14 +1219,14 @@ class EventCreationHandler: async def create_new_client_event( self, builder: EventBuilder, - requester: Optional[Requester] = None, - prev_event_ids: Optional[list[str]] = None, - auth_event_ids: Optional[list[str]] = None, - state_event_ids: Optional[list[str]] = None, - depth: Optional[int] = None, - state_map: Optional[StateMap[str]] = None, + requester: Requester | None = None, + prev_event_ids: list[str] | None = None, + auth_event_ids: list[str] | None = None, + state_event_ids: list[str] | None = None, + depth: int | None = None, + state_map: StateMap[str] | None = None, for_batch: bool = False, - current_state_group: Optional[int] = None, + current_state_group: int | None = None, ) -> tuple[EventBase, UnpersistedEventContextBase]: """Create a new event for a local client. If bool for_batch is true, will create an event using the prev_event_ids, and will create an event context for @@ -1473,7 +1473,7 @@ class EventCreationHandler: requester: Requester, events_and_context: list[EventPersistencePair], ratelimit: bool = True, - extra_users: Optional[list[UserID]] = None, + extra_users: list[UserID] | None = None, ignore_shadow_ban: bool = False, ) -> EventBase: """Processes new events. Please note that if batch persisting events, an error in @@ -1592,7 +1592,7 @@ class EventCreationHandler: self, requester: Requester, room_id: str, - prev_event_id: Optional[str], + prev_event_id: str | None, event_dicts: Sequence[JsonDict], ratelimit: bool = True, ignore_shadow_ban: bool = False, @@ -1685,7 +1685,7 @@ class EventCreationHandler: requester: Requester, events_and_context: list[EventPersistencePair], ratelimit: bool = True, - extra_users: Optional[list[UserID]] = None, + extra_users: list[UserID] | None = None, ) -> EventBase: """Actually persists new events. Should only be called by `handle_new_client_event`, and see its docstring for documentation of @@ -1877,7 +1877,7 @@ class EventCreationHandler: requester: Requester, events_and_context: list[EventPersistencePair], ratelimit: bool = True, - extra_users: Optional[list[UserID]] = None, + extra_users: list[UserID] | None = None, ) -> EventBase: """Called when we have fully built the events, have already calculated the push actions for the events, and checked auth. @@ -2132,7 +2132,7 @@ class EventCreationHandler: return persisted_events[-1] async def is_admin_redaction( - self, event_type: str, sender: str, redacts: Optional[str] + self, event_type: str, sender: str, redacts: str | None ) -> bool: """Return whether the event is a redaction made by an admin, and thus should use a different ratelimiter. 
@@ -2174,7 +2174,7 @@ class EventCreationHandler: logger.info("maybe_kick_guest_users %r", current_state) await self.hs.get_room_member_handler().kick_guest_users(current_state) - async def _bump_active_time(self, user: UserID, device_id: Optional[str]) -> None: + async def _bump_active_time(self, user: UserID, device_id: str | None) -> None: try: presence = self.hs.get_presence_handler() await presence.bump_presence_active_time(user, device_id) diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index f140912b2a..429a739380 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -27,10 +27,8 @@ from typing import ( TYPE_CHECKING, Any, Generic, - Optional, TypedDict, TypeVar, - Union, ) from urllib.parse import urlencode, urlparse @@ -102,10 +100,10 @@ _SESSION_COOKIES = [ class Token(TypedDict): access_token: str token_type: str - id_token: Optional[str] - refresh_token: Optional[str] + id_token: str | None + refresh_token: str | None expires_in: int - scope: Optional[str] + scope: str | None #: A JWK, as per RFC7517 sec 4. The type could be more precise than that, but @@ -206,7 +204,7 @@ class OidcHandler: # are two. for cookie_name, _ in _SESSION_COOKIES: - session: Optional[bytes] = request.getCookie(cookie_name) + session: bytes | None = request.getCookie(cookie_name) if session is not None: break else: @@ -335,7 +333,7 @@ class OidcHandler: # Now that we know the audience and the issuer, we can figure out from # what provider it is coming from - oidc_provider: Optional[OidcProvider] = None + oidc_provider: OidcProvider | None = None for provider in self._providers.values(): if provider.issuer == issuer and provider.client_id in audience: oidc_provider = provider @@ -351,7 +349,7 @@ class OidcHandler: class OidcError(Exception): """Used to catch errors when calling the token_endpoint""" - def __init__(self, error: str, error_description: Optional[str] = None): + def __init__(self, error: str, error_description: str | None = None): self.error = error self.error_description = error_description @@ -398,7 +396,7 @@ class OidcProvider: self._scopes = provider.scopes self._user_profile_method = provider.user_profile_method - client_secret: Optional[Union[str, JwtClientSecret]] = None + client_secret: str | JwtClientSecret | None = None if provider.client_secret: client_secret = provider.client_secret elif provider.client_secret_jwt_key: @@ -904,8 +902,8 @@ class OidcProvider: alg_values: list[str], token: str, claims_cls: type[C], - claims_options: Optional[dict] = None, - claims_params: Optional[dict] = None, + claims_options: dict | None = None, + claims_params: dict | None = None, ) -> C: """Decode and validate a JWT, re-fetching the JWKS as needed. 
@@ -1005,8 +1003,8 @@ class OidcProvider: async def handle_redirect_request( self, request: SynapseRequest, - client_redirect_url: Optional[bytes], - ui_auth_session_id: Optional[str] = None, + client_redirect_url: bytes | None, + ui_auth_session_id: str | None = None, ) -> str: """Handle an incoming request to /login/sso/redirect @@ -1235,7 +1233,7 @@ class OidcProvider: token: Token, request: SynapseRequest, client_redirect_url: str, - sid: Optional[str], + sid: str | None, ) -> None: """Given a UserInfo response, complete the login flow @@ -1300,7 +1298,7 @@ class OidcProvider: return UserAttributes(**attributes) - async def grandfather_existing_users() -> Optional[str]: + async def grandfather_existing_users() -> str | None: if self._allow_existing_users: # If allowing existing users we want to generate a single localpart # and attempt to match it. @@ -1444,8 +1442,8 @@ class OidcProvider: # If the `sub` claim was included in the logout token, we check that it matches # that it matches the right user. We can have cases where the `sub` claim is not # the ID saved in database, so we let admins disable this check in config. - sub: Optional[str] = claims.get("sub") - expected_user_id: Optional[str] = None + sub: str | None = claims.get("sub") + expected_user_id: str | None = None if sub is not None and not self._config.backchannel_logout_ignore_sub: expected_user_id = await self._store.get_user_by_external_id( self.idp_id, sub @@ -1473,7 +1471,7 @@ class LogoutToken(JWTClaims): # type: ignore[misc] REGISTERED_CLAIMS = ["iss", "sub", "aud", "iat", "jti", "events", "sid"] - def validate(self, now: Optional[int] = None, leeway: int = 0) -> None: + def validate(self, now: int | None = None, leeway: int = 0) -> None: """Validate everything in claims payload.""" super().validate(now, leeway) self.validate_sid() @@ -1584,10 +1582,10 @@ class JwtClientSecret: class UserAttributeDict(TypedDict): - localpart: Optional[str] + localpart: str | None confirm_localpart: bool - display_name: Optional[str] - picture: Optional[str] # may be omitted by older `OidcMappingProviders` + display_name: str | None + picture: str | None # may be omitted by older `OidcMappingProviders` emails: list[str] @@ -1674,9 +1672,9 @@ env.filters.update( class JinjaOidcMappingConfig: subject_template: Template picture_template: Template - localpart_template: Optional[Template] - display_name_template: Optional[Template] - email_template: Optional[Template] + localpart_template: Template | None + display_name_template: Template | None + email_template: Template | None extra_attributes: dict[str, Template] confirm_localpart: bool = False @@ -1710,7 +1708,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): subject_template = parse_template_config_with_claim("subject", "sub") picture_template = parse_template_config_with_claim("picture", "picture") - def parse_template_config(option_name: str) -> Optional[Template]: + def parse_template_config(option_name: str) -> Template | None: if option_name not in config: return None try: @@ -1768,7 +1766,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): # a usable mxid. 
localpart += str(failures) if failures else "" - def render_template_field(template: Optional[Template]) -> Optional[str]: + def render_template_field(template: Template | None) -> str | None: if template is None: return None return template.render(user=userinfo).strip() diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 7274a512b0..a90ed3193c 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, cast from twisted.python.failure import Failure @@ -132,7 +132,7 @@ class PaginationHandler: ) async def purge_history_for_rooms_in_range( - self, min_ms: Optional[int], max_ms: Optional[int] + self, min_ms: int | None, max_ms: int | None ) -> None: """Purge outdated events from rooms within the given retention range. @@ -279,7 +279,7 @@ class PaginationHandler: async def _purge_history( self, task: ScheduledTask, - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: """ Scheduler action to purge some history of a room. """ @@ -308,7 +308,7 @@ class PaginationHandler: room_id: str, token: str, delete_local_events: bool, - ) -> Optional[str]: + ) -> str | None: """Carry out a history purge on a room. Args: @@ -332,7 +332,7 @@ class PaginationHandler: ) return f.getErrorMessage() - async def get_delete_task(self, delete_id: str) -> Optional[ScheduledTask]: + async def get_delete_task(self, delete_id: str) -> ScheduledTask | None: """Get the current status of an active deleting Args: @@ -342,7 +342,7 @@ class PaginationHandler: return await self._task_scheduler.get_task(delete_id) async def get_delete_tasks_by_room( - self, room_id: str, only_active: Optional[bool] = False + self, room_id: str, only_active: bool | None = False ) -> list[ScheduledTask]: """Get complete, failed or active delete tasks by room @@ -363,7 +363,7 @@ class PaginationHandler: async def _purge_room( self, task: ScheduledTask, - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: """ Scheduler action to purge a room. """ @@ -415,7 +415,7 @@ class PaginationHandler: room_id: str, pagin_config: PaginationConfig, as_client_event: bool = True, - event_filter: Optional[Filter] = None, + event_filter: Filter | None = None, use_admin_priviledge: bool = False, ) -> JsonDict: """Get messages in a room. @@ -691,7 +691,7 @@ class PaginationHandler: async def _shutdown_and_purge_room( self, task: ScheduledTask, - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: """ Scheduler action to shutdown and purge a room. 
""" @@ -702,7 +702,7 @@ class PaginationHandler: room_id = task.resource_id - async def update_result(result: Optional[JsonMapping]) -> None: + async def update_result(result: JsonMapping | None) -> None: await self._task_scheduler.update_task(task.id, result=result) shutdown_result = ( diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index d8150a5857..ca5002cab3 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -88,7 +88,6 @@ from typing import ( ContextManager, Generator, Iterable, - Optional, ) from prometheus_client import Counter @@ -248,7 +247,7 @@ class BasePresenceHandler(abc.ABC): async def user_syncing( self, user_id: str, - device_id: Optional[str], + device_id: str | None, affect_presence: bool, presence_state: str, ) -> ContextManager[None]: @@ -271,7 +270,7 @@ class BasePresenceHandler(abc.ABC): @abc.abstractmethod def get_currently_syncing_users_for_replication( self, - ) -> Iterable[tuple[str, Optional[str]]]: + ) -> Iterable[tuple[str, str | None]]: """Get an iterable of syncing users and devices on this worker, to send to the presence handler This is called when a replication connection is established. It should return @@ -340,7 +339,7 @@ class BasePresenceHandler(abc.ABC): async def set_state( self, target_user: UserID, - device_id: Optional[str], + device_id: str | None, state: JsonDict, force_notify: bool = False, is_sync: bool = False, @@ -360,7 +359,7 @@ class BasePresenceHandler(abc.ABC): @abc.abstractmethod async def bump_presence_active_time( - self, user: UserID, device_id: Optional[str] + self, user: UserID, device_id: str | None ) -> None: """We've seen the user do something that indicates they're interacting with the app. @@ -370,7 +369,7 @@ class BasePresenceHandler(abc.ABC): self, process_id: str, user_id: str, - device_id: Optional[str], + device_id: str | None, is_syncing: bool, sync_time_msec: int, ) -> None: @@ -496,9 +495,9 @@ class _NullContextManager(ContextManager[None]): def __exit__( self, - exc_type: Optional[type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, ) -> None: pass @@ -517,16 +516,14 @@ class WorkerPresenceHandler(BasePresenceHandler): # The number of ongoing syncs on this process, by (user ID, device ID). # Empty if _presence_enabled is false. - self._user_device_to_num_current_syncs: dict[ - tuple[str, Optional[str]], int - ] = {} + self._user_device_to_num_current_syncs: dict[tuple[str, str | None], int] = {} self.notifier = hs.get_notifier() self.instance_id = hs.get_instance_id() # (user_id, device_id) -> last_sync_ms. 
Lists the devices that have stopped # syncing but we haven't notified the presence writer of that yet - self._user_devices_going_offline: dict[tuple[str, Optional[str]], int] = {} + self._user_devices_going_offline: dict[tuple[str, str | None], int] = {} self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs) @@ -549,7 +546,7 @@ class WorkerPresenceHandler(BasePresenceHandler): def send_user_sync( self, user_id: str, - device_id: Optional[str], + device_id: str | None, is_syncing: bool, last_sync_ms: int, ) -> None: @@ -558,7 +555,7 @@ class WorkerPresenceHandler(BasePresenceHandler): self.instance_id, user_id, device_id, is_syncing, last_sync_ms ) - def mark_as_coming_online(self, user_id: str, device_id: Optional[str]) -> None: + def mark_as_coming_online(self, user_id: str, device_id: str | None) -> None: """A user has started syncing. Send a UserSync to the presence writer, unless they had recently stopped syncing. """ @@ -568,7 +565,7 @@ class WorkerPresenceHandler(BasePresenceHandler): # were offline self.send_user_sync(user_id, device_id, True, self.clock.time_msec()) - def mark_as_going_offline(self, user_id: str, device_id: Optional[str]) -> None: + def mark_as_going_offline(self, user_id: str, device_id: str | None) -> None: """A user has stopped syncing. We wait before notifying the presence writer as its likely they'll come back soon. This allows us to avoid sending a stopped syncing immediately followed by a started syncing @@ -591,7 +588,7 @@ class WorkerPresenceHandler(BasePresenceHandler): async def user_syncing( self, user_id: str, - device_id: Optional[str], + device_id: str | None, affect_presence: bool, presence_state: str, ) -> ContextManager[None]: @@ -699,7 +696,7 @@ class WorkerPresenceHandler(BasePresenceHandler): def get_currently_syncing_users_for_replication( self, - ) -> Iterable[tuple[str, Optional[str]]]: + ) -> Iterable[tuple[str, str | None]]: return [ user_id_device_id for user_id_device_id, count in self._user_device_to_num_current_syncs.items() @@ -709,7 +706,7 @@ class WorkerPresenceHandler(BasePresenceHandler): async def set_state( self, target_user: UserID, - device_id: Optional[str], + device_id: str | None, state: JsonDict, force_notify: bool = False, is_sync: bool = False, @@ -748,7 +745,7 @@ class WorkerPresenceHandler(BasePresenceHandler): ) async def bump_presence_active_time( - self, user: UserID, device_id: Optional[str] + self, user: UserID, device_id: str | None ) -> None: """We've seen the user do something that indicates they're interacting with the app. @@ -786,7 +783,7 @@ class PresenceHandler(BasePresenceHandler): # The per-device presence state, maps user to devices to per-device presence state. self._user_to_device_to_current_state: dict[ - str, dict[Optional[str], UserDevicePresenceState] + str, dict[str | None, UserDevicePresenceState] ] = {} now = self.clock.time_msec() @@ -838,9 +835,7 @@ class PresenceHandler(BasePresenceHandler): # Keeps track of the number of *ongoing* syncs on this process. While # this is non zero a user will never go offline. - self._user_device_to_num_current_syncs: dict[ - tuple[str, Optional[str]], int - ] = {} + self._user_device_to_num_current_syncs: dict[tuple[str, str | None], int] = {} # Keeps track of the number of *ongoing* syncs on other processes. 
# @@ -853,7 +848,7 @@ class PresenceHandler(BasePresenceHandler): # Stored as a dict from process_id to set of (user_id, device_id), and # a dict of process_id to millisecond timestamp last updated. self.external_process_to_current_syncs: dict[ - str, set[tuple[str, Optional[str]]] + str, set[tuple[str, str | None]] ] = {} self.external_process_last_updated_ms: dict[str, int] = {} @@ -1117,7 +1112,7 @@ class PresenceHandler(BasePresenceHandler): return await self._update_states(changes) async def bump_presence_active_time( - self, user: UserID, device_id: Optional[str] + self, user: UserID, device_id: str | None ) -> None: """We've seen the user do something that indicates they're interacting with the app. @@ -1156,7 +1151,7 @@ class PresenceHandler(BasePresenceHandler): async def user_syncing( self, user_id: str, - device_id: Optional[str], + device_id: str | None, affect_presence: bool = True, presence_state: str = PresenceState.ONLINE, ) -> ContextManager[None]: @@ -1216,7 +1211,7 @@ class PresenceHandler(BasePresenceHandler): def get_currently_syncing_users_for_replication( self, - ) -> Iterable[tuple[str, Optional[str]]]: + ) -> Iterable[tuple[str, str | None]]: # since we are the process handling presence, there is nothing to do here. return [] @@ -1224,7 +1219,7 @@ class PresenceHandler(BasePresenceHandler): self, process_id: str, user_id: str, - device_id: Optional[str], + device_id: str | None, is_syncing: bool, sync_time_msec: int, ) -> None: @@ -1388,7 +1383,7 @@ class PresenceHandler(BasePresenceHandler): async def set_state( self, target_user: UserID, - device_id: Optional[str], + device_id: str | None, state: JsonDict, force_notify: bool = False, is_sync: bool = False, @@ -1835,15 +1830,15 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): async def get_new_events( self, user: UserID, - from_key: Optional[int], + from_key: int | None, # Having a default limit doesn't match the EventSource API, but some # callers do not provide it. It is unused in this class. limit: int = 0, - room_ids: Optional[StrCollection] = None, + room_ids: StrCollection | None = None, is_guest: bool = False, - explicit_room_id: Optional[str] = None, + explicit_room_id: str | None = None, include_offline: bool = True, - service: Optional[ApplicationService] = None, + service: ApplicationService | None = None, ) -> tuple[list[UserPresenceState], int]: # The process for getting presence events are: # 1. Get the rooms the user is in. @@ -1995,7 +1990,7 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): self, user_id: str, include_offline: bool, - from_key: Optional[int] = None, + from_key: int | None = None, ) -> list[UserPresenceState]: """ Computes the presence updates a user should receive. 
@@ -2076,8 +2071,8 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): def handle_timeouts( user_states: list[UserPresenceState], is_mine_fn: Callable[[str], bool], - syncing_user_devices: AbstractSet[tuple[str, Optional[str]]], - user_to_devices: dict[str, dict[Optional[str], UserDevicePresenceState]], + syncing_user_devices: AbstractSet[tuple[str, str | None]], + user_to_devices: dict[str, dict[str | None, UserDevicePresenceState]], now: int, ) -> list[UserPresenceState]: """Checks the presence of users that have timed out and updates as @@ -2115,10 +2110,10 @@ def handle_timeouts( def handle_timeout( state: UserPresenceState, is_mine: bool, - syncing_device_ids: AbstractSet[tuple[str, Optional[str]]], - user_devices: dict[Optional[str], UserDevicePresenceState], + syncing_device_ids: AbstractSet[tuple[str, str | None]], + user_devices: dict[str | None, UserDevicePresenceState], now: int, -) -> Optional[UserPresenceState]: +) -> UserPresenceState | None: """Checks the presence of the user to see if any of the timers have elapsed Args: diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 240a235a0e..59904cd995 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -20,7 +20,7 @@ # import logging import random -from typing import TYPE_CHECKING, Optional, Union +from typing import TYPE_CHECKING from synapse.api.constants import ProfileFields from synapse.api.errors import ( @@ -68,8 +68,8 @@ class ProfileHandler: self.user_directory_handler = hs.get_user_directory_handler() self.request_ratelimiter = hs.get_request_ratelimiter() - self.max_avatar_size: Optional[int] = hs.config.server.max_avatar_size - self.allowed_avatar_mimetypes: Optional[list[str]] = ( + self.max_avatar_size: int | None = hs.config.server.max_avatar_size + self.allowed_avatar_mimetypes: list[str] | None = ( hs.config.server.allowed_avatar_mimetypes ) @@ -133,7 +133,7 @@ class ProfileHandler: raise SynapseError(502, "Failed to fetch profile") raise e.to_synapse_error() - async def get_displayname(self, target_user: UserID) -> Optional[str]: + async def get_displayname(self, target_user: UserID) -> str | None: """ Fetch a user's display name from their profile. @@ -211,7 +211,7 @@ class ProfileHandler: 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,) ) - displayname_to_set: Optional[str] = new_displayname.strip() + displayname_to_set: str | None = new_displayname.strip() if new_displayname == "": displayname_to_set = None @@ -238,7 +238,7 @@ class ProfileHandler: if propagate: await self._update_join_states(requester, target_user) - async def get_avatar_url(self, target_user: UserID) -> Optional[str]: + async def get_avatar_url(self, target_user: UserID) -> str | None: """ Fetch a user's avatar URL from their profile. 
@@ -316,7 +316,7 @@ class ProfileHandler: if not await self.check_avatar_size_and_mime_type(new_avatar_url): raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN) - avatar_url_to_set: Optional[str] = new_avatar_url + avatar_url_to_set: str | None = new_avatar_url if new_avatar_url == "": avatar_url_to_set = None @@ -367,9 +367,9 @@ class ProfileHandler: server_name = host if self._is_mine_server_name(server_name): - media_info: Optional[ - Union[LocalMedia, RemoteMedia] - ] = await self.store.get_local_media(media_id) + media_info: ( + LocalMedia | RemoteMedia | None + ) = await self.store.get_local_media(media_id) else: media_info = await self.store.get_cached_remote_media(server_name, media_id) @@ -606,7 +606,7 @@ class ProfileHandler: ) async def check_profile_query_allowed( - self, target_user: UserID, requester: Optional[UserID] = None + self, target_user: UserID, requester: UserID | None = None ) -> None: """Checks whether a profile query is allowed. If the 'require_auth_for_profile_requests' config flag is set to True and a diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py index 643fa72f3f..746c712bac 100644 --- a/synapse/handlers/push_rules.py +++ b/synapse/handlers/push_rules.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Any, Optional, Union +from typing import TYPE_CHECKING, Any import attr @@ -40,7 +40,7 @@ class RuleSpec: scope: str template: str rule_id: str - attr: Optional[str] + attr: str | None class PushRulesHandler: @@ -51,7 +51,7 @@ class PushRulesHandler: self._main_store = hs.get_datastores().main async def set_rule_attr( - self, user_id: str, spec: RuleSpec, val: Union[bool, JsonDict] + self, user_id: str, spec: RuleSpec, val: bool | JsonDict ) -> None: """Set an attribute (enabled or actions) on an existing push rule. @@ -137,7 +137,7 @@ class PushRulesHandler: return rules -def check_actions(actions: list[Union[str, JsonDict]]) -> None: +def check_actions(actions: list[str | JsonDict]) -> None: """Check if the given actions are spec compliant. Args: diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index ad41113b5b..f6383baf0b 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Iterable, Optional, Sequence +from typing import TYPE_CHECKING, Iterable, Sequence from synapse.api.constants import EduTypes, ReceiptTypes from synapse.appservice import ApplicationService @@ -180,7 +180,7 @@ class ReceiptsHandler: receipt_type: str, user_id: UserID, event_id: str, - thread_id: Optional[str], + thread_id: str | None, ) -> None: """Called when a client tells us a local user has read up to the given event_id in the room. 
@@ -285,8 +285,8 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]): limit: int, room_ids: Iterable[str], is_guest: bool, - explicit_room_id: Optional[str] = None, - to_key: Optional[MultiWriterStreamToken] = None, + explicit_room_id: str | None = None, + to_key: MultiWriterStreamToken | None = None, ) -> tuple[list[JsonMapping], MultiWriterStreamToken]: """ Find read receipts for given rooms (> `from_token` and <= `to_token`) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 8b620a91bc..139c14dcf4 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -26,7 +26,6 @@ import logging from typing import ( TYPE_CHECKING, Iterable, - Optional, TypedDict, ) @@ -106,8 +105,8 @@ def init_counters_for_auth_provider(auth_provider_id: str, server_name: str) -> class LoginDict(TypedDict): device_id: str access_token: str - valid_until_ms: Optional[int] - refresh_token: Optional[str] + valid_until_ms: int | None + refresh_token: str | None class RegistrationHandler: @@ -160,8 +159,8 @@ class RegistrationHandler: async def check_username( self, localpart: str, - guest_access_token: Optional[str] = None, - assigned_user_id: Optional[str] = None, + guest_access_token: str | None = None, + assigned_user_id: str | None = None, inhibit_user_in_use_error: bool = False, ) -> None: if types.contains_invalid_mxid_characters(localpart): @@ -228,19 +227,19 @@ class RegistrationHandler: async def register_user( self, - localpart: Optional[str] = None, - password_hash: Optional[str] = None, - guest_access_token: Optional[str] = None, + localpart: str | None = None, + password_hash: str | None = None, + guest_access_token: str | None = None, make_guest: bool = False, admin: bool = False, - threepid: Optional[dict] = None, - user_type: Optional[str] = None, - default_display_name: Optional[str] = None, - address: Optional[str] = None, - bind_emails: Optional[Iterable[str]] = None, + threepid: dict | None = None, + user_type: str | None = None, + default_display_name: str | None = None, + address: str | None = None, + bind_emails: Iterable[str] | None = None, by_admin: bool = False, - user_agent_ips: Optional[list[tuple[str, str]]] = None, - auth_provider_id: Optional[str] = None, + user_agent_ips: list[tuple[str, str]] | None = None, + auth_provider_id: str | None = None, approved: bool = False, ) -> str: """Registers a new client on the server. 
@@ -679,7 +678,7 @@ class RegistrationHandler: return (user_id, service) def check_user_id_not_appservice_exclusive( - self, user_id: str, allowed_appservice: Optional[ApplicationService] = None + self, user_id: str, allowed_appservice: ApplicationService | None = None ) -> None: # don't allow people to register the server notices mxid if self._server_notices_mxid is not None: @@ -704,7 +703,7 @@ class RegistrationHandler: errcode=Codes.EXCLUSIVE, ) - async def check_registration_ratelimit(self, address: Optional[str]) -> None: + async def check_registration_ratelimit(self, address: str | None) -> None: """A simple helper method to check whether the registration rate limit has been hit for a given IP address @@ -723,14 +722,14 @@ class RegistrationHandler: async def register_with_store( self, user_id: str, - password_hash: Optional[str] = None, + password_hash: str | None = None, was_guest: bool = False, make_guest: bool = False, - appservice_id: Optional[str] = None, - create_profile_with_displayname: Optional[str] = None, + appservice_id: str | None = None, + create_profile_with_displayname: str | None = None, admin: bool = False, - user_type: Optional[str] = None, - address: Optional[str] = None, + user_type: str | None = None, + address: str | None = None, shadow_banned: bool = False, approved: bool = False, ) -> None: @@ -771,14 +770,14 @@ class RegistrationHandler: async def register_device( self, user_id: str, - device_id: Optional[str], - initial_display_name: Optional[str], + device_id: str | None, + initial_display_name: str | None, is_guest: bool = False, is_appservice_ghost: bool = False, - auth_provider_id: Optional[str] = None, + auth_provider_id: str | None = None, should_issue_refresh_token: bool = False, - auth_provider_session_id: Optional[str] = None, - ) -> tuple[str, str, Optional[int], Optional[str]]: + auth_provider_session_id: str | None = None, + ) -> tuple[str, str, int | None, str | None]: """Register a device for a user and generate an access token. The access token will be limited by the homeserver's session_lifetime config. 
@@ -821,13 +820,13 @@ class RegistrationHandler: async def register_device_inner( self, user_id: str, - device_id: Optional[str], - initial_display_name: Optional[str], + device_id: str | None, + initial_display_name: str | None, is_guest: bool = False, is_appservice_ghost: bool = False, should_issue_refresh_token: bool = False, - auth_provider_id: Optional[str] = None, - auth_provider_session_id: Optional[str] = None, + auth_provider_id: str | None = None, + auth_provider_session_id: str | None = None, ) -> LoginDict: """Helper for register_device @@ -927,7 +926,7 @@ class RegistrationHandler: } async def post_registration_actions( - self, user_id: str, auth_result: dict, access_token: Optional[str] + self, user_id: str, auth_result: dict, access_token: str | None ) -> None: """A user has completed registration @@ -977,7 +976,7 @@ class RegistrationHandler: await self.post_consent_actions(user_id) async def _register_email_threepid( - self, user_id: str, threepid: dict, token: Optional[str] + self, user_id: str, threepid: dict, token: str | None ) -> None: """Add an email address as a 3pid identifier diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 217681f7c0..fd38ffa920 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -25,7 +25,6 @@ from typing import ( Collection, Iterable, Mapping, - Optional, Sequence, ) @@ -75,9 +74,9 @@ class BundledAggregations: Some values require additional processing during serialization. """ - references: Optional[JsonDict] = None - replace: Optional[EventBase] = None - thread: Optional[_ThreadAggregation] = None + references: JsonDict | None = None + replace: EventBase | None = None + thread: _ThreadAggregation | None = None def __bool__(self) -> bool: return bool(self.references or self.replace or self.thread) @@ -101,8 +100,8 @@ class RelationsHandler: pagin_config: PaginationConfig, recurse: bool, include_original_event: bool, - relation_type: Optional[str] = None, - event_type: Optional[str] = None, + relation_type: str | None = None, + event_type: str | None = None, ) -> JsonDict: """Get related events of a event, ordered by topological ordering. @@ -553,7 +552,7 @@ class RelationsHandler: room_id: str, include: ThreadsListInclude, limit: int = 5, - from_token: Optional[ThreadsNextBatch] = None, + from_token: ThreadsNextBatch | None = None, ) -> JsonDict: """Get related events of a event, ordered by topological ordering. 
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f242accef1..d62ad5393f 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -33,7 +33,6 @@ from typing import ( Any, Awaitable, Callable, - Optional, cast, ) @@ -198,7 +197,7 @@ class RoomCreationHandler: requester: Requester, old_room_id: str, new_version: RoomVersion, - additional_creators: Optional[list[str]], + additional_creators: list[str] | None, auto_member: bool = False, ratelimit: bool = True, ) -> str: @@ -341,10 +340,11 @@ class RoomCreationHandler: new_version: RoomVersion, tombstone_event: EventBase, tombstone_context: synapse.events.snapshot.EventContext, - additional_creators: Optional[list[str]], - creation_event_with_context: Optional[ - tuple[EventBase, synapse.events.snapshot.EventContext] - ] = None, + additional_creators: list[str] | None, + creation_event_with_context: tuple[ + EventBase, synapse.events.snapshot.EventContext + ] + | None = None, auto_member: bool = False, ) -> str: """ @@ -434,7 +434,7 @@ class RoomCreationHandler: old_room_id: str, new_room_id: str, old_room_state: StateMap[str], - additional_creators: Optional[list[str]], + additional_creators: list[str] | None, ) -> None: """Send updated power levels in both rooms after an upgrade @@ -524,9 +524,9 @@ class RoomCreationHandler: def _calculate_upgraded_room_creation_content( self, old_room_create_event: EventBase, - tombstone_event_id: Optional[str], + tombstone_event_id: str | None, new_room_version: RoomVersion, - additional_creators: Optional[list[str]], + additional_creators: list[str] | None, ) -> JsonDict: creation_content: JsonDict = { "room_version": new_room_version.identifier, @@ -558,10 +558,11 @@ class RoomCreationHandler: new_room_id: str, new_room_version: RoomVersion, tombstone_event_id: str, - additional_creators: Optional[list[str]], - creation_event_with_context: Optional[ - tuple[EventBase, synapse.events.snapshot.EventContext] - ] = None, + additional_creators: list[str] | None, + creation_event_with_context: tuple[ + EventBase, synapse.events.snapshot.EventContext + ] + | None = None, auto_member: bool = False, ) -> None: """Populate a new room based on an old room @@ -597,7 +598,7 @@ class RoomCreationHandler: initial_state: MutableStateMap = {} # Replicate relevant room events - types_to_copy: list[tuple[str, Optional[str]]] = [ + types_to_copy: list[tuple[str, str | None]] = [ (EventTypes.JoinRules, ""), (EventTypes.Name, ""), (EventTypes.Topic, ""), @@ -1039,9 +1040,9 @@ class RoomCreationHandler: requester: Requester, config: JsonDict, ratelimit: bool = True, - creator_join_profile: Optional[JsonDict] = None, + creator_join_profile: JsonDict | None = None, ignore_forced_encryption: bool = False, - ) -> tuple[str, Optional[RoomAlias], int]: + ) -> tuple[str, RoomAlias | None, int]: """Creates a new room. 
Args: @@ -1426,13 +1427,14 @@ class RoomCreationHandler: invite_list: list[str], initial_state: MutableStateMap, creation_content: JsonDict, - room_alias: Optional[RoomAlias] = None, - power_level_content_override: Optional[JsonDict] = None, - creator_join_profile: Optional[JsonDict] = None, + room_alias: RoomAlias | None = None, + power_level_content_override: JsonDict | None = None, + creator_join_profile: JsonDict | None = None, ignore_forced_encryption: bool = False, - creation_event_with_context: Optional[ - tuple[EventBase, synapse.events.snapshot.EventContext] - ] = None, + creation_event_with_context: tuple[ + EventBase, synapse.events.snapshot.EventContext + ] + | None = None, ) -> tuple[int, str, int]: """Sends the initial events into a new room. Sends the room creation, membership, and power level events into the room sequentially, then creates and batches up the @@ -1813,7 +1815,7 @@ class RoomCreationHandler: self, users_map: dict[str, int], creator: str, - additional_creators: Optional[list[str]], + additional_creators: list[str] | None, ) -> None: creators = [creator] if additional_creators: @@ -1880,9 +1882,9 @@ class RoomContextHandler: room_id: str, event_id: str, limit: int, - event_filter: Optional[Filter], + event_filter: Filter | None, use_admin_priviledge: bool = False, - ) -> Optional[EventContext]: + ) -> EventContext | None: """Retrieves events, pagination tokens and state around a given event in a room. @@ -2168,7 +2170,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]): limit: int, room_ids: StrCollection, is_guest: bool, - explicit_room_id: Optional[str] = None, + explicit_room_id: str | None = None, ) -> tuple[list[EventBase], RoomStreamToken]: # We just ignore the key for now. @@ -2244,11 +2246,10 @@ class RoomShutdownHandler: self, room_id: str, params: ShutdownRoomParams, - result: Optional[ShutdownRoomResponse] = None, - update_result_fct: Optional[ - Callable[[Optional[JsonMapping]], Awaitable[None]] - ] = None, - ) -> Optional[ShutdownRoomResponse]: + result: ShutdownRoomResponse | None = None, + update_result_fct: Callable[[JsonMapping | None], Awaitable[None]] + | None = None, + ) -> ShutdownRoomResponse | None: """ Shuts down a room. Moves all joined local users and room aliases automatically to a new room if `new_room_user_id` is set. 
Otherwise local users only diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 97a5d07c7c..6377931b39 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any import attr import msgpack @@ -67,14 +67,14 @@ class RoomListHandler: self.hs = hs self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search self.response_cache: ResponseCache[ - tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]] + tuple[int | None, str | None, ThirdPartyInstanceID | None] ] = ResponseCache( clock=hs.get_clock(), name="room_list", server_name=self.server_name, ) self.remote_response_cache: ResponseCache[ - tuple[str, Optional[int], Optional[str], bool, Optional[str]] + tuple[str, int | None, str | None, bool, str | None] ] = ResponseCache( clock=hs.get_clock(), name="remote_room_list", @@ -84,11 +84,11 @@ class RoomListHandler: async def get_local_public_room_list( self, - limit: Optional[int] = None, - since_token: Optional[str] = None, - search_filter: Optional[dict] = None, - network_tuple: Optional[ThirdPartyInstanceID] = EMPTY_THIRD_PARTY_ID, - from_federation_origin: Optional[str] = None, + limit: int | None = None, + since_token: str | None = None, + search_filter: dict | None = None, + network_tuple: ThirdPartyInstanceID | None = EMPTY_THIRD_PARTY_ID, + from_federation_origin: str | None = None, ) -> JsonDict: """Generate a local public room list. @@ -150,10 +150,10 @@ class RoomListHandler: async def _get_public_room_list( self, limit: int, - since_token: Optional[str] = None, - search_filter: Optional[dict] = None, - network_tuple: Optional[ThirdPartyInstanceID] = EMPTY_THIRD_PARTY_ID, - from_federation_origin: Optional[str] = None, + since_token: str | None = None, + search_filter: dict | None = None, + network_tuple: ThirdPartyInstanceID | None = EMPTY_THIRD_PARTY_ID, + from_federation_origin: str | None = None, ) -> JsonDict: """Generate a public room list. Args: @@ -175,7 +175,7 @@ class RoomListHandler: if since_token: batch_token = RoomListNextBatch.from_token(since_token) - bounds: Optional[tuple[int, str]] = ( + bounds: tuple[int, str] | None = ( batch_token.last_joined_members, batch_token.last_room_id, ) @@ -235,8 +235,8 @@ class RoomListHandler: # `len(room_entries) >= limit` and we might be left with rooms we didn't # 'consider' (iterate over) and we should save those rooms for the next # batch. 
- first_considered_room: Optional[LargestRoomStats] = None - last_considered_room: Optional[LargestRoomStats] = None + first_considered_room: LargestRoomStats | None = None + last_considered_room: LargestRoomStats | None = None cut_off_due_to_limit: bool = False for room_result in rooms_iterator: @@ -349,7 +349,7 @@ class RoomListHandler: cache_context: _CacheContext, with_alias: bool = True, allow_private: bool = False, - ) -> Optional[JsonMapping]: + ) -> JsonMapping | None: """Returns the entry for a room Args: @@ -455,11 +455,11 @@ class RoomListHandler: async def get_remote_public_room_list( self, server_name: str, - limit: Optional[int] = None, - since_token: Optional[str] = None, - search_filter: Optional[dict] = None, + limit: int | None = None, + since_token: str | None = None, + search_filter: dict | None = None, include_all_networks: bool = False, - third_party_instance_id: Optional[str] = None, + third_party_instance_id: str | None = None, ) -> JsonDict: """Get the public room list from remote server @@ -531,11 +531,11 @@ class RoomListHandler: async def _get_remote_list_cached( self, server_name: str, - limit: Optional[int] = None, - since_token: Optional[str] = None, - search_filter: Optional[dict] = None, + limit: int | None = None, + since_token: str | None = None, + search_filter: dict | None = None, include_all_networks: bool = False, - third_party_instance_id: Optional[str] = None, + third_party_instance_id: str | None = None, ) -> JsonDict: """Wrapper around FederationClient.get_public_rooms that caches the result. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 03cfc99260..d5f72c1732 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -23,7 +23,7 @@ import abc import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Iterable, Optional +from typing import TYPE_CHECKING, Iterable from synapse import types from synapse.api.constants import ( @@ -260,7 +260,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def remote_reject_invite( self, invite_event_id: str, - txn_id: Optional[str], + txn_id: str | None, requester: Requester, content: JsonDict, ) -> tuple[str, int]: @@ -283,7 +283,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def remote_rescind_knock( self, knock_event_id: str, - txn_id: Optional[str], + txn_id: str | None, requester: Requester, content: JsonDict, ) -> tuple[str, int]: @@ -349,8 +349,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def ratelimit_multiple_invites( self, - requester: Optional[Requester], - room_id: Optional[str], + requester: Requester | None, + room_id: str | None, n_invites: int, update: bool = True, ) -> None: @@ -374,8 +374,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def ratelimit_invite( self, - requester: Optional[Requester], - room_id: Optional[str], + requester: Requester | None, + room_id: str | None, invitee_user_id: str, ) -> None: """Ratelimit invites by room and by target user. 
@@ -396,15 +396,15 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): target: UserID, room_id: str, membership: str, - prev_event_ids: Optional[list[str]] = None, - state_event_ids: Optional[list[str]] = None, - depth: Optional[int] = None, - txn_id: Optional[str] = None, + prev_event_ids: list[str] | None = None, + state_event_ids: list[str] | None = None, + depth: int | None = None, + txn_id: str | None = None, ratelimit: bool = True, - content: Optional[dict] = None, + content: dict | None = None, require_consent: bool = True, outlier: bool = False, - origin_server_ts: Optional[int] = None, + origin_server_ts: int | None = None, ) -> tuple[str, int]: """ Internal membership update function to get an existing event or create @@ -572,18 +572,18 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): target: UserID, room_id: str, action: str, - txn_id: Optional[str] = None, - remote_room_hosts: Optional[list[str]] = None, - third_party_signed: Optional[dict] = None, + txn_id: str | None = None, + remote_room_hosts: list[str] | None = None, + third_party_signed: dict | None = None, ratelimit: bool = True, - content: Optional[dict] = None, + content: dict | None = None, new_room: bool = False, require_consent: bool = True, outlier: bool = False, - prev_event_ids: Optional[list[str]] = None, - state_event_ids: Optional[list[str]] = None, - depth: Optional[int] = None, - origin_server_ts: Optional[int] = None, + prev_event_ids: list[str] | None = None, + state_event_ids: list[str] | None = None, + depth: int | None = None, + origin_server_ts: int | None = None, ) -> tuple[str, int]: """Update a user's membership in a room. @@ -686,18 +686,18 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): target: UserID, room_id: str, action: str, - txn_id: Optional[str] = None, - remote_room_hosts: Optional[list[str]] = None, - third_party_signed: Optional[dict] = None, + txn_id: str | None = None, + remote_room_hosts: list[str] | None = None, + third_party_signed: dict | None = None, ratelimit: bool = True, - content: Optional[dict] = None, + content: dict | None = None, new_room: bool = False, require_consent: bool = True, outlier: bool = False, - prev_event_ids: Optional[list[str]] = None, - state_event_ids: Optional[list[str]] = None, - depth: Optional[int] = None, - origin_server_ts: Optional[int] = None, + prev_event_ids: list[str] | None = None, + state_event_ids: list[str] | None = None, + depth: int | None = None, + origin_server_ts: int | None = None, ) -> tuple[str, int]: """Helper for update_membership. @@ -1420,7 +1420,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def send_membership_event( self, - requester: Optional[Requester], + requester: Requester | None, event: EventBase, context: EventContext, ratelimit: bool = True, @@ -1594,7 +1594,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): return RoomID.from_string(room_id), servers - async def _get_inviter(self, user_id: str, room_id: str) -> Optional[UserID]: + async def _get_inviter(self, user_id: str, room_id: str) -> UserID | None: invite = await self.store.get_invite_for_local_user_in_room( user_id=user_id, room_id=room_id ) @@ -1610,10 +1610,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): address: str, id_server: str, requester: Requester, - txn_id: Optional[str], + txn_id: str | None, id_access_token: str, - prev_event_ids: Optional[list[str]] = None, - depth: Optional[int] = None, + prev_event_ids: list[str] | None = None, + depth: int | None = None, ) -> tuple[str, int]: """Invite a 3PID to a room. 
@@ -1724,10 +1724,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): address: str, room_id: str, user: UserID, - txn_id: Optional[str], + txn_id: str | None, id_access_token: str, - prev_event_ids: Optional[list[str]] = None, - depth: Optional[int] = None, + prev_event_ids: list[str] | None = None, + depth: int | None = None, ) -> tuple[EventBase, int]: room_state = await self._storage_controllers.state.get_current_state( room_id, @@ -1864,7 +1864,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): async def _is_remote_room_too_complex( self, room_id: str, remote_room_hosts: list[str] - ) -> Optional[bool]: + ) -> bool | None: """ Check if complexity of a remote room is too great. @@ -1977,7 +1977,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): async def remote_reject_invite( self, invite_event_id: str, - txn_id: Optional[str], + txn_id: str | None, requester: Requester, content: JsonDict, ) -> tuple[str, int]: @@ -2014,7 +2014,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): async def remote_rescind_knock( self, knock_event_id: str, - txn_id: Optional[str], + txn_id: str | None, requester: Requester, content: JsonDict, ) -> tuple[str, int]: @@ -2043,7 +2043,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): async def _generate_local_out_of_band_leave( self, previous_membership_event: EventBase, - txn_id: Optional[str], + txn_id: str | None, requester: Requester, content: JsonDict, ) -> tuple[str, int]: @@ -2180,7 +2180,7 @@ class RoomForgetterHandler(StateDeltasHandler): self._room_member_handler = hs.get_room_member_handler() # The current position in the current_state_delta stream - self.pos: Optional[int] = None + self.pos: int | None = None # Guard to ensure we only process deltas one at a time self._is_processing = False diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 0927c031f7..b56519ab0a 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.handlers.room_member import NoKnownServersError, RoomMemberHandler from synapse.replication.http.membership import ( @@ -73,7 +73,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler): async def remote_reject_invite( self, invite_event_id: str, - txn_id: Optional[str], + txn_id: str | None, requester: Requester, content: dict, ) -> tuple[str, int]: @@ -93,7 +93,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler): async def remote_rescind_knock( self, knock_event_id: str, - txn_id: Optional[str], + txn_id: str | None, requester: Requester, content: JsonDict, ) -> tuple[str, int]: diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index a3247d3cda..9ec0d33f11 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -71,7 +71,7 @@ class _PaginationKey: # during a pagination session). room_id: str suggested_only: bool - max_depth: Optional[int] + max_depth: int | None # The randomly generated token. token: str @@ -118,10 +118,10 @@ class RoomSummaryHandler: bool, bool, bool, - Optional[int], - Optional[int], - Optional[str], - Optional[tuple[str, ...]], + int | None, + int | None, + str | None, + tuple[str, ...] 
| None, ] ] = ResponseCache( clock=hs.get_clock(), @@ -137,10 +137,10 @@ class RoomSummaryHandler: suggested_only: bool = False, omit_remote_room_hierarchy: bool = False, admin_skip_room_visibility_check: bool = False, - max_depth: Optional[int] = None, - limit: Optional[int] = None, - from_token: Optional[str] = None, - remote_room_hosts: Optional[tuple[str, ...]] = None, + max_depth: int | None = None, + limit: int | None = None, + from_token: str | None = None, + remote_room_hosts: tuple[str, ...] | None = None, ) -> JsonDict: """ Implementation of the room hierarchy C-S API. @@ -208,10 +208,10 @@ class RoomSummaryHandler: suggested_only: bool = False, omit_remote_room_hierarchy: bool = False, admin_skip_room_visibility_check: bool = False, - max_depth: Optional[int] = None, - limit: Optional[int] = None, - from_token: Optional[str] = None, - remote_room_hosts: Optional[tuple[str, ...]] = None, + max_depth: int | None = None, + limit: int | None = None, + from_token: str | None = None, + remote_room_hosts: tuple[str, ...] | None = None, ) -> JsonDict: """See docstring for SpaceSummaryHandler.get_room_hierarchy.""" @@ -480,8 +480,8 @@ class RoomSummaryHandler: async def _summarize_local_room( self, - requester: Optional[str], - origin: Optional[str], + requester: str | None, + origin: str | None, room_id: str, suggested_only: bool, include_children: bool = True, @@ -594,7 +594,7 @@ class RoomSummaryHandler: ) async def _is_local_room_accessible( - self, room_id: str, requester: Optional[str], origin: Optional[str] = None + self, room_id: str, requester: str | None, origin: str | None = None ) -> bool: """ Calculate whether the room should be shown to the requester. @@ -723,7 +723,7 @@ class RoomSummaryHandler: return False async def _is_remote_room_accessible( - self, requester: Optional[str], room_id: str, room: JsonDict + self, requester: str | None, room_id: str, room: JsonDict ) -> bool: """ Calculate whether the room received over federation should be shown to the requester. @@ -864,9 +864,9 @@ class RoomSummaryHandler: async def get_room_summary( self, - requester: Optional[str], + requester: str | None, room_id: str, - remote_room_hosts: Optional[list[str]] = None, + remote_room_hosts: list[str] | None = None, ) -> JsonDict: """ Implementation of the room summary C-S API from MSC3266 @@ -965,7 +965,7 @@ class _RoomQueueEntry: depth: int = 0 # The room summary for this room returned via federation. This will only be # used if the room is not known locally (and is not a space). - remote_room: Optional[JsonDict] = None + remote_room: JsonDict | None = None @attr.s(frozen=True, slots=True, auto_attribs=True) @@ -1026,7 +1026,7 @@ _INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]") def _child_events_comparison_key( child: EventBase, -) -> tuple[bool, Optional[str], int, str]: +) -> tuple[bool, str | None, int, str]: """ Generate a value for comparing two child events for ordering. diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py index 218fbcaaa7..8f2b37c46d 100644 --- a/synapse/handlers/saml.py +++ b/synapse/handlers/saml.py @@ -20,7 +20,7 @@ # import logging import re -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Callable import attr import saml2 @@ -54,7 +54,7 @@ class Saml2SessionData: creation_time: int # The user interactive authentication session ID associated with this SAML # session (or None if this SAML session is for an initial login). 
- ui_auth_session_id: Optional[str] = None + ui_auth_session_id: str | None = None class SamlHandler: @@ -98,8 +98,8 @@ class SamlHandler: async def handle_redirect_request( self, request: SynapseRequest, - client_redirect_url: Optional[bytes], - ui_auth_session_id: Optional[str] = None, + client_redirect_url: bytes | None, + ui_auth_session_id: str | None = None, ) -> str: """Handle an incoming request to /login/sso/redirect @@ -303,7 +303,7 @@ class SamlHandler: emails=result.get("emails", []), ) - async def grandfather_existing_users() -> Optional[str]: + async def grandfather_existing_users() -> str | None: # backwards-compatibility hack: see if there is an existing user with a # suitable mapping from the uid if ( @@ -341,7 +341,7 @@ class SamlHandler: def _remote_id_from_saml_response( self, saml2_auth: saml2.response.AuthnResponse, - client_redirect_url: Optional[str], + client_redirect_url: str | None, ) -> str: """Extract the unique remote id from a SAML2 AuthnResponse diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 8f39c6ec6b..20b38427a6 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -21,7 +21,7 @@ import itertools import logging -from typing import TYPE_CHECKING, Iterable, Optional +from typing import TYPE_CHECKING, Iterable import attr from unpaddedbase64 import decode_base64, encode_base64 @@ -117,7 +117,7 @@ class SearchHandler: return historical_room_ids async def search( - self, requester: Requester, content: JsonDict, batch: Optional[str] = None + self, requester: Requester, content: JsonDict, batch: str | None = None ) -> JsonDict: """Performs a full text search for a user. @@ -226,18 +226,18 @@ class SearchHandler: async def _search( self, requester: Requester, - batch_group: Optional[str], - batch_group_key: Optional[str], - batch_token: Optional[str], + batch_group: str | None, + batch_group_key: str | None, + batch_token: str | None, search_term: str, keys: list[str], filter_dict: JsonDict, order_by: str, include_state: bool, group_keys: list[str], - event_context: Optional[bool], - before_limit: Optional[int], - after_limit: Optional[int], + event_context: bool | None, + before_limit: int | None, + after_limit: int | None, include_profile: bool, ) -> JsonDict: """Performs a full text search for a user. @@ -307,7 +307,7 @@ class SearchHandler: } } - sender_group: Optional[dict[str, JsonDict]] + sender_group: dict[str, JsonDict] | None if order_by == "rank": search_result, sender_group = await self._search_by_rank( @@ -517,10 +517,10 @@ class SearchHandler: search_term: str, keys: Iterable[str], search_filter: Filter, - batch_group: Optional[str], - batch_group_key: Optional[str], - batch_token: Optional[str], - ) -> tuple[_SearchResult, Optional[str]]: + batch_group: str | None, + batch_group_key: str | None, + batch_token: str | None, + ) -> tuple[_SearchResult, str | None]: """ Performs a full text search for a user ordering by recent. 
diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 02fd48dbad..8cdf9c6a87 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -24,7 +24,7 @@ import logging from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from io import BytesIO -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.internet.defer import Deferred from twisted.internet.endpoints import HostnameEndpoint @@ -49,13 +49,13 @@ async def _sendmail( from_addr: str, to_addr: str, msg_bytes: bytes, - username: Optional[bytes] = None, - password: Optional[bytes] = None, + username: bytes | None = None, + password: bytes | None = None, require_auth: bool = False, require_tls: bool = False, enable_tls: bool = True, force_tls: bool = False, - tlsname: Optional[str] = None, + tlsname: str | None = None, ) -> None: """A simple wrapper around ESMTPSenderFactory, to allow substitution in tests @@ -136,7 +136,7 @@ class SendEmailHandler: app_name: str, html: str, text: str, - additional_headers: Optional[dict[str, str]] = None, + additional_headers: dict[str, str] | None = None, ) -> None: """Send a multipart email with the given information. diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py index 54116a9b72..042cb4e1b5 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.errors import Codes, StoreError, SynapseError from synapse.types import Requester @@ -42,7 +42,7 @@ class SetPasswordHandler: user_id: str, password_hash: str, logout_devices: bool, - requester: Optional[Requester] = None, + requester: Requester | None = None, ) -> None: if not self._auth_handler.can_change_password(): raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN) diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index cea4b857ee..6a5d5c7b3c 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -15,7 +15,7 @@ import itertools import logging from itertools import chain -from typing import TYPE_CHECKING, AbstractSet, Mapping, Optional +from typing import TYPE_CHECKING, AbstractSet, Mapping from prometheus_client import Histogram from typing_extensions import assert_never @@ -114,7 +114,7 @@ class SlidingSyncHandler: self, requester: Requester, sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken] = None, + from_token: SlidingSyncStreamToken | None = None, timeout_ms: int = 0, ) -> tuple[SlidingSyncResult, bool]: """ @@ -201,7 +201,7 @@ class SlidingSyncHandler: self, sync_config: SlidingSyncConfig, to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken] = None, + from_token: SlidingSyncStreamToken | None = None, ) -> SlidingSyncResult: """ Generates the response body of a Sliding Sync result, represented as a @@ -550,7 +550,7 @@ class SlidingSyncHandler: room_id: str, room_sync_config: RoomSyncConfig, room_membership_for_user_at_to_token: RoomsForUserType, - from_token: Optional[SlidingSyncStreamToken], + from_token: SlidingSyncStreamToken | None, to_token: StreamToken, newly_joined: bool, newly_left: bool, @@ -678,10 +678,10 @@ class SlidingSyncHandler: # `invite`/`knock` rooms only have `stripped_state`. 
See # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 timeline_events: list[EventBase] = [] - bundled_aggregations: Optional[dict[str, BundledAggregations]] = None - limited: Optional[bool] = None - prev_batch_token: Optional[StreamToken] = None - num_live: Optional[int] = None + bundled_aggregations: dict[str, BundledAggregations] | None = None + limited: bool | None = None + prev_batch_token: StreamToken | None = None + num_live: int | None = None if ( room_sync_config.timeline_limit > 0 # No timeline for invite/knock rooms (just `stripped_state`) @@ -850,7 +850,7 @@ class SlidingSyncHandler: # For incremental syncs, we can do this first to determine if something relevant # has changed and strategically avoid fetching other costly things. room_state_delta_id_map: MutableStateMap[str] = {} - name_event_id: Optional[str] = None + name_event_id: str | None = None membership_changed = False name_changed = False avatar_changed = False @@ -914,7 +914,7 @@ class SlidingSyncHandler: # We only need the room summary for calculating heroes, however if we do # fetch it then we can use it to calculate `joined_count` and # `invited_count`. - room_membership_summary: Optional[Mapping[str, MemberSummary]] = None + room_membership_summary: Mapping[str, MemberSummary] | None = None # `heroes` are required if the room name is not set. # @@ -950,8 +950,8 @@ class SlidingSyncHandler: # # Similarly to other metadata, we only need to calculate the member # counts if this is an initial sync or the memberships have changed. - joined_count: Optional[int] = None - invited_count: Optional[int] = None + joined_count: int | None = None + invited_count: int | None = None if ( initial or membership_changed ) and room_membership_for_user_at_to_token.membership == Membership.JOIN: @@ -1036,7 +1036,7 @@ class SlidingSyncHandler: ) required_state_filter = StateFilter.all() else: - required_state_types: list[tuple[str, Optional[str]]] = [] + required_state_types: list[tuple[str, str | None]] = [] num_wild_state_keys = 0 lazy_load_room_members = False num_others = 0 @@ -1146,7 +1146,7 @@ class SlidingSyncHandler: # The required state map to store in the room sync config, if it has # changed. - changed_required_state_map: Optional[Mapping[str, AbstractSet[str]]] = None + changed_required_state_map: Mapping[str, AbstractSet[str]] | None = None # We can return all of the state that was requested if this was the first # time we've sent the room down this connection. @@ -1205,7 +1205,7 @@ class SlidingSyncHandler: required_room_state = required_state_filter.filter_state(room_state) # Find the room name and avatar from the state - room_name: Optional[str] = None + room_name: str | None = None # TODO: Should we also check for `EventTypes.CanonicalAlias` # (`m.room.canonical_alias`) as a fallback for the room name? see # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 @@ -1213,7 +1213,7 @@ class SlidingSyncHandler: if name_event is not None: room_name = name_event.content.get("name") - room_avatar: Optional[str] = None + room_avatar: str | None = None avatar_event = room_state.get((EventTypes.RoomAvatar, "")) if avatar_event is not None: room_avatar = avatar_event.content.get("url") @@ -1376,7 +1376,7 @@ class SlidingSyncHandler: to_token: StreamToken, timeline: list[EventBase], check_outside_timeline: bool, - ) -> Optional[int]: + ) -> int | None: """Get a bump stamp for the room, if we have a bump event and it has changed. 
@@ -1479,7 +1479,7 @@ def _required_state_changes( prev_required_state_map: Mapping[str, AbstractSet[str]], request_required_state_map: Mapping[str, AbstractSet[str]], state_deltas: StateMap[str], -) -> tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]: +) -> tuple[Mapping[str, AbstractSet[str]] | None, StateFilter]: """Calculates the changes between the required state room config from the previous requests compared with the current request. @@ -1528,7 +1528,7 @@ def _required_state_changes( # The set of types/state keys that we need to fetch and return to the # client. Passed to `StateFilter.from_types(...)` - added: list[tuple[str, Optional[str]]] = [] + added: list[tuple[str, str | None]] = [] # Convert the list of state deltas to map from type to state_keys that have # changed. diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 221af86f7d..d076bec51a 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -20,7 +20,6 @@ from typing import ( ChainMap, Mapping, MutableMapping, - Optional, Sequence, cast, ) @@ -86,7 +85,7 @@ class SlidingSyncExtensionHandler: actual_room_ids: set[str], actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], + from_token: SlidingSyncStreamToken | None, ) -> SlidingSyncResult.Extensions: """Handle extension requests. @@ -202,8 +201,8 @@ class SlidingSyncExtensionHandler: def find_relevant_room_ids_for_extension( self, - requested_lists: Optional[StrCollection], - requested_room_ids: Optional[StrCollection], + requested_lists: StrCollection | None, + requested_room_ids: StrCollection | None, actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], actual_room_ids: AbstractSet[str], ) -> set[str]: @@ -246,7 +245,7 @@ class SlidingSyncExtensionHandler: if requested_lists is not None: for list_key in requested_lists: # Just some typing because we share the variable name in multiple places - actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None + actual_list: SlidingSyncResult.SlidingWindowList | None = None # A wildcard means we process rooms from all lists if list_key == "*": @@ -277,7 +276,7 @@ class SlidingSyncExtensionHandler: sync_config: SlidingSyncConfig, to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension, to_token: StreamToken, - ) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]: + ) -> SlidingSyncResult.Extensions.ToDeviceExtension | None: """Handle to-device extension (MSC3885) Args: @@ -352,8 +351,8 @@ class SlidingSyncExtensionHandler: sync_config: SlidingSyncConfig, e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension, to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]: + from_token: SlidingSyncStreamToken | None, + ) -> SlidingSyncResult.Extensions.E2eeExtension | None: """Handle E2EE device extension (MSC3884) Args: @@ -369,7 +368,7 @@ class SlidingSyncExtensionHandler: if not e2ee_request.enabled: return None - device_list_updates: Optional[DeviceListUpdates] = None + device_list_updates: DeviceListUpdates | None = None if from_token is not None: # TODO: This should take into account the `from_token` and `to_token` device_list_updates = await self.device_handler.get_user_ids_changed( @@ -407,8 +406,8 @@ class SlidingSyncExtensionHandler: actual_room_ids: set[str], account_data_request: 
SlidingSyncConfig.Extensions.AccountDataExtension, to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.AccountDataExtension]: + from_token: SlidingSyncStreamToken | None, + ) -> SlidingSyncResult.Extensions.AccountDataExtension | None: """Handle Account Data extension (MSC3959) Args: @@ -640,8 +639,8 @@ class SlidingSyncExtensionHandler: actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension, to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.ReceiptsExtension]: + from_token: SlidingSyncStreamToken | None, + ) -> SlidingSyncResult.Extensions.ReceiptsExtension | None: """Handle Receipts extension (MSC3960) Args: @@ -844,8 +843,8 @@ class SlidingSyncExtensionHandler: actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], typing_request: SlidingSyncConfig.Extensions.TypingExtension, to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.TypingExtension]: + from_token: SlidingSyncStreamToken | None, + ) -> SlidingSyncResult.Extensions.TypingExtension | None: """Handle Typing Notification extension (MSC3961) Args: @@ -905,8 +904,8 @@ class SlidingSyncExtensionHandler: sync_config: SlidingSyncConfig, thread_subscriptions_request: SlidingSyncConfig.Extensions.ThreadSubscriptionsExtension, to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.ThreadSubscriptionsExtension]: + from_token: SlidingSyncStreamToken | None, + ) -> SlidingSyncResult.Extensions.ThreadSubscriptionsExtension | None: """Handle Thread Subscriptions extension (MSC4308) Args: diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index fc77fd3c65..3d11902236 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -21,8 +21,6 @@ from typing import ( Literal, Mapping, MutableMapping, - Optional, - Union, cast, ) @@ -81,7 +79,7 @@ logger = logging.getLogger(__name__) # Helper definition for the types that we might return. We do this to avoid # copying data between types (which can be expensive for many rooms). 
-RoomsForUserType = Union[RoomsForUserStateReset, RoomsForUser, RoomsForUserSlidingSync] +RoomsForUserType = RoomsForUserStateReset | RoomsForUser | RoomsForUserSlidingSync @attr.s(auto_attribs=True, slots=True, frozen=True) @@ -184,7 +182,7 @@ class SlidingSyncRoomLists: sync_config: SlidingSyncConfig, previous_connection_state: "PerConnectionState", to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: StreamToken | None, ) -> SlidingSyncInterestedRooms: """Fetch the set of rooms that match the request""" has_lists = sync_config.lists is not None and len(sync_config.lists) > 0 @@ -221,7 +219,7 @@ class SlidingSyncRoomLists: sync_config: SlidingSyncConfig, previous_connection_state: "PerConnectionState", to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: StreamToken | None, ) -> SlidingSyncInterestedRooms: """Implementation of `compute_interested_rooms` using new sliding sync db tables.""" user_id = sync_config.user.to_string() @@ -620,7 +618,7 @@ class SlidingSyncRoomLists: sync_config: SlidingSyncConfig, previous_connection_state: "PerConnectionState", to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: StreamToken | None, ) -> SlidingSyncInterestedRooms: """Fallback code when the database background updates haven't completed yet.""" @@ -806,7 +804,7 @@ class SlidingSyncRoomLists: async def _filter_relevant_rooms_to_send( self, previous_connection_state: PerConnectionState, - from_token: Optional[StreamToken], + from_token: StreamToken | None, relevant_room_map: dict[str, RoomSyncConfig], ) -> dict[str, RoomSyncConfig]: """Filters the `relevant_room_map` down to those rooms that may have @@ -879,7 +877,7 @@ class SlidingSyncRoomLists: user: UserID, rooms_for_user: Mapping[str, RoomsForUserType], to_token: StreamToken, - ) -> Mapping[str, Optional[RoomsForUser]]: + ) -> Mapping[str, RoomsForUser | None]: """ Takes the current set of rooms for a user (retrieved after the given token), and returns the changes needed to "rewind" it to match the set of @@ -962,7 +960,7 @@ class SlidingSyncRoomLists: # Otherwise we're about to make changes to `rooms_for_user`, so we turn # it into a mutable dict. - changes: dict[str, Optional[RoomsForUser]] = {} + changes: dict[str, RoomsForUser | None] = {} # Assemble a list of the first membership event after the `to_token` so we can # step backward to the previous membership that would apply to the from/to @@ -1028,7 +1026,7 @@ class SlidingSyncRoomLists: self, user: UserID, to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: StreamToken | None, ) -> tuple[dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Fetch room IDs that the user has had membership in (the full room list including @@ -1138,7 +1136,7 @@ class SlidingSyncRoomLists: self, user_id: str, to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: StreamToken | None, ) -> tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: """Fetch the sets of rooms that the user newly joined or left in the given token range. @@ -1185,7 +1183,7 @@ class SlidingSyncRoomLists: self, user_id: str, to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: StreamToken | None, ) -> tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: """Fetch the sets of rooms that the user newly joined or left in the given token range. 
@@ -1400,7 +1398,7 @@ class SlidingSyncRoomLists: room_id: str, room_membership_for_user_map: dict[str, RoomsForUserType], to_token: StreamToken, - ) -> Optional[RoomsForUserType]: + ) -> RoomsForUserType | None: """ Check whether the user is allowed to see the room based on whether they have ever had membership in the room or if the room is `world_readable`. @@ -1466,7 +1464,7 @@ class SlidingSyncRoomLists: self, room_ids: StrCollection, sync_room_map: dict[str, RoomsForUserType], - ) -> dict[str, Optional[StateMap[StrippedStateEvent]]]: + ) -> dict[str, StateMap[StrippedStateEvent] | None]: """ Fetch stripped state for a list of room IDs. Stripped state is only applicable to invite/knock rooms. Other rooms will have `None` as their @@ -1485,7 +1483,7 @@ class SlidingSyncRoomLists: event. """ room_id_to_stripped_state_map: dict[ - str, Optional[StateMap[StrippedStateEvent]] + str, StateMap[StrippedStateEvent] | None ] = {} # Fetch what we haven't before @@ -1530,7 +1528,7 @@ class SlidingSyncRoomLists: f"Unexpected membership {membership} (this is a problem with Synapse itself)" ) - stripped_state_map: Optional[MutableStateMap[StrippedStateEvent]] = None + stripped_state_map: MutableStateMap[StrippedStateEvent] | None = None # Scrutinize unsigned things. `raw_stripped_state_events` should be a list # of stripped events if raw_stripped_state_events is not None: @@ -1564,10 +1562,8 @@ class SlidingSyncRoomLists: room_ids: set[str], sync_room_map: dict[str, RoomsForUserType], to_token: StreamToken, - room_id_to_stripped_state_map: dict[ - str, Optional[StateMap[StrippedStateEvent]] - ], - ) -> Mapping[str, Union[Optional[str], StateSentinel]]: + room_id_to_stripped_state_map: dict[str, StateMap[StrippedStateEvent] | None], + ) -> Mapping[str, str | None | StateSentinel]: """ Get the given state event content for a list of rooms. First we check the current state of the room, then fallback to stripped state if available, then @@ -1589,7 +1585,7 @@ class SlidingSyncRoomLists: the given state event (event_type, ""), otherwise `None`. Rooms unknown to this server will return `ROOM_UNKNOWN_SENTINEL`. """ - room_id_to_content: dict[str, Union[Optional[str], StateSentinel]] = {} + room_id_to_content: dict[str, str | None | StateSentinel] = {} # As a bulk shortcut, use the current state if the server is particpating in the # room (meaning we have current state). 
Ideally, for leave/ban rooms, we would @@ -1750,7 +1746,7 @@ class SlidingSyncRoomLists: user_id = user.to_string() room_id_to_stripped_state_map: dict[ - str, Optional[StateMap[StrippedStateEvent]] + str, StateMap[StrippedStateEvent] | None ] = {} filtered_room_id_set = set(sync_room_map.keys()) @@ -2107,7 +2103,7 @@ class SlidingSyncRoomLists: self, sync_room_map: dict[str, RoomsForUserType], to_token: StreamToken, - limit: Optional[int] = None, + limit: int | None = None, ) -> list[RoomsForUserType]: """ Sort by `stream_ordering` of the last event that the user should see in the diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py index d24fccf76f..7bcd5f27ea 100644 --- a/synapse/handlers/sliding_sync/store.py +++ b/synapse/handlers/sliding_sync/store.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import attr @@ -66,7 +66,7 @@ class SlidingSyncConnectionStore: async def get_and_clear_connection_positions( self, sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken], + from_token: SlidingSyncStreamToken | None, ) -> PerConnectionState: """Fetch the per-connection state for the token. @@ -93,7 +93,7 @@ class SlidingSyncConnectionStore: async def record_new_state( self, sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken], + from_token: SlidingSyncStreamToken | None, new_connection_state: MutablePerConnectionState, ) -> int: """Record updated per-connection state, returning the connection diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 641241287e..ebbe7afa84 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -30,7 +30,6 @@ from typing import ( Iterable, Mapping, NoReturn, - Optional, Protocol, ) from urllib.parse import urlencode @@ -102,12 +101,12 @@ class SsoIdentityProvider(Protocol): """User-facing name for this provider""" @property - def idp_icon(self) -> Optional[str]: + def idp_icon(self) -> str | None: """Optional MXC URI for user-facing icon""" return None @property - def idp_brand(self) -> Optional[str]: + def idp_brand(self) -> str | None: """Optional branding identifier""" return None @@ -115,8 +114,8 @@ class SsoIdentityProvider(Protocol): async def handle_redirect_request( self, request: SynapseRequest, - client_redirect_url: Optional[bytes], - ui_auth_session_id: Optional[str] = None, + client_redirect_url: bytes | None, + ui_auth_session_id: str | None = None, ) -> str: """Handle an incoming request to /login/sso/redirect @@ -141,10 +140,10 @@ class UserAttributes: # the localpart of the mxid that the mapper has assigned to the user. # if `None`, the mapper has not picked a userid, and the user should be prompted to # enter one. - localpart: Optional[str] + localpart: str | None confirm_localpart: bool = False - display_name: Optional[str] = None - picture: Optional[str] = None + display_name: str | None = None + picture: str | None = None # mypy thinks these are incompatible for some reason. emails: StrCollection = attr.Factory(list) @@ -157,19 +156,19 @@ class UsernameMappingSession: auth_provider_id: str # An optional session ID from the IdP. 
- auth_provider_session_id: Optional[str] + auth_provider_session_id: str | None # user ID on the IdP server remote_user_id: str # attributes returned by the ID mapper - display_name: Optional[str] + display_name: str | None emails: StrCollection - avatar_url: Optional[str] + avatar_url: str | None # An optional dictionary of extra attributes to be provided to the client in the # login response. - extra_login_attributes: Optional[JsonDict] + extra_login_attributes: JsonDict | None # where to redirect the client back to client_redirect_url: str @@ -178,11 +177,11 @@ class UsernameMappingSession: expiry_time_ms: int # choices made by the user - chosen_localpart: Optional[str] = None + chosen_localpart: str | None = None use_display_name: bool = True use_avatar: bool = True emails_to_use: StrCollection = () - terms_accepted_version: Optional[str] = None + terms_accepted_version: str | None = None # the HTTP cookie used to track the mapping session id @@ -278,7 +277,7 @@ class SsoHandler: self, request: Request, error: str, - error_description: Optional[str] = None, + error_description: str | None = None, code: int = 400, ) -> None: """Renders the error template and responds with it. @@ -302,7 +301,7 @@ class SsoHandler: self, request: SynapseRequest, client_redirect_url: bytes, - idp_id: Optional[str], + idp_id: str | None, ) -> str: """Handle a request to /login/sso/redirect @@ -321,7 +320,7 @@ class SsoHandler: ) # if the client chose an IdP, use that - idp: Optional[SsoIdentityProvider] = None + idp: SsoIdentityProvider | None = None if idp_id: idp = self._identity_providers.get(idp_id) if not idp: @@ -341,7 +340,7 @@ class SsoHandler: async def get_sso_user_by_remote_user_id( self, auth_provider_id: str, remote_user_id: str - ) -> Optional[str]: + ) -> str | None: """ Maps the user ID of a remote IdP to a mxid for a previously seen user. @@ -389,9 +388,9 @@ class SsoHandler: request: SynapseRequest, client_redirect_url: str, sso_to_matrix_id_mapper: Callable[[int], Awaitable[UserAttributes]], - grandfather_existing_users: Callable[[], Awaitable[Optional[str]]], - extra_login_attributes: Optional[JsonDict] = None, - auth_provider_session_id: Optional[str] = None, + grandfather_existing_users: Callable[[], Awaitable[str | None]], + extra_login_attributes: JsonDict | None = None, + auth_provider_session_id: str | None = None, registration_enabled: bool = True, ) -> None: """ @@ -582,8 +581,8 @@ class SsoHandler: def _get_url_for_next_new_user_step( self, - attributes: Optional[UserAttributes] = None, - session: Optional[UsernameMappingSession] = None, + attributes: UserAttributes | None = None, + session: UsernameMappingSession | None = None, ) -> bytes: """Returns the URL to redirect to for the next step of new user registration @@ -622,8 +621,8 @@ class SsoHandler: attributes: UserAttributes, client_redirect_url: str, next_step_url: bytes, - extra_login_attributes: Optional[JsonDict], - auth_provider_session_id: Optional[str], + extra_login_attributes: JsonDict | None, + auth_provider_session_id: str | None, ) -> NoReturn: """Creates a UsernameMappingSession and redirects the browser @@ -1175,7 +1174,7 @@ class SsoHandler: self, auth_provider_id: str, auth_provider_session_id: str, - expected_user_id: Optional[str] = None, + expected_user_id: str | None = None, ) -> None: """Revoke any devices and in-flight logins tied to a provider session. 
diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py index 2fbe407a63..db63f0483d 100644 --- a/synapse/handlers/state_deltas.py +++ b/synapse/handlers/state_deltas.py @@ -20,7 +20,7 @@ import logging from enum import Enum, auto -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING if TYPE_CHECKING: from synapse.server import HomeServer @@ -40,8 +40,8 @@ class StateDeltasHandler: async def _get_key_change( self, - prev_event_id: Optional[str], - event_id: Optional[str], + prev_event_id: str | None, + event_id: str | None, key_name: str, public_value: str, ) -> MatchChange: diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 0804f72c47..6d661453ac 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -26,7 +26,6 @@ from typing import ( Any, Counter as CounterType, Iterable, - Optional, ) from synapse.api.constants import EventContentFields, EventTypes, Membership @@ -62,7 +61,7 @@ class StatsHandler: self.stats_enabled = hs.config.stats.stats_enabled # The current position in the current_state_delta stream - self.pos: Optional[int] = None + self.pos: int | None = None # Guard to ensure we only process deltas one at a time self._is_processing = False diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a19b75203b..b534e24698 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -25,7 +25,6 @@ from typing import ( AbstractSet, Any, Mapping, - Optional, Sequence, ) @@ -116,7 +115,7 @@ class SyncConfig: user: UserID filter_collection: FilterCollection is_guest: bool - device_id: Optional[str] + device_id: str | None use_state_after: bool @@ -127,7 +126,7 @@ class TimelineBatch: limited: bool # A mapping of event ID to the bundled aggregations for the above events. # This is only calculated if limited is true. - bundled_aggregations: Optional[dict[str, BundledAggregations]] = None + bundled_aggregations: dict[str, BundledAggregations] | None = None def __bool__(self) -> bool: """Make the result appear empty if there are no updates. 
This is used @@ -150,7 +149,7 @@ class JoinedSyncResult: account_data: list[JsonDict] unread_notifications: JsonDict unread_thread_notifications: JsonDict - summary: Optional[JsonDict] + summary: JsonDict | None unread_count: int def __bool__(self) -> bool: @@ -314,7 +313,7 @@ class SyncHandler: # ExpiringCache((User, Device)) -> LruCache(user_id => event_id) self.lazy_loaded_members_cache: ExpiringCache[ - tuple[str, Optional[str]], LruCache[str, str] + tuple[str, str | None], LruCache[str, str] ] = ExpiringCache( cache_name="lazy_loaded_members_cache", server_name=self.server_name, @@ -331,7 +330,7 @@ class SyncHandler: requester: Requester, sync_config: SyncConfig, request_key: SyncRequestKey, - since_token: Optional[StreamToken] = None, + since_token: StreamToken | None = None, timeout: int = 0, full_state: bool = False, ) -> SyncResult: @@ -372,7 +371,7 @@ class SyncHandler: async def _wait_for_sync_for_user( self, sync_config: SyncConfig, - since_token: Optional[StreamToken], + since_token: StreamToken | None, timeout: int, full_state: bool, cache_context: ResponseCacheContext[SyncRequestKey], @@ -502,7 +501,7 @@ class SyncHandler: async def current_sync_for_user( self, sync_config: SyncConfig, - since_token: Optional[StreamToken] = None, + since_token: StreamToken | None = None, full_state: bool = False, ) -> SyncResult: """ @@ -537,7 +536,7 @@ class SyncHandler: self, sync_result_builder: "SyncResultBuilder", now_token: StreamToken, - since_token: Optional[StreamToken] = None, + since_token: StreamToken | None = None, ) -> tuple[StreamToken, dict[str, list[JsonDict]]]: """Get the ephemeral events for each room the user is in Args: @@ -604,8 +603,8 @@ class SyncHandler: sync_result_builder: "SyncResultBuilder", sync_config: SyncConfig, upto_token: StreamToken, - since_token: Optional[StreamToken] = None, - potential_recents: Optional[list[EventBase]] = None, + since_token: StreamToken | None = None, + potential_recents: list[EventBase] | None = None, newly_joined_room: bool = False, ) -> TimelineBatch: """Create a timeline batch for the room @@ -850,7 +849,7 @@ class SyncHandler: batch: TimelineBatch, state: MutableStateMap[EventBase], now_token: StreamToken, - ) -> Optional[JsonDict]: + ) -> JsonDict | None: """Works out a room summary block for this room, summarising the number of joined members in the room, and providing the 'hero' members if the room has no name so clients can consistently name rooms. Also adds @@ -963,11 +962,9 @@ class SyncHandler: return summary def get_lazy_loaded_members_cache( - self, cache_key: tuple[str, Optional[str]] + self, cache_key: tuple[str, str | None] ) -> LruCache[str, str]: - cache: Optional[LruCache[str, str]] = self.lazy_loaded_members_cache.get( - cache_key - ) + cache: LruCache[str, str] | None = self.lazy_loaded_members_cache.get(cache_key) if cache is None: logger.debug("creating LruCache for %r", cache_key) cache = LruCache( @@ -985,7 +982,7 @@ class SyncHandler: room_id: str, batch: TimelineBatch, sync_config: SyncConfig, - since_token: Optional[StreamToken], + since_token: StreamToken | None, end_token: StreamToken, full_state: bool, joined: bool, @@ -1024,11 +1021,11 @@ class SyncHandler: ): # The memberships needed for events in the timeline. # Only calculated when `lazy_load_members` is on. - members_to_fetch: Optional[set[str]] = None + members_to_fetch: set[str] | None = None # A dictionary mapping user IDs to the first event in the timeline sent by # them. Only calculated when `lazy_load_members` is on. 
- first_event_by_sender_map: Optional[dict[str, EventBase]] = None + first_event_by_sender_map: dict[str, EventBase] | None = None # The contribution to the room state from state events in the timeline. # Only contains the last event for any given state key. @@ -1172,7 +1169,7 @@ class SyncHandler: sync_config: SyncConfig, batch: TimelineBatch, end_token: StreamToken, - members_to_fetch: Optional[set[str]], + members_to_fetch: set[str] | None, timeline_state: StateMap[str], joined: bool, ) -> StateMap[str]: @@ -1322,7 +1319,7 @@ class SyncHandler: batch: TimelineBatch, since_token: StreamToken, end_token: StreamToken, - members_to_fetch: Optional[set[str]], + members_to_fetch: set[str] | None, timeline_state: StateMap[str], ) -> StateMap[str]: """Calculate the state events to be included in an incremental sync response. @@ -1649,7 +1646,7 @@ class SyncHandler: async def generate_sync_result( self, sync_config: SyncConfig, - since_token: Optional[StreamToken] = None, + since_token: StreamToken | None = None, full_state: bool = False, ) -> SyncResult: """Generates the response body of a sync result. @@ -1804,7 +1801,7 @@ class SyncHandler: async def get_sync_result_builder( self, sync_config: SyncConfig, - since_token: Optional[StreamToken] = None, + since_token: StreamToken | None = None, full_state: bool = False, ) -> "SyncResultBuilder": """ @@ -2439,7 +2436,7 @@ class SyncHandler: # This is all screaming out for a refactor, as the logic here is # subtle and the moving parts numerous. if leave_event.internal_metadata.is_out_of_band_membership(): - batch_events: Optional[list[EventBase]] = [leave_event] + batch_events: list[EventBase] | None = [leave_event] else: batch_events = None @@ -2608,7 +2605,7 @@ class SyncHandler: sync_result_builder: "SyncResultBuilder", room_builder: "RoomSyncResultBuilder", ephemeral: list[JsonDict], - tags: Optional[Mapping[str, JsonMapping]], + tags: Mapping[str, JsonMapping] | None, account_data: Mapping[str, JsonMapping], always_include: bool = False, ) -> None: @@ -2758,7 +2755,7 @@ class SyncHandler: # An out of band room won't have any state changes. 
state = {} - summary: Optional[JsonDict] = {} + summary: JsonDict | None = {} # we include a summary in room responses when we're lazy loading # members (as the client otherwise doesn't have enough info to form @@ -3007,7 +3004,7 @@ class SyncResultBuilder: sync_config: SyncConfig full_state: bool - since_token: Optional[StreamToken] + since_token: StreamToken | None now_token: StreamToken joined_room_ids: frozenset[str] excluded_room_ids: frozenset[str] @@ -3100,10 +3097,10 @@ class RoomSyncResultBuilder: room_id: str rtype: str - events: Optional[list[EventBase]] + events: list[EventBase] | None newly_joined: bool full_state: bool - since_token: Optional[StreamToken] + since_token: StreamToken | None upto_token: StreamToken end_token: StreamToken out_of_band: bool = False diff --git a/synapse/handlers/thread_subscriptions.py b/synapse/handlers/thread_subscriptions.py index d56c915e0a..539672c7fe 100644 --- a/synapse/handlers/thread_subscriptions.py +++ b/synapse/handlers/thread_subscriptions.py @@ -1,6 +1,6 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.constants import RelationTypes from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError @@ -29,7 +29,7 @@ class ThreadSubscriptionsHandler: user_id: UserID, room_id: str, thread_root_event_id: str, - ) -> Optional[ThreadSubscription]: + ) -> ThreadSubscription | None: """Get thread subscription settings for a specific thread and user. Checks that the thread root is both a real event and also that it is visible to the user. @@ -62,8 +62,8 @@ class ThreadSubscriptionsHandler: room_id: str, thread_root_event_id: str, *, - automatic_event_id: Optional[str], - ) -> Optional[int]: + automatic_event_id: str | None, + ) -> int | None: """Sets or updates a user's subscription settings for a specific thread root. Args: @@ -146,7 +146,7 @@ class ThreadSubscriptionsHandler: async def unsubscribe_user_from_thread( self, user_id: UserID, room_id: str, thread_root_event_id: str - ) -> Optional[int]: + ) -> int | None: """Clears a user's subscription settings for a specific thread root. Args: diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 17e43858c9..8b577d5d58 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -20,7 +20,7 @@ # import logging import random -from typing import TYPE_CHECKING, Iterable, Optional +from typing import TYPE_CHECKING, Iterable import attr @@ -576,8 +576,8 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]): limit: int, room_ids: Iterable[str], is_guest: bool, - explicit_room_id: Optional[str] = None, - to_key: Optional[int] = None, + explicit_room_id: str | None = None, + to_key: int | None = None, ) -> tuple[list[JsonMapping], int]: """ Find typing notifications for given rooms (> `from_token` and <= `to_token`) diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index cbae33eaec..a0097dbc96 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -136,7 +136,7 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker): except PartialDownloadError as pde: # Twisted is silly data = pde.response - # For mypy's benefit. A general Error.response is Optional[bytes], but + # For mypy's benefit. A general Error.response is bytes | None, but # a PartialDownloadError.response should be bytes AFAICS. 
assert data is not None resp_body = json_decoder.decode(data.decode("utf-8")) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index fd05aff4c8..e5210a3e97 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -21,7 +21,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.internet.interfaces import IDelayedCall @@ -116,7 +116,7 @@ class UserDirectoryHandler(StateDeltasHandler): self._hs = hs # The current position in the current_state_delta stream - self.pos: Optional[int] = None + self.pos: int | None = None # Guard to ensure we only process deltas one at a time self._is_processing = False @@ -124,7 +124,7 @@ class UserDirectoryHandler(StateDeltasHandler): # Guard to ensure we only have one process for refreshing remote profiles self._is_refreshing_remote_profiles = False # Handle to cancel the `call_later` of `kick_off_remote_profile_refresh_process` - self._refresh_remote_profiles_call_later: Optional[IDelayedCall] = None + self._refresh_remote_profiles_call_later: IDelayedCall | None = None # Guard to ensure we only have one process for refreshing remote profiles # for the given servers. @@ -299,8 +299,8 @@ class UserDirectoryHandler(StateDeltasHandler): async def _handle_room_publicity_change( self, room_id: str, - prev_event_id: Optional[str], - event_id: Optional[str], + prev_event_id: str | None, + event_id: str | None, typ: str, ) -> None: """Handle a room having potentially changed from/to world_readable/publicly @@ -372,8 +372,8 @@ class UserDirectoryHandler(StateDeltasHandler): async def _handle_room_membership_event( self, room_id: str, - prev_event_id: Optional[str], - event_id: Optional[str], + prev_event_id: str | None, + event_id: str | None, state_key: str, ) -> None: """Process a single room membershp event. @@ -519,7 +519,7 @@ class UserDirectoryHandler(StateDeltasHandler): self, user_id: str, room_id: str, - prev_event_id: Optional[str], + prev_event_id: str | None, event_id: str, ) -> None: """Check member event changes for any profile changes and update the diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py index af5498c560..3e097d21f2 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py @@ -26,8 +26,6 @@ from typing import ( TYPE_CHECKING, AsyncContextManager, Collection, - Optional, - Union, ) from weakref import WeakSet @@ -72,9 +70,7 @@ class WorkerLocksHandler: # Map from lock name/key to set of `WaitingLock` that are active for # that lock. 
- self._locks: dict[ - tuple[str, str], WeakSet[Union[WaitingLock, WaitingMultiLock]] - ] = {} + self._locks: dict[tuple[str, str], WeakSet[WaitingLock | WaitingMultiLock]] = {} self._clock.looping_call(self._cleanup_locks, 30_000) @@ -185,7 +181,7 @@ class WorkerLocksHandler: return def _wake_all_locks( - locks: Collection[Union[WaitingLock, WaitingMultiLock]], + locks: Collection[WaitingLock | WaitingMultiLock], ) -> None: for lock in locks: deferred = lock.deferred @@ -211,9 +207,9 @@ class WaitingLock: handler: WorkerLocksHandler lock_name: str lock_key: str - write: Optional[bool] + write: bool | None deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred) - _inner_lock: Optional[Lock] = None + _inner_lock: Lock | None = None _retry_interval: float = 0.1 _lock_span: "opentracing.Scope" = attr.Factory( lambda: start_active_span("WaitingLock.lock") @@ -258,10 +254,10 @@ class WaitingLock: async def __aexit__( self, - exc_type: Optional[type[BaseException]], - exc: Optional[BaseException], - tb: Optional[TracebackType], - ) -> Optional[bool]: + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: TracebackType | None, + ) -> bool | None: assert self._inner_lock self.handler.notify_lock_released(self.lock_name, self.lock_key) @@ -296,7 +292,7 @@ class WaitingMultiLock: deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred) - _inner_lock_cm: Optional[AsyncContextManager] = None + _inner_lock_cm: AsyncContextManager | None = None _retry_interval: float = 0.1 _lock_span: "opentracing.Scope" = attr.Factory( lambda: start_active_span("WaitingLock.lock") @@ -338,10 +334,10 @@ class WaitingMultiLock: async def __aexit__( self, - exc_type: Optional[type[BaseException]], - exc: Optional[BaseException], - tb: Optional[TracebackType], - ) -> Optional[bool]: + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: TracebackType | None, + ) -> bool | None: assert self._inner_lock_cm for lock_name, lock_key in self.lock_names: diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 272bbc05f9..f13271f302 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -19,7 +19,6 @@ # # import re -from typing import Union from twisted.internet import address, task from twisted.web.client import FileBodyProducer @@ -75,7 +74,7 @@ def _get_requested_host(request: IRequest) -> bytes: return hostname # no Host header, use the address/port that the request arrived on - host: Union[address.IPv4Address, address.IPv6Address] = request.getHost() + host: address.IPv4Address | address.IPv6Address = request.getHost() hostname = host.host.encode("ascii") diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py index 1a17b8461f..3661a2aeb7 100644 --- a/synapse/http/additional_resource.py +++ b/synapse/http/additional_resource.py @@ -18,7 +18,7 @@ # # -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional +from typing import TYPE_CHECKING, Any, Awaitable, Callable from twisted.web.server import Request @@ -41,7 +41,7 @@ class AdditionalResource(DirectServeJsonResource): def __init__( self, hs: "HomeServer", - handler: Callable[[Request], Awaitable[Optional[tuple[int, Any]]]], + handler: Callable[[Request], Awaitable[tuple[int, Any] | None]], ): """Initialise AdditionalResource @@ -56,7 +56,7 @@ class AdditionalResource(DirectServeJsonResource): super().__init__(clock=hs.get_clock()) self._handler = handler - async def _async_render(self, request: Request) -> Optional[tuple[int, Any]]: + async def 
_async_render(self, request: Request) -> tuple[int, Any] | None: # Cheekily pass the result straight through, so we don't need to worry # if its an awaitable or not. return await self._handler(request) diff --git a/synapse/http/client.py b/synapse/http/client.py index ff1f7c7128..9971accccd 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -28,9 +28,7 @@ from typing import ( BinaryIO, Callable, Mapping, - Optional, Protocol, - Union, ) import attr @@ -118,7 +116,7 @@ incoming_responses_counter = Counter( # the type of the headers map, to be passed to the t.w.h.Headers. # # The actual type accepted by Twisted is -# Mapping[Union[str, bytes], Sequence[Union[str, bytes]] , +# Mapping[str | bytes], Sequence[str | bytes] , # allowing us to mix and match str and bytes freely. However: any str is also a # Sequence[str]; passing a header string value which is a # standalone str is interpreted as a sequence of 1-codepoint strings. This is a disastrous footgun. @@ -126,21 +124,21 @@ incoming_responses_counter = Counter( # # We also simplify the keys to be either all str or all bytes. This helps because # Dict[K, V] is invariant in K (and indeed V). -RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]] +RawHeaders = Mapping[str, "RawHeaderValue"] | Mapping[bytes, "RawHeaderValue"] # the value actually has to be a List, but List is invariant so we can't specify that # the entries can either be Lists or bytes. -RawHeaderValue = Union[ - StrSequence, - list[bytes], - list[Union[str, bytes]], - tuple[bytes, ...], - tuple[Union[str, bytes], ...], -] +RawHeaderValue = ( + StrSequence + | list[bytes] + | list[str | bytes] + | tuple[bytes, ...] + | tuple[str | bytes, ...] +) def _is_ip_blocked( - ip_address: IPAddress, allowlist: Optional[IPSet], blocklist: IPSet + ip_address: IPAddress, allowlist: IPSet | None, blocklist: IPSet ) -> bool: """ Compares an IP address to allowed and disallowed IP sets. @@ -186,7 +184,7 @@ class _IPBlockingResolver: def __init__( self, reactor: IReactorPluggableNameResolver, - ip_allowlist: Optional[IPSet], + ip_allowlist: IPSet | None, ip_blocklist: IPSet, ): """ @@ -262,7 +260,7 @@ class BlocklistingReactorWrapper: def __init__( self, reactor: IReactorPluggableNameResolver, - ip_allowlist: Optional[IPSet], + ip_allowlist: IPSet | None, ip_blocklist: IPSet, ): self._reactor = reactor @@ -291,7 +289,7 @@ class BlocklistingAgentWrapper(Agent): self, agent: IAgent, ip_blocklist: IPSet, - ip_allowlist: Optional[IPSet] = None, + ip_allowlist: IPSet | None = None, ): """ Args: @@ -307,13 +305,13 @@ class BlocklistingAgentWrapper(Agent): self, method: bytes, uri: bytes, - headers: Optional[Headers] = None, - bodyProducer: Optional[IBodyProducer] = None, + headers: Headers | None = None, + bodyProducer: IBodyProducer | None = None, ) -> defer.Deferred: h = urllib.parse.urlparse(uri.decode("ascii")) try: - # h.hostname is Optional[str], None raises an AddrFormatError, so + # h.hostname is str | None, None raises an AddrFormatError, so # this is safe even though IPAddress requires a str. 
ip_address = IPAddress(h.hostname) # type: ignore[arg-type] except AddrFormatError: @@ -346,7 +344,7 @@ class BaseHttpClient: def __init__( self, hs: "HomeServer", - treq_args: Optional[dict[str, Any]] = None, + treq_args: dict[str, Any] | None = None, ): self.hs = hs self.server_name = hs.hostname @@ -371,8 +369,8 @@ class BaseHttpClient: self, method: str, uri: str, - data: Optional[bytes] = None, - headers: Optional[Headers] = None, + data: bytes | None = None, + headers: Headers | None = None, ) -> IResponse: """ Args: @@ -476,8 +474,8 @@ class BaseHttpClient: async def post_urlencoded_get_json( self, uri: str, - args: Optional[Mapping[str, Union[str, list[str]]]] = None, - headers: Optional[RawHeaders] = None, + args: Mapping[str, str | list[str]] | None = None, + headers: RawHeaders | None = None, ) -> Any: """ Args: @@ -525,7 +523,7 @@ class BaseHttpClient: ) async def post_json_get_json( - self, uri: str, post_json: Any, headers: Optional[RawHeaders] = None + self, uri: str, post_json: Any, headers: RawHeaders | None = None ) -> Any: """ @@ -574,8 +572,8 @@ class BaseHttpClient: async def get_json( self, uri: str, - args: Optional[QueryParams] = None, - headers: Optional[RawHeaders] = None, + args: QueryParams | None = None, + headers: RawHeaders | None = None, ) -> Any: """Gets some json from the given URI. @@ -605,8 +603,8 @@ class BaseHttpClient: self, uri: str, json_body: Any, - args: Optional[QueryParams] = None, - headers: Optional[RawHeaders] = None, + args: QueryParams | None = None, + headers: RawHeaders | None = None, ) -> Any: """Puts some json to the given URI. @@ -656,8 +654,8 @@ class BaseHttpClient: async def get_raw( self, uri: str, - args: Optional[QueryParams] = None, - headers: Optional[RawHeaders] = None, + args: QueryParams | None = None, + headers: RawHeaders | None = None, ) -> bytes: """Gets raw text from the given URI. @@ -701,9 +699,9 @@ class BaseHttpClient: self, url: str, output_stream: BinaryIO, - max_size: Optional[int] = None, - headers: Optional[RawHeaders] = None, - is_allowed_content_type: Optional[Callable[[str], bool]] = None, + max_size: int | None = None, + headers: RawHeaders | None = None, + is_allowed_content_type: Callable[[str], bool] | None = None, ) -> tuple[int, dict[bytes, list[bytes]], str, int]: """GETs a file from a given URL Args: @@ -812,9 +810,9 @@ class SimpleHttpClient(BaseHttpClient): def __init__( self, hs: "HomeServer", - treq_args: Optional[dict[str, Any]] = None, - ip_allowlist: Optional[IPSet] = None, - ip_blocklist: Optional[IPSet] = None, + treq_args: dict[str, Any] | None = None, + ip_allowlist: IPSet | None = None, + ip_blocklist: IPSet | None = None, use_proxy: bool = False, ): super().__init__(hs, treq_args=treq_args) @@ -891,8 +889,8 @@ class ReplicationClient(BaseHttpClient): self, method: str, uri: str, - data: Optional[bytes] = None, - headers: Optional[Headers] = None, + data: bytes | None = None, + headers: Headers | None = None, ) -> IResponse: """ Make a request, differs from BaseHttpClient.request in that it does not use treq. 
@@ -1028,7 +1026,7 @@ class BodyExceededMaxSize(Exception): class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol): """A protocol which immediately errors upon receiving data.""" - transport: Optional[ITCPTransport] = None + transport: ITCPTransport | None = None def __init__(self, deferred: defer.Deferred): self.deferred = deferred @@ -1058,10 +1056,10 @@ class MultipartResponse: """ json: bytes = b"{}" - length: Optional[int] = None - content_type: Optional[bytes] = None - disposition: Optional[bytes] = None - url: Optional[bytes] = None + length: int | None = None + content_type: bytes | None = None + disposition: bytes | None = None + url: bytes | None = None class _MultipartParserProtocol(protocol.Protocol): @@ -1069,20 +1067,20 @@ class _MultipartParserProtocol(protocol.Protocol): Protocol to read and parse a MSC3916 multipart/mixed response """ - transport: Optional[ITCPTransport] = None + transport: ITCPTransport | None = None def __init__( self, stream: ByteWriteable, deferred: defer.Deferred, boundary: str, - max_length: Optional[int], + max_length: int | None, ) -> None: self.stream = stream self.deferred = deferred self.boundary = boundary self.max_length = max_length - self.parser: Optional[MultipartParser] = None + self.parser: MultipartParser | None = None self.multipart_response = MultipartResponse() self.has_redirect = False self.in_json = False @@ -1177,10 +1175,10 @@ class _MultipartParserProtocol(protocol.Protocol): class _ReadBodyWithMaxSizeProtocol(protocol.Protocol): """A protocol which reads body to a stream, erroring if the body exceeds a maximum size.""" - transport: Optional[ITCPTransport] = None + transport: ITCPTransport | None = None def __init__( - self, stream: ByteWriteable, deferred: defer.Deferred, max_size: Optional[int] + self, stream: ByteWriteable, deferred: defer.Deferred, max_size: int | None ): self.stream = stream self.deferred = deferred @@ -1230,7 +1228,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol): def read_body_with_max_size( - response: IResponse, stream: ByteWriteable, max_size: Optional[int] + response: IResponse, stream: ByteWriteable, max_size: int | None ) -> "defer.Deferred[int]": """ Read a HTTP response body to a file-object. Optionally enforcing a maximum file size. @@ -1260,7 +1258,7 @@ def read_body_with_max_size( def read_multipart_response( - response: IResponse, stream: ByteWriteable, boundary: str, max_length: Optional[int] + response: IResponse, stream: ByteWriteable, boundary: str, max_length: int | None ) -> "defer.Deferred[MultipartResponse]": """ Reads a MSC3916 multipart/mixed response and parses it, reading the file part (if it contains one) into @@ -1285,7 +1283,7 @@ def read_multipart_response( return d -def encode_query_args(args: Optional[QueryParams]) -> bytes: +def encode_query_args(args: QueryParams | None) -> bytes: """ Encodes a map of query arguments to bytes which can be appended to a URL. @@ -1323,7 +1321,7 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory): def is_unknown_endpoint( - e: HttpResponseException, synapse_error: Optional[SynapseError] = None + e: HttpResponseException, synapse_error: SynapseError | None = None ) -> bool: """ Returns true if the response was due to an endpoint being unimplemented. 
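
The hunks above and below apply the same mechanical rewrite: `Optional[X]` becomes `X | None` and `Union[X, Y]` becomes `X | Y` (PEP 604, available from Python 3.10), both in signatures and in module-level type aliases. A minimal standalone sketch of the style, with made-up names (`HeaderValue`, `HeaderMap`, `fetch`) rather than Synapse's own aliases:

    # Illustrative only: PEP 604 unions replacing typing.Optional/Union (Python 3.10+).
    from collections.abc import Mapping, Sequence

    # Old spelling:                    New spelling:
    #   Optional[int]                    int | None
    #   Union[str, bytes]                str | bytes

    # Module-level aliases, analogous in shape to RawHeaderValue/RawHeaders above.
    HeaderValue = str | bytes | Sequence[str | bytes]
    HeaderMap = Mapping[str, HeaderValue] | Mapping[bytes, HeaderValue]

    def fetch(
        url: str,
        headers: HeaderMap | None = None,
        timeout: float | None = 30.0,
    ) -> bytes | None:
        # `X | None` is exactly equivalent to Optional[X]; a None default still
        # needs the union spelled out explicitly.
        if headers is None:
            headers = {}
        return None  # network call elided in this sketch
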
diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py index db803bc75a..094655f91a 100644 --- a/synapse/http/connectproxyclient.py +++ b/synapse/http/connectproxyclient.py @@ -22,7 +22,7 @@ import abc import base64 import logging -from typing import Optional, Union +from typing import Union import attr from zope.interface import implementer @@ -106,7 +106,7 @@ class HTTPConnectProxyEndpoint: proxy_endpoint: IStreamClientEndpoint, host: bytes, port: int, - proxy_creds: Optional[ProxyCredentials], + proxy_creds: ProxyCredentials | None, ): self._reactor = reactor self._proxy_endpoint = proxy_endpoint @@ -146,7 +146,7 @@ class HTTPProxiedClientFactory(protocol.ClientFactory): dst_host: bytes, dst_port: int, wrapped_factory: IProtocolFactory, - proxy_creds: Optional[ProxyCredentials], + proxy_creds: ProxyCredentials | None, ): self.dst_host = dst_host self.dst_port = dst_port @@ -212,7 +212,7 @@ class HTTPConnectProtocol(protocol.Protocol): port: int, wrapped_protocol: IProtocol, connected_deferred: defer.Deferred, - proxy_creds: Optional[ProxyCredentials], + proxy_creds: ProxyCredentials | None, ): self.host = host self.port = port @@ -275,7 +275,7 @@ class HTTPConnectSetupClient(http.HTTPClient): self, host: bytes, port: int, - proxy_creds: Optional[ProxyCredentials], + proxy_creds: ProxyCredentials | None, ): self.host = host self.port = port diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index f8482d9c48..c3ba26fe03 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -19,7 +19,7 @@ # import logging import urllib.parse -from typing import Any, Generator, Optional +from typing import Any, Generator from urllib.request import ( # type: ignore[attr-defined] proxy_bypass_environment, ) @@ -101,13 +101,13 @@ class MatrixFederationAgent: server_name: str, reactor: ISynapseReactor, clock: Clock, - tls_client_options_factory: Optional[FederationPolicyForHTTPS], + tls_client_options_factory: FederationPolicyForHTTPS | None, user_agent: bytes, - ip_allowlist: Optional[IPSet], + ip_allowlist: IPSet | None, ip_blocklist: IPSet, - proxy_config: Optional[ProxyConfig] = None, - _srv_resolver: Optional[SrvResolver] = None, - _well_known_resolver: Optional[WellKnownResolver] = None, + proxy_config: ProxyConfig | None = None, + _srv_resolver: SrvResolver | None = None, + _well_known_resolver: WellKnownResolver | None = None, ): """ Args: @@ -172,8 +172,8 @@ class MatrixFederationAgent: self, method: bytes, uri: bytes, - headers: Optional[Headers] = None, - bodyProducer: Optional[IBodyProducer] = None, + headers: Headers | None = None, + bodyProducer: IBodyProducer | None = None, ) -> Generator[defer.Deferred, Any, IResponse]: """ Args: @@ -259,9 +259,9 @@ class MatrixHostnameEndpointFactory: *, reactor: IReactorCore, proxy_reactor: IReactorCore, - tls_client_options_factory: Optional[FederationPolicyForHTTPS], - srv_resolver: Optional[SrvResolver], - proxy_config: Optional[ProxyConfig], + tls_client_options_factory: FederationPolicyForHTTPS | None, + srv_resolver: SrvResolver | None, + proxy_config: ProxyConfig | None, ): self._reactor = reactor self._proxy_reactor = proxy_reactor @@ -310,9 +310,9 @@ class MatrixHostnameEndpoint: *, reactor: IReactorCore, proxy_reactor: IReactorCore, - tls_client_options_factory: Optional[FederationPolicyForHTTPS], + tls_client_options_factory: FederationPolicyForHTTPS | None, srv_resolver: SrvResolver, - 
proxy_config: Optional[ProxyConfig], + proxy_config: ProxyConfig | None, parsed_uri: URI, ): self._reactor = reactor diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index ac4d954c2c..ec72e178c9 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -22,7 +22,7 @@ import logging import random import time from io import BytesIO -from typing import Callable, Optional +from typing import Callable import attr @@ -80,7 +80,7 @@ logger = logging.getLogger(__name__) @attr.s(slots=True, frozen=True, auto_attribs=True) class WellKnownLookupResult: - delegated_server: Optional[bytes] + delegated_server: bytes | None class WellKnownResolver: @@ -93,8 +93,8 @@ class WellKnownResolver: clock: Clock, agent: IAgent, user_agent: bytes, - well_known_cache: Optional[TTLCache[bytes, Optional[bytes]]] = None, - had_well_known_cache: Optional[TTLCache[bytes, bool]] = None, + well_known_cache: TTLCache[bytes, bytes | None] | None = None, + had_well_known_cache: TTLCache[bytes, bool] | None = None, ): """ Args: @@ -156,7 +156,7 @@ class WellKnownResolver: # label metrics) server_name=self.server_name, ): - result: Optional[bytes] + result: bytes | None cache_period: float result, cache_period = await self._fetch_well_known(server_name) @@ -320,7 +320,7 @@ class WellKnownResolver: def _cache_period_from_headers( headers: Headers, time_now: Callable[[], float] = time.time -) -> Optional[float]: +) -> float | None: cache_controls = _parse_cache_control(headers) if b"no-store" in cache_controls: @@ -348,7 +348,7 @@ def _cache_period_from_headers( return None -def _parse_cache_control(headers: Headers) -> dict[bytes, Optional[bytes]]: +def _parse_cache_control(headers: Headers) -> dict[bytes, bytes | None]: cache_controls = {} cache_control_headers = headers.getRawHeaders(b"cache-control") or [] for hdr in cache_control_headers: diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 562007c74f..7090960cfb 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -33,10 +33,8 @@ from typing import ( Callable, Generic, Literal, - Optional, TextIO, TypeVar, - Union, cast, overload, ) @@ -153,15 +151,15 @@ class MatrixFederationRequest: """The remote server to send the HTTP request to. """ - json: Optional[JsonDict] = None + json: JsonDict | None = None """JSON to send in the body. """ - json_callback: Optional[Callable[[], JsonDict]] = None + json_callback: Callable[[], JsonDict] | None = None """A callback to generate the JSON. """ - query: Optional[QueryParams] = None + query: QueryParams | None = None """Query arguments. 
""" @@ -204,7 +202,7 @@ class MatrixFederationRequest: ) object.__setattr__(self, "uri", uri) - def get_json(self) -> Optional[JsonDict]: + def get_json(self) -> JsonDict | None: if self.json_callback: return self.json_callback() return self.json @@ -216,7 +214,7 @@ class _BaseJsonParser(ByteParser[T]): CONTENT_TYPE = "application/json" def __init__( - self, validator: Optional[Callable[[Optional[object]], bool]] = None + self, validator: Callable[[object | None], bool] | None = None ) -> None: """ Args: @@ -390,7 +388,7 @@ class BinaryIOWrapper: self.decoder = codecs.getincrementaldecoder(encoding)(errors) self.file = file - def write(self, b: Union[bytes, bytearray]) -> int: + def write(self, b: bytes | bytearray) -> int: self.file.write(self.decoder.decode(b)) return len(b) @@ -407,7 +405,7 @@ class MatrixFederationHttpClient: def __init__( self, hs: "HomeServer", - tls_client_options_factory: Optional[FederationPolicyForHTTPS], + tls_client_options_factory: FederationPolicyForHTTPS | None, ): self.hs = hs self.signing_key = hs.signing_key @@ -550,7 +548,7 @@ class MatrixFederationHttpClient: self, request: MatrixFederationRequest, retry_on_dns_fail: bool = True, - timeout: Optional[int] = None, + timeout: int | None = None, long_retries: bool = False, ignore_backoff: bool = False, backoff_on_404: bool = False, @@ -693,7 +691,7 @@ class MatrixFederationHttpClient: destination_bytes, method_bytes, url_to_sign_bytes, json ) data = encode_canonical_json(json) - producer: Optional[IBodyProducer] = QuieterFileBodyProducer( + producer: IBodyProducer | None = QuieterFileBodyProducer( BytesIO(data), cooperator=self._cooperator ) else: @@ -905,11 +903,11 @@ class MatrixFederationHttpClient: def build_auth_headers( self, - destination: Optional[bytes], + destination: bytes | None, method: bytes, url_bytes: bytes, - content: Optional[JsonDict] = None, - destination_is: Optional[bytes] = None, + content: JsonDict | None = None, + destination_is: bytes | None = None, ) -> list[bytes]: """ Builds the Authorization headers for a federation request @@ -970,11 +968,11 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - args: Optional[QueryParams] = None, - data: Optional[JsonDict] = None, - json_data_callback: Optional[Callable[[], JsonDict]] = None, + args: QueryParams | None = None, + data: JsonDict | None = None, + json_data_callback: Callable[[], JsonDict] | None = None, long_retries: bool = False, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, @@ -987,15 +985,15 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - args: Optional[QueryParams] = None, - data: Optional[JsonDict] = None, - json_data_callback: Optional[Callable[[], JsonDict]] = None, + args: QueryParams | None = None, + data: JsonDict | None = None, + json_data_callback: Callable[[], JsonDict] | None = None, long_retries: bool = False, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser[T]] = None, + parser: ByteParser[T] | None = None, backoff_on_all_error_codes: bool = False, ) -> T: ... 
@@ -1003,17 +1001,17 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - args: Optional[QueryParams] = None, - data: Optional[JsonDict] = None, - json_data_callback: Optional[Callable[[], JsonDict]] = None, + args: QueryParams | None = None, + data: JsonDict | None = None, + json_data_callback: Callable[[], JsonDict] | None = None, long_retries: bool = False, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser[T]] = None, + parser: ByteParser[T] | None = None, backoff_on_all_error_codes: bool = False, - ) -> Union[JsonDict, T]: + ) -> JsonDict | T: """Sends the specified json data using PUT Args: @@ -1109,11 +1107,11 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - data: Optional[JsonDict] = None, + data: JsonDict | None = None, long_retries: bool = False, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, ) -> JsonDict: """Sends the specified json data using POST @@ -1188,9 +1186,9 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, parser: Literal[None] = None, @@ -1201,9 +1199,9 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - args: Optional[QueryParams] = ..., + args: QueryParams | None = ..., retry_on_dns_fail: bool = ..., - timeout: Optional[int] = ..., + timeout: int | None = ..., ignore_backoff: bool = ..., try_trailing_slash_on_400: bool = ..., parser: ByteParser[T] = ..., @@ -1213,13 +1211,13 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser[T]] = None, - ) -> Union[JsonDict, T]: + parser: ByteParser[T] | None = None, + ) -> JsonDict | T: """GETs some json from the given host homeserver and path Args: @@ -1282,9 +1280,9 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, parser: Literal[None] = None, @@ -1295,9 +1293,9 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - args: Optional[QueryParams] = ..., + args: QueryParams | None = ..., retry_on_dns_fail: bool = ..., - timeout: Optional[int] = ..., + timeout: int | None = ..., ignore_backoff: bool = ..., try_trailing_slash_on_400: bool = ..., parser: ByteParser[T] = ..., @@ -1307,13 +1305,13 @@ class MatrixFederationHttpClient: self, destination: str, path: str, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser[T]] = None, - ) -> tuple[Union[JsonDict, T], dict[bytes, 
list[bytes]]]: + parser: ByteParser[T] | None = None, + ) -> tuple[JsonDict | T, dict[bytes, list[bytes]]]: """GETs some json from the given host homeserver and path Args: @@ -1401,9 +1399,9 @@ class MatrixFederationHttpClient: destination: str, path: str, long_retries: bool = False, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, ) -> JsonDict: """Send a DELETE request to the remote expecting some json response @@ -1477,7 +1475,7 @@ class MatrixFederationHttpClient: download_ratelimiter: Ratelimiter, ip_address: str, max_size: int, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, ignore_backoff: bool = False, follow_redirects: bool = False, @@ -1639,7 +1637,7 @@ class MatrixFederationHttpClient: download_ratelimiter: Ratelimiter, ip_address: str, max_size: int, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, ignore_backoff: bool = False, ) -> tuple[int, dict[bytes, list[bytes]], bytes]: diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py index 583dd092bd..c7f5e39dd8 100644 --- a/synapse/http/proxy.py +++ b/synapse/http/proxy.py @@ -22,7 +22,7 @@ import json import logging import urllib.parse -from typing import TYPE_CHECKING, Any, Optional, cast +from typing import TYPE_CHECKING, Any, cast from twisted.internet import protocol from twisted.internet.interfaces import ITCPTransport @@ -65,7 +65,7 @@ assert all(header.lower() == header for header in HOP_BY_HOP_HEADERS_LOWERCASE) def parse_connection_header_value( - connection_header_value: Optional[bytes], + connection_header_value: bytes | None, ) -> set[str]: """ Parse the `Connection` header to determine which headers we should not be copied @@ -237,7 +237,7 @@ class _ProxyResponseBody(protocol.Protocol): request. 
""" - transport: Optional[ITCPTransport] = None + transport: ITCPTransport | None = None def __init__(self, request: "SynapseRequest") -> None: self._request = request diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 67e04b18d9..d315ce8475 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -21,7 +21,7 @@ import logging import random import re -from typing import Any, Collection, Optional, Sequence, Union, cast +from typing import Any, Collection, Sequence, cast from urllib.parse import urlparse from urllib.request import ( # type: ignore[attr-defined] proxy_bypass_environment, @@ -119,14 +119,14 @@ class ProxyAgent(_AgentBase): self, *, reactor: IReactorCore, - proxy_reactor: Optional[IReactorCore] = None, - contextFactory: Optional[IPolicyForHTTPS] = None, - connectTimeout: Optional[float] = None, - bindAddress: Optional[bytes] = None, - pool: Optional[HTTPConnectionPool] = None, - proxy_config: Optional[ProxyConfig] = None, + proxy_reactor: IReactorCore | None = None, + contextFactory: IPolicyForHTTPS | None = None, + connectTimeout: float | None = None, + bindAddress: bytes | None = None, + pool: HTTPConnectionPool | None = None, + proxy_config: ProxyConfig | None = None, federation_proxy_locations: Collection[InstanceLocationConfig] = (), - federation_proxy_credentials: Optional[ProxyCredentials] = None, + federation_proxy_credentials: ProxyCredentials | None = None, ): contextFactory = contextFactory or BrowserLikePolicyForHTTPS() @@ -175,8 +175,8 @@ class ProxyAgent(_AgentBase): self._policy_for_https = contextFactory self._reactor = cast(IReactorTime, reactor) - self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None - self._federation_proxy_credentials: Optional[ProxyCredentials] = None + self._federation_proxy_endpoint: IStreamClientEndpoint | None = None + self._federation_proxy_credentials: ProxyCredentials | None = None if federation_proxy_locations: assert federation_proxy_credentials is not None, ( "`federation_proxy_credentials` are required when using `federation_proxy_locations`" @@ -220,8 +220,8 @@ class ProxyAgent(_AgentBase): self, method: bytes, uri: bytes, - headers: Optional[Headers] = None, - bodyProducer: Optional[IBodyProducer] = None, + headers: Headers | None = None, + bodyProducer: IBodyProducer | None = None, ) -> "defer.Deferred[IResponse]": """ Issue a request to the server indicated by the given uri. @@ -363,13 +363,13 @@ class ProxyAgent(_AgentBase): def http_proxy_endpoint( - proxy: Optional[bytes], + proxy: bytes | None, reactor: IReactorCore, - tls_options_factory: Optional[IPolicyForHTTPS], + tls_options_factory: IPolicyForHTTPS | None, timeout: float = 30, - bindAddress: Optional[Union[bytes, str, tuple[Union[bytes, str], int]]] = None, - attemptDelay: Optional[float] = None, -) -> tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]: + bindAddress: bytes | str | tuple[bytes | str, int] | None = None, + attemptDelay: float | None = None, +) -> tuple[IStreamClientEndpoint | None, ProxyCredentials | None]: """Parses an http proxy setting and returns an endpoint for the proxy Args: @@ -418,7 +418,7 @@ def http_proxy_endpoint( def parse_proxy( proxy: bytes, default_scheme: bytes = b"http", default_port: int = 1080 -) -> tuple[bytes, bytes, int, Optional[ProxyCredentials]]: +) -> tuple[bytes, bytes, int, ProxyCredentials | None]: """ Parse a proxy connection string. 
diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py index f4799bd1b2..708e4c386b 100644 --- a/synapse/http/replicationagent.py +++ b/synapse/http/replicationagent.py @@ -20,7 +20,6 @@ # import logging -from typing import Optional from zope.interface import implementer @@ -119,9 +118,9 @@ class ReplicationAgent(_AgentBase): reactor: ISynapseReactor, instance_map: dict[str, InstanceLocationConfig], contextFactory: IPolicyForHTTPS, - connectTimeout: Optional[float] = None, - bindAddress: Optional[bytes] = None, - pool: Optional[HTTPConnectionPool] = None, + connectTimeout: float | None = None, + bindAddress: bytes | None = None, + pool: HTTPConnectionPool | None = None, ): """ Create a ReplicationAgent. @@ -149,8 +148,8 @@ class ReplicationAgent(_AgentBase): self, method: bytes, uri: bytes, - headers: Optional[Headers] = None, - bodyProducer: Optional[IBodyProducer] = None, + headers: Headers | None = None, + bodyProducer: IBodyProducer | None = None, ) -> "defer.Deferred[IResponse]": """ Issue a request to the server indicated by the given uri. diff --git a/synapse/http/server.py b/synapse/http/server.py index 1f4728fba2..5f4e7484fd 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -35,10 +35,8 @@ from typing import ( Callable, Iterable, Iterator, - Optional, Pattern, Protocol, - Union, cast, ) @@ -111,7 +109,7 @@ HTTP_STATUS_REQUEST_CANCELLED = 499 def return_json_error( - f: failure.Failure, request: "SynapseRequest", config: Optional[HomeServerConfig] + f: failure.Failure, request: "SynapseRequest", config: HomeServerConfig | None ) -> None: """Sends a JSON error response to clients.""" @@ -173,7 +171,7 @@ def return_json_error( def return_html_error( f: failure.Failure, request: Request, - error_template: Union[str, jinja2.Template], + error_template: str | jinja2.Template, ) -> None: """Sends an HTML error page corresponding to the given failure. @@ -264,7 +262,7 @@ def wrap_async_request_handler( # it is actually called with a SynapseRequest and a kwargs dict for the params, # but I can't figure out how to represent that. ServletCallback = Callable[ - ..., Union[None, Awaitable[None], tuple[int, Any], Awaitable[tuple[int, Any]]] + ..., None | Awaitable[None] | tuple[int, Any] | Awaitable[tuple[int, Any]] ] @@ -349,9 +347,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): f = failure.Failure() self._send_error_response(f, request) - async def _async_render( - self, request: "SynapseRequest" - ) -> Optional[tuple[int, Any]]: + async def _async_render(self, request: "SynapseRequest") -> tuple[int, Any] | None: """Delegates to `_async_render_` methods, or returns a 400 if no appropriate method exists. Can be overridden in sub classes for different routing. @@ -406,7 +402,7 @@ class DirectServeJsonResource(_AsyncResource): canonical_json: bool = False, extract_context: bool = False, # Clock is optional as this class is exposed to the module API. - clock: Optional[Clock] = None, + clock: Clock | None = None, ): """ Args: @@ -603,7 +599,7 @@ class DirectServeHtmlResource(_AsyncResource): self, extract_context: bool = False, # Clock is optional as this class is exposed to the module API. 
- clock: Optional[Clock] = None, + clock: Clock | None = None, ): """ Args: @@ -732,7 +728,7 @@ class _ByteProducer: request: Request, iterator: Iterator[bytes], ): - self._request: Optional[Request] = request + self._request: Request | None = request self._iterator = iterator self._paused = False self.tracing_scope = start_active_span( @@ -831,7 +827,7 @@ def respond_with_json( json_object: Any, send_cors: bool = False, canonical_json: bool = True, -) -> Optional[int]: +) -> int | None: """Sends encoded JSON in response to the given request. Args: @@ -880,7 +876,7 @@ def respond_with_json_bytes( code: int, json_bytes: bytes, send_cors: bool = False, -) -> Optional[int]: +) -> int | None: """Sends encoded JSON in response to the given request. Args: @@ -929,7 +925,7 @@ async def _async_write_json_to_request_in_thread( expensive. """ - def encode(opentracing_span: "Optional[opentracing.Span]") -> bytes: + def encode(opentracing_span: "opentracing.Span | None") -> bytes: # it might take a while for the threadpool to schedule us, so we write # opentracing logs once we actually get scheduled, so that we can see how # much that contributed. diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index bca93fb036..c182497f2d 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -29,7 +29,6 @@ from typing import ( TYPE_CHECKING, Literal, Mapping, - Optional, Sequence, TypeVar, overload, @@ -80,26 +79,26 @@ def parse_integer( @overload -def parse_integer(request: Request, name: str, *, negative: bool) -> Optional[int]: ... +def parse_integer(request: Request, name: str, *, negative: bool) -> int | None: ... @overload def parse_integer( request: Request, name: str, - default: Optional[int] = None, + default: int | None = None, required: bool = False, negative: bool = False, -) -> Optional[int]: ... +) -> int | None: ... def parse_integer( request: Request, name: str, - default: Optional[int] = None, + default: int | None = None, required: bool = False, negative: bool = False, -) -> Optional[int]: +) -> int | None: """Parse an integer parameter from the request string Args: @@ -136,8 +135,8 @@ def parse_integer_from_args( def parse_integer_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[int] = None, -) -> Optional[int]: ... + default: int | None = None, +) -> int | None: ... @overload @@ -153,19 +152,19 @@ def parse_integer_from_args( def parse_integer_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[int] = None, + default: int | None = None, required: bool = False, negative: bool = False, -) -> Optional[int]: ... +) -> int | None: ... def parse_integer_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[int] = None, + default: int | None = None, required: bool = False, negative: bool = False, -) -> Optional[int]: +) -> int | None: """Parse an integer parameter from the request string Args: @@ -217,13 +216,13 @@ def parse_boolean(request: Request, name: str, *, required: Literal[True]) -> bo @overload def parse_boolean( - request: Request, name: str, default: Optional[bool] = None, required: bool = False -) -> Optional[bool]: ... + request: Request, name: str, default: bool | None = None, required: bool = False +) -> bool | None: ... 
def parse_boolean( - request: Request, name: str, default: Optional[bool] = None, required: bool = False -) -> Optional[bool]: + request: Request, name: str, default: bool | None = None, required: bool = False +) -> bool | None: """Parse a boolean parameter from the request query string Args: @@ -265,17 +264,17 @@ def parse_boolean_from_args( def parse_boolean_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[bool] = None, + default: bool | None = None, required: bool = False, -) -> Optional[bool]: ... +) -> bool | None: ... def parse_boolean_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[bool] = None, + default: bool | None = None, required: bool = False, -) -> Optional[bool]: +) -> bool | None: """Parse a boolean parameter from the request query string Args: @@ -318,8 +317,8 @@ def parse_boolean_from_args( def parse_bytes_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[bytes] = None, -) -> Optional[bytes]: ... + default: bytes | None = None, +) -> bytes | None: ... @overload @@ -336,17 +335,17 @@ def parse_bytes_from_args( def parse_bytes_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[bytes] = None, + default: bytes | None = None, required: bool = False, -) -> Optional[bytes]: ... +) -> bytes | None: ... def parse_bytes_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[bytes] = None, + default: bytes | None = None, required: bool = False, -) -> Optional[bytes]: +) -> bytes | None: """ Parse a string parameter as bytes from the request query string. @@ -380,7 +379,7 @@ def parse_string( name: str, default: str, *, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", ) -> str: ... @@ -391,7 +390,7 @@ def parse_string( name: str, *, required: Literal[True], - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", ) -> str: ... @@ -401,21 +400,21 @@ def parse_string( request: Request, name: str, *, - default: Optional[str] = None, + default: str | None = None, required: bool = False, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", -) -> Optional[str]: ... +) -> str | None: ... def parse_string( request: Request, name: str, - default: Optional[str] = None, + default: str | None = None, required: bool = False, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", -) -> Optional[str]: +) -> str | None: """ Parse a string parameter from the request query string. @@ -455,10 +454,10 @@ def parse_string( def parse_json( request: Request, name: str, - default: Optional[dict] = None, + default: dict | None = None, required: bool = False, encoding: str = "ascii", -) -> Optional[JsonDict]: +) -> JsonDict | None: """ Parse a JSON parameter from the request query string. @@ -492,10 +491,10 @@ def parse_json( def parse_json_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[dict] = None, + default: dict | None = None, required: bool = False, encoding: str = "ascii", -) -> Optional[JsonDict]: +) -> JsonDict | None: """ Parse a JSON parameter from the request query string. 
@@ -559,9 +558,9 @@ def parse_enum( request: Request, name: str, E: type[EnumT], - default: Optional[EnumT] = None, + default: EnumT | None = None, required: bool = False, -) -> Optional[EnumT]: +) -> EnumT | None: """ Parse an enum parameter from the request query string. @@ -601,7 +600,7 @@ def parse_enum( def _parse_string_value( value: bytes, - allowed_values: Optional[StrCollection], + allowed_values: StrCollection | None, name: str, encoding: str, ) -> str: @@ -627,9 +626,9 @@ def parse_strings_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, *, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", -) -> Optional[list[str]]: ... +) -> list[str] | None: ... @overload @@ -638,7 +637,7 @@ def parse_strings_from_args( name: str, default: list[str], *, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", ) -> list[str]: ... @@ -649,7 +648,7 @@ def parse_strings_from_args( name: str, *, required: Literal[True], - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", ) -> list[str]: ... @@ -658,22 +657,22 @@ def parse_strings_from_args( def parse_strings_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[list[str]] = None, + default: list[str] | None = None, *, required: bool = False, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", -) -> Optional[list[str]]: ... +) -> list[str] | None: ... def parse_strings_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[list[str]] = None, + default: list[str] | None = None, required: bool = False, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", -) -> Optional[list[str]]: +) -> list[str] | None: """ Parse a string parameter from the request query string list. @@ -720,21 +719,21 @@ def parse_strings_from_args( def parse_string_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[str] = None, + default: str | None = None, *, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", -) -> Optional[str]: ... +) -> str | None: ... @overload def parse_string_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[str] = None, + default: str | None = None, *, required: Literal[True], - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", ) -> str: ... @@ -743,21 +742,21 @@ def parse_string_from_args( def parse_string_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[str] = None, + default: str | None = None, required: bool = False, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", -) -> Optional[str]: ... +) -> str | None: ... def parse_string_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[str] = None, + default: str | None = None, required: bool = False, - allowed_values: Optional[StrCollection] = None, + allowed_values: StrCollection | None = None, encoding: str = "ascii", -) -> Optional[str]: +) -> str | None: """ Parse the string parameter from the request query string list and return the first result. 
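
The servlet helpers above keep their `@overload` stubs so a type checker can drop the `| None` from the return type when the caller passes `required=True` as a `Literal[True]`. A self-contained sketch of that pattern with a generic `get_param` helper (an invented stand-in, not one of Synapse's actual parsers):

    # Illustrative only: overloads that remove `| None` from the return type when required=True.
    from typing import Literal, overload

    @overload
    def get_param(params: dict[str, str], name: str, *, required: Literal[True]) -> str: ...
    @overload
    def get_param(params: dict[str, str], name: str, *, required: bool = False) -> str | None: ...

    def get_param(params: dict[str, str], name: str, *, required: bool = False) -> str | None:
        # Stand-in for helpers like parse_string: return the value, or None/raise if missing.
        value = params.get(name)
        if value is None and required:
            raise KeyError(f"missing required parameter {name!r}")
        return value

    # A type checker infers `str` (no None) here, because required=True matches the first overload.
    room_id: str = get_param({"room_id": "!abc:example.org"}, "room_id", required=True)
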
@@ -812,12 +811,12 @@ def parse_json_value_from_request( @overload def parse_json_value_from_request( request: Request, allow_empty_body: bool = False -) -> Optional[JsonDict]: ... +) -> JsonDict | None: ... def parse_json_value_from_request( request: Request, allow_empty_body: bool = False -) -> Optional[JsonDict]: +) -> JsonDict | None: """Parse a JSON value from the body of a twisted HTTP request. Args: @@ -980,8 +979,8 @@ class ResolveRoomIdMixin: self.room_member_handler = hs.get_room_member_handler() async def resolve_room_id( - self, room_identifier: str, remote_room_hosts: Optional[list[str]] = None - ) -> tuple[str, Optional[list[str]]]: + self, room_identifier: str, remote_room_hosts: list[str] | None = None + ) -> tuple[str, list[str] | None]: """ Resolve a room identifier to a room ID, if necessary. diff --git a/synapse/http/site.py b/synapse/http/site.py index ccf6ff27f0..03d5d048b1 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -22,7 +22,7 @@ import contextlib import logging import time from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Generator, Optional, Union +from typing import TYPE_CHECKING, Any, Generator import attr from zope.interface import implementer @@ -88,7 +88,7 @@ class SynapseRequest(Request): our_server_name: str, *args: Any, max_request_body_size: int = 1024, - request_id_header: Optional[str] = None, + request_id_header: str | None = None, **kw: Any, ): super().__init__(channel, *args, **kw) @@ -102,18 +102,18 @@ class SynapseRequest(Request): # The requester, if authenticated. For federation requests this is the # server name, for client requests this is the Requester object. - self._requester: Optional[Union[Requester, str]] = None + self._requester: Requester | str | None = None # An opentracing span for this request. Will be closed when the request is # completely processed. - self._opentracing_span: "Optional[opentracing.Span]" = None + self._opentracing_span: "opentracing.Span | None" = None # we can't yet create the logcontext, as we don't know the method. - self.logcontext: Optional[LoggingContext] = None + self.logcontext: LoggingContext | None = None # The `Deferred` to cancel if the client disconnects early and # `is_render_cancellable` is set. Expected to be set by `Resource.render`. - self.render_deferred: Optional["Deferred[None]"] = None + self.render_deferred: "Deferred[None]" | None = None # A boolean indicating whether `render_deferred` should be cancelled if the # client disconnects early. Expected to be set by the coroutine started by # `Resource.render`, if rendering is asynchronous. 
@@ -127,11 +127,11 @@ class SynapseRequest(Request): self._is_processing = False # the time when the asynchronous request handler completed its processing - self._processing_finished_time: Optional[float] = None + self._processing_finished_time: float | None = None # what time we finished sending the response to the client (or the connection # dropped) - self.finish_time: Optional[float] = None + self.finish_time: float | None = None def __repr__(self) -> str: # We overwrite this so that we don't log ``access_token`` @@ -195,11 +195,11 @@ class SynapseRequest(Request): super().handleContentChunk(data) @property - def requester(self) -> Optional[Union[Requester, str]]: + def requester(self) -> Requester | str | None: return self._requester @requester.setter - def requester(self, value: Union[Requester, str]) -> None: + def requester(self, value: Requester | str) -> None: # Store the requester, and update some properties based on it. # This should only be called once. @@ -246,7 +246,7 @@ class SynapseRequest(Request): Returns: The redacted URI as a string. """ - uri: Union[bytes, str] = self.uri + uri: bytes | str = self.uri if isinstance(uri, bytes): uri = uri.decode("ascii", errors="replace") return redact_uri(uri) @@ -261,12 +261,12 @@ class SynapseRequest(Request): Returns: The request method as a string. """ - method: Union[bytes, str] = self.method + method: bytes | str = self.method if isinstance(method, bytes): return self.method.decode("ascii") return method - def get_authenticated_entity(self) -> tuple[Optional[str], Optional[str]]: + def get_authenticated_entity(self) -> tuple[str | None, str | None]: """ Get the "authenticated" entity of the request, which might be the user performing the action, or a user being puppeted by a server admin. @@ -403,7 +403,7 @@ class SynapseRequest(Request): with PreserveLoggingContext(self.logcontext): self._finished_processing() - def connectionLost(self, reason: Union[Failure, Exception]) -> None: + def connectionLost(self, reason: Failure | Exception) -> None: """Called when the client connection is closed before the response is written. Overrides twisted.web.server.Request.connectionLost to record the finish time and @@ -595,7 +595,7 @@ class XForwardedForRequest(SynapseRequest): """ # the client IP and ssl flag, as extracted from the headers. - _forwarded_for: "Optional[_XForwardedForAddress]" = None + _forwarded_for: "_XForwardedForAddress | None" = None _forwarded_https: bool = False def requestReceived(self, command: bytes, path: bytes, version: bytes) -> None: @@ -674,7 +674,7 @@ class SynapseProtocol(HTTPChannel): site: "SynapseSite", our_server_name: str, max_request_body_size: int, - request_id_header: Optional[str], + request_id_header: str | None, request_class: type, ): super().__init__() @@ -821,5 +821,5 @@ class SynapseSite(ProxySite): @attr.s(auto_attribs=True, frozen=True, slots=True) class RequestInfo: - user_agent: Optional[str] + user_agent: str | None ip: str diff --git a/synapse/http/types.py b/synapse/http/types.py index dd954b6c20..a04a285397 100644 --- a/synapse/http/types.py +++ b/synapse/http/types.py @@ -18,10 +18,10 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Iterable, Mapping, Union +from typing import Iterable, Mapping # the type of the query params, to be passed into `urlencode` with `doseq=True`. 
-QueryParamValue = Union[str, bytes, Iterable[Union[str, bytes]]] -QueryParams = Union[Mapping[str, QueryParamValue], Mapping[bytes, QueryParamValue]] +QueryParamValue = str | bytes | Iterable[str | bytes] +QueryParams = Mapping[str, QueryParamValue] | Mapping[bytes, QueryParamValue] __all__ = ["QueryParams"] diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index a3444221a0..e3e0ba4beb 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -25,7 +25,7 @@ import traceback from collections import deque from ipaddress import IPv4Address, IPv6Address, ip_address from math import floor -from typing import Callable, Optional +from typing import Callable import attr from zope.interface import implementer @@ -113,7 +113,7 @@ class RemoteHandler(logging.Handler): port: int, maximum_buffer: int = 1000, level: int = logging.NOTSET, - _reactor: Optional[IReactorTime] = None, + _reactor: IReactorTime | None = None, ): super().__init__(level=level) self.host = host @@ -121,8 +121,8 @@ class RemoteHandler(logging.Handler): self.maximum_buffer = maximum_buffer self._buffer: deque[logging.LogRecord] = deque() - self._connection_waiter: Optional[Deferred] = None - self._producer: Optional[LogProducer] = None + self._connection_waiter: Deferred | None = None + self._producer: LogProducer | None = None # Connect without DNS lookups if it's a direct IP. if _reactor is None: diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 919493d1a3..2410d95720 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -40,7 +40,6 @@ from typing import ( Awaitable, Callable, Literal, - Optional, TypeVar, Union, overload, @@ -88,7 +87,7 @@ try: is_thread_resource_usage_supported = True - def get_thread_resource_usage() -> "Optional[resource.struct_rusage]": + def get_thread_resource_usage() -> "resource.struct_rusage | None": return resource.getrusage(RUSAGE_THREAD) except Exception: @@ -96,7 +95,7 @@ except Exception: # won't track resource usage. 
is_thread_resource_usage_supported = False - def get_thread_resource_usage() -> "Optional[resource.struct_rusage]": + def get_thread_resource_usage() -> "resource.struct_rusage | None": return None @@ -137,7 +136,7 @@ class ContextResourceUsage: "evt_db_fetch_count", ] - def __init__(self, copy_from: "Optional[ContextResourceUsage]" = None) -> None: + def __init__(self, copy_from: "ContextResourceUsage | None" = None) -> None: """Create a new ContextResourceUsage Args: @@ -230,8 +229,8 @@ class ContextRequest: request_id: str ip_address: str site_tag: str - requester: Optional[str] - authenticated_entity: Optional[str] + requester: str | None + authenticated_entity: str | None method: str url: str protocol: str @@ -274,10 +273,10 @@ class _Sentinel: def __str__(self) -> str: return "sentinel" - def start(self, rusage: "Optional[resource.struct_rusage]") -> None: + def start(self, rusage: "resource.struct_rusage | None") -> None: pass - def stop(self, rusage: "Optional[resource.struct_rusage]") -> None: + def stop(self, rusage: "resource.struct_rusage | None") -> None: pass def add_database_transaction(self, duration_sec: float) -> None: @@ -334,8 +333,8 @@ class LoggingContext: *, name: str, server_name: str, - parent_context: "Optional[LoggingContext]" = None, - request: Optional[ContextRequest] = None, + parent_context: "LoggingContext | None" = None, + request: ContextRequest | None = None, ) -> None: self.previous_context = current_context() @@ -344,14 +343,14 @@ class LoggingContext: # The thread resource usage when the logcontext became active. None # if the context is not currently active. - self.usage_start: Optional[resource.struct_rusage] = None + self.usage_start: resource.struct_rusage | None = None self.name = name self.server_name = server_name self.main_thread = get_thread_id() self.request = None self.tag = "" - self.scope: Optional["_LogContextScope"] = None + self.scope: "_LogContextScope" | None = None # keep track of whether we have hit the __exit__ block for this context # (suggesting that the the thing that created the context thinks it should @@ -391,9 +390,9 @@ class LoggingContext: def __exit__( self, - type: Optional[type[BaseException]], - value: Optional[BaseException], - traceback: Optional[TracebackType], + type: type[BaseException] | None, + value: BaseException | None, + traceback: TracebackType | None, ) -> None: """Restore the logging context in thread local storage to the state it was before this context was entered. @@ -417,7 +416,7 @@ class LoggingContext: # recorded against the correct metrics. self.finished = True - def start(self, rusage: "Optional[resource.struct_rusage]") -> None: + def start(self, rusage: "resource.struct_rusage | None") -> None: """ Record that this logcontext is currently running. @@ -442,7 +441,7 @@ class LoggingContext: else: self.usage_start = rusage - def stop(self, rusage: "Optional[resource.struct_rusage]") -> None: + def stop(self, rusage: "resource.struct_rusage | None") -> None: """ Record that this logcontext is no longer running. @@ -702,9 +701,9 @@ class PreserveLoggingContext: def __exit__( self, - type: Optional[type[BaseException]], - value: Optional[BaseException], - traceback: Optional[TracebackType], + type: type[BaseException] | None, + value: BaseException | None, + traceback: TracebackType | None, ) -> None: logcontext_debug_logger.debug( "PreserveLoggingContext(%s).__exit %s --> %s", @@ -823,10 +822,7 @@ def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]: ... 
def preserve_fn( - f: Union[ - Callable[P, R], - Callable[P, Awaitable[R]], - ], + f: Callable[P, R] | Callable[P, Awaitable[R]], ) -> Callable[P, "defer.Deferred[R]"]: """Function decorator which wraps the function with run_in_background""" @@ -852,10 +848,7 @@ def run_in_background( def run_in_background( - f: Union[ - Callable[P, R], - Callable[P, Awaitable[R]], - ], + f: Callable[P, R] | Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs, ) -> "defer.Deferred[R]": diff --git a/synapse/logging/formatter.py b/synapse/logging/formatter.py index e5d73a47a8..70b6d7f6a1 100644 --- a/synapse/logging/formatter.py +++ b/synapse/logging/formatter.py @@ -23,7 +23,6 @@ import logging import traceback from io import StringIO from types import TracebackType -from typing import Optional class LogFormatter(logging.Formatter): @@ -39,9 +38,9 @@ class LogFormatter(logging.Formatter): def formatException( self, ei: tuple[ - Optional[type[BaseException]], - Optional[BaseException], - Optional[TracebackType], + type[BaseException] | None, + BaseException | None, + TracebackType | None, ], ) -> str: sio = StringIO() diff --git a/synapse/logging/handlers.py b/synapse/logging/handlers.py index b7945aac72..976c7075d4 100644 --- a/synapse/logging/handlers.py +++ b/synapse/logging/handlers.py @@ -3,7 +3,7 @@ import time from logging import Handler, LogRecord from logging.handlers import MemoryHandler from threading import Thread -from typing import Optional, cast +from typing import cast from twisted.internet.interfaces import IReactorCore @@ -23,10 +23,10 @@ class PeriodicallyFlushingMemoryHandler(MemoryHandler): self, capacity: int, flushLevel: int = logging.ERROR, - target: Optional[Handler] = None, + target: Handler | None = None, flushOnClose: bool = True, period: float = 5.0, - reactor: Optional[IReactorCore] = None, + reactor: IReactorCore | None = None, ) -> None: """ period: the period between automatic flushes diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index fbb9971b32..6e4e029163 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -289,7 +289,7 @@ try: except Exception: logger.exception("Failed to report span") - RustReporter: Optional[type[_WrappedRustReporter]] = _WrappedRustReporter + RustReporter: type[_WrappedRustReporter] | None = _WrappedRustReporter except ImportError: RustReporter = None @@ -354,7 +354,7 @@ class SynapseBaggage: # Block everything by default # A regex which matches the server_names to expose traces for. # None means 'block everything'. -_homeserver_whitelist: Optional[Pattern[str]] = None +_homeserver_whitelist: Pattern[str] | None = None # Util methods @@ -370,11 +370,11 @@ R = TypeVar("R") T = TypeVar("T") -def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]: +def only_if_tracing(func: Callable[P, R]) -> Callable[P, R | None]: """Executes the function only if we're tracing. Otherwise returns None.""" @wraps(func) - def _only_if_tracing_inner(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: + def _only_if_tracing_inner(*args: P.args, **kwargs: P.kwargs) -> R | None: if opentracing: return func(*args, **kwargs) else: @@ -386,18 +386,18 @@ def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]: @overload def ensure_active_span( message: str, -) -> Callable[[Callable[P, R]], Callable[P, Optional[R]]]: ... +) -> Callable[[Callable[P, R]], Callable[P, R | None]]: ... 
@overload def ensure_active_span( message: str, ret: T -) -> Callable[[Callable[P, R]], Callable[P, Union[T, R]]]: ... +) -> Callable[[Callable[P, R]], Callable[P, T | R]]: ... def ensure_active_span( - message: str, ret: Optional[T] = None -) -> Callable[[Callable[P, R]], Callable[P, Union[Optional[T], R]]]: + message: str, ret: T | None = None +) -> Callable[[Callable[P, R]], Callable[P, T | None | R]]: """Executes the operation only if opentracing is enabled and there is an active span. If there is no active span it logs message at the error level. @@ -413,11 +413,11 @@ def ensure_active_span( def ensure_active_span_inner_1( func: Callable[P, R], - ) -> Callable[P, Union[Optional[T], R]]: + ) -> Callable[P, T | None | R]: @wraps(func) def ensure_active_span_inner_2( *args: P.args, **kwargs: P.kwargs - ) -> Union[Optional[T], R]: + ) -> T | None | R: if not opentracing: return ret @@ -532,10 +532,10 @@ def whitelisted_homeserver(destination: str) -> bool: # Could use kwargs but I want these to be explicit def start_active_span( operation_name: str, - child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None, - references: Optional[list["opentracing.Reference"]] = None, - tags: Optional[dict[str, str]] = None, - start_time: Optional[float] = None, + child_of: Union["opentracing.Span", "opentracing.SpanContext"] | None = None, + references: list["opentracing.Reference"] | None = None, + tags: dict[str, str] | None = None, + start_time: float | None = None, ignore_active_span: bool = False, finish_on_close: bool = True, *, @@ -573,9 +573,9 @@ def start_active_span( def start_active_span_follows_from( operation_name: str, contexts: Collection, - child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None, - tags: Optional[dict[str, str]] = None, - start_time: Optional[float] = None, + child_of: Union["opentracing.Span", "opentracing.SpanContext"] | None = None, + tags: dict[str, str] | None = None, + start_time: float | None = None, ignore_active_span: bool = False, *, inherit_force_tracing: bool = False, @@ -630,9 +630,9 @@ def start_active_span_follows_from( def start_active_span_from_edu( edu_content: dict[str, Any], operation_name: str, - references: Optional[list["opentracing.Reference"]] = None, - tags: Optional[dict[str, str]] = None, - start_time: Optional[float] = None, + references: list["opentracing.Reference"] | None = None, + tags: dict[str, str] | None = None, + start_time: float | None = None, ignore_active_span: bool = False, finish_on_close: bool = True, ) -> "opentracing.Scope": @@ -699,14 +699,14 @@ def active_span( @ensure_active_span("set a tag") -def set_tag(key: str, value: Union[str, bool, int, float]) -> None: +def set_tag(key: str, value: str | bool | int | float) -> None: """Sets a tag on the active span""" assert opentracing.tracer.active_span is not None opentracing.tracer.active_span.set_tag(key, value) @ensure_active_span("log") -def log_kv(key_values: dict[str, Any], timestamp: Optional[float] = None) -> None: +def log_kv(key_values: dict[str, Any], timestamp: float | None = None) -> None: """Log to the active span""" assert opentracing.tracer.active_span is not None opentracing.tracer.active_span.log_kv(key_values, timestamp) @@ -758,7 +758,7 @@ def is_context_forced_tracing( @ensure_active_span("inject the span into a header dict") def inject_header_dict( headers: dict[bytes, list[bytes]], - destination: Optional[str] = None, + destination: str | None = None, check_destination: bool = True, ) -> None: """ @@ -826,7 
+826,7 @@ def inject_request_headers(headers: dict[str, str]) -> None: @ensure_active_span( "get the active span context as a dict", ret=cast(dict[str, str], {}) ) -def get_active_span_text_map(destination: Optional[str] = None) -> dict[str, str]: +def get_active_span_text_map(destination: str | None = None) -> dict[str, str]: """ Gets a span context as a dict. This can be used instead of manually injecting a span into an empty carrier. @@ -865,7 +865,7 @@ def active_span_context_as_string() -> str: return json_encoder.encode(carrier) -def span_context_from_request(request: Request) -> "Optional[opentracing.SpanContext]": +def span_context_from_request(request: Request) -> "opentracing.SpanContext | None": """Extract an opentracing context from the headers on an HTTP request This is useful when we have received an HTTP request from another part of our @@ -1119,7 +1119,7 @@ def trace_servlet( # with JsonResource). scope.span.set_operation_name(request.request_metrics.name) - # Mypy seems to think that start_context.tag below can be Optional[str], but + # Mypy seems to think that start_context.tag below can be str | None, but # that doesn't appear to be correct and works in practice. request_tags[SynapseTags.REQUEST_TAG] = ( diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py index feaadc4d87..f3ec07eecf 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py @@ -20,7 +20,6 @@ # import logging -from typing import Optional from opentracing import Scope, ScopeManager, Span @@ -47,7 +46,7 @@ class LogContextScopeManager(ScopeManager): pass @property - def active(self) -> Optional[Scope]: + def active(self) -> Scope | None: """ Returns the currently active Scope which can be used to access the currently active Scope.span. diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 319ca662e2..e0313d2893 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -30,7 +30,6 @@ from typing import ( Awaitable, BinaryIO, Generator, - Optional, ) import attr @@ -133,8 +132,8 @@ async def respond_with_file( request: SynapseRequest, media_type: str, file_path: str, - file_size: Optional[int] = None, - upload_name: Optional[str] = None, + file_size: int | None = None, + upload_name: str | None = None, ) -> None: logger.debug("Responding with %r", file_path) @@ -156,8 +155,8 @@ async def respond_with_file( def add_file_headers( request: Request, media_type: str, - file_size: Optional[int], - upload_name: Optional[str], + file_size: int | None, + upload_name: str | None, ) -> None: """Adds the correct response headers in preparation for responding with the media. @@ -301,10 +300,10 @@ def _can_encode_filename_as_token(x: str) -> bool: async def respond_with_multipart_responder( clock: Clock, request: SynapseRequest, - responder: "Optional[Responder]", + responder: "Responder | None", media_type: str, - media_length: Optional[int], - upload_name: Optional[str], + media_length: int | None, + upload_name: str | None, ) -> None: """ Responds to requests originating from the federation media `/download` endpoint by @@ -392,10 +391,10 @@ async def respond_with_multipart_responder( async def respond_with_responder( request: SynapseRequest, - responder: "Optional[Responder]", + responder: "Responder | None", media_type: str, - file_size: Optional[int], - upload_name: Optional[str] = None, + file_size: int | None, + upload_name: str | None = None, ) -> None: """Responds to the request with given responder. 
If responder is None then returns 404. @@ -501,9 +500,9 @@ class Responder(ABC): def __exit__( # noqa: B027 self, - exc_type: Optional[type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, ) -> None: pass @@ -526,47 +525,47 @@ class FileInfo: """Details about a requested/uploaded file.""" # The server name where the media originated from, or None if local. - server_name: Optional[str] + server_name: str | None # The local ID of the file. For local files this is the same as the media_id file_id: str # If the file is for the url preview cache url_cache: bool = False # Whether the file is a thumbnail or not. - thumbnail: Optional[ThumbnailInfo] = None + thumbnail: ThumbnailInfo | None = None # The below properties exist to maintain compatibility with third-party modules. @property - def thumbnail_width(self) -> Optional[int]: + def thumbnail_width(self) -> int | None: if not self.thumbnail: return None return self.thumbnail.width @property - def thumbnail_height(self) -> Optional[int]: + def thumbnail_height(self) -> int | None: if not self.thumbnail: return None return self.thumbnail.height @property - def thumbnail_method(self) -> Optional[str]: + def thumbnail_method(self) -> str | None: if not self.thumbnail: return None return self.thumbnail.method @property - def thumbnail_type(self) -> Optional[str]: + def thumbnail_type(self) -> str | None: if not self.thumbnail: return None return self.thumbnail.type @property - def thumbnail_length(self) -> Optional[int]: + def thumbnail_length(self) -> int | None: if not self.thumbnail: return None return self.thumbnail.length -def get_filename_from_headers(headers: dict[bytes, list[bytes]]) -> Optional[str]: +def get_filename_from_headers(headers: dict[bytes, list[bytes]]) -> str | None: """ Get the filename of the downloaded file by inspecting the Content-Disposition HTTP header. @@ -703,9 +702,9 @@ class ThreadedFileSender: self.clock = hs.get_clock() self.thread_pool = hs.get_media_sender_thread_pool() - self.file: Optional[BinaryIO] = None + self.file: BinaryIO | None = None self.deferred: "Deferred[None]" = Deferred() - self.consumer: Optional[interfaces.IConsumer] = None + self.consumer: interfaces.IConsumer | None = None # Signals if the thread should keep reading/sending data. Set means # continue, clear means pause. 
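The mechanical rewrite applied throughout these hunks is PEP 604 union syntax: `Optional[X]` becomes `X | None` and `Union[A, B]` becomes `A | B`. A minimal, self-contained sketch of the pattern (assumes Python 3.10+; the names below are invented for illustration and are not part of the patch):

from collections.abc import Callable

def add_headers(file_size: int | None, upload_name: str | None = None) -> None:
    """Hypothetical stub mirroring the signatures rewritten above."""

# Unions also work in aliases and as TypeVar bounds, as in filepath.py below.
PathGetter = Callable[..., str] | Callable[..., list[str]]

# PEP 604 unions are runtime objects, so isinstance() accepts them directly.
assert isinstance(None, int | None)
assert isinstance(b"x", str | bytes)

With string (forward-reference) annotations the whole union stays inside the quotes, e.g. `"Responder | None"` above, so nothing is evaluated at import time.
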
diff --git a/synapse/media/filepath.py b/synapse/media/filepath.py index 7659971661..df637f3be3 100644 --- a/synapse/media/filepath.py +++ b/synapse/media/filepath.py @@ -24,7 +24,7 @@ import functools import os import re import string -from typing import Any, Callable, TypeVar, Union, cast +from typing import Any, Callable, TypeVar, cast NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d") @@ -46,7 +46,7 @@ def _wrap_in_base_path(func: F) -> F: GetPathMethod = TypeVar( - "GetPathMethod", bound=Union[Callable[..., str], Callable[..., list[str]]] + "GetPathMethod", bound=Callable[..., str] | Callable[..., list[str]] ) @@ -73,7 +73,7 @@ def _wrap_with_jail_check(relative: bool) -> Callable[[GetPathMethod], GetPathMe @functools.wraps(func) def _wrapped( self: "MediaFilePaths", *args: Any, **kwargs: Any - ) -> Union[str, list[str]]: + ) -> str | list[str]: path_or_paths = func(self, *args, **kwargs) if isinstance(path_or_paths, list): diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index eda1410767..7b4408b2bc 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -24,7 +24,7 @@ import logging import os import shutil from io import BytesIO -from typing import IO, TYPE_CHECKING, Optional +from typing import IO, TYPE_CHECKING import attr from matrix_common.types.mxc_uri import MXCUri @@ -170,7 +170,7 @@ class MediaRepository: ) if hs.config.media.url_preview_enabled: - self.url_previewer: Optional[UrlPreviewer] = UrlPreviewer( + self.url_previewer: UrlPreviewer | None = UrlPreviewer( hs, self, self.media_storage ) else: @@ -208,7 +208,7 @@ class MediaRepository: local_media, remote_media, self.clock.time_msec() ) - def mark_recently_accessed(self, server_name: Optional[str], media_id: str) -> None: + def mark_recently_accessed(self, server_name: str | None, media_id: str) -> None: """Mark the given media as recently accessed. Args: @@ -298,11 +298,11 @@ class MediaRepository: async def create_or_update_content( self, media_type: str, - upload_name: Optional[str], + upload_name: str | None, content: IO, content_length: int, auth_user: UserID, - media_id: Optional[str] = None, + media_id: str | None = None, ) -> MXCUri: """Create or update the content of the given media ID. @@ -354,7 +354,7 @@ class MediaRepository: # This is the total size of media uploaded by the user in the last # `time_period_ms` milliseconds, or None if we haven't checked yet. - uploaded_media_size: Optional[int] = None + uploaded_media_size: int | None = None for limit in media_upload_limits: # We only need to check the amount of media uploaded by the user in @@ -422,7 +422,7 @@ class MediaRepository: async def get_cached_remote_media_info( self, origin: str, media_id: str - ) -> Optional[RemoteMedia]: + ) -> RemoteMedia | None: """ Get cached remote media info for a given origin/media ID combo. If the requested media is not found locally, it will not be requested over federation and the @@ -439,7 +439,7 @@ class MediaRepository: async def get_local_media_info( self, request: SynapseRequest, media_id: str, max_timeout_ms: int - ) -> Optional[LocalMedia]: + ) -> LocalMedia | None: """Gets the info dictionary for given local media ID. If the media has not been uploaded yet, this function will wait up to ``max_timeout_ms`` milliseconds for the media to be uploaded. 
@@ -495,7 +495,7 @@ class MediaRepository: self, request: SynapseRequest, media_id: str, - name: Optional[str], + name: str | None, max_timeout_ms: int, allow_authenticated: bool = True, federation: bool = False, @@ -555,7 +555,7 @@ class MediaRepository: request: SynapseRequest, server_name: str, media_id: str, - name: Optional[str], + name: str | None, max_timeout_ms: int, ip_address: str, use_federation_endpoint: bool, @@ -696,7 +696,7 @@ class MediaRepository: ip_address: str, use_federation_endpoint: bool, allow_authenticated: bool, - ) -> tuple[Optional[Responder], RemoteMedia]: + ) -> tuple[Responder | None, RemoteMedia]: """Looks for media in local cache, if not there then attempt to download from remote server. @@ -1065,7 +1065,7 @@ class MediaRepository: t_height: int, t_method: str, t_type: str, - ) -> Optional[BytesIO]: + ) -> BytesIO | None: m_width = thumbnailer.width m_height = thumbnailer.height @@ -1099,7 +1099,7 @@ class MediaRepository: t_method: str, t_type: str, url_cache: bool, - ) -> Optional[tuple[str, FileInfo]]: + ) -> tuple[str, FileInfo] | None: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(None, media_id, url_cache=url_cache) ) @@ -1175,7 +1175,7 @@ class MediaRepository: t_height: int, t_method: str, t_type: str, - ) -> Optional[str]: + ) -> str | None: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(server_name, file_id) ) @@ -1247,12 +1247,12 @@ class MediaRepository: @trace async def _generate_thumbnails( self, - server_name: Optional[str], + server_name: str | None, media_id: str, file_id: str, media_type: str, url_cache: bool = False, - ) -> Optional[dict]: + ) -> dict | None: """Generate and store thumbnails for an image. Args: diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index f6be9edf50..bc12212c46 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -34,9 +34,7 @@ from typing import ( AsyncIterator, BinaryIO, Callable, - Optional, Sequence, - Union, cast, ) from uuid import uuid4 @@ -79,7 +77,7 @@ class SHA256TransparentIOWriter: self._hash = hashlib.sha256() self._source = source - def write(self, buffer: Union[bytes, bytearray]) -> int: + def write(self, buffer: bytes | bytearray) -> int: """Wrapper for source.write() Args: @@ -260,7 +258,7 @@ class MediaStorage: raise e from None - async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]: + async def fetch_media(self, file_info: FileInfo) -> Responder | None: """Attempts to fetch media described by file_info from the local cache and configured storage providers. @@ -420,9 +418,9 @@ class FileResponder(Responder): def __exit__( self, - exc_type: Optional[type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, ) -> None: self.open_file.close() @@ -476,7 +474,7 @@ class MultipartFileConsumer: file_content_type: str, json_object: JsonDict, disposition: str, - content_length: Optional[int], + content_length: int | None, ) -> None: self.clock = clock self.wrapped_consumer = wrapped_consumer @@ -488,8 +486,8 @@ class MultipartFileConsumer: # The producer that registered with us, and if it's a push or pull # producer. 
- self.producer: Optional["interfaces.IProducer"] = None - self.streaming: Optional[bool] = None + self.producer: "interfaces.IProducer" | None = None + self.streaming: bool | None = None # Whether the wrapped consumer has asked us to pause. self.paused = False @@ -618,7 +616,7 @@ class MultipartFileConsumer: # repeatedly calling `resumeProducing` in a loop. run_in_background(self._resumeProducingRepeatedly) - def content_length(self) -> Optional[int]: + def content_length(self) -> int | None: """ Calculate the content length of the multipart response in bytes. @@ -671,7 +669,7 @@ class Header: self, name: bytes, value: Any, - params: Optional[list[tuple[Any, Any]]] = None, + params: list[tuple[Any, Any]] | None = None, ): self.name = name self.value = value @@ -693,7 +691,7 @@ class Header: return h.read() -def escape(value: Union[str, bytes]) -> str: +def escape(value: str | bytes) -> str: """ This function prevents header values from corrupting the request, a newline in the file name parameter makes form-data request unreadable diff --git a/synapse/media/oembed.py b/synapse/media/oembed.py index 059d8ad1cf..7e44072130 100644 --- a/synapse/media/oembed.py +++ b/synapse/media/oembed.py @@ -21,7 +21,7 @@ import html import logging import urllib.parse -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, cast import attr @@ -42,12 +42,12 @@ class OEmbedResult: # The Open Graph result (converted from the oEmbed result). open_graph_result: JsonDict # The author_name of the oEmbed result - author_name: Optional[str] + author_name: str | None # Number of milliseconds to cache the content, according to the oEmbed response. # # This will be None if no cache-age is provided in the oEmbed response (or # if the oEmbed response cannot be turned into an Open Graph response). - cache_age: Optional[int] + cache_age: int | None class OEmbedProvider: @@ -80,7 +80,7 @@ class OEmbedProvider: for pattern in oembed_endpoint.url_patterns: self._oembed_patterns[pattern] = api_endpoint - def get_oembed_url(self, url: str) -> Optional[str]: + def get_oembed_url(self, url: str) -> str | None: """ Check whether the URL should be downloaded as oEmbed content instead. @@ -105,7 +105,7 @@ class OEmbedProvider: # No match. return None - def autodiscover_from_html(self, tree: "etree._Element") -> Optional[str]: + def autodiscover_from_html(self, tree: "etree._Element") -> str | None: """ Search an HTML document for oEmbed autodiscovery information. diff --git a/synapse/media/preview_html.py b/synapse/media/preview_html.py index 6a8e479152..22ad581f82 100644 --- a/synapse/media/preview_html.py +++ b/synapse/media/preview_html.py @@ -27,7 +27,6 @@ from typing import ( Generator, Iterable, Optional, - Union, cast, ) @@ -48,7 +47,7 @@ _content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I) ARIA_ROLES_TO_IGNORE = {"directory", "menu", "menubar", "toolbar"} -def _normalise_encoding(encoding: str) -> Optional[str]: +def _normalise_encoding(encoding: str) -> str | None: """Use the Python codec's name as the normalised entry.""" try: return codecs.lookup(encoding).name @@ -56,9 +55,7 @@ def _normalise_encoding(encoding: str) -> Optional[str]: return None -def _get_html_media_encodings( - body: bytes, content_type: Optional[str] -) -> Iterable[str]: +def _get_html_media_encodings(body: bytes, content_type: str | None) -> Iterable[str]: """ Get potential encoding of the body based on the (presumably) HTML body or the content-type header. 
@@ -119,7 +116,7 @@ def _get_html_media_encodings( def decode_body( - body: bytes, uri: str, content_type: Optional[str] = None + body: bytes, uri: str, content_type: str | None = None ) -> Optional["etree._Element"]: """ This uses lxml to parse the HTML document. @@ -186,8 +183,8 @@ def _get_meta_tags( tree: "etree._Element", property: str, prefix: str, - property_mapper: Optional[Callable[[str], Optional[str]]] = None, -) -> dict[str, Optional[str]]: + property_mapper: Callable[[str], str | None] | None = None, +) -> dict[str, str | None]: """ Search for meta tags prefixed with a particular string. @@ -202,9 +199,9 @@ def _get_meta_tags( Returns: A map of tag name to value. """ - # This actually returns Dict[str, str], but the caller sets this as a variable - # which is Dict[str, Optional[str]]. - results: dict[str, Optional[str]] = {} + # This actually returns dict[str, str], but the caller sets this as a variable + # which is dict[str, str | None]. + results: dict[str, str | None] = {} # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. for tag in cast( list["etree._Element"], @@ -233,7 +230,7 @@ def _get_meta_tags( return results -def _map_twitter_to_open_graph(key: str) -> Optional[str]: +def _map_twitter_to_open_graph(key: str) -> str | None: """ Map a Twitter card property to the analogous Open Graph property. @@ -253,7 +250,7 @@ def _map_twitter_to_open_graph(key: str) -> Optional[str]: return "og" + key[7:] -def parse_html_to_open_graph(tree: "etree._Element") -> dict[str, Optional[str]]: +def parse_html_to_open_graph(tree: "etree._Element") -> dict[str, str | None]: """ Parse the HTML document into an Open Graph response. @@ -387,7 +384,7 @@ def parse_html_to_open_graph(tree: "etree._Element") -> dict[str, Optional[str]] return og -def parse_html_description(tree: "etree._Element") -> Optional[str]: +def parse_html_description(tree: "etree._Element") -> str | None: """ Calculate a text description based on an HTML document. @@ -460,7 +457,7 @@ def _iterate_over_text( # This is a stack whose items are elements to iterate over *or* strings # to be returned. - elements: list[Union[str, "etree._Element"]] = [tree] + elements: list[str | "etree._Element"] = [tree] while elements: el = elements.pop() @@ -496,7 +493,7 @@ def _iterate_over_text( def summarize_paragraphs( text_nodes: Iterable[str], min_size: int = 200, max_size: int = 500 -) -> Optional[str]: +) -> str | None: """ Try to get a summary respecting first paragraph and then word boundaries. diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py index 300952025a..a87ffa0892 100644 --- a/synapse/media/storage_provider.py +++ b/synapse/media/storage_provider.py @@ -23,7 +23,7 @@ import abc import logging import os import shutil -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Callable from synapse.config._base import Config from synapse.logging.context import defer_to_thread, run_in_background @@ -55,7 +55,7 @@ class StorageProvider(metaclass=abc.ABCMeta): """ @abc.abstractmethod - async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + async def fetch(self, path: str, file_info: FileInfo) -> Responder | None: """Attempt to fetch the file described by file_info and stream it into writer. 
@@ -124,7 +124,7 @@ class StorageProviderWrapper(StorageProvider): run_in_background(store) @trace_with_opname("StorageProviderWrapper.fetch") - async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + async def fetch(self, path: str, file_info: FileInfo) -> Responder | None: if file_info.url_cache: # Files in the URL preview cache definitely aren't stored here, # so avoid any potentially slow I/O or network access. @@ -173,7 +173,7 @@ class FileStorageProviderBackend(StorageProvider): ) @trace_with_opname("FileStorageProviderBackend.fetch") - async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + async def fetch(self, path: str, file_info: FileInfo) -> Responder | None: """See StorageProvider.fetch""" backup_fname = os.path.join(self.base_directory, path) diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index a42d39c319..fd65131c63 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -22,7 +22,7 @@ import logging from io import BytesIO from types import TracebackType -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from PIL import Image @@ -237,9 +237,9 @@ class Thumbnailer: def __exit__( self, - type: Optional[type[BaseException]], - value: Optional[BaseException], - traceback: Optional[TracebackType], + type: type[BaseException] | None, + value: BaseException | None, + traceback: TracebackType | None, ) -> None: self.close() @@ -549,8 +549,8 @@ class ThumbnailProvider: file_id: str, url_cache: bool, for_federation: bool, - media_info: Optional[LocalMedia] = None, - server_name: Optional[str] = None, + media_info: LocalMedia | None = None, + server_name: str | None = None, ) -> None: """ Respond to a request with an appropriate thumbnail from the previously generated thumbnails. @@ -713,8 +713,8 @@ class ThumbnailProvider: thumbnail_infos: list[ThumbnailInfo], file_id: str, url_cache: bool, - server_name: Optional[str], - ) -> Optional[FileInfo]: + server_name: str | None, + ) -> FileInfo | None: """ Choose an appropriate thumbnail from the previously generated thumbnails. @@ -742,11 +742,11 @@ class ThumbnailProvider: if desired_method == "crop": # Thumbnails that match equal or larger sizes of desired width/height. crop_info_list: list[ - tuple[int, int, int, bool, Optional[int], ThumbnailInfo] + tuple[int, int, int, bool, int | None, ThumbnailInfo] ] = [] # Other thumbnails. crop_info_list2: list[ - tuple[int, int, int, bool, Optional[int], ThumbnailInfo] + tuple[int, int, int, bool, int | None, ThumbnailInfo] ] = [] for info in thumbnail_infos: # Skip thumbnails generated with different methods. diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index 2a63842fb7..bbd8017b13 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -28,7 +28,7 @@ import re import shutil import sys import traceback -from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional +from typing import TYPE_CHECKING, BinaryIO, Iterable from urllib.parse import urljoin, urlparse, urlsplit from urllib.request import urlopen @@ -70,9 +70,9 @@ class DownloadResult: uri: str response_code: int media_type: str - download_name: Optional[str] + download_name: str | None expires: int - etag: Optional[str] + etag: str | None @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -87,7 +87,7 @@ class MediaInfo: media_length: int # The media filename, according to the server. This is parsed from the # returned headers, if possible. 
- download_name: Optional[str] + download_name: str | None # The time of the preview. created_ts_ms: int # Information from the media storage provider about where the file is stored @@ -101,7 +101,7 @@ class MediaInfo: # The timestamp (in milliseconds) of when this preview expires. expires: int # The ETag header of the response. - etag: Optional[str] + etag: str | None class UrlPreviewer: @@ -268,7 +268,7 @@ class UrlPreviewer: # The number of milliseconds that the response should be considered valid. expiration_ms = media_info.expires - author_name: Optional[str] = None + author_name: str | None = None if _is_media(media_info.media_type): file_id = media_info.filesystem_id @@ -705,7 +705,7 @@ class UrlPreviewer: async def _handle_oembed_response( self, url: str, media_info: MediaInfo, expiration_ms: int - ) -> tuple[JsonDict, Optional[str], int]: + ) -> tuple[JsonDict, str | None, int]: """ Parse the downloaded oEmbed info. diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index def21ac942..cf7b2f1da0 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -31,10 +31,8 @@ from typing import ( Generic, Iterable, Mapping, - Optional, Sequence, TypeVar, - Union, cast, ) @@ -156,12 +154,10 @@ class LaterGauge(Collector): name: str desc: str - labelnames: Optional[StrSequence] = attr.ib(hash=False) + labelnames: StrSequence | None = attr.ib(hash=False) _instance_id_to_hook_map: dict[ - Optional[str], # instance_id - Callable[ - [], Union[Mapping[tuple[str, ...], Union[int, float]], Union[int, float]] - ], + str | None, # instance_id + Callable[[], Mapping[tuple[str, ...], int | float] | int | float], ] = attr.ib(factory=dict, hash=False) """ Map from homeserver instance_id to a callback. Each callback should either return a @@ -200,10 +196,8 @@ class LaterGauge(Collector): def register_hook( self, *, - homeserver_instance_id: Optional[str], - hook: Callable[ - [], Union[Mapping[tuple[str, ...], Union[int, float]], Union[int, float]] - ], + homeserver_instance_id: str | None, + hook: Callable[[], Mapping[tuple[str, ...], int | float] | int | float], ) -> None: """ Register a callback/hook that will be called to generate a metric samples for @@ -420,7 +414,7 @@ class GaugeHistogramMetricFamilyWithLabels(GaugeHistogramMetricFamily): name: str, documentation: str, gsum_value: float, - buckets: Optional[Sequence[tuple[str, float]]] = None, + buckets: Sequence[tuple[str, float]] | None = None, labelnames: StrSequence = (), labelvalues: StrSequence = (), unit: str = "", @@ -471,7 +465,7 @@ class GaugeBucketCollector(Collector): *, name: str, documentation: str, - labelnames: Optional[StrSequence], + labelnames: StrSequence | None, buckets: Iterable[float], registry: CollectorRegistry = REGISTRY, ): @@ -497,7 +491,7 @@ class GaugeBucketCollector(Collector): # We initially set this to None. 
We won't report metrics until # this has been initialised after a successful data update - self._metric: Optional[GaugeHistogramMetricFamilyWithLabels] = None + self._metric: GaugeHistogramMetricFamilyWithLabels | None = None registry.register(self) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index c871598680..8ff2803455 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -34,7 +34,6 @@ from typing import ( Optional, Protocol, TypeVar, - Union, ) from prometheus_client import Metric @@ -188,7 +187,7 @@ class _BackgroundProcess: self.desc = desc self.server_name = server_name self._context = ctx - self._reported_stats: Optional[ContextResourceUsage] = None + self._reported_stats: ContextResourceUsage | None = None def update_metrics(self) -> None: """Updates the metrics with values from this process.""" @@ -224,12 +223,12 @@ R = TypeVar("R") def run_as_background_process( desc: "LiteralString", server_name: str, - func: Callable[..., Awaitable[Optional[R]]], + func: Callable[..., Awaitable[R | None]], *args: Any, bg_start_span: bool = True, test_only_tracer: Optional["opentracing.Tracer"] = None, **kwargs: Any, -) -> "defer.Deferred[Optional[R]]": +) -> "defer.Deferred[R | None]": """Run the given function in its own logcontext, with resource metrics This should be used to wrap processes which are fired off to run in the @@ -270,7 +269,7 @@ def run_as_background_process( # trace. original_active_tracing_span = active_span(tracer=test_only_tracer) - async def run() -> Optional[R]: + async def run() -> R | None: with _bg_metrics_lock: count = _background_process_counts.get(desc, 0) _background_process_counts[desc] = count + 1 @@ -425,8 +424,8 @@ class HasHomeServer(Protocol): def wrap_as_background_process( desc: "LiteralString", ) -> Callable[ - [Callable[P, Awaitable[Optional[R]]]], - Callable[P, "defer.Deferred[Optional[R]]"], + [Callable[P, Awaitable[R | None]]], + Callable[P, "defer.Deferred[R | None]"], ]: """Decorator that wraps an asynchronous function `func`, returning a synchronous decorated function. Calling the decorated version runs `func` as a background @@ -448,12 +447,12 @@ def wrap_as_background_process( """ def wrapper( - func: Callable[Concatenate[HasHomeServer, P], Awaitable[Optional[R]]], - ) -> Callable[P, "defer.Deferred[Optional[R]]"]: + func: Callable[Concatenate[HasHomeServer, P], Awaitable[R | None]], + ) -> Callable[P, "defer.Deferred[R | None]"]: @wraps(func) def wrapped_func( self: HasHomeServer, *args: P.args, **kwargs: P.kwargs - ) -> "defer.Deferred[Optional[R]]": + ) -> "defer.Deferred[R | None]": assert self.hs is not None, ( "The `hs` attribute must be set on the object where `@wrap_as_background_process` decorator is used." 
) @@ -487,7 +486,7 @@ class BackgroundProcessLoggingContext(LoggingContext): *, name: str, server_name: str, - instance_id: Optional[Union[int, str]] = None, + instance_id: int | str | None = None, ): """ @@ -503,11 +502,11 @@ class BackgroundProcessLoggingContext(LoggingContext): if instance_id is None: instance_id = id(self) super().__init__(name="%s-%s" % (name, instance_id), server_name=server_name) - self._proc: Optional[_BackgroundProcess] = _BackgroundProcess( + self._proc: _BackgroundProcess | None = _BackgroundProcess( desc=name, server_name=server_name, ctx=self ) - def start(self, rusage: "Optional[resource.struct_rusage]") -> None: + def start(self, rusage: "resource.struct_rusage | None") -> None: """Log context has started running (again).""" super().start(rusage) @@ -528,9 +527,9 @@ class BackgroundProcessLoggingContext(LoggingContext): def __exit__( self, - type: Optional[type[BaseException]], - value: Optional[BaseException], - traceback: Optional[TracebackType], + type: type[BaseException] | None, + value: BaseException | None, + traceback: TracebackType | None, ) -> None: """Log context has finished.""" diff --git a/synapse/metrics/jemalloc.py b/synapse/metrics/jemalloc.py index fb8adbe060..03cecec3ca 100644 --- a/synapse/metrics/jemalloc.py +++ b/synapse/metrics/jemalloc.py @@ -23,7 +23,7 @@ import ctypes import logging import os import re -from typing import Iterable, Literal, Optional, overload +from typing import Iterable, Literal, overload import attr from prometheus_client import REGISTRY, Metric @@ -40,17 +40,17 @@ class JemallocStats: @overload def _mallctl( - self, name: str, read: Literal[True] = True, write: Optional[int] = None + self, name: str, read: Literal[True] = True, write: int | None = None ) -> int: ... @overload def _mallctl( - self, name: str, read: Literal[False], write: Optional[int] = None + self, name: str, read: Literal[False], write: int | None = None ) -> None: ... def _mallctl( - self, name: str, read: bool = True, write: Optional[int] = None - ) -> Optional[int]: + self, name: str, read: bool = True, write: int | None = None + ) -> int | None: """Wrapper around `mallctl` for reading and writing integers to jemalloc. @@ -131,10 +131,10 @@ class JemallocStats: return self._mallctl(f"stats.{name}") -_JEMALLOC_STATS: Optional[JemallocStats] = None +_JEMALLOC_STATS: JemallocStats | None = None -def get_jemalloc_stats() -> Optional[JemallocStats]: +def get_jemalloc_stats() -> JemallocStats | None: """Returns an interface to jemalloc, if it is being used. Note that this will always return None until `setup_jemalloc_stats` has been diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 9287747cea..6a2d152e3f 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -29,9 +29,7 @@ from typing import ( Generator, Iterable, Mapping, - Optional, TypeVar, - Union, ) import attr @@ -227,11 +225,11 @@ class UserIpAndAgent: def run_as_background_process( desc: "LiteralString", - func: Callable[..., Awaitable[Optional[T]]], + func: Callable[..., Awaitable[T | None]], *args: Any, bg_start_span: bool = True, **kwargs: Any, -) -> "defer.Deferred[Optional[T]]": +) -> "defer.Deferred[T | None]": """ XXX: Deprecated: use `ModuleApi.run_as_background_process` instead. 
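A short sketch of the non-deprecated call that note points at (assuming `module_api` is the `ModuleApi` instance handed to a module; `_prune_expired` is an invented coroutine):

from synapse.module_api import ModuleApi

async def _prune_expired() -> None:
    """Hypothetical periodic clean-up run by a module."""

def schedule_cleanup(module_api: ModuleApi) -> None:
    # Fires the coroutine off in its own logcontext with resource metrics,
    # returning a Deferred; the description is used for metrics and logging.
    module_api.run_as_background_process("prune_expired", _prune_expired)
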
@@ -295,8 +293,8 @@ def run_as_background_process( def cached( *, max_entries: int = 1000, - num_args: Optional[int] = None, - uncached_args: Optional[Collection[str]] = None, + num_args: int | None = None, + uncached_args: Collection[str] | None = None, ) -> Callable[[F], CachedFunction[F]]: """Returns a decorator that applies a memoizing cache around the function. This decorator behaves similarly to functools.lru_cache. @@ -338,7 +336,7 @@ class ModuleApi: # TODO: Fix this type hint once the types for the data stores have been ironed # out. - self._store: Union[DataStore, "GenericWorkerStore"] = hs.get_datastores().main + self._store: DataStore | "GenericWorkerStore" = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() self._auth = hs.get_auth() self._auth_handler = auth_handler @@ -387,26 +385,20 @@ class ModuleApi: def register_spam_checker_callbacks( self, *, - check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None, - should_drop_federated_event: Optional[ - SHOULD_DROP_FEDERATED_EVENT_CALLBACK - ] = None, - user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None, - user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None, - federated_user_may_invite: Optional[FEDERATED_USER_MAY_INVITE_CALLBACK] = None, - user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None, - user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None, - user_may_create_room_alias: Optional[ - USER_MAY_CREATE_ROOM_ALIAS_CALLBACK - ] = None, - user_may_publish_room: Optional[USER_MAY_PUBLISH_ROOM_CALLBACK] = None, - user_may_send_state_event: Optional[USER_MAY_SEND_STATE_EVENT_CALLBACK] = None, - check_username_for_spam: Optional[CHECK_USERNAME_FOR_SPAM_CALLBACK] = None, - check_registration_for_spam: Optional[ - CHECK_REGISTRATION_FOR_SPAM_CALLBACK - ] = None, - check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None, - check_login_for_spam: Optional[CHECK_LOGIN_FOR_SPAM_CALLBACK] = None, + check_event_for_spam: CHECK_EVENT_FOR_SPAM_CALLBACK | None = None, + should_drop_federated_event: SHOULD_DROP_FEDERATED_EVENT_CALLBACK | None = None, + user_may_join_room: USER_MAY_JOIN_ROOM_CALLBACK | None = None, + user_may_invite: USER_MAY_INVITE_CALLBACK | None = None, + federated_user_may_invite: FEDERATED_USER_MAY_INVITE_CALLBACK | None = None, + user_may_send_3pid_invite: USER_MAY_SEND_3PID_INVITE_CALLBACK | None = None, + user_may_create_room: USER_MAY_CREATE_ROOM_CALLBACK | None = None, + user_may_create_room_alias: USER_MAY_CREATE_ROOM_ALIAS_CALLBACK | None = None, + user_may_publish_room: USER_MAY_PUBLISH_ROOM_CALLBACK | None = None, + user_may_send_state_event: USER_MAY_SEND_STATE_EVENT_CALLBACK | None = None, + check_username_for_spam: CHECK_USERNAME_FOR_SPAM_CALLBACK | None = None, + check_registration_for_spam: CHECK_REGISTRATION_FOR_SPAM_CALLBACK | None = None, + check_media_file_for_spam: CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK | None = None, + check_login_for_spam: CHECK_LOGIN_FOR_SPAM_CALLBACK | None = None, ) -> None: """Registers callbacks for spam checking capabilities. 
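For instance, a module might register one of these hooks roughly as follows (a sketch only; `user_may_join_room` receives the user ID, the room ID and whether the user was invited, and the restricted room ID is invented):

from typing import Literal

from synapse.api.errors import Codes
from synapse.module_api import ModuleApi

async def user_may_join_room(
    user_id: str, room_id: str, is_invited: bool
) -> Literal["NOT_SPAM"] | Codes:
    # Reject uninvited joins to a hypothetical restricted room, allow the rest.
    if not is_invited and room_id == "!restricted:example.org":
        return Codes.FORBIDDEN
    return "NOT_SPAM"

def register(module_api: ModuleApi) -> None:
    module_api.register_spam_checker_callbacks(user_may_join_room=user_may_join_room)
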
@@ -432,12 +424,12 @@ class ModuleApi: def register_account_validity_callbacks( self, *, - is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None, - on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None, - on_user_login: Optional[ON_USER_LOGIN_CALLBACK] = None, - on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None, - on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None, - on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None, + is_user_expired: IS_USER_EXPIRED_CALLBACK | None = None, + on_user_registration: ON_USER_REGISTRATION_CALLBACK | None = None, + on_user_login: ON_USER_LOGIN_CALLBACK | None = None, + on_legacy_send_mail: ON_LEGACY_SEND_MAIL_CALLBACK | None = None, + on_legacy_renew: ON_LEGACY_RENEW_CALLBACK | None = None, + on_legacy_admin_request: ON_LEGACY_ADMIN_REQUEST | None = None, ) -> None: """Registers callbacks for account validity capabilities. @@ -455,9 +447,8 @@ class ModuleApi: def register_ratelimit_callbacks( self, *, - get_ratelimit_override_for_user: Optional[ - GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK - ] = None, + get_ratelimit_override_for_user: GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK + | None = None, ) -> None: """Registers callbacks for ratelimit capabilities. Added in Synapse v1.132.0. @@ -469,16 +460,13 @@ class ModuleApi: def register_media_repository_callbacks( self, *, - get_media_config_for_user: Optional[GET_MEDIA_CONFIG_FOR_USER_CALLBACK] = None, - is_user_allowed_to_upload_media_of_size: Optional[ - IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK - ] = None, - get_media_upload_limits_for_user: Optional[ - GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK - ] = None, - on_media_upload_limit_exceeded: Optional[ - ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK - ] = None, + get_media_config_for_user: GET_MEDIA_CONFIG_FOR_USER_CALLBACK | None = None, + is_user_allowed_to_upload_media_of_size: IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK + | None = None, + get_media_upload_limits_for_user: GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK + | None = None, + on_media_upload_limit_exceeded: ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK + | None = None, ) -> None: """Registers callbacks for media repository capabilities. Added in Synapse v1.132.0. 
@@ -493,28 +481,23 @@ class ModuleApi: def register_third_party_rules_callbacks( self, *, - check_event_allowed: Optional[CHECK_EVENT_ALLOWED_CALLBACK] = None, - on_create_room: Optional[ON_CREATE_ROOM_CALLBACK] = None, - check_threepid_can_be_invited: Optional[ - CHECK_THREEPID_CAN_BE_INVITED_CALLBACK - ] = None, - check_visibility_can_be_modified: Optional[ - CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK - ] = None, - on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None, - check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None, - check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None, - on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, - on_user_deactivation_status_changed: Optional[ - ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK - ] = None, - on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None, - on_add_user_third_party_identifier: Optional[ - ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = None, - on_remove_user_third_party_identifier: Optional[ - ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = None, + check_event_allowed: CHECK_EVENT_ALLOWED_CALLBACK | None = None, + on_create_room: ON_CREATE_ROOM_CALLBACK | None = None, + check_threepid_can_be_invited: CHECK_THREEPID_CAN_BE_INVITED_CALLBACK + | None = None, + check_visibility_can_be_modified: CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK + | None = None, + on_new_event: ON_NEW_EVENT_CALLBACK | None = None, + check_can_shutdown_room: CHECK_CAN_SHUTDOWN_ROOM_CALLBACK | None = None, + check_can_deactivate_user: CHECK_CAN_DEACTIVATE_USER_CALLBACK | None = None, + on_profile_update: ON_PROFILE_UPDATE_CALLBACK | None = None, + on_user_deactivation_status_changed: ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK + | None = None, + on_threepid_bind: ON_THREEPID_BIND_CALLBACK | None = None, + on_add_user_third_party_identifier: ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + | None = None, + on_remove_user_third_party_identifier: ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + | None = None, ) -> None: """Registers callbacks for third party event rules capabilities. @@ -538,8 +521,8 @@ class ModuleApi: def register_presence_router_callbacks( self, *, - get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None, - get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None, + get_users_for_states: GET_USERS_FOR_STATES_CALLBACK | None = None, + get_interested_users: GET_INTERESTED_USERS_CALLBACK | None = None, ) -> None: """Registers callbacks for presence router capabilities. 
@@ -553,18 +536,15 @@ class ModuleApi: def register_password_auth_provider_callbacks( self, *, - check_3pid_auth: Optional[CHECK_3PID_AUTH_CALLBACK] = None, - on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None, - auth_checkers: Optional[ - dict[tuple[str, tuple[str, ...]], CHECK_AUTH_CALLBACK] - ] = None, - is_3pid_allowed: Optional[IS_3PID_ALLOWED_CALLBACK] = None, - get_username_for_registration: Optional[ - GET_USERNAME_FOR_REGISTRATION_CALLBACK - ] = None, - get_displayname_for_registration: Optional[ - GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK - ] = None, + check_3pid_auth: CHECK_3PID_AUTH_CALLBACK | None = None, + on_logged_out: ON_LOGGED_OUT_CALLBACK | None = None, + auth_checkers: dict[tuple[str, tuple[str, ...]], CHECK_AUTH_CALLBACK] + | None = None, + is_3pid_allowed: IS_3PID_ALLOWED_CALLBACK | None = None, + get_username_for_registration: GET_USERNAME_FOR_REGISTRATION_CALLBACK + | None = None, + get_displayname_for_registration: GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK + | None = None, ) -> None: """Registers callbacks for password auth provider capabilities. @@ -588,8 +568,8 @@ class ModuleApi: self, *, on_update: ON_UPDATE_CALLBACK, - default_batch_size: Optional[DEFAULT_BATCH_SIZE_CALLBACK] = None, - min_batch_size: Optional[MIN_BATCH_SIZE_CALLBACK] = None, + default_batch_size: DEFAULT_BATCH_SIZE_CALLBACK | None = None, + min_batch_size: MIN_BATCH_SIZE_CALLBACK | None = None, ) -> None: """Registers background update controller callbacks. @@ -606,7 +586,7 @@ class ModuleApi: def register_account_data_callbacks( self, *, - on_account_data_updated: Optional[ON_ACCOUNT_DATA_UPDATED_CALLBACK] = None, + on_account_data_updated: ON_ACCOUNT_DATA_UPDATED_CALLBACK | None = None, ) -> None: """Registers account data callbacks. @@ -635,9 +615,8 @@ class ModuleApi: def register_add_extra_fields_to_unsigned_client_event_callbacks( self, *, - add_field_to_unsigned_callback: Optional[ - ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK - ] = None, + add_field_to_unsigned_callback: ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK + | None = None, ) -> None: """Registers a callback that can be used to add fields to the unsigned section of events. @@ -708,7 +687,7 @@ class ModuleApi: return self._server_name @property - def worker_name(self) -> Optional[str]: + def worker_name(self) -> str | None: """The name of the worker this specific instance is running as per the "worker_name" configuration setting, or None if it's the main process. @@ -717,7 +696,7 @@ class ModuleApi: return self._hs.config.worker.worker_name @property - def worker_app(self) -> Optional[str]: + def worker_app(self) -> str | None: """The name of the worker app this specific instance is running as per the "worker_app" configuration setting, or None if it's the main process. @@ -725,7 +704,7 @@ class ModuleApi: """ return self._hs.config.worker.worker_app - async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]: + async def get_userinfo_by_id(self, user_id: str) -> UserInfo | None: """Get user info by user_id Added in Synapse v1.41.0. @@ -843,7 +822,7 @@ class ModuleApi: """ return [attr.asdict(t) for t in await self._store.user_get_threepids(user_id)] - def check_user_exists(self, user_id: str) -> "defer.Deferred[Optional[str]]": + def check_user_exists(self, user_id: str) -> "defer.Deferred[str | None]": """Check if user exists. Added in Synapse v0.25.0. 
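Combined with `register_user` from the following hunk, this supports an idempotent "ensure account" pattern; a sketch (the bot localpart is invented, and the returned Deferreds can be awaited directly):

from synapse.module_api import ModuleApi

async def ensure_helper_account(module_api: ModuleApi) -> str:
    # Resolves to the canonical user ID if the account exists, else None.
    existing = await module_api.check_user_exists("@helper-bot:example.org")
    if existing is not None:
        return existing
    # Otherwise create it; resolves to the newly registered user ID.
    return await module_api.register_user("helper-bot", displayname="Helper Bot")
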
@@ -861,8 +840,8 @@ class ModuleApi: def register( self, localpart: str, - displayname: Optional[str] = None, - emails: Optional[list[str]] = None, + displayname: str | None = None, + emails: list[str] | None = None, ) -> Generator["defer.Deferred[Any]", Any, tuple[str, str]]: """Registers a new user with given localpart and optional displayname, emails. @@ -892,8 +871,8 @@ class ModuleApi: def register_user( self, localpart: str, - displayname: Optional[str] = None, - emails: Optional[list[str]] = None, + displayname: str | None = None, + emails: list[str] | None = None, admin: bool = False, ) -> "defer.Deferred[str]": """Registers a new user with given localpart and optional displayname, emails. @@ -926,9 +905,9 @@ class ModuleApi: def register_device( self, user_id: str, - device_id: Optional[str] = None, - initial_display_name: Optional[str] = None, - ) -> "defer.Deferred[tuple[str, str, Optional[int], Optional[str]]]": + device_id: str | None = None, + initial_display_name: str | None = None, + ) -> "defer.Deferred[tuple[str, str, int | None, str | None]]": """Register a device for a user and generate an access token. Added in Synapse v1.2.0. @@ -978,8 +957,8 @@ class ModuleApi: self, user_id: str, duration_in_ms: int = (2 * 60 * 1000), - auth_provider_id: Optional[str] = None, - auth_provider_session_id: Optional[str] = None, + auth_provider_id: str | None = None, + auth_provider_session_id: str | None = None, ) -> str: """Create a login token suitable for m.login.token authentication @@ -1135,7 +1114,7 @@ class ModuleApi: @defer.inlineCallbacks def get_state_events_in_room( - self, room_id: str, types: Iterable[tuple[str, Optional[str]]] + self, room_id: str, types: Iterable[tuple[str, str | None]] ) -> Generator[defer.Deferred, Any, Iterable[EventBase]]: """Gets current state events for the given room. @@ -1166,8 +1145,8 @@ class ModuleApi: target: str, room_id: str, new_membership: str, - content: Optional[JsonDict] = None, - remote_room_hosts: Optional[list[str]] = None, + content: JsonDict | None = None, + remote_room_hosts: list[str] | None = None, ) -> EventBase: """Updates the membership of a user to the given value. @@ -1343,7 +1322,7 @@ class ModuleApi: ) async def set_presence_for_users( - self, users: Mapping[str, tuple[str, Optional[str]]] + self, users: Mapping[str, tuple[str, str | None]] ) -> None: """ Update the internal presence state of users. @@ -1378,7 +1357,7 @@ class ModuleApi: f: Callable, msec: float, *args: object, - desc: Optional[str] = None, + desc: str | None = None, run_on_all_instances: bool = False, **kwargs: object, ) -> None: @@ -1437,7 +1416,7 @@ class ModuleApi: msec: float, f: Callable, *args: object, - desc: Optional[str] = None, + desc: str | None = None, **kwargs: object, ) -> IDelayedCall: """Wraps a function as a background process and calls it in a given number of milliseconds. @@ -1483,10 +1462,10 @@ class ModuleApi: async def send_http_push_notification( self, user_id: str, - device_id: Optional[str], + device_id: str | None, content: JsonDict, - tweaks: Optional[JsonMapping] = None, - default_payload: Optional[JsonMapping] = None, + tweaks: JsonMapping | None = None, + default_payload: JsonMapping | None = None, ) -> dict[str, bool]: """Send an HTTP push notification that is forwarded to the registered push gateway for the specified user/device. 
@@ -1552,7 +1531,7 @@ class ModuleApi: def read_templates( self, filenames: list[str], - custom_template_directory: Optional[str] = None, + custom_template_directory: str | None = None, ) -> list[jinja2.Template]: """Read and load the content of the template files at the given location. By default, Synapse will look for these templates in its configured template @@ -1573,7 +1552,7 @@ class ModuleApi: (td for td in (self.custom_template_dir, custom_template_directory) if td), ) - def is_mine(self, id: Union[str, DomainSpecificString]) -> bool: + def is_mine(self, id: str | DomainSpecificString) -> bool: """ Checks whether an ID (user id, room, ...) comes from this homeserver. @@ -1635,7 +1614,7 @@ class ModuleApi: async def get_room_state( self, room_id: str, - event_filter: Optional[Iterable[tuple[str, Optional[str]]]] = None, + event_filter: Iterable[tuple[str, str | None]] | None = None, ) -> StateMap[EventBase]: """Returns the current state of the given room. @@ -1677,11 +1656,11 @@ class ModuleApi: def run_as_background_process( self, desc: "LiteralString", - func: Callable[..., Awaitable[Optional[T]]], + func: Callable[..., Awaitable[T | None]], *args: Any, bg_start_span: bool = True, **kwargs: Any, - ) -> "defer.Deferred[Optional[T]]": + ) -> "defer.Deferred[T | None]": """Run the given function in its own logcontext, with resource metrics This should be used to wrap processes which are fired off to run in the @@ -1799,9 +1778,7 @@ class ModuleApi: """ await self._store.add_user_bound_threepid(user_id, medium, address, id_server) - def check_push_rule_actions( - self, actions: list[Union[str, dict[str, str]]] - ) -> None: + def check_push_rule_actions(self, actions: list[str | dict[str, str]]) -> None: """Checks if the given push rule actions are valid according to the Matrix specification. @@ -1824,7 +1801,7 @@ class ModuleApi: scope: str, kind: str, rule_id: str, - actions: list[Union[str, dict[str, str]]], + actions: list[str | dict[str, str]], ) -> None: """Changes the actions of an existing push rule for the given user. @@ -1862,7 +1839,7 @@ class ModuleApi: ) async def get_monthly_active_users_by_service( - self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None + self, start_timestamp: int | None = None, end_timestamp: int | None = None ) -> list[tuple[str, str]]: """Generates list of monthly active users and their services. Please see corresponding storage docstring for more details. @@ -1883,7 +1860,7 @@ class ModuleApi: start_timestamp, end_timestamp ) - async def get_canonical_room_alias(self, room_id: RoomID) -> Optional[RoomAlias]: + async def get_canonical_room_alias(self, room_id: RoomID) -> RoomAlias | None: """ Retrieve the given room's current canonical alias. @@ -1938,8 +1915,8 @@ class ModuleApi: user_id: str, config: JsonDict, ratelimit: bool = True, - creator_join_profile: Optional[JsonDict] = None, - ) -> tuple[str, Optional[str]]: + creator_join_profile: JsonDict | None = None, + ) -> tuple[str, str | None]: """Creates a new room. Added in Synapse v1.65.0. @@ -2109,7 +2086,7 @@ class AccountDataManager: f"{user_id} is not local to this homeserver; can't access account data for remote users." ) - async def get_global(self, user_id: str, data_type: str) -> Optional[JsonMapping]: + async def get_global(self, user_id: str, data_type: str) -> JsonMapping | None: """ Gets some global account data, of a specified type, for the specified user. 
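A usage sketch (assuming the manager is reached via `ModuleApi.account_data_manager`, which is not shown in this hunk; only local users are accepted, per the check above):

from synapse.module_api import ModuleApi
from synapse.types import JsonMapping

async def get_dm_map(module_api: ModuleApi, user_id: str) -> JsonMapping | None:
    # Returns the user's "m.direct" account data, or None if they have none set.
    return await module_api.account_data_manager.get_global(user_id, "m.direct")
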
diff --git a/synapse/module_api/callbacks/account_validity_callbacks.py b/synapse/module_api/callbacks/account_validity_callbacks.py index da01414d9a..892e9c8ecb 100644 --- a/synapse/module_api/callbacks/account_validity_callbacks.py +++ b/synapse/module_api/callbacks/account_validity_callbacks.py @@ -20,16 +20,16 @@ # import logging -from typing import Awaitable, Callable, Optional +from typing import Awaitable, Callable from twisted.web.http import Request logger = logging.getLogger(__name__) # Types for callbacks to be registered via the module api -IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]] +IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[bool | None]] ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable] -ON_USER_LOGIN_CALLBACK = Callable[[str, Optional[str], Optional[str]], Awaitable] +ON_USER_LOGIN_CALLBACK = Callable[[str, str | None, str | None], Awaitable] # Temporary hooks to allow for a transition from `/_matrix/client` endpoints # to `/_synapse/client/account_validity`. See `register_callbacks` below. ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable] @@ -42,21 +42,21 @@ class AccountValidityModuleApiCallbacks: self.is_user_expired_callbacks: list[IS_USER_EXPIRED_CALLBACK] = [] self.on_user_registration_callbacks: list[ON_USER_REGISTRATION_CALLBACK] = [] self.on_user_login_callbacks: list[ON_USER_LOGIN_CALLBACK] = [] - self.on_legacy_send_mail_callback: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None - self.on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None + self.on_legacy_send_mail_callback: ON_LEGACY_SEND_MAIL_CALLBACK | None = None + self.on_legacy_renew_callback: ON_LEGACY_RENEW_CALLBACK | None = None # The legacy admin requests callback isn't a protected attribute because we need # to access it from the admin servlet, which is outside of this handler. 
- self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None + self.on_legacy_admin_request_callback: ON_LEGACY_ADMIN_REQUEST | None = None def register_callbacks( self, - is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None, - on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None, - on_user_login: Optional[ON_USER_LOGIN_CALLBACK] = None, - on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None, - on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None, - on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None, + is_user_expired: IS_USER_EXPIRED_CALLBACK | None = None, + on_user_registration: ON_USER_REGISTRATION_CALLBACK | None = None, + on_user_login: ON_USER_LOGIN_CALLBACK | None = None, + on_legacy_send_mail: ON_LEGACY_SEND_MAIL_CALLBACK | None = None, + on_legacy_renew: ON_LEGACY_RENEW_CALLBACK | None = None, + on_legacy_admin_request: ON_LEGACY_ADMIN_REQUEST | None = None, ) -> None: """Register callbacks from module for each hook.""" if is_user_expired is not None: diff --git a/synapse/module_api/callbacks/media_repository_callbacks.py b/synapse/module_api/callbacks/media_repository_callbacks.py index 7cb56e558b..f1e6ea4c38 100644 --- a/synapse/module_api/callbacks/media_repository_callbacks.py +++ b/synapse/module_api/callbacks/media_repository_callbacks.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Optional +from typing import TYPE_CHECKING, Awaitable, Callable from synapse.config.repository import MediaUploadLimit from synapse.types import JsonDict @@ -25,12 +25,12 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -GET_MEDIA_CONFIG_FOR_USER_CALLBACK = Callable[[str], Awaitable[Optional[JsonDict]]] +GET_MEDIA_CONFIG_FOR_USER_CALLBACK = Callable[[str], Awaitable[JsonDict | None]] IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK = Callable[[str, int], Awaitable[bool]] GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK = Callable[ - [str], Awaitable[Optional[list[MediaUploadLimit]]] + [str], Awaitable[list[MediaUploadLimit] | None] ] ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK = Callable[ @@ -57,16 +57,13 @@ class MediaRepositoryModuleApiCallbacks: def register_callbacks( self, - get_media_config_for_user: Optional[GET_MEDIA_CONFIG_FOR_USER_CALLBACK] = None, - is_user_allowed_to_upload_media_of_size: Optional[ - IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK - ] = None, - get_media_upload_limits_for_user: Optional[ - GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK - ] = None, - on_media_upload_limit_exceeded: Optional[ - ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK - ] = None, + get_media_config_for_user: GET_MEDIA_CONFIG_FOR_USER_CALLBACK | None = None, + is_user_allowed_to_upload_media_of_size: IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK + | None = None, + get_media_upload_limits_for_user: GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK + | None = None, + on_media_upload_limit_exceeded: ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK + | None = None, ) -> None: """Register callbacks from module for each hook.""" if get_media_config_for_user is not None: @@ -87,14 +84,14 @@ class MediaRepositoryModuleApiCallbacks: on_media_upload_limit_exceeded ) - async def get_media_config_for_user(self, user_id: str) -> Optional[JsonDict]: + async def get_media_config_for_user(self, user_id: str) -> JsonDict | None: for callback in self._get_media_config_for_user_callbacks: with Measure( self.clock, name=f"{callback.__module__}.{callback.__qualname__}", server_name=self.server_name, ): - res: 
Optional[JsonDict] = await delay_cancellation(callback(user_id)) + res: JsonDict | None = await delay_cancellation(callback(user_id)) if res: return res @@ -117,7 +114,7 @@ class MediaRepositoryModuleApiCallbacks: async def get_media_upload_limits_for_user( self, user_id: str - ) -> Optional[list[MediaUploadLimit]]: + ) -> list[MediaUploadLimit] | None: """ Get the first non-None list of MediaUploadLimits for the user from the registered callbacks. If a list is returned it will be sorted in descending order of duration. @@ -128,7 +125,7 @@ class MediaRepositoryModuleApiCallbacks: name=f"{callback.__module__}.{callback.__qualname__}", server_name=self.server_name, ): - res: Optional[list[MediaUploadLimit]] = await delay_cancellation( + res: list[MediaUploadLimit] | None = await delay_cancellation( callback(user_id) ) if res is not None: # to allow [] to be returned meaning no limit diff --git a/synapse/module_api/callbacks/ratelimit_callbacks.py b/synapse/module_api/callbacks/ratelimit_callbacks.py index 6afcda1216..0f4080dcd6 100644 --- a/synapse/module_api/callbacks/ratelimit_callbacks.py +++ b/synapse/module_api/callbacks/ratelimit_callbacks.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Optional +from typing import TYPE_CHECKING, Awaitable, Callable import attr @@ -37,7 +37,7 @@ class RatelimitOverride: GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK = Callable[ - [str, str], Awaitable[Optional[RatelimitOverride]] + [str, str], Awaitable[RatelimitOverride | None] ] @@ -51,9 +51,8 @@ class RatelimitModuleApiCallbacks: def register_callbacks( self, - get_ratelimit_override_for_user: Optional[ - GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK - ] = None, + get_ratelimit_override_for_user: GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK + | None = None, ) -> None: """Register callbacks from module for each hook.""" if get_ratelimit_override_for_user is not None: @@ -63,14 +62,14 @@ class RatelimitModuleApiCallbacks: async def get_ratelimit_override_for_user( self, user_id: str, limiter_name: str - ) -> Optional[RatelimitOverride]: + ) -> RatelimitOverride | None: for callback in self._get_ratelimit_override_for_user_callbacks: with Measure( self.clock, name=f"{callback.__module__}.{callback.__qualname__}", server_name=self.server_name, ): - res: Optional[RatelimitOverride] = await delay_cancellation( + res: RatelimitOverride | None = await delay_cancellation( callback(user_id, limiter_name) ) if res: diff --git a/synapse/module_api/callbacks/spamchecker_callbacks.py b/synapse/module_api/callbacks/spamchecker_callbacks.py index 4c331c4210..8b34f7ef6c 100644 --- a/synapse/module_api/callbacks/spamchecker_callbacks.py +++ b/synapse/module_api/callbacks/spamchecker_callbacks.py @@ -30,8 +30,6 @@ from typing import ( Callable, Collection, Literal, - Optional, - Union, cast, ) @@ -53,210 +51,96 @@ logger = logging.getLogger(__name__) CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[ ["synapse.events.EventBase"], - Awaitable[ - Union[ - str, - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. 
- tuple[Codes, JsonDict], - # Deprecated - bool, - ] - ], + Awaitable[str | Codes | tuple[Codes, JsonDict] | bool], ] SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[ ["synapse.events.EventBase"], - Awaitable[Union[bool, str]], + Awaitable[bool | str], ] USER_MAY_JOIN_ROOM_CALLBACK = Callable[ [str, str, bool], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. - tuple[Codes, JsonDict], - # Deprecated - bool, - ] - ], + Awaitable[Literal["NOT_SPAM"] | Codes | tuple[Codes, JsonDict] | bool], ] USER_MAY_INVITE_CALLBACK = Callable[ [str, str, str], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. - tuple[Codes, JsonDict], - # Deprecated - bool, - ] - ], + Awaitable[Literal["NOT_SPAM"] | Codes | tuple[Codes, JsonDict] | bool], ] FEDERATED_USER_MAY_INVITE_CALLBACK = Callable[ ["synapse.events.EventBase"], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. - tuple[Codes, JsonDict], - # Deprecated - bool, - ] - ], + Awaitable[Literal["NOT_SPAM"] | Codes | tuple[Codes, JsonDict] | bool], ] USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[ [str, str, str, str], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. - tuple[Codes, JsonDict], - # Deprecated - bool, - ] - ], + Awaitable[Literal["NOT_SPAM"] | Codes | tuple[Codes, JsonDict] | bool], ] -USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE = Union[ - Literal["NOT_SPAM"], - Codes, +USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE = ( + Literal["NOT_SPAM"] + | Codes + | # Highly experimental, not officially part of the spamchecker API, may # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - tuple[Codes, JsonDict], + tuple[Codes, JsonDict] + | # Deprecated - bool, -] -USER_MAY_CREATE_ROOM_CALLBACK = Union[ + bool +) +USER_MAY_CREATE_ROOM_CALLBACK = ( Callable[ [str, JsonDict], Awaitable[USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE], - ], - Callable[ # Single argument variant for backwards compatibility + ] + | Callable[ # Single argument variant for backwards compatibility [str], Awaitable[USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE] - ], -] + ] +) USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[ [str, RoomAlias], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. 
- tuple[Codes, JsonDict], - # Deprecated - bool, - ] - ], + Awaitable[Literal["NOT_SPAM"] | Codes | tuple[Codes, JsonDict] | bool], ] USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[ [str, str], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. - tuple[Codes, JsonDict], - # Deprecated - bool, - ] - ], + Awaitable[Literal["NOT_SPAM"] | Codes | tuple[Codes, JsonDict] | bool], ] USER_MAY_SEND_STATE_EVENT_CALLBACK = Callable[ [str, str, str, str, JsonDict], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. - tuple[Codes, JsonDict], - ] - ], -] -CHECK_USERNAME_FOR_SPAM_CALLBACK = Union[ - Callable[[UserProfile], Awaitable[bool]], - Callable[[UserProfile, str], Awaitable[bool]], + Awaitable[Literal["NOT_SPAM"] | Codes | tuple[Codes, JsonDict]], ] +CHECK_USERNAME_FOR_SPAM_CALLBACK = ( + Callable[[UserProfile], Awaitable[bool]] + | Callable[[UserProfile, str], Awaitable[bool]] +) LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ - Optional[dict], - Optional[str], + dict | None, + str | None, Collection[tuple[str, str]], ], Awaitable[RegistrationBehaviour], ] CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ - Optional[dict], - Optional[str], + dict | None, + str | None, Collection[tuple[str, str]], - Optional[str], + str | None, ], Awaitable[RegistrationBehaviour], ] CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[ [ReadableFileWrapper, FileInfo], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. - tuple[Codes, JsonDict], - # Deprecated - bool, - ] - ], + Awaitable[Literal["NOT_SPAM"] | Codes | tuple[Codes, JsonDict] | bool], ] CHECK_LOGIN_FOR_SPAM_CALLBACK = Callable[ [ str, - Optional[str], - Optional[str], - Collection[tuple[Optional[str], str]], - Optional[str], - ], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. - tuple[Codes, JsonDict], - ] + str | None, + str | None, + Collection[tuple[str | None, str]], + str | None, ], + Awaitable[Literal["NOT_SPAM"] | Codes | tuple[Codes, JsonDict]], ] @@ -292,7 +176,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: for spam_checker in spam_checkers: # Methods on legacy spam checkers might not be async, so we wrap them around a # wrapper that will call maybe_awaitable on the result. - def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]: + def async_wrapper(f: Callable | None) -> Callable[..., Awaitable] | None: # f might be None if the callback isn't implemented by the module. In this # case we don't want to register a callback at all so we return None. 
if f is None: @@ -308,11 +192,11 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: # that gives it only 3 arguments and drops the auth_provider_id on # the floor. def wrapper( - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[tuple[str, str]], - auth_provider_id: Optional[str], - ) -> Union[Awaitable[RegistrationBehaviour], RegistrationBehaviour]: + auth_provider_id: str | None, + ) -> Awaitable[RegistrationBehaviour] | RegistrationBehaviour: # Assertion required because mypy can't prove we won't # change `f` back to `None`. See # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions @@ -390,26 +274,20 @@ class SpamCheckerModuleApiCallbacks: def register_callbacks( self, - check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None, - should_drop_federated_event: Optional[ - SHOULD_DROP_FEDERATED_EVENT_CALLBACK - ] = None, - user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None, - user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None, - federated_user_may_invite: Optional[FEDERATED_USER_MAY_INVITE_CALLBACK] = None, - user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None, - user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None, - user_may_create_room_alias: Optional[ - USER_MAY_CREATE_ROOM_ALIAS_CALLBACK - ] = None, - user_may_publish_room: Optional[USER_MAY_PUBLISH_ROOM_CALLBACK] = None, - check_username_for_spam: Optional[CHECK_USERNAME_FOR_SPAM_CALLBACK] = None, - check_registration_for_spam: Optional[ - CHECK_REGISTRATION_FOR_SPAM_CALLBACK - ] = None, - check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None, - check_login_for_spam: Optional[CHECK_LOGIN_FOR_SPAM_CALLBACK] = None, - user_may_send_state_event: Optional[USER_MAY_SEND_STATE_EVENT_CALLBACK] = None, + check_event_for_spam: CHECK_EVENT_FOR_SPAM_CALLBACK | None = None, + should_drop_federated_event: SHOULD_DROP_FEDERATED_EVENT_CALLBACK | None = None, + user_may_join_room: USER_MAY_JOIN_ROOM_CALLBACK | None = None, + user_may_invite: USER_MAY_INVITE_CALLBACK | None = None, + federated_user_may_invite: FEDERATED_USER_MAY_INVITE_CALLBACK | None = None, + user_may_send_3pid_invite: USER_MAY_SEND_3PID_INVITE_CALLBACK | None = None, + user_may_create_room: USER_MAY_CREATE_ROOM_CALLBACK | None = None, + user_may_create_room_alias: USER_MAY_CREATE_ROOM_ALIAS_CALLBACK | None = None, + user_may_publish_room: USER_MAY_PUBLISH_ROOM_CALLBACK | None = None, + check_username_for_spam: CHECK_USERNAME_FOR_SPAM_CALLBACK | None = None, + check_registration_for_spam: CHECK_REGISTRATION_FOR_SPAM_CALLBACK | None = None, + check_media_file_for_spam: CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK | None = None, + check_login_for_spam: CHECK_LOGIN_FOR_SPAM_CALLBACK | None = None, + user_may_send_state_event: USER_MAY_SEND_STATE_EVENT_CALLBACK | None = None, ) -> None: """Register callbacks from module for each hook.""" if check_event_for_spam is not None: @@ -469,7 +347,7 @@ class SpamCheckerModuleApiCallbacks: @trace async def check_event_for_spam( self, event: "synapse.events.EventBase" - ) -> Union[tuple[Codes, JsonDict], str]: + ) -> tuple[Codes, JsonDict] | str: """Checks if a given event is considered "spammy" by this server. 
If the server considers an event spammy, then it will be rejected if @@ -532,7 +410,7 @@ class SpamCheckerModuleApiCallbacks: async def should_drop_federated_event( self, event: "synapse.events.EventBase" - ) -> Union[bool, str]: + ) -> bool | str: """Checks if a given federated event is considered "spammy" by this server. @@ -551,7 +429,7 @@ class SpamCheckerModuleApiCallbacks: name=f"{callback.__module__}.{callback.__qualname__}", server_name=self.server_name, ): - res: Union[bool, str] = await delay_cancellation(callback(event)) + res: bool | str = await delay_cancellation(callback(event)) if res: return res @@ -559,7 +437,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_join_room( self, user_id: str, room_id: str, is_invited: bool - ) -> Union[tuple[Codes, JsonDict], Literal["NOT_SPAM"]]: + ) -> tuple[Codes, JsonDict] | Literal["NOT_SPAM"]: """Checks if a given users is allowed to join a room. Not called when a user creates a room. @@ -603,7 +481,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_invite( self, inviter_userid: str, invitee_userid: str, room_id: str - ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> tuple[Codes, dict] | Literal["NOT_SPAM"]: """Checks if a given user may send an invite Args: @@ -648,7 +526,7 @@ class SpamCheckerModuleApiCallbacks: async def federated_user_may_invite( self, event: "synapse.events.EventBase" - ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> tuple[Codes, dict] | Literal["NOT_SPAM"]: """Checks if a given user may send an invite Args: @@ -689,7 +567,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_send_3pid_invite( self, inviter_userid: str, medium: str, address: str, room_id: str - ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> tuple[Codes, dict] | Literal["NOT_SPAM"]: """Checks if a given user may invite a given threepid into the room Note that if the threepid is already associated with a Matrix user ID, Synapse @@ -737,7 +615,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_create_room( self, userid: str, room_config: JsonDict - ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> tuple[Codes, dict] | Literal["NOT_SPAM"]: """Checks if a given user may create a room Args: @@ -803,7 +681,7 @@ class SpamCheckerModuleApiCallbacks: event_type: str, state_key: str, content: JsonDict, - ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> tuple[Codes, dict] | Literal["NOT_SPAM"]: """Checks if a given user may create a room with a given visibility Args: user_id: The ID of the user attempting to create a room @@ -836,7 +714,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_create_room_alias( self, userid: str, room_alias: RoomAlias - ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> tuple[Codes, dict] | Literal["NOT_SPAM"]: """Checks if a given user may create a room alias Args: @@ -874,7 +752,7 @@ class SpamCheckerModuleApiCallbacks: async def user_may_publish_room( self, userid: str, room_id: str - ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> tuple[Codes, dict] | Literal["NOT_SPAM"]: """Checks if a given user may publish a room to the directory Args: @@ -960,10 +838,10 @@ class SpamCheckerModuleApiCallbacks: async def check_registration_for_spam( self, - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[tuple[str, str]], - auth_provider_id: Optional[str] = None, + auth_provider_id: str | None = None, ) -> RegistrationBehaviour: 
"""Checks if we should allow the given registration request. @@ -998,7 +876,7 @@ class SpamCheckerModuleApiCallbacks: @trace async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo - ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: + ) -> tuple[Codes, dict] | Literal["NOT_SPAM"]: """Checks if a piece of newly uploaded media should be blocked. This will be called for local uploads, downloads of remote media, each @@ -1011,7 +889,7 @@ class SpamCheckerModuleApiCallbacks: async def check_media_file_for_spam( self, file: ReadableFileWrapper, file_info: FileInfo - ) -> Union[Codes, Literal["NOT_SPAM"]]: + ) -> Codes | Literal["NOT_SPAM"]: buffer = BytesIO() await file.write_chunks_to(buffer.write) @@ -1058,11 +936,11 @@ class SpamCheckerModuleApiCallbacks: async def check_login_for_spam( self, user_id: str, - device_id: Optional[str], - initial_display_name: Optional[str], - request_info: Collection[tuple[Optional[str], str]], - auth_provider_id: Optional[str] = None, - ) -> Union[tuple[Codes, dict], Literal["NOT_SPAM"]]: + device_id: str | None, + initial_display_name: str | None, + request_info: Collection[tuple[str | None, str]], + auth_provider_id: str | None = None, + ) -> tuple[Codes, dict] | Literal["NOT_SPAM"]: """Checks if we should allow the given registration request. Args: diff --git a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py index 2b886cbabb..65f5a6b183 100644 --- a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py +++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional +from typing import TYPE_CHECKING, Any, Awaitable, Callable from twisted.internet.defer import CancelledError @@ -37,7 +37,7 @@ logger = logging.getLogger(__name__) CHECK_EVENT_ALLOWED_CALLBACK = Callable[ - [EventBase, StateMap[EventBase]], Awaitable[tuple[bool, Optional[dict]]] + [EventBase, StateMap[EventBase]], Awaitable[tuple[bool, dict | None]] ] ON_CREATE_ROOM_CALLBACK = Callable[[Requester, dict, bool], Awaitable] CHECK_THREEPID_CAN_BE_INVITED_CALLBACK = Callable[ @@ -47,7 +47,7 @@ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[ [str, StateMap[EventBase], str], Awaitable[bool] ] ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable] -CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[Optional[str], str], Awaitable[bool]] +CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str | None, str], Awaitable[bool]] CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]] ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable] ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable] @@ -77,7 +77,7 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: "check_visibility_can_be_modified", } - def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]: + def async_wrapper(f: Callable | None) -> Callable[..., Awaitable] | None: # f might be None if the callback isn't implemented by the module. In this # case we don't want to register a callback at all so we return None. 
if f is None: @@ -93,7 +93,7 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: async def wrap_check_event_allowed( event: EventBase, state_events: StateMap[EventBase], - ) -> tuple[bool, Optional[dict]]: + ) -> tuple[bool, dict | None]: # Assertion required because mypy can't prove we won't change # `f` back to `None`. See # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions @@ -188,28 +188,23 @@ class ThirdPartyEventRulesModuleApiCallbacks: def register_third_party_rules_callbacks( self, - check_event_allowed: Optional[CHECK_EVENT_ALLOWED_CALLBACK] = None, - on_create_room: Optional[ON_CREATE_ROOM_CALLBACK] = None, - check_threepid_can_be_invited: Optional[ - CHECK_THREEPID_CAN_BE_INVITED_CALLBACK - ] = None, - check_visibility_can_be_modified: Optional[ - CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK - ] = None, - on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None, - check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None, - check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None, - on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, - on_user_deactivation_status_changed: Optional[ - ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK - ] = None, - on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None, - on_add_user_third_party_identifier: Optional[ - ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = None, - on_remove_user_third_party_identifier: Optional[ - ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = None, + check_event_allowed: CHECK_EVENT_ALLOWED_CALLBACK | None = None, + on_create_room: ON_CREATE_ROOM_CALLBACK | None = None, + check_threepid_can_be_invited: CHECK_THREEPID_CAN_BE_INVITED_CALLBACK + | None = None, + check_visibility_can_be_modified: CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK + | None = None, + on_new_event: ON_NEW_EVENT_CALLBACK | None = None, + check_can_shutdown_room: CHECK_CAN_SHUTDOWN_ROOM_CALLBACK | None = None, + check_can_deactivate_user: CHECK_CAN_DEACTIVATE_USER_CALLBACK | None = None, + on_profile_update: ON_PROFILE_UPDATE_CALLBACK | None = None, + on_user_deactivation_status_changed: ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK + | None = None, + on_threepid_bind: ON_THREEPID_BIND_CALLBACK | None = None, + on_add_user_third_party_identifier: ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + | None = None, + on_remove_user_third_party_identifier: ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + | None = None, ) -> None: """Register callbacks from modules for each hook.""" if check_event_allowed is not None: @@ -261,7 +256,7 @@ class ThirdPartyEventRulesModuleApiCallbacks: self, event: EventBase, context: UnpersistedEventContextBase, - ) -> tuple[bool, Optional[dict]]: + ) -> tuple[bool, dict | None]: """Check if a provided event should be allowed in the given context. The module can return: @@ -443,9 +438,7 @@ class ThirdPartyEventRulesModuleApiCallbacks: "Failed to run module API callback %s: %s", callback, e ) - async def check_can_shutdown_room( - self, user_id: Optional[str], room_id: str - ) -> bool: + async def check_can_shutdown_room(self, user_id: str | None, room_id: str) -> bool: """Intercept requests to shutdown a room. If `False` is returned, the room must not be shut down. 
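The hunks in this part of the series all apply the same mechanical rewrite: the PEP 604 union syntax available from Python 3.10, in which `X | None` replaces `typing.Optional[X]` and `A | B` replaces `typing.Union[A, B]`, including inside callback type aliases and keyword-argument defaults. A minimal before/after sketch of the pattern (a standalone illustration with made-up names, not code from the Synapse tree):

    from typing import Awaitable, Callable, Optional, Union

    # Before: the typing.Optional / typing.Union spelling being removed.
    LegacyCallback = Callable[[str], Awaitable[Union[bool, str]]]

    def lookup_legacy(user_id: str, default: Optional[str] = None) -> Optional[str]:
        return default

    # After: PEP 604 unions (Python 3.10+), matching the style used in these hunks.
    ModernCallback = Callable[[str], Awaitable[bool | str]]

    def lookup_modern(user_id: str, default: str | None = None) -> str | None:
        return default

Both spellings are equivalent to a type checker; the `|` form simply drops the extra `typing` imports.
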
diff --git a/synapse/notifier.py b/synapse/notifier.py index 4a75d07e37..260a2c0d87 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -28,9 +28,7 @@ from typing import ( Iterable, Literal, Mapping, - Optional, TypeVar, - Union, overload, ) @@ -211,7 +209,7 @@ class _NotifierUserStream: @attr.s(slots=True, frozen=True, auto_attribs=True) class EventStreamResult: - events: list[Union[JsonDict, EventBase]] + events: list[JsonDict | EventBase] start_token: StreamToken end_token: StreamToken @@ -226,8 +224,8 @@ class _PendingRoomEventEntry: room_id: str type: str - state_key: Optional[str] - membership: Optional[str] + state_key: str | None + membership: str | None class Notifier: @@ -336,7 +334,7 @@ class Notifier: self, events_and_pos: list[tuple[EventBase, PersistedEventPosition]], max_room_stream_token: RoomStreamToken, - extra_users: Optional[Collection[UserID]] = None, + extra_users: Collection[UserID] | None = None, ) -> None: """Creates a _PendingRoomEventEntry for each of the listed events and calls notify_new_room_events with the results.""" @@ -421,11 +419,11 @@ class Notifier: def create_pending_room_event_entry( self, event_pos: PersistedEventPosition, - extra_users: Optional[Collection[UserID]], + extra_users: Collection[UserID] | None, room_id: str, event_type: str, - state_key: Optional[str], - membership: Optional[str], + state_key: str | None, + membership: str | None, ) -> _PendingRoomEventEntry: """Creates and returns a _PendingRoomEventEntry""" return _PendingRoomEventEntry( @@ -504,8 +502,8 @@ class Notifier: self, stream_key: Literal[StreamKeyType.ROOM], new_token: RoomStreamToken, - users: Optional[Collection[Union[str, UserID]]] = None, - rooms: Optional[StrCollection] = None, + users: Collection[str | UserID] | None = None, + rooms: StrCollection | None = None, ) -> None: ... @overload @@ -513,8 +511,8 @@ class Notifier: self, stream_key: Literal[StreamKeyType.RECEIPT], new_token: MultiWriterStreamToken, - users: Optional[Collection[Union[str, UserID]]] = None, - rooms: Optional[StrCollection] = None, + users: Collection[str | UserID] | None = None, + rooms: StrCollection | None = None, ) -> None: ... @overload @@ -531,16 +529,16 @@ class Notifier: StreamKeyType.THREAD_SUBSCRIPTIONS, ], new_token: int, - users: Optional[Collection[Union[str, UserID]]] = None, - rooms: Optional[StrCollection] = None, + users: Collection[str | UserID] | None = None, + rooms: StrCollection | None = None, ) -> None: ... def on_new_event( self, stream_key: StreamKeyType, - new_token: Union[int, RoomStreamToken, MultiWriterStreamToken], - users: Optional[Collection[Union[str, UserID]]] = None, - rooms: Optional[StrCollection] = None, + new_token: int | RoomStreamToken | MultiWriterStreamToken, + users: Collection[str | UserID] | None = None, + rooms: StrCollection | None = None, ) -> None: """Used to inform listeners that something has happened event wise. @@ -636,7 +634,7 @@ class Notifier: user_id: str, timeout: int, callback: Callable[[StreamToken, StreamToken], Awaitable[T]], - room_ids: Optional[StrCollection] = None, + room_ids: StrCollection | None = None, from_token: StreamToken = StreamToken.START, ) -> T: """Wait until the callback returns a non empty response or the @@ -737,7 +735,7 @@ class Notifier: pagination_config: PaginationConfig, timeout: int, is_guest: bool = False, - explicit_room_id: Optional[str] = None, + explicit_room_id: str | None = None, ) -> EventStreamResult: """For the given user and rooms, return any new events for them. 
If there are no new events wait for up to `timeout` milliseconds for any @@ -767,7 +765,7 @@ class Notifier: # The events fetched from each source are a JsonDict, EventBase, or # UserPresenceState, but see below for UserPresenceState being # converted to JsonDict. - events: list[Union[JsonDict, EventBase]] = [] + events: list[JsonDict | EventBase] = [] end_token = from_token for keyname, source in self.event_sources.sources.get_sources(): @@ -866,7 +864,7 @@ class Notifier: await self.clock.sleep(0.5) async def _get_room_ids( - self, user: UserID, explicit_room_id: Optional[str] + self, user: UserID, explicit_room_id: str | None ) -> tuple[StrCollection, bool]: joined_room_ids = await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 552af8e14a..58c58fbdbf 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -94,7 +94,7 @@ The Pusher instance also calls out to various utilities for generating payloads """ import abc -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any import attr @@ -108,7 +108,7 @@ if TYPE_CHECKING: class PusherConfig: """Parameters necessary to configure a pusher.""" - id: Optional[int] + id: int | None user_name: str profile_tag: str @@ -118,18 +118,18 @@ class PusherConfig: device_display_name: str pushkey: str ts: int - lang: Optional[str] - data: Optional[JsonDict] + lang: str | None + data: JsonDict | None last_stream_ordering: int - last_success: Optional[int] - failing_since: Optional[int] + last_success: int | None + failing_since: int | None enabled: bool - device_id: Optional[str] + device_id: str | None # XXX(quenting): The access_token is not persisted anymore for new pushers, but we # keep it when reading from the database, so that we don't get stale pushers # while the "set_device_id_for_pushers" background update is running. - access_token: Optional[int] + access_token: int | None def as_dict(self) -> dict[str, Any]: """Information that can be retrieved about a pusher after creation.""" diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 9fcd7fdc6e..7cf89200a8 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -25,9 +25,7 @@ from typing import ( Any, Collection, Mapping, - Optional, Sequence, - Union, cast, ) @@ -233,7 +231,7 @@ class BulkPushRuleEvaluator: event: EventBase, context: EventContext, event_id_to_event: Mapping[str, EventBase], - ) -> tuple[dict, Optional[int]]: + ) -> tuple[dict, int | None]: """ Given an event and an event context, get the power level event relevant to the event and the power level of the sender of the event. @@ -390,7 +388,7 @@ class BulkPushRuleEvaluator: count_as_unread = _should_count_as_unread(event, context) rules_by_user = await self._get_rules_for_event(event) - actions_by_user: dict[str, Collection[Union[Mapping, str]]] = {} + actions_by_user: dict[str, Collection[Mapping | str]] = {} # Gather a bunch of info in parallel. 
# @@ -405,7 +403,7 @@ class BulkPushRuleEvaluator: profiles, ) = await make_deferred_yieldable( cast( - "Deferred[tuple[int, tuple[dict, Optional[int]], dict[str, dict[str, JsonValue]], Mapping[str, ProfileInfo]]]", + "Deferred[tuple[int, tuple[dict, int | None], dict[str, dict[str, JsonValue]], Mapping[str, ProfileInfo]]]", gather_results( ( run_in_background( # type: ignore[call-overload] @@ -477,7 +475,7 @@ class BulkPushRuleEvaluator: self.hs.config.experimental.msc4306_enabled, ) - msc4306_thread_subscribers: Optional[frozenset[str]] = None + msc4306_thread_subscribers: frozenset[str] | None = None if self.hs.config.experimental.msc4306_enabled and thread_id != MAIN_TIMELINE: # pull out, in batch, all local subscribers to this thread # (in the common case, they will all be getting processed for push @@ -510,7 +508,7 @@ class BulkPushRuleEvaluator: # current user, it'll be added to the dict later. actions_by_user[uid] = [] - msc4306_thread_subscription_state: Optional[bool] = None + msc4306_thread_subscription_state: bool | None = None if msc4306_thread_subscribers is not None: msc4306_thread_subscription_state = uid in msc4306_thread_subscribers @@ -552,10 +550,10 @@ class BulkPushRuleEvaluator: ) -MemberMap = dict[str, Optional[EventIdMembership]] +MemberMap = dict[str, EventIdMembership | None] Rule = dict[str, dict] RulesByUser = dict[str, list[Rule]] -StateGroup = Union[object, int] +StateGroup = object | int def _is_simple_value(value: Any) -> bool: @@ -567,9 +565,9 @@ def _is_simple_value(value: Any) -> bool: def _flatten_dict( - d: Union[EventBase, Mapping[str, Any]], - prefix: Optional[list[str]] = None, - result: Optional[dict[str, JsonValue]] = None, + d: EventBase | Mapping[str, Any], + prefix: list[str] | None = None, + result: dict[str, JsonValue] | None = None, ) -> dict[str, JsonValue]: """ Given a JSON dictionary (or event) which might contain sub dictionaries, diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index fd1758db9d..db082e295d 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -20,7 +20,7 @@ # import copy -from typing import Any, Optional +from typing import Any from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP from synapse.synapse_rust.push import FilteredPushRules, PushRule @@ -85,7 +85,7 @@ def _add_empty_priority_class_arrays(d: dict[str, list]) -> dict[str, list]: return d -def _rule_to_template(rule: PushRule) -> Optional[dict[str, Any]]: +def _rule_to_template(rule: PushRule) -> dict[str, Any] | None: templaterule: dict[str, Any] unscoped_rule_id = _rule_id_from_namespaced(rule.rule_id) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 83823c2284..36dc9bf6fc 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.internet.error import AlreadyCalled, AlreadyCancelled from twisted.internet.interfaces import IDelayedCall @@ -70,7 +70,7 @@ class EmailPusher(Pusher): self.server_name = hs.hostname self.store = self.hs.get_datastores().main self.email = pusher_config.pushkey - self.timed_call: Optional[IDelayedCall] = None + self.timed_call: IDelayedCall | None = None self.throttle_params: dict[str, ThrottleParams] = {} self._inited = False @@ -174,7 +174,7 @@ class EmailPusher(Pusher): ) ) - soonest_due_at: Optional[int] = None + soonest_due_at: int | None = None if not unprocessed: await 
self.save_last_stream_ordering_and_success(self.max_stream_ordering) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 8df106b859..edcabf0c29 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -21,7 +21,7 @@ import logging import random import urllib.parse -from typing import TYPE_CHECKING, Optional, Union +from typing import TYPE_CHECKING from prometheus_client import Counter @@ -68,7 +68,7 @@ http_badges_failed_counter = Counter( ) -def tweaks_for_actions(actions: list[Union[str, dict]]) -> JsonMapping: +def tweaks_for_actions(actions: list[str | dict]) -> JsonMapping: """ Converts a list of actions into a `tweaks` dict (which can then be passed to the push gateway). @@ -119,7 +119,7 @@ class HttpPusher(Pusher): self.data = pusher_config.data self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC self.failing_since = pusher_config.failing_since - self.timed_call: Optional[IDelayedCall] = None + self.timed_call: IDelayedCall | None = None self._is_processing = False self._group_unread_count_by_room = ( hs.config.push.push_group_unread_count_by_room @@ -163,7 +163,7 @@ class HttpPusher(Pusher): self.data_minus_url = {} self.data_minus_url.update(self.data) del self.data_minus_url["url"] - self.badge_count_last_call: Optional[int] = None + self.badge_count_last_call: int | None = None def on_started(self, should_check_for_notifs: bool) -> None: """Called when this pusher has been started. @@ -394,9 +394,9 @@ class HttpPusher(Pusher): async def dispatch_push( self, content: JsonDict, - tweaks: Optional[JsonMapping] = None, - default_payload: Optional[JsonMapping] = None, - ) -> Union[bool, list[str]]: + tweaks: JsonMapping | None = None, + default_payload: JsonMapping | None = None, + ) -> bool | list[str]: """Send a notification to the registered push gateway, with `content` being the content of the `notification` top property specified in the spec. Note that the `devices` property will be added with device-specific @@ -453,7 +453,7 @@ class HttpPusher(Pusher): event: EventBase, tweaks: JsonMapping, badge: int, - ) -> Union[bool, list[str]]: + ) -> bool | list[str]: """Send a notification to the registered push gateway by building it from an event. diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 3dac61aed5..6492207403 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -21,7 +21,7 @@ import logging import urllib.parse -from typing import TYPE_CHECKING, Iterable, Optional, TypeVar +from typing import TYPE_CHECKING, Iterable, TypeVar import bleach import jinja2 @@ -372,7 +372,7 @@ class Mailer: email_address: str, subject: str, extra_template_vars: TemplateVars, - unsubscribe_link: Optional[str] = None, + unsubscribe_link: str | None = None, ) -> None: """Send an email with the given information and template text""" template_vars: TemplateVars = { @@ -486,7 +486,7 @@ class Mailer: async def _get_room_avatar( self, room_state_ids: StateMap[str], - ) -> Optional[str]: + ) -> str | None: """ Retrieve the avatar url for this room---if it exists. @@ -553,7 +553,7 @@ class Mailer: async def _get_message_vars( self, notif: EmailPushAction, event: EventBase, room_state_ids: StateMap[str] - ) -> Optional[MessageVars]: + ) -> MessageVars | None: """ Generate the variables for a single event, if possible. 
@@ -573,7 +573,7 @@ class Mailer: type_state_key = ("m.room.member", event.sender) sender_state_event_id = room_state_ids.get(type_state_key) if sender_state_event_id: - sender_state_event: Optional[EventBase] = await self.store.get_event( + sender_state_event: EventBase | None = await self.store.get_event( sender_state_event_id ) else: @@ -585,9 +585,7 @@ class Mailer: if sender_state_event: sender_name = name_from_member_event(sender_state_event) - sender_avatar_url: Optional[str] = sender_state_event.content.get( - "avatar_url" - ) + sender_avatar_url: str | None = sender_state_event.content.get("avatar_url") else: # No state could be found, fallback to the MXID. sender_name = event.sender diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index 2f32e18b9a..d8000dd607 100644 --- a/synapse/push/presentable_names.py +++ b/synapse/push/presentable_names.py @@ -21,7 +21,7 @@ import logging import re -from typing import TYPE_CHECKING, Iterable, Optional +from typing import TYPE_CHECKING, Iterable from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase @@ -45,7 +45,7 @@ async def calculate_room_name( user_id: str, fallback_to_members: bool = True, fallback_to_single_member: bool = True, -) -> Optional[str]: +) -> str | None: """ Works out a user-facing name for the given room as per Matrix spec recommendations. diff --git a/synapse/push/push_types.py b/synapse/push/push_types.py index e1678cd717..7553b4bf10 100644 --- a/synapse/push/push_types.py +++ b/synapse/push/push_types.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional, TypedDict +from typing import TypedDict class EmailReason(TypedDict, total=False): @@ -40,7 +40,7 @@ class EmailReason(TypedDict, total=False): room_id: str now: int - room_name: Optional[str] + room_name: str | None received_at: int delay_before_mail_ms: int last_sent_ts: int @@ -71,9 +71,9 @@ class MessageVars(TypedDict, total=False): id: str ts: int sender_name: str - sender_avatar_url: Optional[str] + sender_avatar_url: str | None sender_hash: int - msgtype: Optional[str] + msgtype: str | None body_text_html: str body_text_plain: str image_url: str @@ -90,7 +90,7 @@ class NotifVars(TypedDict): """ link: str - ts: Optional[int] + ts: int | None messages: list[MessageVars] @@ -107,12 +107,12 @@ class RoomVars(TypedDict): avator_url: url to the room's avator """ - title: Optional[str] + title: str | None hash: int invite: bool notifs: list[NotifVars] link: str - avatar_url: Optional[str] + avatar_url: str | None class TemplateVars(TypedDict, total=False): diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index 17238c95c0..948465cad1 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Callable from synapse.push import Pusher, PusherConfig from synapse.push.emailpusher import EmailPusher @@ -53,7 +53,7 @@ class PusherFactory: logger.info("defined email pusher type") - def create_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]: + def create_pusher(self, pusher_config: PusherConfig) -> Pusher | None: kind = pusher_config.kind f = self.pusher_types.get(kind, None) if not f: diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 6b70de976a..7b5b06db83 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -20,7 +20,7 @@ # import logging 
-from typing import TYPE_CHECKING, Iterable, Optional +from typing import TYPE_CHECKING, Iterable from prometheus_client import Gauge @@ -119,12 +119,12 @@ class PusherPool: app_display_name: str, device_display_name: str, pushkey: str, - lang: Optional[str], + lang: str | None, data: JsonDict, profile_tag: str = "", enabled: bool = True, - device_id: Optional[str] = None, - ) -> Optional[Pusher]: + device_id: str | None = None, + ) -> Pusher | None: """Creates a new pusher and adds it to the pool Returns: @@ -330,7 +330,7 @@ class PusherPool: async def _get_pusher_config_for_user_by_app_id_and_pushkey( self, user_id: str, app_id: str, pushkey: str - ) -> Optional[PusherConfig]: + ) -> PusherConfig | None: resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey) pusher_config = None @@ -342,7 +342,7 @@ class PusherPool: async def process_pusher_change_by_id( self, app_id: str, pushkey: str, user_id: str - ) -> Optional[Pusher]: + ) -> Pusher | None: """Look up the details for the given pusher, and either start it if its "enabled" flag is True, or try to stop it otherwise. @@ -381,7 +381,7 @@ class PusherPool: logger.info("Started pushers") - async def _start_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]: + async def _start_pusher(self, pusher_config: PusherConfig) -> Pusher | None: """Start the given pusher Args: diff --git a/synapse/replication/http/delayed_events.py b/synapse/replication/http/delayed_events.py index e448ac32bf..26eaf68dae 100644 --- a/synapse/replication/http/delayed_events.py +++ b/synapse/replication/http/delayed_events.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -52,7 +52,7 @@ class ReplicationAddedDelayedEventRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict - ) -> tuple[int, dict[str, Optional[JsonMapping]]]: + ) -> tuple[int, dict[str, JsonMapping | None]]: self.handler.on_added(int(content["next_send_ts"])) return 200, {} diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 2fadee8a06..7a11537f9e 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -170,7 +170,7 @@ class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint): async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict - ) -> tuple[int, dict[str, Optional[JsonMapping]]]: + ) -> tuple[int, dict[str, JsonMapping | None]]: user_ids: list[str] = content["user_ids"] logger.info("Resync for %r", user_ids) diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index 0022e12eac..fc21c20ca2 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, cast from twisted.web.server import Request @@ -50,13 +50,13 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint): @staticmethod async def _serialize_payload( # type: ignore[override] user_id: str, - device_id: Optional[str], - initial_display_name: Optional[str], + device_id: str | None, + initial_display_name: str | None, is_guest: bool, is_appservice_ghost: bool, 
should_issue_refresh_token: bool, - auth_provider_id: Optional[str], - auth_provider_session_id: Optional[str], + auth_provider_id: str | None, + auth_provider_session_id: str | None, ) -> JsonDict: """ Args: diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 0e588037b6..8a6c971720 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -192,7 +192,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): @staticmethod async def _serialize_payload( # type: ignore[override] invite_event_id: str, - txn_id: Optional[str], + txn_id: str | None, requester: Requester, content: JsonDict, ) -> JsonDict: @@ -260,7 +260,7 @@ class ReplicationRemoteRescindKnockRestServlet(ReplicationEndpoint): @staticmethod async def _serialize_payload( # type: ignore[override] knock_event_id: str, - txn_id: Optional[str], + txn_id: str | None, requester: Requester, content: JsonDict, ) -> JsonDict: diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py index 4a894b0221..960f0485ff 100644 --- a/synapse/replication/http/presence.py +++ b/synapse/replication/http/presence.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -58,7 +58,7 @@ class ReplicationBumpPresenceActiveTime(ReplicationEndpoint): self._presence_handler = hs.get_presence_handler() @staticmethod - async def _serialize_payload(user_id: str, device_id: Optional[str]) -> JsonDict: # type: ignore[override] + async def _serialize_payload(user_id: str, device_id: str | None) -> JsonDict: # type: ignore[override] return {"device_id": device_id} async def _handle_request( # type: ignore[override] @@ -102,7 +102,7 @@ class ReplicationPresenceSetState(ReplicationEndpoint): @staticmethod async def _serialize_payload( # type: ignore[override] user_id: str, - device_id: Optional[str], + device_id: str | None, state: JsonDict, force_notify: bool = False, is_sync: bool = False, diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 780fcc463a..bd83b38c96 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -59,14 +59,14 @@ class ReplicationRegisterServlet(ReplicationEndpoint): @staticmethod async def _serialize_payload( # type: ignore[override] user_id: str, - password_hash: Optional[str], + password_hash: str | None, was_guest: bool, make_guest: bool, - appservice_id: Optional[str], - create_profile_with_displayname: Optional[str], + appservice_id: str | None, + create_profile_with_displayname: str | None, admin: bool, - user_type: Optional[str], - address: Optional[str], + user_type: str | None, + address: str | None, shadow_banned: bool, approved: bool, ) -> JsonDict: @@ -143,7 +143,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): @staticmethod async def _serialize_payload( # type: ignore[override] - user_id: str, auth_result: JsonDict, access_token: Optional[str] + user_id: str, auth_result: JsonDict, access_token: str | None ) -> JsonDict: """ Args: diff --git a/synapse/replication/tcp/client.py 
b/synapse/replication/tcp/client.py index f9605407af..297feb0049 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -21,7 +21,7 @@ """A replication client for use by synapse workers.""" import logging -from typing import TYPE_CHECKING, Iterable, Optional +from typing import TYPE_CHECKING, Iterable from sortedcontainers import SortedList @@ -89,7 +89,7 @@ class ReplicationDataHandler: self._pusher_pool = hs.get_pusherpool() self._presence_handler = hs.get_presence_handler() - self.send_handler: Optional[FederationSenderHandler] = None + self.send_handler: FederationSenderHandler | None = None if hs.should_send_federation(): self.send_handler = FederationSenderHandler(hs) @@ -435,7 +435,7 @@ class FederationSenderHandler: # Stores the latest position in the federation stream we've gotten up # to. This is always set before we use it. - self.federation_position: Optional[int] = None + self.federation_position: int | None = None self._fed_position_linearizer = Linearizer( name="_fed_position_linearizer", clock=hs.get_clock() diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index f115cc4db9..79194f7275 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -26,7 +26,7 @@ allowed to be sent by which side. import abc import logging -from typing import Optional, TypeVar +from typing import TypeVar from synapse.replication.tcp.streams._base import StreamRow from synapse.util.json import json_decoder, json_encoder @@ -137,7 +137,7 @@ class RdataCommand(Command): NAME = "RDATA" def __init__( - self, stream_name: str, instance_name: str, token: Optional[int], row: StreamRow + self, stream_name: str, instance_name: str, token: int | None, row: StreamRow ): self.stream_name = stream_name self.instance_name = instance_name @@ -288,7 +288,7 @@ class UserSyncCommand(Command): self, instance_id: str, user_id: str, - device_id: Optional[str], + device_id: str | None, is_syncing: bool, last_sync_ms: int, ): @@ -300,7 +300,7 @@ class UserSyncCommand(Command): @classmethod def from_line(cls: type["UserSyncCommand"], line: str) -> "UserSyncCommand": - device_id: Optional[str] + device_id: str | None instance_id, user_id, device_id, state, last_sync_ms = line.split(" ", 4) if device_id == "None": @@ -407,7 +407,7 @@ class UserIpCommand(Command): access_token: str, ip: str, user_agent: str, - device_id: Optional[str], + device_id: str | None, last_seen: int, ): self.user_id = user_id diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py index bcdd55d2e6..ca959a7aae 100644 --- a/synapse/replication/tcp/external_cache.py +++ b/synapse/replication/tcp/external_cache.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any from prometheus_client import Counter, Histogram @@ -73,7 +73,7 @@ class ExternalCache: self.server_name = hs.hostname if hs.config.redis.redis_enabled: - self._redis_connection: Optional["ConnectionHandler"] = ( + self._redis_connection: "ConnectionHandler" | None = ( hs.get_outbound_redis_connection() ) else: @@ -121,7 +121,7 @@ class ExternalCache: ) ) - async def get(self, cache_name: str, key: str) -> Optional[Any]: + async def get(self, cache_name: str, key: str) -> Any | None: """Look up a key/value in the named cache.""" if self._redis_connection is None: diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 8cf7f4b805..05370045e6 
100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -26,9 +26,7 @@ from typing import ( Awaitable, Iterable, Iterator, - Optional, TypeVar, - Union, ) from prometheus_client import Counter @@ -115,9 +113,7 @@ tcp_command_queue_gauge = LaterGauge( # the type of the entries in _command_queues_by_stream -_StreamCommandQueueItem = tuple[ - Union[RdataCommand, PositionCommand], IReplicationConnection -] +_StreamCommandQueueItem = tuple[RdataCommand | PositionCommand, IReplicationConnection] class ReplicationCommandHandler: @@ -245,7 +241,7 @@ class ReplicationCommandHandler: self._pending_batches: dict[str, list[Any]] = {} # The factory used to create connections. - self._factory: Optional[ReconnectingClientFactory] = None + self._factory: ReconnectingClientFactory | None = None # The currently connected connections. (The list of places we need to send # outgoing replication commands to.) @@ -341,7 +337,7 @@ class ReplicationCommandHandler: self._channels_to_subscribe_to.append(channel_name) def _add_command_to_stream_queue( - self, conn: IReplicationConnection, cmd: Union[RdataCommand, PositionCommand] + self, conn: IReplicationConnection, cmd: RdataCommand | PositionCommand ) -> None: """Queue the given received command for processing @@ -368,7 +364,7 @@ class ReplicationCommandHandler: async def _process_command( self, - cmd: Union[PositionCommand, RdataCommand], + cmd: PositionCommand | RdataCommand, conn: IReplicationConnection, stream_name: str, ) -> None: @@ -459,7 +455,7 @@ class ReplicationCommandHandler: def on_USER_SYNC( self, conn: IReplicationConnection, cmd: UserSyncCommand - ) -> Optional[Awaitable[None]]: + ) -> Awaitable[None] | None: user_sync_counter.labels(**{SERVER_NAME_LABEL: self.server_name}).inc() if self._is_presence_writer: @@ -475,7 +471,7 @@ class ReplicationCommandHandler: def on_CLEAR_USER_SYNC( self, conn: IReplicationConnection, cmd: ClearUserSyncsCommand - ) -> Optional[Awaitable[None]]: + ) -> Awaitable[None] | None: if self._is_presence_writer: return self._presence_handler.update_external_syncs_clear(cmd.instance_id) else: @@ -491,7 +487,7 @@ class ReplicationCommandHandler: def on_USER_IP( self, conn: IReplicationConnection, cmd: UserIpCommand - ) -> Optional[Awaitable[None]]: + ) -> Awaitable[None] | None: user_ip_cache_counter.labels(**{SERVER_NAME_LABEL: self.server_name}).inc() if self._is_master or self._should_insert_client_ips: @@ -833,7 +829,7 @@ class ReplicationCommandHandler: self, instance_id: str, user_id: str, - device_id: Optional[str], + device_id: str | None, is_syncing: bool, last_sync_ms: int, ) -> None: @@ -848,7 +844,7 @@ class ReplicationCommandHandler: access_token: str, ip: str, user_agent: str, - device_id: Optional[str], + device_id: str | None, last_seen: int, ) -> None: """Tell the master that the user made a request.""" @@ -858,7 +854,7 @@ class ReplicationCommandHandler: def send_remote_server_up(self, server: str) -> None: self.send_command(RemoteServerUpCommand(server)) - def stream_update(self, stream_name: str, token: Optional[int], data: Any) -> None: + def stream_update(self, stream_name: str, token: int | None, data: Any) -> None: """Called when a new update is available to stream to Redis subscribers. 
We need to check if the client is interested in the stream or not diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 733643cb64..3068e60af0 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -28,7 +28,7 @@ import fcntl import logging import struct from inspect import isawaitable -from typing import TYPE_CHECKING, Any, Collection, Optional +from typing import TYPE_CHECKING, Any, Collection from prometheus_client import Counter from zope.interface import Interface, implementer @@ -153,7 +153,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self.last_received_command = self.clock.time_msec() self.last_sent_command = 0 # When we requested the connection be closed - self.time_we_closed: Optional[int] = None + self.time_we_closed: int | None = None self.received_ping = False # Have we received a ping from the other side @@ -166,7 +166,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self.pending_commands: list[Command] = [] # The LoopingCall for sending pings. - self._send_ping_loop: Optional[task.LoopingCall] = None + self._send_ping_loop: task.LoopingCall | None = None # a logcontext which we use for processing incoming commands. We declare it as a # background process so that the CPU stats get reported to prometheus. diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 4448117d62..27d43e6fba 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -21,7 +21,7 @@ import logging from inspect import isawaitable -from typing import TYPE_CHECKING, Any, Generic, Optional, TypeVar, cast +from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast import attr from txredisapi import ( @@ -72,10 +72,10 @@ class ConstantProperty(Generic[T, V]): constant: V = attr.ib() - def __get__(self, obj: Optional[T], objtype: Optional[type[T]] = None) -> V: + def __get__(self, obj: T | None, objtype: type[T] | None = None) -> V: return self.constant - def __set__(self, obj: Optional[T], value: V) -> None: + def __set__(self, obj: T | None, value: V) -> None: pass @@ -119,7 +119,7 @@ class RedisSubscriber(SubscriberProtocol): # a logcontext which we use for processing incoming commands. We declare it as a # background process so that the CPU stats get reported to prometheus. 
- self._logging_context: Optional[BackgroundProcessLoggingContext] = None + self._logging_context: BackgroundProcessLoggingContext | None = None def _get_logging_context(self) -> BackgroundProcessLoggingContext: """ @@ -293,14 +293,14 @@ class SynapseRedisFactory(RedisFactory): self, hs: "HomeServer", uuid: str, - dbid: Optional[int], + dbid: int | None, poolsize: int, isLazy: bool = False, handler: type = ConnectionHandler, charset: str = "utf-8", - password: Optional[str] = None, + password: str | None = None, replyTimeout: int = 30, - convertNumbers: Optional[int] = True, + convertNumbers: int | None = True, ): super().__init__( uuid=uuid, @@ -422,9 +422,9 @@ def lazyConnection( hs: "HomeServer", host: str = "localhost", port: int = 6379, - dbid: Optional[int] = None, + dbid: int | None = None, reconnect: bool = True, - password: Optional[str] = None, + password: str | None = None, replyTimeout: int = 30, ) -> ConnectionHandler: """Creates a connection to Redis that is lazily set up and reconnects if the @@ -471,9 +471,9 @@ def lazyConnection( def lazyUnixConnection( hs: "HomeServer", path: str = "/tmp/redis.sock", - dbid: Optional[int] = None, + dbid: int | None = None, reconnect: bool = True, - password: Optional[str] = None, + password: str | None = None, replyTimeout: int = 30, ) -> ConnectionHandler: """Creates a connection to Redis that is lazily set up and reconnects if the diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 8df0a3853f..134d8d921f 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -22,7 +22,7 @@ import logging import random -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from prometheus_client import Counter @@ -321,7 +321,7 @@ class ReplicationStreamer: def _batch_updates( updates: list[tuple[Token, StreamRow]], -) -> list[tuple[Optional[Token], StreamRow]]: +) -> list[tuple[Token | None, StreamRow]]: """Takes a list of updates of form [(token, row)] and sets the token to None for all rows where the next row has the same token. This is used to implement batching. 
@@ -337,7 +337,7 @@ def _batch_updates( if not updates: return [] - new_updates: list[tuple[Optional[Token], StreamRow]] = [] + new_updates: list[tuple[Token | None, StreamRow]] = [] for i, update in enumerate(updates[:-1]): if update[0] == updates[i + 1][0]: new_updates.append((None, update[1])) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index d80bdb9b35..4fb2aac202 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -26,7 +26,6 @@ from typing import ( Any, Awaitable, Callable, - Optional, TypeVar, ) @@ -285,9 +284,9 @@ class BackfillStream(Stream): event_id: str room_id: str type: str - state_key: Optional[str] - redacts: Optional[str] - relates_to: Optional[str] + state_key: str | None + redacts: str | None + relates_to: str | None NAME = "backfill" ROW_TYPE = BackfillStreamRow @@ -435,7 +434,7 @@ class ReceiptsStream(_StreamFromIdGen): receipt_type: str user_id: str event_id: str - thread_id: Optional[str] + thread_id: str | None data: dict NAME = "receipts" @@ -510,7 +509,7 @@ class CachesStream(Stream): """ cache_func: str - keys: Optional[list[Any]] + keys: list[Any] | None invalidation_ts: int NAME = "caches" @@ -639,7 +638,7 @@ class AccountDataStream(_StreamFromIdGen): @attr.s(slots=True, frozen=True, auto_attribs=True) class AccountDataStreamRow: user_id: str - room_id: Optional[str] + room_id: str | None data_type: str NAME = "account_data" diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index a6314b0c7d..ca9f6f12da 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -20,7 +20,7 @@ # import heapq from collections import defaultdict -from typing import TYPE_CHECKING, Iterable, Optional, TypeVar, cast +from typing import TYPE_CHECKING, Iterable, TypeVar, cast import attr @@ -93,7 +93,7 @@ class BaseEventsStreamRow: TypeId: str @classmethod - def from_data(cls: type[T], data: Iterable[Optional[str]]) -> T: + def from_data(cls: type[T], data: Iterable[str | None]) -> T: """Parse the data from the replication stream into a row. 
By default we just call the constructor with the data list as arguments @@ -111,10 +111,10 @@ class EventsStreamEventRow(BaseEventsStreamRow): event_id: str room_id: str type: str - state_key: Optional[str] - redacts: Optional[str] - relates_to: Optional[str] - membership: Optional[str] + state_key: str | None + redacts: str | None + relates_to: str | None + membership: str | None rejected: bool outlier: bool @@ -126,7 +126,7 @@ class EventsStreamCurrentStateRow(BaseEventsStreamRow): room_id: str type: str state_key: str - event_id: Optional[str] + event_id: str | None @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -282,6 +282,6 @@ class EventsStream(_StreamFromIdGen): @classmethod def parse_row(cls, row: StreamRow) -> "EventsStreamRow": - (typ, data) = cast(tuple[str, Iterable[Optional[str]]], row) + (typ, data) = cast(tuple[str, Iterable[str | None]], row) event_stream_row_data = TypeToRow[typ].from_data(data) return EventsStreamRow(typ, event_stream_row_data) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index ea0e47ded4..fe66494d82 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -19,7 +19,7 @@ # # import logging -from typing import TYPE_CHECKING, Callable, Iterable, Optional +from typing import TYPE_CHECKING, Callable, Iterable from synapse.http.server import HttpServer, JsonResource from synapse.rest import admin @@ -143,7 +143,7 @@ class ClientRestResource(JsonResource): * etc """ - def __init__(self, hs: "HomeServer", servlet_groups: Optional[list[str]] = None): + def __init__(self, hs: "HomeServer", servlet_groups: list[str] | None = None): JsonResource.__init__(self, hs, canonical_json=False) if hs.config.media.can_load_media_repo: # This import is here to prevent a circular import failure @@ -156,7 +156,7 @@ class ClientRestResource(JsonResource): def register_servlets( client_resource: HttpServer, hs: "HomeServer", - servlet_groups: Optional[Iterable[str]] = None, + servlet_groups: Iterable[str] | None = None, ) -> None: # Some servlets are only registered on the main process (and not worker # processes). 
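Two details of the rewrite stand out in the hunks above and below, sketched here with assumed names rather than code from the Synapse tree: a long `Union[...]` alias keeps its per-member comments by wrapping the `|` expression in parentheses, and a union of two `Callable` signatures behaves like any other union member.

    from typing import Awaitable, Callable, Literal

    # Parenthesised multi-line union alias; comments can sit between members.
    CheckResult = (
        Literal["NOT_SPAM"]
        # an error code plus extra detail to return to the client
        | tuple[str, dict]
        # deprecated boolean form
        | bool
    )

    # A callback type accepting either a one- or a two-argument coroutine function.
    CheckCallback = (
        Callable[[str], Awaitable[CheckResult]]
        | Callable[[str, str], Awaitable[CheckResult]]
    )
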
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index bcaba85da3..e34ebb17e6 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -35,7 +35,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.handlers.pagination import PURGE_HISTORY_ACTION_NAME @@ -153,7 +153,7 @@ class PurgeHistoryRestServlet(RestServlet): self.auth = hs.get_auth() async def on_POST( - self, request: SynapseRequest, room_id: str, event_id: Optional[str] + self, request: SynapseRequest, room_id: str, event_id: str | None ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) @@ -173,7 +173,7 @@ class PurgeHistoryRestServlet(RestServlet): if event.room_id != room_id: raise SynapseError(HTTPStatus.BAD_REQUEST, "Event is for wrong room.") - # RoomStreamToken expects [int] not Optional[int] + # RoomStreamToken expects [int] not [int | None] assert event.internal_metadata.stream_ordering is not None room_token = RoomStreamToken( topological=event.depth, stream=event.internal_metadata.stream_ordering diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index cfdb314b1a..d5346fe0d5 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import attr @@ -374,7 +374,7 @@ class DeleteMediaByDateSize(RestServlet): self.media_repository = hs.get_media_repository() async def on_POST( - self, request: SynapseRequest, server_name: Optional[str] = None + self, request: SynapseRequest, server_name: str | None = None ) -> tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index e1bfca3c03..cf24bc628a 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -20,7 +20,7 @@ # import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, cast import attr from immutabledict import immutabledict @@ -565,7 +565,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, RestServlet): # Get the room ID from the identifier. 
try: - remote_room_hosts: Optional[list[str]] = [ + remote_room_hosts: list[str] | None = [ x.decode("ascii") for x in request.args[b"server_name"] ] except Exception: diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index 0be04c0f90..50d2f35b18 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -18,7 +18,7 @@ # # from http import HTTPStatus -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.constants import EventTypes from synapse.api.errors import NotFoundError, SynapseError @@ -80,7 +80,7 @@ class SendServerNoticeServlet(RestServlet): self, request: SynapseRequest, requester: Requester, - txn_id: Optional[str], + txn_id: str | None, ) -> tuple[int, JsonDict]: await assert_user_is_admin(self.auth, requester) body = parse_json_object_from_request(request) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 3eab53e5a2..42e9f8043d 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -23,7 +23,7 @@ import hmac import logging import secrets from http import HTTPStatus -from typing import TYPE_CHECKING, Optional, Union +from typing import TYPE_CHECKING import attr from pydantic import StrictBool, StrictInt, StrictStr @@ -163,7 +163,7 @@ class UsersRestServletV2(RestServlet): direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) - # twisted.web.server.Request.args is incorrectly defined as Optional[Any] + # twisted.web.server.Request.args is incorrectly defined as Any | None args: dict[bytes, list[bytes]] = request.args # type: ignore not_user_types = parse_strings_from_args(args, "not_user_type") @@ -195,7 +195,7 @@ class UsersRestServletV2(RestServlet): return HTTPStatus.OK, ret - def _parse_parameter_deactivated(self, request: SynapseRequest) -> Optional[bool]: + def _parse_parameter_deactivated(self, request: SynapseRequest) -> bool | None: """ Return None (no filtering) if `deactivated` is `true`, otherwise return `False` (exclude deactivated users from the results). @@ -206,9 +206,7 @@ class UsersRestServletV2(RestServlet): class UsersRestServletV3(UsersRestServletV2): PATTERNS = admin_patterns("/users$", "v3") - def _parse_parameter_deactivated( - self, request: SynapseRequest - ) -> Union[bool, None]: + def _parse_parameter_deactivated(self, request: SynapseRequest) -> bool | None: return parse_boolean(request, "deactivated") @@ -340,7 +338,7 @@ class UserRestServletV2(RestServlet): HTTPStatus.BAD_REQUEST, "An user can't be deactivated and locked" ) - approved: Optional[bool] = None + approved: bool | None = None if "approved" in body and self._msc3866_enabled: approved = body["approved"] if not isinstance(approved, bool): @@ -920,7 +918,7 @@ class SearchUsersRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, target_user_id: str - ) -> tuple[int, Optional[list[JsonDict]]]: + ) -> tuple[int, list[JsonDict] | None]: """Get request to search user table for specific users according to search term. This needs user to have a administrator access in Synapse. 
@@ -1476,9 +1474,9 @@ class RedactUser(RestServlet): class PostBody(RequestBodyModel): rooms: list[StrictStr] - reason: Optional[StrictStr] = None - limit: Optional[StrictInt] = None - use_admin: Optional[StrictBool] = None + reason: StrictStr | None = None + limit: StrictInt | None = None + use_admin: StrictBool | None = None async def on_POST( self, request: SynapseRequest, user_id: str diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index f928a8a3f4..b052052be0 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -21,7 +21,7 @@ # import logging import random -from typing import TYPE_CHECKING, Literal, Optional +from typing import TYPE_CHECKING, Literal from urllib.parse import urlparse import attr @@ -161,11 +161,11 @@ class PasswordRestServlet(RestServlet): self._set_password_handler = hs.get_set_password_handler() class PostBody(RequestBodyModel): - auth: Optional[AuthenticationData] = None + auth: AuthenticationData | None = None logout_devices: StrictBool = True - new_password: Optional[ - Annotated[str, StringConstraints(max_length=512, strict=True)] - ] = None + new_password: ( + Annotated[str, StringConstraints(max_length=512, strict=True)] | None + ) = None @interactive_auth_handler async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: @@ -259,7 +259,7 @@ class PasswordRestServlet(RestServlet): # If we have a password in this request, prefer it. Otherwise, use the # password hash from an earlier request. if new_password: - password_hash: Optional[str] = await self.auth_handler.hash(new_password) + password_hash: str | None = await self.auth_handler.hash(new_password) elif session_id is not None: password_hash = existing_session_password_hash else: @@ -289,8 +289,8 @@ class DeactivateAccountRestServlet(RestServlet): self._deactivate_account_handler = hs.get_deactivate_account_handler() class PostBody(RequestBodyModel): - auth: Optional[AuthenticationData] = None - id_server: Optional[StrictStr] = None + auth: AuthenticationData | None = None + id_server: StrictStr | None = None # Not specced, see https://github.com/matrix-org/matrix-spec/issues/297 erase: StrictBool = False @@ -663,7 +663,7 @@ class ThreepidAddRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() class PostBody(RequestBodyModel): - auth: Optional[AuthenticationData] = None + auth: AuthenticationData | None = None client_secret: ClientSecretStr sid: StrictStr @@ -742,7 +742,7 @@ class ThreepidUnbindRestServlet(RestServlet): class PostBody(RequestBodyModel): address: StrictStr - id_server: Optional[StrictStr] = None + id_server: StrictStr | None = None medium: Literal["email", "msisdn"] async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: @@ -771,7 +771,7 @@ class ThreepidDeleteRestServlet(RestServlet): class PostBody(RequestBodyModel): address: StrictStr - id_server: Optional[StrictStr] = None + id_server: StrictStr | None = None medium: Literal["email", "msisdn"] async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index 0800c0f5b8..b18232fc56 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.constants import AccountDataTypes, ReceiptTypes from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError @@ -108,9 
+108,9 @@ class AccountDataServlet(RestServlet): # Push rules are stored in a separate table and must be queried separately. if account_data_type == AccountDataTypes.PUSH_RULES: - account_data: Optional[ - JsonMapping - ] = await self._push_rules_handler.push_rules_for_user(requester.user) + account_data: ( + JsonMapping | None + ) = await self._push_rules_handler.push_rules_for_user(requester.user) else: account_data = await self.store.get_global_account_data_by_type_for_user( user_id, account_data_type @@ -244,7 +244,7 @@ class RoomAccountDataServlet(RestServlet): # Room-specific push rules are not currently supported. if account_data_type == AccountDataTypes.PUSH_RULES: - account_data: Optional[JsonMapping] = {} + account_data: JsonMapping | None = {} else: account_data = await self.store.get_account_data_for_room_and_type( user_id, room_id, account_data_type diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index e20e49d48b..636e4b6031 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -22,7 +22,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from pydantic import ConfigDict, StrictStr @@ -95,7 +95,7 @@ class DeleteDevicesRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() class PostBody(RequestBodyModel): - auth: Optional[AuthenticationData] = None + auth: AuthenticationData | None = None devices: list[StrictStr] @interactive_auth_handler @@ -173,7 +173,7 @@ class DeviceRestServlet(RestServlet): return 200, device class DeleteBody(RequestBodyModel): - auth: Optional[AuthenticationData] = None + auth: AuthenticationData | None = None @interactive_auth_handler async def on_DELETE( @@ -218,7 +218,7 @@ class DeviceRestServlet(RestServlet): return 200, {} class PutBody(RequestBodyModel): - display_name: Optional[StrictStr] = None + display_name: StrictStr | None = None async def on_PUT( self, request: SynapseRequest, device_id: str @@ -316,7 +316,7 @@ class DehydratedDeviceServlet(RestServlet): class PutBody(RequestBodyModel): device_data: DehydratedDeviceDataModel - initial_device_display_name: Optional[StrictStr] = None + initial_device_display_name: StrictStr | None = None async def on_PUT(self, request: SynapseRequest) -> tuple[int, JsonDict]: submission = parse_and_validate_json_object_from_request(request, self.PutBody) @@ -391,7 +391,7 @@ class DehydratedDeviceEventsServlet(RestServlet): self.store = hs.get_datastores().main class PostBody(RequestBodyModel): - next_batch: Optional[StrictStr] = None + next_batch: StrictStr | None = None async def on_POST( self, request: SynapseRequest, device_id: str @@ -538,7 +538,7 @@ class DehydratedDeviceV2Servlet(RestServlet): class PutBody(RequestBodyModel): device_data: DehydratedDeviceDataModel device_id: StrictStr - initial_device_display_name: Optional[StrictStr] + initial_device_display_name: StrictStr | None model_config = ConfigDict(extra="allow") async def on_PUT(self, request: SynapseRequest) -> tuple[int, JsonDict]: diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index 943674bbb1..0b334f9b0b 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Literal, Optional +from typing import TYPE_CHECKING, Literal from pydantic import StrictStr @@ -73,7 +73,7 @@ class ClientDirectoryServer(RestServlet): # TODO: get Pydantic to validate that this is a valid room 
id? room_id: StrictStr # `servers` is unspecced - servers: Optional[list[StrictStr]] = None + servers: list[StrictStr] | None = None async def on_PUT( self, request: SynapseRequest, room_alias: str diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py index 082bacade6..de73c96fd0 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py @@ -22,7 +22,7 @@ """This module contains REST servlets to do with event streaming, /events.""" import logging -from typing import TYPE_CHECKING, Union +from typing import TYPE_CHECKING from synapse.api.errors import SynapseError from synapse.events.utils import SerializeEventConfig @@ -96,7 +96,7 @@ class EventRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, event_id: str - ) -> tuple[int, Union[str, JsonDict]]: + ) -> tuple[int, str | JsonDict]: requester = await self.auth.get_user_by_req(request) event = await self.event_handler.get_event(requester.user, None, event_id) diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index b87b9bd68a..5f488674b4 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -24,7 +24,7 @@ import logging import re from collections import Counter from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Mapping, Optional, Union +from typing import TYPE_CHECKING, Any, Mapping from pydantic import StrictBool, StrictStr, field_validator @@ -147,7 +147,7 @@ class KeyUploadServlet(RestServlet): key: StrictStr """The key, encoded using unpadded base64.""" - fallback: Optional[StrictBool] = False + fallback: StrictBool | None = False """Whether this is a fallback key. Only used when handling fallback keys.""" signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]] @@ -156,10 +156,10 @@ class KeyUploadServlet(RestServlet): See the following for more detail: https://spec.matrix.org/v1.16/appendices/#signing-details """ - device_keys: Optional[DeviceKeys] = None + device_keys: DeviceKeys | None = None """Identity keys for the device. May be absent if no new identity keys are required.""" - fallback_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] = None + fallback_keys: Mapping[StrictStr, StrictStr | KeyObject] | None = None """ The public key which should be used if the device's one-time keys are exhausted. The fallback key is not deleted once used, but should be @@ -193,7 +193,7 @@ class KeyUploadServlet(RestServlet): ) return v - one_time_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] = None + one_time_keys: Mapping[StrictStr, StrictStr | KeyObject] | None = None """ One-time public keys for "pre-key" messages. The names of the properties should be in the format `:`. 
@@ -221,7 +221,7 @@ class KeyUploadServlet(RestServlet): return v async def on_POST( - self, request: SynapseRequest, device_id: Optional[str] + self, request: SynapseRequest, device_id: str | None ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py index 5e96079b66..cd3afda11e 100644 --- a/synapse/rest/client/knock.py +++ b/synapse/rest/client/knock.py @@ -69,7 +69,7 @@ class KnockRoomAliasServlet(RestServlet): if RoomID.is_valid(room_identifier): room_id = room_identifier - # twisted.web.server.Request.args is incorrectly defined as Optional[Any] + # twisted.web.server.Request.args is incorrectly defined as Any | None args: dict[bytes, list[bytes]] = request.args # type: ignore # Prefer via over server_name (deprecated with MSC4156) remote_room_hosts = parse_strings_from_args(args, "via", required=False) diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index bba6944982..fe3cb9aa3d 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -26,9 +26,7 @@ from typing import ( Any, Awaitable, Callable, - Optional, TypedDict, - Union, ) from synapse.api.constants import ApprovalNoticeMedium @@ -67,12 +65,12 @@ logger = logging.getLogger(__name__) class LoginResponse(TypedDict, total=False): user_id: str - access_token: Optional[str] + access_token: str | None home_server: str - expires_in_ms: Optional[int] - refresh_token: Optional[str] - device_id: Optional[str] - well_known: Optional[dict[str, Any]] + expires_in_ms: int | None + refresh_token: str | None + device_id: str | None + well_known: dict[str, Any] | None class LoginRestServlet(RestServlet): @@ -367,13 +365,13 @@ class LoginRestServlet(RestServlet): self, user_id: str, login_submission: JsonDict, - callback: Optional[Callable[[LoginResponse], Awaitable[None]]] = None, + callback: Callable[[LoginResponse], Awaitable[None]] | None = None, create_non_existent_users: bool = False, - default_display_name: Optional[str] = None, + default_display_name: str | None = None, ratelimit: bool = True, - auth_provider_id: Optional[str] = None, + auth_provider_id: str | None = None, should_issue_refresh_token: bool = False, - auth_provider_session_id: Optional[str] = None, + auth_provider_session_id: str | None = None, should_check_deactivated_or_locked: bool = True, *, request_info: RequestInfo, @@ -623,7 +621,7 @@ class RefreshTokenServlet(RestServlet): token, access_valid_until_ms, refresh_valid_until_ms ) - response: dict[str, Union[str, int]] = { + response: dict[str, str | int] = { "access_token": access_token, "refresh_token": refresh_token, } @@ -652,9 +650,7 @@ class SsoRedirectServlet(RestServlet): self._sso_handler = hs.get_sso_handler() self._public_baseurl = hs.config.server.public_baseurl - async def on_GET( - self, request: SynapseRequest, idp_id: Optional[str] = None - ) -> None: + async def on_GET(self, request: SynapseRequest, idp_id: str | None = None) -> None: if not self._public_baseurl: raise SynapseError(400, "SSO requires a valid public_baseurl") diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py index 4c044ae900..f145b03af4 100644 --- a/synapse/rest/client/media.py +++ b/synapse/rest/client/media.py @@ -22,7 +22,6 @@ import logging import re -from typing import Optional from synapse.http.server import ( HttpServer, @@ -231,7 +230,7 @@ class DownloadResource(RestServlet): request: SynapseRequest, 
server_name: str, media_id: str, - file_name: Optional[str] = None, + file_name: str | None = None, ) -> None: # Validate the server name, raising if invalid parse_and_validate_server_name(server_name) diff --git a/synapse/rest/client/mutual_rooms.py b/synapse/rest/client/mutual_rooms.py index 7d0570d0cb..bda6ed1f70 100644 --- a/synapse/rest/client/mutual_rooms.py +++ b/synapse/rest/client/mutual_rooms.py @@ -52,7 +52,7 @@ class UserMutualRoomsServlet(RestServlet): self.store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: - # twisted.web.server.Request.args is incorrectly defined as Optional[Any] + # twisted.web.server.Request.args is incorrectly defined as Any | None args: dict[bytes, list[bytes]] = request.args # type: ignore user_ids = parse_strings_from_args(args, "user_id", required=True) diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index 0a9b83af95..39b0cde47b 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -20,7 +20,7 @@ # from http import HTTPStatus -from typing import TYPE_CHECKING, Union +from typing import TYPE_CHECKING from synapse.api.errors import ( Codes, @@ -240,7 +240,7 @@ def _rule_spec_from_path(path: list[str]) -> RuleSpec: def _rule_tuple_from_request_object( rule_template: str, rule_id: str, req_obj: JsonDict -) -> tuple[list[JsonDict], list[Union[str, JsonDict]]]: +) -> tuple[list[JsonDict], list[str | JsonDict]]: if rule_template == "postcontent": # postcontent is from MSC4306, which says that clients # cannot create their own postcontent rules right now. diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 145dc6f569..9503446b92 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -21,7 +21,7 @@ # import logging import random -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.web.server import Request @@ -852,7 +852,7 @@ class RegisterRestServlet(RestServlet): return result async def _do_guest_registration( - self, params: JsonDict, address: Optional[str] = None + self, params: JsonDict, address: str | None = None ) -> tuple[int, JsonDict]: if not self.hs.config.registration.allow_guest_access: raise SynapseError(403, "Guest access is disabled") diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index d6c7411816..c913bc6970 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -20,7 +20,7 @@ import logging import re -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.constants import Direction from synapse.handlers.relations import ThreadsListInclude @@ -61,8 +61,8 @@ class RelationPaginationServlet(RestServlet): request: SynapseRequest, room_id: str, parent_id: str, - relation_type: Optional[str] = None, - event_type: Optional[str] = None, + relation_type: str | None = None, + event_type: str | None = None, ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) diff --git a/synapse/rest/client/rendezvous.py b/synapse/rest/client/rendezvous.py index a1808847f0..08a449eefc 100644 --- a/synapse/rest/client/rendezvous.py +++ b/synapse/rest/client/rendezvous.py @@ -21,7 +21,7 @@ import logging from http.client import TEMPORARY_REDIRECT -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.http.server import HttpServer, respond_with_redirect from 
synapse.http.servlet import RestServlet @@ -41,7 +41,7 @@ class MSC4108DelegationRendezvousServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - redirection_target: Optional[str] = ( + redirection_target: str | None = ( hs.config.experimental.msc4108_delegation_endpoint ) assert redirection_target is not None, ( diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 38e315d0e7..81a6bd57fc 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -25,7 +25,7 @@ import logging import re from enum import Enum from http import HTTPStatus -from typing import TYPE_CHECKING, Awaitable, Optional +from typing import TYPE_CHECKING, Awaitable from urllib import parse as urlparse from prometheus_client.core import Histogram @@ -294,7 +294,7 @@ class RoomStateEventRestServlet(RestServlet): room_id: str, event_type: str, state_key: str, - txn_id: Optional[str] = None, + txn_id: str | None = None, ) -> tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) @@ -407,7 +407,7 @@ class RoomSendEventRestServlet(TransactionRestServlet): requester: Requester, room_id: str, event_type: str, - txn_id: Optional[str], + txn_id: str | None, ) -> tuple[int, JsonDict]: content = parse_json_object_from_request(request) @@ -484,8 +484,8 @@ class RoomSendEventRestServlet(TransactionRestServlet): def _parse_request_delay( request: SynapseRequest, - max_delay: Optional[int], -) -> Optional[int]: + max_delay: int | None, +) -> int | None: """Parses from the request string the delay parameter for delayed event requests, and checks it for correctness. @@ -544,11 +544,11 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet): request: SynapseRequest, requester: Requester, room_identifier: str, - txn_id: Optional[str], + txn_id: str | None, ) -> tuple[int, JsonDict]: content = parse_json_object_from_request(request, allow_empty_body=True) - # twisted.web.server.Request.args is incorrectly defined as Optional[Any] + # twisted.web.server.Request.args is incorrectly defined as Any | None args: dict[bytes, list[bytes]] = request.args # type: ignore # Prefer via over server_name (deprecated with MSC4156) remote_room_hosts = parse_strings_from_args(args, "via", required=False) @@ -623,7 +623,7 @@ class PublicRoomListRestServlet(RestServlet): if server: raise e - limit: Optional[int] = parse_integer(request, "limit", 0) + limit: int | None = parse_integer(request, "limit", 0) since_token = parse_string(request, "since") if limit == 0: @@ -658,7 +658,7 @@ class PublicRoomListRestServlet(RestServlet): server = parse_string(request, "server") content = parse_json_object_from_request(request) - limit: Optional[int] = int(content.get("limit", 100)) + limit: int | None = int(content.get("limit", 100)) since_token = content.get("since", None) search_filter = content.get("filter", None) @@ -1118,7 +1118,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): requester: Requester, room_id: str, membership_action: str, - txn_id: Optional[str], + txn_id: str | None, ) -> tuple[int, JsonDict]: if requester.is_guest and membership_action not in { Membership.JOIN, @@ -1241,7 +1241,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): requester: Requester, room_id: str, event_id: str, - txn_id: Optional[str], + txn_id: str | None, ) -> tuple[int, JsonDict]: content = parse_json_object_from_request(request) @@ -1572,7 +1572,7 @@ class RoomHierarchyRestServlet(RestServlet): max_depth = parse_integer(request, 
"max_depth") limit = parse_integer(request, "limit") - # twisted.web.server.Request.args is incorrectly defined as Optional[Any] + # twisted.web.server.Request.args is incorrectly defined as Any | None remote_room_hosts = None if self.msc4235_enabled: args: dict[bytes, list[bytes]] = request.args # type: ignore @@ -1617,12 +1617,12 @@ class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet): ) -> tuple[int, JsonDict]: try: requester = await self._auth.get_user_by_req(request, allow_guest=True) - requester_user_id: Optional[str] = requester.user.to_string() + requester_user_id: str | None = requester.user.to_string() except MissingClientTokenError: # auth is optional requester_user_id = None - # twisted.web.server.Request.args is incorrectly defined as Optional[Any] + # twisted.web.server.Request.args is incorrectly defined as Any | None args: dict[bytes, list[bytes]] = request.args # type: ignore remote_room_hosts = parse_strings_from_args(args, "via", required=False) room_id, remote_room_hosts = await self.resolve_room_id( diff --git a/synapse/rest/client/room_keys.py b/synapse/rest/client/room_keys.py index b2de591dc5..b7f7c68d8f 100644 --- a/synapse/rest/client/room_keys.py +++ b/synapse/rest/client/room_keys.py @@ -19,7 +19,7 @@ # import logging -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, cast from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer @@ -51,7 +51,7 @@ class RoomKeysServlet(RestServlet): self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() async def on_PUT( - self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str] + self, request: SynapseRequest, room_id: str | None, session_id: str | None ) -> tuple[int, JsonDict]: """ Uploads one or more encrypted E2E room keys for backup purposes. @@ -146,7 +146,7 @@ class RoomKeysServlet(RestServlet): return 200, ret async def on_GET( - self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str] + self, request: SynapseRequest, room_id: str | None, session_id: str | None ) -> tuple[int, JsonDict]: """ Retrieves one or more encrypted E2E room keys for backup purposes. @@ -233,7 +233,7 @@ class RoomKeysServlet(RestServlet): return 200, room_keys async def on_DELETE( - self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str] + self, request: SynapseRequest, room_id: str | None, session_id: str | None ) -> tuple[int, JsonDict]: """ Deletes one or more encrypted E2E room keys for a user for backup purposes. diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 9c03eecea4..458bf08a19 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -21,7 +21,7 @@ import itertools import logging from collections import defaultdict -from typing import TYPE_CHECKING, Any, Mapping, Optional, Union +from typing import TYPE_CHECKING, Any, Mapping import attr @@ -189,7 +189,7 @@ class SyncRestServlet(RestServlet): # in the response cache once the set of ignored users has changed. # (We filter out ignored users from timeline events, so our sync response # is invalid once the set of ignored users changes.) - last_ignore_accdata_streampos: Optional[int] = None + last_ignore_accdata_streampos: int | None = None if not since: # No `since`, so this is an initial sync. 
last_ignore_accdata_streampos = await self.store.get_latest_stream_id_for_global_account_data_by_type_for_user( @@ -547,7 +547,7 @@ class SyncRestServlet(RestServlet): async def encode_room( self, sync_config: SyncConfig, - room: Union[JoinedSyncResult, ArchivedSyncResult], + room: JoinedSyncResult | ArchivedSyncResult, time_now: int, joined: bool, serialize_options: SerializeEventConfig, diff --git a/synapse/rest/client/thread_subscriptions.py b/synapse/rest/client/thread_subscriptions.py index d02f2cb48a..60676a4032 100644 --- a/synapse/rest/client/thread_subscriptions.py +++ b/synapse/rest/client/thread_subscriptions.py @@ -1,5 +1,5 @@ from http import HTTPStatus -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import attr from typing_extensions import TypeAlias @@ -50,7 +50,7 @@ class ThreadSubscriptionsRestServlet(RestServlet): self.handler = hs.get_thread_subscriptions_handler() class PutBody(RequestBodyModel): - automatic: Optional[AnyEventId] = None + automatic: AnyEventId | None = None """ If supplied, the event ID of an event giving rise to this automatic subscription. diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py index f783acdb83..41e49ac384 100644 --- a/synapse/rest/key/v2/local_key_resource.py +++ b/synapse/rest/key/v2/local_key_resource.py @@ -21,7 +21,7 @@ import logging import re -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from signedjson.sign import sign_json from unpaddedbase64 import encode_base64 @@ -107,7 +107,7 @@ class LocalKey(RestServlet): return json_object def on_GET( - self, request: Request, key_id: Optional[str] = None + self, request: Request, key_id: str | None = None ) -> tuple[int, JsonDict]: # Matrix 1.6 drops support for passing the key_id, this is incompatible # with earlier versions and is allowed in order to support both. 
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index e8b0b31210..c3dc69889b 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -21,7 +21,7 @@ import logging import re -from typing import TYPE_CHECKING, Mapping, Optional +from typing import TYPE_CHECKING, Mapping from pydantic import ConfigDict, StrictInt, StrictStr from signedjson.sign import sign_json @@ -50,7 +50,7 @@ logger = logging.getLogger(__name__) class _KeyQueryCriteriaDataModel(RequestBodyModel): model_config = ConfigDict(extra="allow") - minimum_valid_until_ts: Optional[StrictInt] + minimum_valid_until_ts: StrictInt | None class RemoteKey(RestServlet): @@ -142,7 +142,7 @@ class RemoteKey(RestServlet): ) async def on_GET( - self, request: Request, server: str, key_id: Optional[str] = None + self, request: Request, server: str, key_id: str | None = None ) -> tuple[int, JsonDict]: if server and key_id: # Matrix 1.6 drops support for passing the key_id, this is incompatible @@ -181,11 +181,11 @@ class RemoteKey(RestServlet): ) -> JsonDict: logger.info("Handling query for keys %r", query) - server_keys: dict[tuple[str, str], Optional[FetchKeyResultForRemote]] = {} + server_keys: dict[tuple[str, str], FetchKeyResultForRemote | None] = {} for server_name, key_ids in query.items(): if key_ids: results: Mapping[ - str, Optional[FetchKeyResultForRemote] + str, FetchKeyResultForRemote | None ] = await self.store.get_server_keys_json_for_remote( server_name, key_ids ) diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py index 3c3f703667..f4569cfc7e 100644 --- a/synapse/rest/media/download_resource.py +++ b/synapse/rest/media/download_resource.py @@ -21,7 +21,7 @@ # import logging import re -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.http.server import set_corp_headers, set_cors_headers from synapse.http.servlet import RestServlet, parse_boolean, parse_integer @@ -57,7 +57,7 @@ class DownloadResource(RestServlet): request: SynapseRequest, server_name: str, media_id: str, - file_name: Optional[str] = None, + file_name: str | None = None, ) -> None: # Validate the server name, raising if invalid parse_and_validate_server_name(server_name) diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py index 484749dbe6..56bc727cf8 100644 --- a/synapse/rest/media/upload_resource.py +++ b/synapse/rest/media/upload_resource.py @@ -22,7 +22,7 @@ import logging import re -from typing import IO, TYPE_CHECKING, Optional +from typing import IO, TYPE_CHECKING from synapse.api.errors import Codes, SynapseError from synapse.http.server import respond_with_json @@ -56,7 +56,7 @@ class BaseUploadServlet(RestServlet): async def _get_file_metadata( self, request: SynapseRequest, user_id: str - ) -> tuple[int, Optional[str], str]: + ) -> tuple[int, str | None, str]: raw_content_length = request.getHeader("Content-Length") if raw_content_length is None: raise SynapseError(msg="Request must specify a Content-Length", code=400) @@ -82,7 +82,7 @@ class BaseUploadServlet(RestServlet): upload_name_bytes = parse_bytes_from_args(args, "filename") if upload_name_bytes: try: - upload_name: Optional[str] = upload_name_bytes.decode("utf8") + upload_name: str | None = upload_name_bytes.decode("utf8") except UnicodeDecodeError: raise SynapseError( msg="Invalid UTF-8 filename parameter: %r" % (upload_name_bytes,), diff --git 
a/synapse/rest/synapse/mas/devices.py b/synapse/rest/synapse/mas/devices.py index eac51de44c..9d94a67675 100644 --- a/synapse/rest/synapse/mas/devices.py +++ b/synapse/rest/synapse/mas/devices.py @@ -15,7 +15,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from pydantic import StrictStr @@ -53,7 +53,7 @@ class MasUpsertDeviceResource(MasBaseResource): class PostBody(RequestBodyModel): localpart: StrictStr device_id: StrictStr - display_name: Optional[StrictStr] = None + display_name: StrictStr | None = None async def _async_render_POST( self, request: "SynapseRequest" diff --git a/synapse/rest/synapse/mas/users.py b/synapse/rest/synapse/mas/users.py index f52c4bb167..55c7337555 100644 --- a/synapse/rest/synapse/mas/users.py +++ b/synapse/rest/synapse/mas/users.py @@ -15,7 +15,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Optional, TypedDict +from typing import TYPE_CHECKING, Any, TypedDict from pydantic import StrictBool, StrictStr, model_validator @@ -52,8 +52,8 @@ class MasQueryUserResource(MasBaseResource): class Response(TypedDict): user_id: str - display_name: Optional[str] - avatar_url: Optional[str] + display_name: str | None + avatar_url: str | None is_suspended: bool is_deactivated: bool @@ -65,7 +65,7 @@ class MasQueryUserResource(MasBaseResource): localpart = parse_string(request, "localpart", required=True) user_id = UserID(localpart, self.hostname) - user: Optional[UserInfo] = await self.store.get_user_by_id(user_id=str(user_id)) + user: UserInfo | None = await self.store.get_user_by_id(user_id=str(user_id)) if user is None: raise NotFoundError("User not found") @@ -104,13 +104,13 @@ class MasProvisionUserResource(MasBaseResource): localpart: StrictStr unset_displayname: StrictBool = False - set_displayname: Optional[StrictStr] = None + set_displayname: StrictStr | None = None unset_avatar_url: StrictBool = False - set_avatar_url: Optional[StrictStr] = None + set_avatar_url: StrictStr | None = None unset_emails: StrictBool = False - set_emails: Optional[list[StrictStr]] = None + set_emails: list[StrictStr] | None = None @model_validator(mode="before") @classmethod @@ -165,7 +165,7 @@ class MasProvisionUserResource(MasBaseResource): by_admin=True, ) - new_email_list: Optional[set[str]] = None + new_email_list: set[str] | None = None if body.unset_emails: new_email_list = set() elif body.set_emails is not None: diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index 00965cfb82..801d474ecc 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from twisted.web.resource import Resource from twisted.web.server import Request @@ -42,7 +42,7 @@ class WellKnownBuilder: self._config = hs.config self._auth = hs.get_auth() - async def get_well_known(self) -> Optional[JsonDict]: + async def get_well_known(self) -> JsonDict | None: if not self._config.server.serve_client_wellknown: return None diff --git a/synapse/server.py b/synapse/server.py index 766515c930..de0a2b098c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -34,7 +34,6 @@ from typing import ( Any, Awaitable, Callable, - Optional, TypeVar, cast, ) @@ -320,7 +319,7 @@ class HomeServer(metaclass=abc.ABCMeta): self, hostname: str, config: HomeServerConfig, - reactor: Optional[ISynapseReactor] = None, + reactor: ISynapseReactor | None = None, ): """ Args: 
@@ -340,33 +339,33 @@ class HomeServer(metaclass=abc.ABCMeta): self.config = config self._listening_services: list[Port] = [] self._metrics_listeners: list[tuple[WSGIServer, Thread]] = [] - self.start_time: Optional[int] = None + self.start_time: int | None = None self._instance_id = random_string(5) self._instance_name = config.worker.instance_name self.version_string = f"Synapse/{SYNAPSE_VERSION}" - self.datastores: Optional[Databases] = None + self.datastores: Databases | None = None self._module_web_resources: dict[str, Resource] = {} self._module_web_resources_consumed = False # This attribute is set by the free function `refresh_certificate`. - self.tls_server_context_factory: Optional[IOpenSSLContextFactory] = None + self.tls_server_context_factory: IOpenSSLContextFactory | None = None self._is_shutdown = False self._async_shutdown_handlers: list[ShutdownInfo] = [] self._sync_shutdown_handlers: list[ShutdownInfo] = [] - self._background_processes: set[defer.Deferred[Optional[Any]]] = set() + self._background_processes: set[defer.Deferred[Any | None]] = set() def run_as_background_process( self, desc: "LiteralString", - func: Callable[..., Awaitable[Optional[R]]], + func: Callable[..., Awaitable[R | None]], *args: Any, **kwargs: Any, - ) -> "defer.Deferred[Optional[R]]": + ) -> "defer.Deferred[R | None]": """Run the given function in its own logcontext, with resource metrics This should be used to wrap processes which are fired off to run in the diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 73cf4091eb..b4e512618e 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.api.constants import EventTypes, Membership, RoomCreationPreset from synapse.events import EventBase @@ -59,8 +59,8 @@ class ServerNoticesManager: user_id: str, event_content: dict, type: str = EventTypes.Message, - state_key: Optional[str] = None, - txn_id: Optional[str] = None, + state_key: str | None = None, + txn_id: str | None = None, ) -> EventBase: """Send a notice to the given user @@ -99,7 +99,7 @@ class ServerNoticesManager: return event @cached() - async def maybe_get_notice_room_for_user(self, user_id: str) -> Optional[str]: + async def maybe_get_notice_room_for_user(self, user_id: str) -> str | None: """Try to look up the server notice room for this user if it exists. Does not create one if none can be found. @@ -294,8 +294,8 @@ class ServerNoticesManager: self, requester: Requester, room_id: str, - display_name: Optional[str], - avatar_url: Optional[str], + display_name: str | None, + avatar_url: str | None, ) -> None: """ Updates the notice user's profile if it's different from what is in the room. @@ -341,7 +341,7 @@ class ServerNoticesManager: room_id: str, info_event_type: str, info_content_key: str, - info_value: Optional[str], + info_value: str | None, ) -> None: """ Updates a specific notice room's info if it's different from what is set. 
diff --git a/synapse/server_notices/server_notices_sender.py b/synapse/server_notices/server_notices_sender.py index bc62d6ac6c..fd4f36f5c8 100644 --- a/synapse/server_notices/server_notices_sender.py +++ b/synapse/server_notices/server_notices_sender.py @@ -17,7 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Iterable, Union +from typing import TYPE_CHECKING, Iterable from synapse.server_notices.consent_server_notices import ConsentServerNotices from synapse.server_notices.resource_limits_server_notices import ( @@ -39,7 +39,7 @@ class ServerNoticesSender(WorkerServerNoticesSender): def __init__(self, hs: "HomeServer"): super().__init__(hs) self._server_notices: Iterable[ - Union[ConsentServerNotices, ResourceLimitsServerNotices] + ConsentServerNotices | ResourceLimitsServerNotices ] = ( ConsentServerNotices(hs), ResourceLimitsServerNotices(hs), diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 991e1f847a..9fc49be4b1 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -96,10 +96,10 @@ class _StateCacheEntry: def __init__( self, - state: Optional[StateMap[str]], - state_group: Optional[int], - prev_group: Optional[int] = None, - delta_ids: Optional[StateMap[str]] = None, + state: StateMap[str] | None, + state_group: int | None, + prev_group: int | None = None, + delta_ids: StateMap[str] | None = None, ): if state is None and state_group is None and prev_group is None: raise Exception("One of state, state_group or prev_group must be not None") @@ -111,7 +111,7 @@ class _StateCacheEntry: # # This can be None if we have a `state_group` (as then we can fetch the # state from the DB.) - self._state: Optional[StateMap[str]] = ( + self._state: StateMap[str] | None = ( immutabledict(state) if state is not None else None ) @@ -120,7 +120,7 @@ class _StateCacheEntry: self.state_group = state_group self.prev_group = prev_group - self.delta_ids: Optional[StateMap[str]] = ( + self.delta_ids: StateMap[str] | None = ( immutabledict(delta_ids) if delta_ids is not None else None ) @@ -206,7 +206,7 @@ class StateHandler: self, room_id: str, event_ids: StrCollection, - state_filter: Optional[StateFilter] = None, + state_filter: StateFilter | None = None, await_full_state: bool = True, ) -> StateMap[str]: """Fetch the state after each of the given event IDs. Resolve them and return. @@ -283,9 +283,9 @@ class StateHandler: async def calculate_context_info( self, event: EventBase, - state_ids_before_event: Optional[StateMap[str]] = None, - partial_state: Optional[bool] = None, - state_group_before_event: Optional[int] = None, + state_ids_before_event: StateMap[str] | None = None, + partial_state: bool | None = None, + state_group_before_event: int | None = None, ) -> UnpersistedEventContextBase: """ Calulates the contents of an unpersisted event context, other than the current @@ -456,8 +456,8 @@ class StateHandler: async def compute_event_context( self, event: EventBase, - state_ids_before_event: Optional[StateMap[str]] = None, - partial_state: Optional[bool] = None, + state_ids_before_event: StateMap[str] | None = None, + partial_state: bool | None = None, ) -> EventContext: """Build an EventContext structure for a non-outlier event. 
@@ -670,7 +670,7 @@ class StateResolutionHandler: room_id: str, room_version: str, state_groups_ids: Mapping[int, StateMap[str]], - event_map: Optional[dict[str, EventBase]], + event_map: dict[str, EventBase] | None, state_res_store: "StateResolutionStore", ) -> _StateCacheEntry: """Resolves conflicts between a set of state groups @@ -770,7 +770,7 @@ class StateResolutionHandler: room_id: str, room_version: str, state_sets: Sequence[StateMap[str]], - event_map: Optional[dict[str, EventBase]], + event_map: dict[str, EventBase] | None, state_res_store: "StateResolutionStore", ) -> StateMap[str]: """ @@ -934,7 +934,7 @@ def _make_state_cache_entry( # failing that, look for the closest match. prev_group = None - delta_ids: Optional[StateMap[str]] = None + delta_ids: StateMap[str] | None = None for old_group, old_state in state_groups_ids.items(): if old_state.keys() - new_state.keys(): @@ -991,8 +991,8 @@ class StateResolutionStore: self, room_id: str, state_sets: list[set[str]], - conflicted_state: Optional[set[str]], - additional_backwards_reachable_conflicted_events: Optional[set[str]], + conflicted_state: set[str] | None, + additional_backwards_reachable_conflicted_events: set[str] | None, ) -> Awaitable[StateDifference]: """ "Given sets of state events figure out the auth chain difference (as per state res v2 algorithm). diff --git a/synapse/state/v1.py b/synapse/state/v1.py index a219347264..0b4514d322 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -24,7 +24,6 @@ from typing import ( Awaitable, Callable, Iterable, - Optional, Sequence, ) @@ -45,7 +44,7 @@ async def resolve_events_with_store( room_id: str, room_version: RoomVersion, state_sets: Sequence[StateMap[str]], - event_map: Optional[dict[str, EventBase]], + event_map: dict[str, EventBase] | None, state_map_factory: Callable[[StrCollection], Awaitable[dict[str, EventBase]]], ) -> StateMap[str]: """ diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 683f0c1dcc..c410c3a7ec 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -28,7 +28,6 @@ from typing import ( Generator, Iterable, Literal, - Optional, Protocol, Sequence, overload, @@ -63,8 +62,8 @@ class StateResolutionStore(Protocol): self, room_id: str, state_sets: list[set[str]], - conflicted_state: Optional[set[str]], - additional_backwards_reachable_conflicted_events: Optional[set[str]], + conflicted_state: set[str] | None, + additional_backwards_reachable_conflicted_events: set[str] | None, ) -> Awaitable[StateDifference]: ... 
@@ -84,7 +83,7 @@ async def resolve_events_with_store( room_id: str, room_version: RoomVersion, state_sets: Sequence[StateMap[str]], - event_map: Optional[dict[str, EventBase]], + event_map: dict[str, EventBase] | None, state_res_store: StateResolutionStore, ) -> StateMap[str]: """Resolves the state using the v2 state resolution algorithm @@ -124,7 +123,7 @@ async def resolve_events_with_store( logger.debug("%d conflicted state entries", len(conflicted_state)) logger.debug("Calculating auth chain difference") - conflicted_set: Optional[set[str]] = None + conflicted_set: set[str] | None = None if room_version.state_res == StateResolutionVersions.V2_1: # calculate the conflicted subgraph conflicted_set = set(itertools.chain.from_iterable(conflicted_state.values())) @@ -313,7 +312,7 @@ async def _get_auth_chain_difference( state_sets: Sequence[StateMap[str]], unpersisted_events: dict[str, EventBase], state_res_store: StateResolutionStore, - conflicted_state: Optional[set[str]], + conflicted_state: set[str] | None, ) -> set[str]: """Compare the auth chains of each state set and return the set of events that only appear in some, but not all of the auth chains. @@ -546,7 +545,7 @@ def _seperate( conflicted_state[key] = event_ids # mypy doesn't understand that discarding None above means that conflicted - # state is StateMap[set[str]], not StateMap[set[Optional[Str]]]. + # state is StateMap[set[str]], not StateMap[set[str | None]]. return unconflicted_state, conflicted_state # type: ignore[return-value] @@ -755,7 +754,7 @@ async def _mainline_sort( clock: Clock, room_id: str, event_ids: list[str], - resolved_power_event_id: Optional[str], + resolved_power_event_id: str | None, event_map: dict[str, EventBase], state_res_store: StateResolutionStore, ) -> list[str]: @@ -842,7 +841,7 @@ async def _get_mainline_depth_for_event( """ room_id = event.room_id - tmp_event: Optional[EventBase] = event + tmp_event: EventBase | None = event # We do an iterative search, replacing `event with the power level in its # auth events (if any) @@ -889,7 +888,7 @@ async def _get_event( event_map: dict[str, EventBase], state_res_store: StateResolutionStore, allow_none: Literal[True], -) -> Optional[EventBase]: ... +) -> EventBase | None: ... async def _get_event( @@ -898,7 +897,7 @@ async def _get_event( event_map: dict[str, EventBase], state_res_store: StateResolutionStore, allow_none: bool = False, -) -> Optional[EventBase]: +) -> EventBase | None: """Helper function to look up event in event_map, falling back to looking it up in the store diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index b6958ef06b..8eeea20967 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -21,7 +21,7 @@ # import logging from abc import ABCMeta -from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional, Union +from typing import TYPE_CHECKING, Any, Collection, Iterable from synapse.storage.database import ( DatabasePool, @@ -176,7 +176,7 @@ class SQLBaseStore(metaclass=ABCMeta): ) def _attempt_to_invalidate_cache( - self, cache_name: str, key: Optional[Collection[Any]] + self, cache_name: str, key: Collection[Any] | None ) -> bool: """Attempts to invalidate the cache of the given name, ignoring if the cache doesn't exist. 
Mainly used for invalidating caches on workers, @@ -218,7 +218,7 @@ class SQLBaseStore(metaclass=ABCMeta): self.external_cached_functions[cache_name] = func -def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any: +def db_to_json(db_content: memoryview | bytes | bytearray | str) -> Any: """ Take some data from a database row and return a JSON-decoded object. diff --git a/synapse/storage/admin_client_config.py b/synapse/storage/admin_client_config.py index 07acddc660..b56e21edfa 100644 --- a/synapse/storage/admin_client_config.py +++ b/synapse/storage/admin_client_config.py @@ -1,5 +1,4 @@ import logging -from typing import Optional from synapse.types import JsonMapping @@ -9,7 +8,7 @@ logger = logging.getLogger(__name__) class AdminClientConfig: """Class to track various Synapse-specific admin-only client-impacting config options.""" - def __init__(self, account_data: Optional[JsonMapping]): + def __init__(self, account_data: JsonMapping | None): # Allow soft-failed events to be returned down `/sync` and other # client APIs. `io.element.synapse.soft_failed: true` is added to the # `unsigned` portion of the event to inform clients that the event diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 1c17d4d609..c71bcdb7fb 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -29,7 +29,6 @@ from typing import ( Awaitable, Callable, Iterable, - Optional, Sequence, cast, ) @@ -169,9 +168,9 @@ class _BackgroundUpdateContextManager: async def __aexit__( self, - exc_type: Optional[type[BaseException]], - exc: Optional[BaseException], - tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: TracebackType | None, ) -> None: pass @@ -196,7 +195,7 @@ class BackgroundUpdatePerformance: self.avg_item_count += 0.1 * (item_count - self.avg_item_count) self.avg_duration_ms += 0.1 * (duration_ms - self.avg_duration_ms) - def average_items_per_ms(self) -> Optional[float]: + def average_items_per_ms(self) -> float | None: """An estimate of how long it takes to do a single update. Returns: A duration in ms as a float @@ -212,7 +211,7 @@ class BackgroundUpdatePerformance: # changes in how long the update process takes. return float(self.avg_item_count) / float(self.avg_duration_ms) - def total_items_per_ms(self) -> Optional[float]: + def total_items_per_ms(self) -> float | None: """An estimate of how long it takes to do a single update. Returns: A duration in ms as a float @@ -250,11 +249,11 @@ class BackgroundUpdater: self._database_name = database.name() # if a background update is currently running, its name. 
- self._current_background_update: Optional[str] = None + self._current_background_update: str | None = None - self._on_update_callback: Optional[ON_UPDATE_CALLBACK] = None - self._default_batch_size_callback: Optional[DEFAULT_BATCH_SIZE_CALLBACK] = None - self._min_batch_size_callback: Optional[MIN_BATCH_SIZE_CALLBACK] = None + self._on_update_callback: ON_UPDATE_CALLBACK | None = None + self._default_batch_size_callback: DEFAULT_BATCH_SIZE_CALLBACK | None = None + self._min_batch_size_callback: MIN_BATCH_SIZE_CALLBACK | None = None self._background_update_performance: dict[str, BackgroundUpdatePerformance] = {} self._background_update_handlers: dict[str, _BackgroundUpdateHandler] = {} @@ -304,8 +303,8 @@ class BackgroundUpdater: def register_update_controller_callbacks( self, on_update: ON_UPDATE_CALLBACK, - default_batch_size: Optional[DEFAULT_BATCH_SIZE_CALLBACK] = None, - min_batch_size: Optional[DEFAULT_BATCH_SIZE_CALLBACK] = None, + default_batch_size: DEFAULT_BATCH_SIZE_CALLBACK | None = None, + min_batch_size: DEFAULT_BATCH_SIZE_CALLBACK | None = None, ) -> None: """Register callbacks from a module for each hook.""" if self._on_update_callback is not None: @@ -380,7 +379,7 @@ class BackgroundUpdater: return self.minimum_background_batch_size - def get_current_update(self) -> Optional[BackgroundUpdatePerformance]: + def get_current_update(self) -> BackgroundUpdatePerformance | None: """Returns the current background update, if any.""" update_name = self._current_background_update @@ -526,14 +525,14 @@ class BackgroundUpdater: True if we have finished running all the background updates, otherwise False """ - def get_background_updates_txn(txn: Cursor) -> list[tuple[str, Optional[str]]]: + def get_background_updates_txn(txn: Cursor) -> list[tuple[str, str | None]]: txn.execute( """ SELECT update_name, depends_on FROM background_updates ORDER BY ordering, update_name """ ) - return cast(list[tuple[str, Optional[str]]], txn.fetchall()) + return cast(list[tuple[str, str | None]], txn.fetchall()) if not self._current_background_update: all_pending_updates = await self.db_pool.runInteraction( @@ -669,10 +668,10 @@ class BackgroundUpdater: index_name: str, table: str, columns: Iterable[str], - where_clause: Optional[str] = None, + where_clause: str | None = None, unique: bool = False, psql_only: bool = False, - replaces_index: Optional[str] = None, + replaces_index: str | None = None, ) -> None: """Helper for store classes to do a background index addition @@ -763,10 +762,10 @@ class BackgroundUpdater: index_name: str, table: str, columns: Iterable[str], - where_clause: Optional[str] = None, + where_clause: str | None = None, unique: bool = False, psql_only: bool = False, - replaces_index: Optional[str] = None, + replaces_index: str | None = None, ) -> None: """Add an index in the background. 
@@ -862,7 +861,7 @@ class BackgroundUpdater: c.execute(sql) if isinstance(self.db_pool.engine, engines.PostgresEngine): - runner: Optional[Callable[[LoggingDatabaseConnection], None]] = ( + runner: Callable[[LoggingDatabaseConnection], None] | None = ( create_index_psql ) elif psql_only: diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 0daf4830d9..2948227807 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -34,9 +34,7 @@ from typing import ( Generator, Generic, Iterable, - Optional, TypeVar, - Union, ) import attr @@ -164,7 +162,7 @@ class _UpdateCurrentStateTask: return isinstance(task, _UpdateCurrentStateTask) -_EventPersistQueueTask = Union[_PersistEventsTask, _UpdateCurrentStateTask] +_EventPersistQueueTask = _PersistEventsTask | _UpdateCurrentStateTask _PersistResult = TypeVar("_PersistResult") @@ -674,7 +672,7 @@ class EventsPersistenceStorageController: async def _calculate_new_forward_extremities_and_state_delta( self, room_id: str, ev_ctx_rm: list[EventPersistencePair] - ) -> tuple[Optional[set[str]], Optional[DeltaState]]: + ) -> tuple[set[str] | None, DeltaState | None]: """Calculates the new forward extremities and state delta for a room given events to persist. @@ -861,7 +859,7 @@ class EventsPersistenceStorageController: events_context: list[EventPersistencePair], old_latest_event_ids: AbstractSet[str], new_latest_event_ids: set[str], - ) -> tuple[Optional[StateMap[str]], Optional[StateMap[str]], set[str]]: + ) -> tuple[StateMap[str] | None, StateMap[str] | None, set[str]]: """Calculate the current state dict after adding some new events to a room diff --git a/synapse/storage/controllers/purge_events.py b/synapse/storage/controllers/purge_events.py index 6606fdcc30..4ca3f8f4e1 100644 --- a/synapse/storage/controllers/purge_events.py +++ b/synapse/storage/controllers/purge_events.py @@ -25,7 +25,6 @@ from typing import ( TYPE_CHECKING, Collection, Mapping, - Optional, ) from synapse.logging.context import nested_logging_context @@ -445,7 +444,7 @@ class PurgeEventsStorageController: # Remove state groups from deletion_candidates which are directly referenced or share a # future edge with a referenced state group within this batch. - def filter_reference_chains(group: Optional[int]) -> None: + def filter_reference_chains(group: int | None) -> None: while group is not None: deletion_candidates.discard(group) group = state_group_edges.get(group) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 690a0dde2e..9c5e837ab0 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -27,8 +27,6 @@ from typing import ( Collection, Iterable, Mapping, - Optional, - Union, ) from synapse.api.constants import EventTypes, Membership @@ -91,7 +89,7 @@ class StateStorageController: @tag_args async def get_state_group_delta( self, state_group: int - ) -> tuple[Optional[int], Optional[StateMap[str]]]: + ) -> tuple[int | None, StateMap[str] | None]: """Given a state group try to return a previous group and a delta between the old and the new. 
@@ -141,7 +139,7 @@ class StateStorageController: @trace @tag_args async def get_state_ids_for_group( - self, state_group: int, state_filter: Optional[StateFilter] = None + self, state_group: int, state_filter: StateFilter | None = None ) -> StateMap[str]: """Get the event IDs of all the state in the given state group @@ -217,7 +215,7 @@ class StateStorageController: @trace @tag_args async def get_state_for_events( - self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None + self, event_ids: Collection[str], state_filter: StateFilter | None = None ) -> dict[str, StateMap[EventBase]]: """Given a list of event_ids and type tuples, return a list of state dicts for each event. @@ -271,7 +269,7 @@ class StateStorageController: async def get_state_ids_for_events( self, event_ids: Collection[str], - state_filter: Optional[StateFilter] = None, + state_filter: StateFilter | None = None, await_full_state: bool = True, ) -> dict[str, StateMap[str]]: """ @@ -322,7 +320,7 @@ class StateStorageController: @trace @tag_args async def get_state_for_event( - self, event_id: str, state_filter: Optional[StateFilter] = None + self, event_id: str, state_filter: StateFilter | None = None ) -> StateMap[EventBase]: """ Get the state dict corresponding to a particular event @@ -349,7 +347,7 @@ class StateStorageController: async def get_state_ids_for_event( self, event_id: str, - state_filter: Optional[StateFilter] = None, + state_filter: StateFilter | None = None, await_full_state: bool = True, ) -> StateMap[str]: """ @@ -382,7 +380,7 @@ class StateStorageController: async def get_state_after_event( self, event_id: str, - state_filter: Optional[StateFilter] = None, + state_filter: StateFilter | None = None, await_full_state: bool = True, ) -> StateMap[str]: """ @@ -423,7 +421,7 @@ class StateStorageController: self, room_id: str, stream_position: StreamToken, - state_filter: Optional[StateFilter] = None, + state_filter: StateFilter | None = None, await_full_state: bool = True, ) -> StateMap[str]: """Get the room state at a particular stream position @@ -479,7 +477,7 @@ class StateStorageController: self, room_id: str, stream_position: StreamToken, - state_filter: Optional[StateFilter] = None, + state_filter: StateFilter | None = None, await_full_state: bool = True, ) -> StateMap[EventBase]: """Same as `get_state_ids_at` but also fetches the events""" @@ -500,7 +498,7 @@ class StateStorageController: @trace @tag_args async def get_state_for_groups( - self, groups: Iterable[int], state_filter: Optional[StateFilter] = None + self, groups: Iterable[int], state_filter: StateFilter | None = None ) -> dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -546,9 +544,9 @@ class StateStorageController: self, event_id: str, room_id: str, - prev_group: Optional[int], - delta_ids: Optional[StateMap[str]], - current_state_ids: Optional[StateMap[str]], + prev_group: int | None, + delta_ids: StateMap[str] | None, + current_state_ids: StateMap[str] | None, ) -> int: """Store a new set of state, returning a newly assigned state group. 
@@ -575,9 +573,9 @@ class StateStorageController: async def get_current_state_ids( self, room_id: str, - state_filter: Optional[StateFilter] = None, + state_filter: StateFilter | None = None, await_full_state: bool = True, - on_invalidate: Optional[Callable[[], None]] = None, + on_invalidate: Callable[[], None] | None = None, ) -> StateMap[str]: """Get the current state event ids for a room based on the current_state_events table. @@ -614,7 +612,7 @@ class StateStorageController: @trace @tag_args - async def get_canonical_alias_for_room(self, room_id: str) -> Optional[str]: + async def get_canonical_alias_for_room(self, room_id: str) -> str | None: """Get canonical alias for room, if any Args: @@ -639,9 +637,7 @@ class StateStorageController: return event.content.get("alias") @cached() - async def get_server_acl_for_room( - self, room_id: str - ) -> Optional[ServerAclEvaluator]: + async def get_server_acl_for_room(self, room_id: str) -> ServerAclEvaluator | None: """Get the server ACL evaluator for room, if any This does up-front parsing of the content to ignore bad data and pre-compile @@ -695,7 +691,7 @@ class StateStorageController: async def get_current_state( self, room_id: str, - state_filter: Optional[StateFilter] = None, + state_filter: StateFilter | None = None, await_full_state: bool = True, ) -> StateMap[EventBase]: """Same as `get_current_state_ids` but also fetches the events""" @@ -717,7 +713,7 @@ class StateStorageController: @tag_args async def get_current_state_event( self, room_id: str, event_type: str, state_key: str - ) -> Optional[EventBase]: + ) -> EventBase | None: """Get the current state event for the given type/state_key.""" key = (event_type, state_key) @@ -804,7 +800,7 @@ class StateStorageController: async def get_joined_hosts( self, room_id: str, state_entry: "_StateCacheEntry" ) -> frozenset[str]: - state_group: Union[object, int] = state_entry.state_group + state_group: object | int = state_entry.state_group if not state_group: # If state_group is None it means it has yet to be assigned a # state group, i.e. 
we need to make sure that calls with a state_group @@ -822,7 +818,7 @@ class StateStorageController: async def _get_joined_hosts( self, room_id: str, - state_group: Union[object, int], + state_group: object | int, state_entry: "_StateCacheEntry", ) -> frozenset[str]: # We don't use `state_group`, it's there so that we can cache based on diff --git a/synapse/storage/database.py b/synapse/storage/database.py index b7f870bd26..3d351e8aea 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -35,7 +35,6 @@ from typing import ( Iterator, Literal, Mapping, - Optional, Sequence, TypeVar, cast, @@ -213,10 +212,10 @@ class LoggingDatabaseConnection: def cursor( self, *, - txn_name: Optional[str] = None, - after_callbacks: Optional[list["_CallbackListEntry"]] = None, - async_after_callbacks: Optional[list["_AsyncCallbackListEntry"]] = None, - exception_callbacks: Optional[list["_CallbackListEntry"]] = None, + txn_name: str | None = None, + after_callbacks: list["_CallbackListEntry"] | None = None, + async_after_callbacks: list["_AsyncCallbackListEntry"] | None = None, + exception_callbacks: list["_CallbackListEntry"] | None = None, ) -> "LoggingTransaction": if not txn_name: txn_name = self.default_txn_name @@ -246,10 +245,10 @@ class LoggingDatabaseConnection: def __exit__( self, - exc_type: Optional[type[BaseException]], - exc_value: Optional[BaseException], - traceback: Optional[types.TracebackType], - ) -> Optional[bool]: + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: types.TracebackType | None, + ) -> bool | None: return self.conn.__exit__(exc_type, exc_value, traceback) # Proxy through any unknown lookups to the DB conn class. @@ -307,9 +306,9 @@ class LoggingTransaction: name: str, server_name: str, database_engine: BaseDatabaseEngine, - after_callbacks: Optional[list[_CallbackListEntry]] = None, - async_after_callbacks: Optional[list[_AsyncCallbackListEntry]] = None, - exception_callbacks: Optional[list[_CallbackListEntry]] = None, + after_callbacks: list[_CallbackListEntry] | None = None, + async_after_callbacks: list[_AsyncCallbackListEntry] | None = None, + exception_callbacks: list[_CallbackListEntry] | None = None, ): self.txn = txn self.name = name @@ -379,10 +378,10 @@ class LoggingTransaction: assert self.exception_callbacks is not None self.exception_callbacks.append((callback, args, kwargs)) - def fetchone(self) -> Optional[tuple]: + def fetchone(self) -> tuple | None: return self.txn.fetchone() - def fetchmany(self, size: Optional[int] = None) -> list[tuple]: + def fetchmany(self, size: int | None = None) -> list[tuple]: return self.txn.fetchmany(size=size) def fetchall(self) -> list[tuple]: @@ -398,7 +397,7 @@ class LoggingTransaction: @property def description( self, - ) -> Optional[Sequence[Any]]: + ) -> Sequence[Any] | None: return self.txn.description def execute_batch(self, sql: str, args: Iterable[Iterable[Any]]) -> None: @@ -429,7 +428,7 @@ class LoggingTransaction: self, sql: str, values: Iterable[Iterable[Any]], - template: Optional[str] = None, + template: str | None = None, fetch: bool = True, ) -> list[tuple]: """Corresponds to psycopg2.extras.execute_values. 
Only available when @@ -536,9 +535,9 @@ class LoggingTransaction: def __exit__( self, - exc_type: Optional[type[BaseException]], - exc_value: Optional[BaseException], - traceback: Optional[types.TracebackType], + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: types.TracebackType | None, ) -> None: self.close() @@ -920,7 +919,7 @@ class DatabasePool: func: Callable[..., R], *args: Any, db_autocommit: bool = False, - isolation_level: Optional[int] = None, + isolation_level: int | None = None, **kwargs: Any, ) -> R: """Starts a transaction on the database and runs a given function @@ -1002,7 +1001,7 @@ class DatabasePool: func: Callable[Concatenate[LoggingDatabaseConnection, P], R], *args: Any, db_autocommit: bool = False, - isolation_level: Optional[int] = None, + isolation_level: int | None = None, **kwargs: Any, ) -> R: """Wraps the .runWithConnection() method on the underlying db_pool. @@ -1240,8 +1239,8 @@ class DatabasePool: table: str, keyvalues: dict[str, Any], values: dict[str, Any], - insertion_values: Optional[dict[str, Any]] = None, - where_clause: Optional[str] = None, + insertion_values: dict[str, Any] | None = None, + where_clause: str | None = None, desc: str = "simple_upsert", ) -> bool: """Insert a row with values + insertion_values; on conflict, update with values. @@ -1334,8 +1333,8 @@ class DatabasePool: table: str, keyvalues: Mapping[str, Any], values: Mapping[str, Any], - insertion_values: Optional[Mapping[str, Any]] = None, - where_clause: Optional[str] = None, + insertion_values: Mapping[str, Any] | None = None, + where_clause: str | None = None, ) -> bool: """ Pick the UPSERT method which works best on the platform. Either the @@ -1379,8 +1378,8 @@ class DatabasePool: table: str, keyvalues: Mapping[str, Any], values: Mapping[str, Any], - insertion_values: Optional[Mapping[str, Any]] = None, - where_clause: Optional[str] = None, + insertion_values: Mapping[str, Any] | None = None, + where_clause: str | None = None, lock: bool = True, ) -> bool: """ @@ -1460,8 +1459,8 @@ class DatabasePool: table: str, keyvalues: Mapping[str, Any], values: Mapping[str, Any], - insertion_values: Optional[Mapping[str, Any]] = None, - where_clause: Optional[str] = None, + insertion_values: Mapping[str, Any] | None = None, + where_clause: str | None = None, ) -> bool: """ Use the native UPSERT functionality in PostgreSQL. @@ -1728,7 +1727,7 @@ class DatabasePool: retcols: Collection[str], allow_none: Literal[True] = True, desc: str = "simple_select_one", - ) -> Optional[tuple[Any, ...]]: ... + ) -> tuple[Any, ...] | None: ... async def simple_select_one( self, @@ -1737,7 +1736,7 @@ class DatabasePool: retcols: Collection[str], allow_none: bool = False, desc: str = "simple_select_one", - ) -> Optional[tuple[Any, ...]]: + ) -> tuple[Any, ...] | None: """Executes a SELECT query on the named table, which is expected to return a single row, returning multiple columns from it. @@ -1777,7 +1776,7 @@ class DatabasePool: retcol: str, allow_none: Literal[True] = True, desc: str = "simple_select_one_onecol", - ) -> Optional[Any]: ... + ) -> Any | None: ... async def simple_select_one_onecol( self, @@ -1786,7 +1785,7 @@ class DatabasePool: retcol: str, allow_none: bool = False, desc: str = "simple_select_one_onecol", - ) -> Optional[Any]: + ) -> Any | None: """Executes a SELECT query on the named table, which is expected to return a single row, returning a single column from it. 
@@ -1828,7 +1827,7 @@ class DatabasePool: keyvalues: dict[str, Any], retcol: str, allow_none: Literal[True] = True, - ) -> Optional[Any]: ... + ) -> Any | None: ... @classmethod def simple_select_one_onecol_txn( @@ -1838,7 +1837,7 @@ class DatabasePool: keyvalues: dict[str, Any], retcol: str, allow_none: bool = False, - ) -> Optional[Any]: + ) -> Any | None: ret = cls.simple_select_onecol_txn( txn, table=table, keyvalues=keyvalues, retcol=retcol ) @@ -1871,7 +1870,7 @@ class DatabasePool: async def simple_select_onecol( self, table: str, - keyvalues: Optional[dict[str, Any]], + keyvalues: dict[str, Any] | None, retcol: str, desc: str = "simple_select_onecol", ) -> list[Any]: @@ -1899,7 +1898,7 @@ class DatabasePool: async def simple_select_list( self, table: str, - keyvalues: Optional[dict[str, Any]], + keyvalues: dict[str, Any] | None, retcols: Collection[str], desc: str = "simple_select_list", ) -> list[tuple[Any, ...]]: @@ -1931,7 +1930,7 @@ class DatabasePool: cls, txn: LoggingTransaction, table: str, - keyvalues: Optional[dict[str, Any]], + keyvalues: dict[str, Any] | None, retcols: Iterable[str], ) -> list[tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or @@ -1967,7 +1966,7 @@ class DatabasePool: column: str, iterable: Iterable[Any], retcols: Collection[str], - keyvalues: Optional[dict[str, Any]] = None, + keyvalues: dict[str, Any] | None = None, desc: str = "simple_select_many_batch", batch_size: int = 100, ) -> list[tuple[Any, ...]]: @@ -2249,7 +2248,7 @@ class DatabasePool: keyvalues: dict[str, Any], retcols: Collection[str], allow_none: Literal[True] = True, - ) -> Optional[tuple[Any, ...]]: ... + ) -> tuple[Any, ...] | None: ... @staticmethod def simple_select_one_txn( @@ -2258,7 +2257,7 @@ class DatabasePool: keyvalues: dict[str, Any], retcols: Collection[str], allow_none: bool = False, - ) -> Optional[tuple[Any, ...]]: + ) -> tuple[Any, ...] | None: select_sql = "SELECT %s FROM %s" % (", ".join(retcols), table) if keyvalues: @@ -2529,9 +2528,9 @@ class DatabasePool: start: int, limit: int, retcols: Iterable[str], - filters: Optional[dict[str, Any]] = None, - keyvalues: Optional[dict[str, Any]] = None, - exclude_keyvalues: Optional[dict[str, Any]] = None, + filters: dict[str, Any] | None = None, + keyvalues: dict[str, Any] | None = None, + exclude_keyvalues: dict[str, Any] | None = None, order_direction: str = "ASC", ) -> list[tuple[Any, ...]]: """ diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index f145d21096..b44b84b913 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Generic, Optional, TypeVar +from typing import TYPE_CHECKING, Generic, TypeVar from synapse.metrics import SERVER_NAME_LABEL, LaterGauge from synapse.storage._base import SQLBaseStore @@ -64,7 +64,7 @@ class Databases(Generic[DataStoreT]): databases: list[DatabasePool] main: "DataStore" # FIXME: https://github.com/matrix-org/synapse/issues/11165: actually an instance of `main_store_class` state: StateGroupDataStore - persist_events: Optional[PersistEventsStore] + persist_events: PersistEventsStore | None state_deletion: StateDeletionDataStore def __init__(self, main_store_class: type[DataStoreT], hs: "HomeServer"): @@ -72,10 +72,10 @@ class Databases(Generic[DataStoreT]): # store. 
self.databases = [] - main: Optional[DataStoreT] = None - state: Optional[StateGroupDataStore] = None - state_deletion: Optional[StateDeletionDataStore] = None - persist_events: Optional[PersistEventsStore] = None + main: DataStoreT | None = None + state: StateGroupDataStore | None = None + state_deletion: StateDeletionDataStore | None = None + persist_events: PersistEventsStore | None = None server_name = hs.hostname diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 9f23c1a4e0..12593094f1 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Optional, Union, cast +from typing import TYPE_CHECKING, cast import attr @@ -99,14 +99,14 @@ class UserPaginateResponse: """This is very similar to UserInfo, but not quite the same.""" name: str - user_type: Optional[str] + user_type: str | None is_guest: bool admin: bool deactivated: bool shadow_banned: bool - displayname: Optional[str] - avatar_url: Optional[str] - creation_ts: Optional[int] + displayname: str | None + avatar_url: str | None + creation_ts: int | None approved: bool erased: bool last_seen_ts: int @@ -180,15 +180,15 @@ class DataStore( self, start: int, limit: int, - user_id: Optional[str] = None, - name: Optional[str] = None, + user_id: str | None = None, + name: str | None = None, guests: bool = True, - deactivated: Optional[bool] = None, - admins: Optional[bool] = None, + deactivated: bool | None = None, + admins: bool | None = None, order_by: str = UserSortOrder.NAME.value, direction: Direction = Direction.FORWARDS, approved: bool = True, - not_user_types: Optional[list[str]] = None, + not_user_types: list[str] | None = None, locked: bool = False, ) -> tuple[list[UserPaginateResponse], int]: """Function to retrieve a paginated list of users from @@ -351,9 +351,7 @@ class DataStore( async def search_users( self, term: str - ) -> list[ - tuple[str, Optional[str], Union[int, bool], Union[int, bool], Optional[str]] - ]: + ) -> list[tuple[str, str | None, int | bool, int | bool, str | None]]: """Function to search users list for one or more users with the matched term. @@ -366,9 +364,7 @@ class DataStore( def search_users( txn: LoggingTransaction, - ) -> list[ - tuple[str, Optional[str], Union[int, bool], Union[int, bool], Optional[str]] - ]: + ) -> list[tuple[str, str | None, int | bool, int | bool, str | None]]: search_term = "%%" + term + "%%" sql = """ @@ -382,10 +378,10 @@ class DataStore( list[ tuple[ str, - Optional[str], - Union[int, bool], - Union[int, bool], - Optional[str], + str | None, + int | bool, + int | bool, + str | None, ] ], txn.fetchall(), diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index f1fb5fe188..15728cf618 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -25,7 +25,6 @@ from typing import ( Any, Iterable, Mapping, - Optional, cast, ) @@ -213,7 +212,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) @cached(num_args=2, max_entries=5000, tree=True) async def get_global_account_data_by_type_for_user( self, user_id: str, data_type: str - ) -> Optional[JsonMapping]: + ) -> JsonMapping | None: """ Returns: The account data. 
@@ -233,7 +232,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_latest_stream_id_for_global_account_data_by_type_for_user( self, user_id: str, data_type: str - ) -> Optional[int]: + ) -> int | None: """ Returns: The stream ID of the account data, @@ -242,7 +241,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_latest_stream_id_for_global_account_data_by_type_for_user_txn( txn: LoggingTransaction, - ) -> Optional[int]: + ) -> int | None: sql = """ SELECT stream_id FROM account_data WHERE user_id = ? AND account_data_type = ? @@ -300,7 +299,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) @cached(num_args=3, max_entries=5000, tree=True) async def get_account_data_for_room_and_type( self, user_id: str, room_id: str, account_data_type: str - ) -> Optional[JsonMapping]: + ) -> JsonMapping | None: """Get the client account_data of given type for a user for a room. Args: @@ -313,7 +312,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_account_data_for_room_and_type_txn( txn: LoggingTransaction, - ) -> Optional[JsonDict]: + ) -> JsonDict | None: content_json = self.db_pool.simple_select_one_onecol_txn( txn, table="room_account_data", diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 7558672905..6c2bf90b37 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -20,7 +20,7 @@ # import logging import re -from typing import TYPE_CHECKING, Optional, Pattern, Sequence, cast +from typing import TYPE_CHECKING, Pattern, Sequence, cast from synapse.appservice import ( ApplicationService, @@ -53,7 +53,7 @@ logger = logging.getLogger(__name__) def _make_exclusive_regex( services_cache: list[ApplicationService], -) -> Optional[Pattern]: +) -> Pattern | None: # We precompile a regex constructed from all the regexes that the AS's # have registered for exclusive users. exclusive_user_regexes = [ @@ -63,7 +63,7 @@ def _make_exclusive_regex( ] if exclusive_user_regexes: exclusive_user_regex = "|".join("(" + r + ")" for r in exclusive_user_regexes) - exclusive_user_pattern: Optional[Pattern] = re.compile(exclusive_user_regex) + exclusive_user_pattern: Pattern | None = re.compile(exclusive_user_regex) else: # We handle this case specially otherwise the constructed regex # will always match @@ -116,7 +116,7 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore): else: return False - def get_app_service_by_user_id(self, user_id: str) -> Optional[ApplicationService]: + def get_app_service_by_user_id(self, user_id: str) -> ApplicationService | None: """Retrieve an application service from their user ID. All application services have associated with them a particular user ID. @@ -134,7 +134,7 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore): return service return None - def get_app_service_by_token(self, token: str) -> Optional[ApplicationService]: + def get_app_service_by_token(self, token: str) -> ApplicationService | None: """Get the application service with the given appservice token. Args: @@ -147,7 +147,7 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore): return service return None - def get_app_service_by_id(self, as_id: str) -> Optional[ApplicationService]: + def get_app_service_by_id(self, as_id: str) -> ApplicationService | None: """Get the application service with the given appservice ID. 
Args: @@ -227,7 +227,7 @@ class ApplicationServiceTransactionWorkerStore( async def get_appservice_state( self, service: ApplicationService - ) -> Optional[ApplicationServiceState]: + ) -> ApplicationServiceState | None: """Get the application service state. Args: @@ -347,7 +347,7 @@ class ApplicationServiceTransactionWorkerStore( async def get_oldest_unsent_txn( self, service: ApplicationService - ) -> Optional[AppServiceTransaction]: + ) -> AppServiceTransaction | None: """Get the oldest transaction which has not been sent for this service. Args: @@ -358,7 +358,7 @@ class ApplicationServiceTransactionWorkerStore( def _get_oldest_unsent_txn( txn: LoggingTransaction, - ) -> Optional[tuple[int, str]]: + ) -> tuple[int, str] | None: # Monotonically increasing txn ids, so just select the smallest # one in the txns table (we delete them when they are sent) txn.execute( @@ -366,7 +366,7 @@ class ApplicationServiceTransactionWorkerStore( " ORDER BY txn_id ASC LIMIT 1", (service.id,), ) - return cast(Optional[tuple[int, str]], txn.fetchone()) + return cast(tuple[int, str] | None, txn.fetchone()) entry = await self.db_pool.runInteraction( "get_oldest_unsent_appservice_txn", _get_oldest_unsent_txn @@ -447,7 +447,7 @@ class ApplicationServiceTransactionWorkerStore( ) async def set_appservice_stream_type_pos( - self, service: ApplicationService, stream_type: str, pos: Optional[int] + self, service: ApplicationService, stream_type: str, pos: int | None ) -> None: if stream_type not in ("read_receipt", "presence", "to_device", "device_list"): raise ValueError( diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 5a96510b13..b7b9b42461 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -23,7 +23,7 @@ import itertools import json import logging -from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional +from typing import TYPE_CHECKING, Any, Collection, Iterable from synapse.api.constants import EventTypes from synapse.config._base import Config @@ -104,7 +104,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): psql_only=True, # The table is only on postgres DBs. ) - self._cache_id_gen: Optional[MultiWriterIdGenerator] + self._cache_id_gen: MultiWriterIdGenerator | None if isinstance(self.database_engine, PostgresEngine): # We set the `writers` to an empty list here as we don't care about # missing updates over restarts, as we'll not have anything in our @@ -381,9 +381,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): event_id: str, room_id: str, etype: str, - state_key: Optional[str], - redacts: Optional[str], - relates_to: Optional[str], + state_key: str | None, + redacts: str | None, + relates_to: str | None, backfilled: bool, ) -> None: # This is needed to avoid a circular import. @@ -699,7 +699,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): ) async def send_invalidation_to_replication( - self, cache_name: str, keys: Optional[Collection[Any]] + self, cache_name: str, keys: Collection[Any] | None ) -> None: await self.db_pool.runInteraction( "send_invalidation_to_replication", @@ -709,7 +709,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): ) def _send_invalidation_to_replication( - self, txn: LoggingTransaction, cache_name: str, keys: Optional[Iterable[Any]] + self, txn: LoggingTransaction, cache_name: str, keys: Iterable[Any] | None ) -> None: """Notifies replication that given cache has been invalidated. 
diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 45cfe97dba..5d667a5345 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from synapse.events.utils import prune_event_dict from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -121,7 +121,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase and original_event.internal_metadata.is_redacted() ): # Redaction was allowed - pruned_json: Optional[str] = json_encoder.encode( + pruned_json: str | None = json_encoder.encode( prune_event_dict( original_event.room_version, original_event.get_dict() ) diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 1033d85a40..4948d0c286 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -23,9 +23,7 @@ import logging from typing import ( TYPE_CHECKING, Mapping, - Optional, TypedDict, - Union, cast, ) @@ -64,9 +62,9 @@ class DeviceLastConnectionInfo: user_id: str device_id: str - ip: Optional[str] - user_agent: Optional[str] - last_seen: Optional[int] + ip: str | None + user_agent: str | None + last_seen: int | None class LastConnectionInfo(TypedDict): @@ -176,7 +174,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): # Fetch the start of the batch begin_last_seen: int = progress.get("last_seen", 0) - def get_last_seen(txn: LoggingTransaction) -> Optional[int]: + def get_last_seen(txn: LoggingTransaction) -> int | None: txn.execute( """ SELECT last_seen FROM user_ips @@ -187,7 +185,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): """, (begin_last_seen, batch_size), ) - row = cast(Optional[tuple[int]], txn.fetchone()) + row = cast(tuple[int] | None, txn.fetchone()) if row: return row[0] else: @@ -248,7 +246,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): args, ) res = cast( - list[tuple[str, str, str, Optional[str], str, int, int]], txn.fetchall() + list[tuple[str, str, str, str | None, str, int, int]], txn.fetchall() ) # We've got some duplicates @@ -358,7 +356,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): # we'll just end up updating the same device row multiple # times, which is fine. - where_args: list[Union[str, int]] + where_args: list[str | int] where_clause, where_args = make_tuple_comparison_clause( [("user_id", last_user_id), ("device_id", last_device_id)], ) @@ -447,7 +445,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke # (user_id, access_token, ip,) -> (user_agent, device_id, last_seen) self._batch_row_update: dict[ - tuple[str, str, str], tuple[str, Optional[str], int] + tuple[str, str, str], tuple[str, str | None, int] ] = {} self.clock.looping_call(self._update_client_ips_batch, 5 * 1000) @@ -500,7 +498,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke ) async def _get_last_client_ip_by_device_from_database( - self, user_id: str, device_id: Optional[str] + self, user_id: str, device_id: str | None ) -> dict[tuple[str, str], DeviceLastConnectionInfo]: """For each device_id listed, give the user_ip it was last seen on. 
@@ -519,7 +517,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke keyvalues["device_id"] = device_id res = cast( - list[tuple[str, Optional[str], Optional[str], str, Optional[int]]], + list[tuple[str, str | None, str | None, str, int | None]], await self.db_pool.simple_select_list( table="devices", keyvalues=keyvalues, @@ -596,8 +594,8 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke access_token: str, ip: str, user_agent: str, - device_id: Optional[str], - now: Optional[int] = None, + device_id: str | None, + now: int | None = None, ) -> None: """Record that `user_id` used `access_token` from this `ip` address. @@ -670,7 +668,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke def _update_client_ips_batch_txn( self, txn: LoggingTransaction, - to_update: Mapping[tuple[str, str, str], tuple[str, Optional[str], int]], + to_update: Mapping[tuple[str, str, str], tuple[str, str | None, int]], ) -> None: assert self._update_on_this_worker, ( "This worker is not designated to update client IPs" @@ -715,7 +713,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke ) async def get_last_client_ip_by_device( - self, user_id: str, device_id: Optional[str] + self, user_id: str, device_id: str | None ) -> dict[tuple[str, str], DeviceLastConnectionInfo]: """For each device_id listed, give the user_ip it was last seen on @@ -805,7 +803,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke return list(results.values()) - async def get_last_seen_for_user_id(self, user_id: str) -> Optional[int]: + async def get_last_seen_for_user_id(self, user_id: str) -> int | None: """Get the last seen timestamp for a user, if we have it.""" return await self.db_pool.simple_select_one_onecol( diff --git a/synapse/storage/databases/main/delayed_events.py b/synapse/storage/databases/main/delayed_events.py index 6ad161db33..b11ed86db2 100644 --- a/synapse/storage/databases/main/delayed_events.py +++ b/synapse/storage/databases/main/delayed_events.py @@ -13,7 +13,7 @@ # import logging -from typing import NewType, Optional +from typing import NewType import attr @@ -42,10 +42,10 @@ Timestamp = NewType("Timestamp", int) class EventDetails: room_id: RoomID type: EventType - state_key: Optional[StateKey] - origin_server_ts: Optional[Timestamp] + state_key: StateKey | None + origin_server_ts: Timestamp | None content: JsonDict - device_id: Optional[DeviceID] + device_id: DeviceID | None @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -67,7 +67,7 @@ class DelayedEventsStore(SQLBaseStore): desc="get_delayed_events_stream_pos", ) - async def update_delayed_events_stream_pos(self, stream_id: Optional[int]) -> None: + async def update_delayed_events_stream_pos(self, stream_id: int | None) -> None: """ Updates the stream position of the background process to watch for state events that target the same piece of state as any pending delayed events. 
@@ -85,12 +85,12 @@ class DelayedEventsStore(SQLBaseStore): self, *, user_localpart: str, - device_id: Optional[str], + device_id: str | None, creation_ts: Timestamp, room_id: str, event_type: str, - state_key: Optional[str], - origin_server_ts: Optional[int], + state_key: str | None, + origin_server_ts: int | None, content: JsonDict, delay: int, ) -> tuple[DelayID, Timestamp]: @@ -238,7 +238,7 @@ class DelayedEventsStore(SQLBaseStore): self, current_ts: Timestamp ) -> tuple[ list[DelayedEventDetails], - Optional[Timestamp], + Timestamp | None, ]: """ Marks for processing all delayed events that should have been sent prior to the provided time @@ -252,7 +252,7 @@ class DelayedEventsStore(SQLBaseStore): txn: LoggingTransaction, ) -> tuple[ list[DelayedEventDetails], - Optional[Timestamp], + Timestamp | None, ]: sql_cols = ", ".join( ( @@ -324,7 +324,7 @@ class DelayedEventsStore(SQLBaseStore): user_localpart: str, ) -> tuple[ EventDetails, - Optional[Timestamp], + Timestamp | None, ]: """ Marks for processing the matching delayed event, regardless of its timeout time, @@ -345,7 +345,7 @@ class DelayedEventsStore(SQLBaseStore): txn: LoggingTransaction, ) -> tuple[ EventDetails, - Optional[Timestamp], + Timestamp | None, ]: txn.execute( """ @@ -390,7 +390,7 @@ class DelayedEventsStore(SQLBaseStore): *, delay_id: str, user_localpart: str, - ) -> Optional[Timestamp]: + ) -> Timestamp | None: """ Cancels the matching delayed event, i.e. remove it as long as it hasn't been processed. @@ -406,7 +406,7 @@ class DelayedEventsStore(SQLBaseStore): def cancel_delayed_event_txn( txn: LoggingTransaction, - ) -> Optional[Timestamp]: + ) -> Timestamp | None: try: self.db_pool.simple_delete_one_txn( txn, @@ -436,7 +436,7 @@ class DelayedEventsStore(SQLBaseStore): event_type: str, state_key: str, not_from_localpart: str, - ) -> Optional[Timestamp]: + ) -> Timestamp | None: """ Cancels all matching delayed state events, i.e. remove them as long as they haven't been processed. @@ -452,7 +452,7 @@ class DelayedEventsStore(SQLBaseStore): def cancel_delayed_state_events_txn( txn: LoggingTransaction, - ) -> Optional[Timestamp]: + ) -> Timestamp | None: txn.execute( """ DELETE FROM delayed_events @@ -526,7 +526,7 @@ class DelayedEventsStore(SQLBaseStore): desc="unprocess_delayed_events", ) - async def get_next_delayed_event_send_ts(self) -> Optional[Timestamp]: + async def get_next_delayed_event_send_ts(self) -> Timestamp | None: """ Returns the send time of the next delayed event to be sent, if any. """ @@ -538,7 +538,7 @@ class DelayedEventsStore(SQLBaseStore): def _get_next_delayed_event_send_ts_txn( self, txn: LoggingTransaction - ) -> Optional[Timestamp]: + ) -> Timestamp | None: result = self.db_pool.simple_select_one_onecol_txn( txn, table="delayed_events", diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 49a82b98d3..a12411d723 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -25,7 +25,6 @@ from typing import ( TYPE_CHECKING, Collection, Iterable, - Optional, cast, ) @@ -87,15 +86,15 @@ class DeviceInboxWorkerStore(SQLBaseStore): # Map of (user_id, device_id) to the last stream_id that has been # deleted up to. This is so that we can no op deletions. 
- self._last_device_delete_cache: ExpiringCache[ - tuple[str, Optional[str]], int - ] = ExpiringCache( - cache_name="last_device_delete_cache", - server_name=self.server_name, - hs=hs, - clock=self.clock, - max_len=10000, - expiry_ms=30 * 60 * 1000, + self._last_device_delete_cache: ExpiringCache[tuple[str, str | None], int] = ( + ExpiringCache( + cache_name="last_device_delete_cache", + server_name=self.server_name, + hs=hs, + clock=self.clock, + max_len=10000, + expiry_ms=30 * 60 * 1000, + ) ) self._can_write_to_device = ( @@ -469,7 +468,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): async def delete_messages_for_device( self, user_id: str, - device_id: Optional[str], + device_id: str | None, up_to_stream_id: int, ) -> int: """ @@ -527,11 +526,11 @@ class DeviceInboxWorkerStore(SQLBaseStore): async def delete_messages_for_device_between( self, user_id: str, - device_id: Optional[str], - from_stream_id: Optional[int], + device_id: str | None, + from_stream_id: int | None, to_stream_id: int, limit: int, - ) -> tuple[Optional[int], int]: + ) -> tuple[int | None, int]: """Delete N device messages between the stream IDs, returning the highest stream ID deleted (or None if all messages in the range have been deleted) and the number of messages deleted. @@ -551,7 +550,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): def delete_messages_for_device_between_txn( txn: LoggingTransaction, - ) -> tuple[Optional[int], int]: + ) -> tuple[int | None, int]: txn.execute( """ SELECT MAX(stream_id) FROM ( @@ -1147,7 +1146,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): # There's a type mismatch here between how we want to type the row and # what fetchone says it returns, but we silence it because we know that # res can't be None. - res = cast(tuple[Optional[int]], txn.fetchone()) + res = cast(tuple[int | None], txn.fetchone()) if res[0] is None: # this can only happen if the `device_inbox` table is empty, in which # case we have no work to do. @@ -1210,7 +1209,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): max_stream_id = progress["max_stream_id"] else: txn.execute("SELECT max(stream_id) FROM device_federation_outbox") - res = cast(tuple[Optional[int]], txn.fetchone()) + res = cast(tuple[int | None], txn.fetchone()) if res[0] is None: # this can only happen if the `device_inbox` table is empty, in which # case we have no work to do. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index bf5e05ea51..caae2a0648 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -26,7 +26,6 @@ from typing import ( Collection, Iterable, Mapping, - Optional, cast, ) @@ -254,7 +253,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): return self._device_list_id_gen async def count_devices_by_users( - self, user_ids: Optional[Collection[str]] = None + self, user_ids: Collection[str] | None = None ) -> int: """Retrieve number of all devices of given users. Only returns number of devices that are not marked as hidden. 
@@ -293,9 +292,9 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): self, user_id: str, device_id: str, - initial_device_display_name: Optional[str], - auth_provider_id: Optional[str] = None, - auth_provider_session_id: Optional[str] = None, + initial_device_display_name: str | None, + auth_provider_id: str | None = None, + auth_provider_session_id: str | None = None, ) -> bool: """Ensure the given device is known; add it to the store if not @@ -441,7 +440,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) async def update_device( - self, user_id: str, device_id: str, new_display_name: Optional[str] = None + self, user_id: str, device_id: str, new_display_name: str | None = None ) -> None: """Update a device. Only updates the device if it is not marked as hidden. @@ -469,7 +468,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): @cached(tree=True) async def get_device( self, user_id: str, device_id: str - ) -> Optional[Mapping[str, Any]]: + ) -> Mapping[str, Any] | None: """Retrieve a device. Only returns devices that are not marked as hidden. @@ -493,7 +492,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_devices_by_user( self, user_id: str - ) -> dict[str, dict[str, Optional[str]]]: + ) -> dict[str, dict[str, str | None]]: """Retrieve all of a user's registered devices. Only returns devices that are not marked as hidden. @@ -504,7 +503,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): and "display_name" for each device. Display name may be null. """ devices = cast( - list[tuple[str, str, Optional[str]]], + list[tuple[str, str, str | None]], await self.db_pool.simple_select_list( table="devices", keyvalues={"user_id": user_id, "hidden": False}, @@ -655,7 +654,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): last_processed_stream_id = from_stream_id # A map of (user ID, device ID) to (stream ID, context). - query_map: dict[tuple[str, str], tuple[int, Optional[str]]] = {} + query_map: dict[tuple[str, str], tuple[int, str | None]] = {} cross_signing_keys_by_user: dict[str, dict[str, object]] = {} for user_id, device_id, update_stream_id, update_context in updates: # Calculate the remaining length budget. 
@@ -762,7 +761,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): from_stream_id: int, now_stream_id: int, limit: int, - ) -> list[tuple[str, str, int, Optional[str]]]: + ) -> list[tuple[str, str, int, str | None]]: """Return device update information for a given remote destination Args: @@ -788,13 +787,13 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): """ txn.execute(sql, (destination, from_stream_id, now_stream_id, limit)) - return cast(list[tuple[str, str, int, Optional[str]]], txn.fetchall()) + return cast(list[tuple[str, str, int, str | None]], txn.fetchall()) async def _get_device_update_edus_by_remote( self, destination: str, from_stream_id: int, - query_map: dict[tuple[str, str], tuple[int, Optional[str]]], + query_map: dict[tuple[str, str], tuple[int, str | None]], ) -> list[tuple[str, dict]]: """Returns a list of device update EDUs as well as E2EE keys @@ -1126,7 +1125,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): self, from_key: MultiWriterStreamToken, user_ids: Collection[str], - to_key: Optional[MultiWriterStreamToken] = None, + to_key: MultiWriterStreamToken | None = None, ) -> set[str]: """Get set of users whose devices have changed since `from_key` that are in the given list of user_ids. @@ -1298,7 +1297,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): @cached(max_entries=10000) async def get_device_list_last_stream_id_for_remote( self, user_id: str - ) -> Optional[str]: + ) -> str | None: """Get the last stream_id we got for a user. May be None if we haven't got any information for them. """ @@ -1316,7 +1315,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) async def get_device_list_last_stream_id_for_remotes( self, user_ids: Iterable[str] - ) -> Mapping[str, Optional[str]]: + ) -> Mapping[str, str | None]: rows = cast( list[tuple[str, str]], await self.db_pool.simple_select_many_batch( @@ -1328,14 +1327,14 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ), ) - results: dict[str, Optional[str]] = dict.fromkeys(user_ids) + results: dict[str, str | None] = dict.fromkeys(user_ids) results.update(rows) return results async def get_user_ids_requiring_device_list_resync( self, - user_ids: Optional[Collection[str]] = None, + user_ids: Collection[str] | None = None, ) -> set[str]: """Given a list of remote users return the list of users that we should resync the device lists for. If None is given instead of a list, @@ -1457,9 +1456,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): txn, self.get_device_list_last_stream_id_for_remote, (user_id,) ) - async def get_dehydrated_device( - self, user_id: str - ) -> Optional[tuple[str, JsonDict]]: + async def get_dehydrated_device(self, user_id: str) -> tuple[str, JsonDict] | None: """Retrieve the information for a dehydrated device. 
Args: @@ -1484,8 +1481,8 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): device_id: str, device_data: str, time: int, - keys: Optional[JsonDict] = None, - ) -> Optional[str]: + keys: JsonDict | None = None, + ) -> str | None: # TODO: make keys non-optional once support for msc2697 is dropped if keys: device_keys = keys.get("device_keys", None) @@ -1534,8 +1531,8 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): device_id: str, device_data: JsonDict, time_now: int, - keys: Optional[dict] = None, - ) -> Optional[str]: + keys: dict | None = None, + ) -> str | None: """Store a dehydrated device for a user. Args: @@ -1724,7 +1721,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): room_ids: Collection[str], from_token: MultiWriterStreamToken, to_token: MultiWriterStreamToken, - ) -> Optional[set[str]]: + ) -> set[str] | None: """Return the set of users whose devices have changed in the given rooms since the given stream ID. @@ -1963,7 +1960,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): user_id: str, device_ids: StrCollection, room_ids: StrCollection, - ) -> Optional[int]: + ) -> int | None: """Persist that a user's devices have been updated, and which hosts (if any) should be poked. @@ -2012,7 +2009,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): return stream_ids[-1] - last_stream_id: Optional[int] = None + last_stream_id: int | None = None for batch_device_ids in batch_iter(device_ids, 1000): last_stream_id = await self.db_pool.runInteraction( "add_device_change_to_stream", @@ -2072,7 +2069,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): device_id: str, hosts: Collection[str], stream_id: int, - context: Optional[dict[str, str]], + context: dict[str, str] | None, ) -> None: if self._device_list_federation_stream_cache: for host in hosts: @@ -2204,7 +2201,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_uncoverted_outbound_room_pokes( self, start_stream_id: int, start_room_id: str, limit: int = 10 - ) -> list[tuple[str, str, str, int, Optional[dict[str, str]]]]: + ) -> list[tuple[str, str, str, int, dict[str, str] | None]]: """Get device list changes by room that have not yet been handled and written to `device_lists_outbound_pokes`. @@ -2232,7 +2229,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def get_uncoverted_outbound_room_pokes_txn( txn: LoggingTransaction, - ) -> list[tuple[str, str, str, int, Optional[dict[str, str]]]]: + ) -> list[tuple[str, str, str, int, dict[str, str] | None]]: txn.execute( sql, ( @@ -2266,7 +2263,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): device_id: str, room_id: str, hosts: Collection[str], - context: Optional[dict[str, str]], + context: dict[str, str] | None, ) -> None: """Queue the device update to be sent to the given set of hosts, calculated from the room ID. 
diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index 99a951ca4a..5e14ad4480 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -19,7 +19,7 @@ # # -from typing import Iterable, Optional, Sequence +from typing import Iterable, Sequence import attr @@ -40,7 +40,7 @@ class RoomAliasMapping: class DirectoryWorkerStore(CacheInvalidationWorkerStore): async def get_association_from_room_alias( self, room_alias: RoomAlias - ) -> Optional[RoomAliasMapping]: + ) -> RoomAliasMapping | None: """Gets the room_id and server list for a given room_alias Args: @@ -94,7 +94,7 @@ class DirectoryWorkerStore(CacheInvalidationWorkerStore): room_alias: RoomAlias, room_id: str, servers: Iterable[str], - creator: Optional[str] = None, + creator: str | None = None, ) -> None: """Creates an association between a room alias and room_id/servers @@ -136,7 +136,7 @@ class DirectoryWorkerStore(CacheInvalidationWorkerStore): 409, "Room alias %s already exists" % room_alias.to_string() ) - async def delete_room_alias(self, room_alias: RoomAlias) -> Optional[str]: + async def delete_room_alias(self, room_alias: RoomAlias) -> str | None: room_id = await self.db_pool.runInteraction( "delete_room_alias", self._delete_room_alias_txn, room_alias ) @@ -145,7 +145,7 @@ class DirectoryWorkerStore(CacheInvalidationWorkerStore): def _delete_room_alias_txn( self, txn: LoggingTransaction, room_alias: RoomAlias - ) -> Optional[str]: + ) -> str | None: txn.execute( "SELECT room_id FROM room_aliases WHERE room_alias = ?", (room_alias.to_string(),), @@ -174,7 +174,7 @@ class DirectoryWorkerStore(CacheInvalidationWorkerStore): self, old_room_id: str, new_room_id: str, - creator: Optional[str] = None, + creator: str | None = None, ) -> None: """Repoint all of the aliases for a given room, to a different room. diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index a4d03d1d90..01e9fb4dcf 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -24,7 +24,6 @@ from typing import ( Iterable, Literal, Mapping, - Optional, TypedDict, cast, ) @@ -252,8 +251,8 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): self, user_id: str, version: str, - room_id: Optional[str] = None, - session_id: Optional[str] = None, + room_id: str | None = None, + session_id: str | None = None, ) -> dict[ Literal["rooms"], dict[str, dict[Literal["sessions"], dict[str, RoomKey]]] ]: @@ -438,8 +437,8 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): self, user_id: str, version: str, - room_id: Optional[str] = None, - session_id: Optional[str] = None, + room_id: str | None = None, + session_id: str | None = None, ) -> None: """Bulk delete the E2E room keys for a given backup, optionally filtered to a given room or a given session. @@ -480,13 +479,13 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): ) # `SELECT MAX() FROM ...` will always return 1 row. The value in that row will # be `NULL` when there are no available versions. 
- row = cast(tuple[Optional[int]], txn.fetchone()) + row = cast(tuple[int | None], txn.fetchone()) if row[0] is None: raise StoreError(404, "No current backup version") return row[0] async def get_e2e_room_keys_version_info( - self, user_id: str, version: Optional[str] = None + self, user_id: str, version: str | None = None ) -> JsonDict: """Get info metadata about a version of our room_keys backup. @@ -556,7 +555,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?", (user_id,), ) - current_version = cast(tuple[Optional[int]], txn.fetchone())[0] + current_version = cast(tuple[int | None], txn.fetchone())[0] if current_version is None: current_version = 0 @@ -584,8 +583,8 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): self, user_id: str, version: str, - info: Optional[JsonDict] = None, - version_etag: Optional[int] = None, + info: JsonDict | None = None, + version_etag: int | None = None, ) -> None: """Update a given backup version @@ -621,7 +620,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): @trace async def delete_e2e_room_keys_version( - self, user_id: str, version: Optional[str] = None + self, user_id: str, version: str | None = None ) -> None: """Delete a given backup version of the user's room keys. Doesn't delete their actual key data. diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 991d64db44..c93ebd3dda 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -28,9 +28,7 @@ from typing import ( Iterable, Literal, Mapping, - Optional, Sequence, - Union, cast, overload, ) @@ -71,12 +69,12 @@ if TYPE_CHECKING: class DeviceKeyLookupResult: """The type returned by get_e2e_device_keys_and_signatures""" - display_name: Optional[str] + display_name: str | None # the key data from e2e_device_keys_json. Typically includes fields like # "algorithm", "keys" (including the curve25519 identity key and the ed25519 signing # key) and "signatures" (a map from (user id) to (key id/device_id) to signature.) - keys: Optional[JsonDict] + keys: JsonDict | None class EndToEndKeyBackgroundStore(SQLBaseStore): @@ -237,7 +235,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @cancellable async def get_e2e_device_keys_for_cs_api( self, - query_list: Collection[tuple[str, Optional[str]]], + query_list: Collection[tuple[str, str | None]], include_displaynames: bool = True, ) -> dict[str, dict[str, JsonDict]]: """Fetch a list of device keys, formatted suitably for the C/S API. @@ -280,14 +278,14 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @overload async def get_e2e_device_keys_and_signatures( self, - query_list: Collection[tuple[str, Optional[str]]], + query_list: Collection[tuple[str, str | None]], include_all_devices: Literal[False] = False, ) -> dict[str, dict[str, DeviceKeyLookupResult]]: ... @overload async def get_e2e_device_keys_and_signatures( self, - query_list: Collection[tuple[str, Optional[str]]], + query_list: Collection[tuple[str, str | None]], include_all_devices: bool = False, include_deleted_devices: Literal[False] = False, ) -> dict[str, dict[str, DeviceKeyLookupResult]]: ... 
@@ -295,22 +293,22 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @overload async def get_e2e_device_keys_and_signatures( self, - query_list: Collection[tuple[str, Optional[str]]], + query_list: Collection[tuple[str, str | None]], include_all_devices: Literal[True], include_deleted_devices: Literal[True], - ) -> dict[str, dict[str, Optional[DeviceKeyLookupResult]]]: ... + ) -> dict[str, dict[str, DeviceKeyLookupResult | None]]: ... @trace @cancellable async def get_e2e_device_keys_and_signatures( self, - query_list: Collection[tuple[str, Optional[str]]], + query_list: Collection[tuple[str, str | None]], include_all_devices: bool = False, include_deleted_devices: bool = False, - ) -> Union[ - dict[str, dict[str, DeviceKeyLookupResult]], - dict[str, dict[str, Optional[DeviceKeyLookupResult]]], - ]: + ) -> ( + dict[str, dict[str, DeviceKeyLookupResult]] + | dict[str, dict[str, DeviceKeyLookupResult | None]] + ): """Fetch a list of device keys Any cross-signatures made on the keys by the owner of the device are also @@ -384,10 +382,10 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def _get_e2e_device_keys( self, - query_list: Collection[tuple[str, Optional[str]]], + query_list: Collection[tuple[str, str | None]], include_all_devices: bool = False, include_deleted_devices: bool = False, - ) -> dict[str, dict[str, Optional[DeviceKeyLookupResult]]]: + ) -> dict[str, dict[str, DeviceKeyLookupResult | None]]: """Get information on devices from the database The results include the device's keys and self-signatures, but *not* any @@ -433,7 +431,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker query_clauses.append(user_device_id_in_list_clause) query_params_list.append(user_device_args) - result: dict[str, dict[str, Optional[DeviceKeyLookupResult]]] = {} + result: dict[str, dict[str, DeviceKeyLookupResult | None]] = {} def get_e2e_device_keys_txn( txn: LoggingTransaction, query_clause: str, query_params: list @@ -897,8 +895,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ) async def get_e2e_cross_signing_key( - self, user_id: str, key_type: str, from_user_id: Optional[str] = None - ) -> Optional[JsonMapping]: + self, user_id: str, key_type: str, from_user_id: str | None = None + ) -> JsonMapping | None: """Returns a user's cross-signing key. Args: @@ -934,7 +932,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ) async def _get_bare_e2e_cross_signing_keys_bulk( self, user_ids: Iterable[str] - ) -> Mapping[str, Optional[Mapping[str, JsonMapping]]]: + ) -> Mapping[str, Mapping[str, JsonMapping] | None]: """Returns the cross-signing keys for a set of users. The output of this function should be passed to _get_e2e_cross_signing_signatures_txn if the signatures for the calling user need to be fetched. @@ -1013,9 +1011,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker def _get_e2e_cross_signing_signatures_txn( self, txn: LoggingTransaction, - keys: dict[str, Optional[dict[str, JsonDict]]], + keys: dict[str, dict[str, JsonDict] | None], from_user_id: str, - ) -> dict[str, Optional[dict[str, JsonDict]]]: + ) -> dict[str, dict[str, JsonDict] | None]: """Returns the cross-signing signatures made by a user on a set of keys. 
Args: @@ -1096,8 +1094,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @cancellable async def get_e2e_cross_signing_keys_bulk( - self, user_ids: list[str], from_user_id: Optional[str] = None - ) -> Mapping[str, Optional[Mapping[str, JsonMapping]]]: + self, user_ids: list[str], from_user_id: str | None = None + ) -> Mapping[str, Mapping[str, JsonMapping] | None]: """Returns the cross-signing keys for a set of users. Args: @@ -1114,7 +1112,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker if from_user_id: result = cast( - dict[str, Optional[Mapping[str, JsonMapping]]], + dict[str, Mapping[str, JsonMapping] | None], await self.db_pool.runInteraction( "get_e2e_cross_signing_signatures", self._get_e2e_cross_signing_signatures_txn, @@ -1478,7 +1476,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def get_master_cross_signing_key_updatable_before( self, user_id: str - ) -> tuple[bool, Optional[int]]: + ) -> tuple[bool, int | None]: """Get time before which a master cross-signing key may be replaced without UIA. (UIA means "User-Interactive Auth".) @@ -1499,7 +1497,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """ - def impl(txn: LoggingTransaction) -> tuple[bool, Optional[int]]: + def impl(txn: LoggingTransaction) -> tuple[bool, int | None]: # We want to distinguish between three cases: txn.execute( """ @@ -1511,7 +1509,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """, (user_id,), ) - row = cast(Optional[tuple[Optional[int]]], txn.fetchone()) + row = cast(tuple[int | None] | None, txn.fetchone()) if row is None: return False, None return True, row[0] @@ -1571,7 +1569,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def allow_master_cross_signing_key_replacement_without_uia( self, user_id: str, duration_ms: int - ) -> Optional[int]: + ) -> int | None: """Mark this user's latest master key as being replaceable without UIA. Said replacement will only be permitted for a short time after calling this @@ -1583,7 +1581,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """ timestamp = self.clock.time_msec() + duration_ms - def impl(txn: LoggingTransaction) -> Optional[int]: + def impl(txn: LoggingTransaction) -> int | None: txn.execute( """ UPDATE e2e_cross_signing_keys diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 0a8571f0c8..b2f0aeaf58 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -27,7 +27,6 @@ from typing import ( Collection, Generator, Iterable, - Optional, Sequence, cast, ) @@ -129,7 +128,7 @@ class StateDifference: # The event IDs in the auth difference. auth_difference: set[str] # The event IDs in the conflicted state subgraph. Used in v2.1 only. - conflicted_subgraph: Optional[set[str]] + conflicted_subgraph: set[str] | None class _NoChainCoverIndex(Exception): @@ -142,7 +141,7 @@ class EventFederationWorkerStore( ): # TODO: this attribute comes from EventPushActionWorkerStore. Should we inherit from # that store so that mypy can deduce this for itself? 
- stream_ordering_month_ago: Optional[int] + stream_ordering_month_ago: int | None def __init__( self, @@ -494,8 +493,8 @@ class EventFederationWorkerStore( self, room_id: str, state_sets: list[set[str]], - conflicted_set: Optional[set[str]], - additional_backwards_reachable_conflicted_events: Optional[set[str]], + conflicted_set: set[str] | None, + additional_backwards_reachable_conflicted_events: set[str] | None, ) -> StateDifference: """ "Given sets of state events figure out the auth chain difference (as per state res v2 algorithm). @@ -556,8 +555,8 @@ class EventFederationWorkerStore( txn: LoggingTransaction, room_id: str, state_sets: list[set[str]], - conflicted_set: Optional[set[str]] = None, - additional_backwards_reachable_conflicted_events: Optional[set[str]] = None, + conflicted_set: set[str] | None = None, + additional_backwards_reachable_conflicted_events: set[str] | None = None, ) -> StateDifference: """Calculates the auth chain difference using the chain index. @@ -1341,7 +1340,7 @@ class EventFederationWorkerStore( async def get_max_depth_of( self, event_ids: Collection[str] - ) -> tuple[Optional[str], int]: + ) -> tuple[str | None, int]: """Returns the event ID and depth for the event that has the max depth from a set of event IDs Args: @@ -1373,7 +1372,7 @@ class EventFederationWorkerStore( return max_depth_event_id, current_max_depth - async def get_min_depth_of(self, event_ids: list[str]) -> tuple[Optional[str], int]: + async def get_min_depth_of(self, event_ids: list[str]) -> tuple[str | None, int]: """Returns the event ID and depth for the event that has the min depth from a set of event IDs Args: @@ -1491,7 +1490,7 @@ class EventFederationWorkerStore( ) return frozenset(event_ids) - async def get_min_depth(self, room_id: str) -> Optional[int]: + async def get_min_depth(self, room_id: str) -> int | None: """For the given room, get the minimum depth we have seen for it.""" return await self.db_pool.runInteraction( "get_min_depth", self._get_min_depth_interaction, room_id @@ -1499,7 +1498,7 @@ class EventFederationWorkerStore( def _get_min_depth_interaction( self, txn: LoggingTransaction, room_id: str - ) -> Optional[int]: + ) -> int | None: min_depth = self.db_pool.simple_select_one_onecol_txn( txn, table="room_depth", @@ -1689,7 +1688,7 @@ class EventFederationWorkerStore( ) events = await self.get_events_as_list(event_ids) return sorted( - # type-ignore: mypy doesn't like negating the Optional[int] stream_ordering. + # type-ignore: mypy doesn't like negating the int | None stream_ordering. # But it's never None, because these events were previously persisted to the DB. events, key=lambda e: (-e.depth, -e.internal_metadata.stream_ordering), # type: ignore[operator] @@ -2034,7 +2033,7 @@ class EventFederationWorkerStore( self, origin: str, event_id: str, - ) -> Optional[int]: + ) -> int | None: """Remove the given event from the staging area. Returns: @@ -2043,7 +2042,7 @@ class EventFederationWorkerStore( def _remove_received_event_from_staging_txn( txn: LoggingTransaction, - ) -> Optional[int]: + ) -> int | None: sql = """ DELETE FROM federation_inbound_events_staging WHERE origin = ? AND event_id = ? 
@@ -2051,7 +2050,7 @@ class EventFederationWorkerStore( """ txn.execute(sql, (origin, event_id)) - row = cast(Optional[tuple[int]], txn.fetchone()) + row = cast(tuple[int] | None, txn.fetchone()) if row is None: return None @@ -2067,7 +2066,7 @@ class EventFederationWorkerStore( async def get_next_staged_event_id_for_room( self, room_id: str, - ) -> Optional[tuple[str, str]]: + ) -> tuple[str, str] | None: """ Get the next event ID in the staging area for the given room. @@ -2077,7 +2076,7 @@ class EventFederationWorkerStore( def _get_next_staged_event_id_for_room_txn( txn: LoggingTransaction, - ) -> Optional[tuple[str, str]]: + ) -> tuple[str, str] | None: sql = """ SELECT origin, event_id FROM federation_inbound_events_staging @@ -2088,7 +2087,7 @@ class EventFederationWorkerStore( txn.execute(sql, (room_id,)) - return cast(Optional[tuple[str, str]], txn.fetchone()) + return cast(tuple[str, str] | None, txn.fetchone()) return await self.db_pool.runInteraction( "get_next_staged_event_id_for_room", _get_next_staged_event_id_for_room_txn @@ -2098,12 +2097,12 @@ class EventFederationWorkerStore( self, room_id: str, room_version: RoomVersion, - ) -> Optional[tuple[str, EventBase]]: + ) -> tuple[str, EventBase] | None: """Get the next event in the staging area for the given room.""" def _get_next_staged_event_for_room_txn( txn: LoggingTransaction, - ) -> Optional[tuple[str, str, str]]: + ) -> tuple[str, str, str] | None: sql = """ SELECT event_json, internal_metadata, origin FROM federation_inbound_events_staging @@ -2113,7 +2112,7 @@ class EventFederationWorkerStore( """ txn.execute(sql, (room_id,)) - return cast(Optional[tuple[str, str, str]], txn.fetchone()) + return cast(tuple[str, str, str] | None, txn.fetchone()) row = await self.db_pool.runInteraction( "get_next_staged_event_for_room", _get_next_staged_event_for_room_txn @@ -2258,7 +2257,7 @@ class EventFederationWorkerStore( "SELECT min(received_ts) FROM federation_inbound_events_staging" ) - (received_ts,) = cast(tuple[Optional[int]], txn.fetchone()) + (received_ts,) = cast(tuple[int | None], txn.fetchone()) # If there is nothing in the staging area default it to 0. age = 0 diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index d65ab82fff..2e99d7314e 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -85,8 +85,6 @@ from typing import ( TYPE_CHECKING, Collection, Mapping, - Optional, - Union, cast, ) @@ -115,11 +113,11 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -DEFAULT_NOTIF_ACTION: list[Union[dict, str]] = [ +DEFAULT_NOTIF_ACTION: list[dict | str] = [ "notify", {"set_tweak": "highlight", "value": False}, ] -DEFAULT_HIGHLIGHT_ACTION: list[Union[dict, str]] = [ +DEFAULT_HIGHLIGHT_ACTION: list[dict | str] = [ "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}, @@ -162,7 +160,7 @@ class HttpPushAction: event_id: str room_id: str stream_ordering: int - actions: list[Union[dict, str]] + actions: list[dict | str] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -172,7 +170,7 @@ class EmailPushAction(HttpPushAction): push notification. 
""" - received_ts: Optional[int] + received_ts: int | None @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -221,9 +219,7 @@ class RoomNotifCounts: _EMPTY_ROOM_NOTIF_COUNTS = RoomNotifCounts(NotifCounts(), {}) -def _serialize_action( - actions: Collection[Union[Mapping, str]], is_highlight: bool -) -> str: +def _serialize_action(actions: Collection[Mapping | str], is_highlight: bool) -> str: """Custom serializer for actions. This allows us to "compress" common actions. We use the fact that most users have the same actions for notifs (and for @@ -241,7 +237,7 @@ def _serialize_action( return json_encoder.encode(actions) -def _deserialize_action(actions: str, is_highlight: bool) -> list[Union[dict, str]]: +def _deserialize_action(actions: str, is_highlight: bool) -> list[dict | str]: """Custom deserializer for actions. This allows us to "compress" common actions""" if actions: return db_to_json(actions) @@ -267,8 +263,8 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas self._started_ts = self.clock.time_msec() # These get correctly set by _find_stream_orderings_for_times_txn - self.stream_ordering_month_ago: Optional[int] = None - self.stream_ordering_day_ago: Optional[int] = None + self.stream_ordering_month_ago: int | None = None + self.stream_ordering_day_ago: int | None = None cur = db_conn.cursor(txn_name="_find_stream_orderings_for_times_txn") self._find_stream_orderings_for_times_txn(cur) @@ -773,8 +769,8 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas room_id: str, user_id: str, stream_ordering: int, - max_stream_ordering: Optional[int] = None, - thread_id: Optional[str] = None, + max_stream_ordering: int | None = None, + thread_id: str | None = None, ) -> list[tuple[int, int, str]]: """Returns the notify and unread counts from `event_push_actions` for the given user/room in the given range. @@ -1156,7 +1152,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas async def add_push_actions_to_staging( self, event_id: str, - user_id_actions: dict[str, Collection[Union[Mapping, str]]], + user_id_actions: dict[str, Collection[Mapping | str]], count_as_unread: bool, thread_id: str, ) -> None: @@ -1175,7 +1171,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # This is a helper function for generating the necessary tuple that # can be used to insert into the `event_push_actions_staging` table. 
def _gen_entry( - user_id: str, actions: Collection[Union[Mapping, str]] + user_id: str, actions: Collection[Mapping | str] ) -> tuple[str, str, str, int, int, int, str, int]: is_highlight = 1 if _action_has_highlight(actions) else 0 notif = 1 if "notify" in actions else 0 @@ -1293,7 +1289,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas The stream ordering """ txn.execute("SELECT MAX(stream_ordering) FROM events") - max_stream_ordering = cast(tuple[Optional[int]], txn.fetchone())[0] + max_stream_ordering = cast(tuple[int | None], txn.fetchone())[0] if max_stream_ordering is None: return 0 @@ -1351,8 +1347,8 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas async def get_time_of_last_push_action_before( self, stream_ordering: int - ) -> Optional[int]: - def f(txn: LoggingTransaction) -> Optional[tuple[int]]: + ) -> int | None: + def f(txn: LoggingTransaction) -> tuple[int] | None: sql = """ SELECT e.received_ts FROM event_push_actions AS ep @@ -1362,7 +1358,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas LIMIT 1 """ txn.execute(sql, (stream_ordering,)) - return cast(Optional[tuple[int]], txn.fetchone()) + return cast(tuple[int] | None, txn.fetchone()) result = await self.db_pool.runInteraction( "get_time_of_last_push_action_before", f @@ -1454,7 +1450,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas limit, ), ) - rows = cast(list[tuple[int, str, str, Optional[str], int]], txn.fetchall()) + rows = cast(list[tuple[int, str, str, str | None, int]], txn.fetchall()) # For each new read receipt we delete push actions from before it and # recalculate the summary. @@ -1826,7 +1822,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas async def get_push_actions_for_user( self, user_id: str, - before: Optional[int] = None, + before: int | None = None, limit: int = 50, only_highlight: bool = False, ) -> list[UserPushAction]: @@ -1915,7 +1911,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore): ) -def _action_has_highlight(actions: Collection[Union[Mapping, str]]) -> bool: +def _action_has_highlight(actions: Collection[Mapping | str]) -> bool: for action in actions: if not isinstance(action, dict): continue diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index da9ecfbdb9..59112e647c 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -29,7 +29,6 @@ from typing import ( Collection, Generator, Iterable, - Optional, Sequence, TypedDict, cast, @@ -141,10 +140,10 @@ class SlidingSyncStateInsertValues(TypedDict, total=False): `sliding_sync_membership_snapshots` database tables. 
""" - room_type: Optional[str] - is_encrypted: Optional[bool] - room_name: Optional[str] - tombstone_successor_room_id: Optional[str] + room_type: str | None + is_encrypted: bool | None + room_name: str | None + tombstone_successor_room_id: str | None class SlidingSyncMembershipSnapshotSharedInsertValues( @@ -155,7 +154,7 @@ class SlidingSyncMembershipSnapshotSharedInsertValues( multiple memberships """ - has_known_state: Optional[bool] + has_known_state: bool | None @attr.s(slots=True, auto_attribs=True) @@ -193,7 +192,7 @@ class SlidingSyncTableChanges: # foreground update for # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by # https://github.com/element-hq/synapse/issues/17623) - joined_room_bump_stamp_to_fully_insert: Optional[int] + joined_room_bump_stamp_to_fully_insert: int | None # Values to upsert into `sliding_sync_joined_rooms` joined_room_updates: SlidingSyncStateInsertValues @@ -272,8 +271,8 @@ class PersistEventsStore: room_id: str, events_and_contexts: list[EventPersistencePair], *, - state_delta_for_room: Optional[DeltaState], - new_forward_extremities: Optional[set[str]], + state_delta_for_room: DeltaState | None, + new_forward_extremities: set[str] | None, new_event_links: dict[str, NewEventChainLinks], use_negative_stream_ordering: bool = False, inhibit_local_membership_updates: bool = False, @@ -717,7 +716,7 @@ class PersistEventsStore: # `_update_sliding_sync_tables_with_new_persisted_events_txn()`) # joined_room_updates: SlidingSyncStateInsertValues = {} - bump_stamp_to_fully_insert: Optional[int] = None + bump_stamp_to_fully_insert: int | None = None if not delta_state.no_longer_in_room: current_state_ids_map = {} @@ -1014,10 +1013,10 @@ class PersistEventsStore: room_id: str, events_and_contexts: list[EventPersistencePair], inhibit_local_membership_updates: bool, - state_delta_for_room: Optional[DeltaState], - new_forward_extremities: Optional[set[str]], + state_delta_for_room: DeltaState | None, + new_forward_extremities: set[str] | None, new_event_links: dict[str, NewEventChainLinks], - sliding_sync_table_changes: Optional[SlidingSyncTableChanges], + sliding_sync_table_changes: SlidingSyncTableChanges | None, ) -> None: """Insert some number of room events into the necessary database tables. @@ -1570,7 +1569,7 @@ class PersistEventsStore: # existing_chains: set[int] = set() - tree: list[tuple[str, Optional[str]]] = [] + tree: list[tuple[str, str | None]] = [] # We need to do this in a topologically sorted order as we want to # generate chain IDs/sequence numbers of an event's auth events before @@ -1622,7 +1621,7 @@ class PersistEventsStore: if not existing_chain_id: existing_chain_id = chain_map[auth_event_id] - new_chain_tuple: Optional[tuple[Any, int]] = None + new_chain_tuple: tuple[Any, int] | None = None if existing_chain_id: # We found a chain ID/sequence number candidate, check its # not already taken. 
@@ -2491,7 +2490,7 @@ class PersistEventsStore: room_id: The room ID events_and_contexts: events we are persisting """ - stream_ordering: Optional[int] = None + stream_ordering: int | None = None depth_update = 0 for event, context in events_and_contexts: # Don't update the stream ordering for backfilled events because diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 637b9104c0..f8300e016b 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, cast import attr @@ -109,7 +109,7 @@ class _JoinedRoomStreamOrderingUpdate: # The most recent event stream_ordering for the room most_recent_event_stream_ordering: int # The most recent event `bump_stamp` for the room - most_recent_bump_stamp: Optional[int] + most_recent_bump_stamp: int | None class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseStore): @@ -1038,7 +1038,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS last_room_id: str, last_depth: int, last_stream: int, - batch_size: Optional[int], + batch_size: int | None, single_room: bool, ) -> _CalculateChainCover: """Calculate the chain cover for `batch_size` events, ordered by @@ -1889,14 +1889,14 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) -> list[ tuple[ str, - Optional[str], - Optional[str], + str | None, + str | None, str, str, str, str, int, - Optional[str], + str | None, bool, ] ]: @@ -1982,14 +1982,14 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS list[ tuple[ str, - Optional[str], - Optional[str], + str | None, + str | None, str, str, str, str, int, - Optional[str], + str | None, bool, ] ], @@ -2023,7 +2023,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS def _find_previous_invite_or_knock_membership_txn( txn: LoggingTransaction, room_id: str, user_id: str, event_id: str - ) -> Optional[tuple[str, str]]: + ) -> tuple[str, str] | None: # Find the previous invite/knock event before the leave event # # Here are some notes on how we landed on this query: @@ -2598,7 +2598,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # Find the next room ID to process, with a relevant room version. 
room_ids: list[str] = [] - max_room_id: Optional[str] = None + max_room_id: str | None = None for room_id, room_version_str in txn: max_room_id = room_id @@ -2704,7 +2704,7 @@ def _resolve_stale_data_in_sliding_sync_joined_rooms_table( # If we have nothing written to the `sliding_sync_joined_rooms` table, there is # nothing to clean up - row = cast(Optional[tuple[int]], txn.fetchone()) + row = cast(tuple[int] | None, txn.fetchone()) max_stream_ordering_sliding_sync_joined_rooms_table = None depends_on = None if row is not None: @@ -2830,7 +2830,7 @@ def _resolve_stale_data_in_sliding_sync_membership_snapshots_table( # If we have nothing written to the `sliding_sync_membership_snapshots` table, # there is nothing to clean up - row = cast(Optional[tuple[int]], txn.fetchone()) + row = cast(tuple[int] | None, txn.fetchone()) max_stream_ordering_sliding_sync_membership_snapshots_table = None if row is not None: (max_stream_ordering_sliding_sync_membership_snapshots_table,) = row diff --git a/synapse/storage/databases/main/events_forward_extremities.py b/synapse/storage/databases/main/events_forward_extremities.py index d43fb443fd..9908244dbf 100644 --- a/synapse/storage/databases/main/events_forward_extremities.py +++ b/synapse/storage/databases/main/events_forward_extremities.py @@ -20,7 +20,7 @@ # import logging -from typing import Optional, cast +from typing import cast from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction @@ -98,7 +98,7 @@ class EventForwardExtremitiesStore( async def get_forward_extremities_for_room( self, room_id: str - ) -> list[tuple[str, int, int, Optional[int]]]: + ) -> list[tuple[str, int, int, int | None]]: """ Get list of forward extremities for a room. @@ -108,7 +108,7 @@ class EventForwardExtremitiesStore( def get_forward_extremities_for_room_txn( txn: LoggingTransaction, - ) -> list[tuple[str, int, int, Optional[int]]]: + ) -> list[tuple[str, int, int, int | None]]: sql = """ SELECT event_id, state_group, depth, received_ts FROM event_forward_extremities @@ -118,7 +118,7 @@ class EventForwardExtremitiesStore( """ txn.execute(sql, (room_id,)) - return cast(list[tuple[str, int, int, Optional[int]]], txn.fetchall()) + return cast(list[tuple[str, int, int, int | None]], txn.fetchall()) return await self.db_pool.runInteraction( "get_forward_extremities_for_room", diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 005f75a2d8..29bc1b982a 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -31,7 +31,6 @@ from typing import ( Literal, Mapping, MutableMapping, - Optional, cast, overload, ) @@ -146,7 +145,7 @@ class InvalidEventError(Exception): @attr.s(slots=True, auto_attribs=True) class EventCacheEntry: event: EventBase - redacted_event: Optional[EventBase] + redacted_event: EventBase | None @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -184,9 +183,9 @@ class _EventRow: instance_name: str json: str internal_metadata: str - format_version: Optional[int] - room_version_id: Optional[str] - rejected_reason: Optional[str] + format_version: int | None + room_version_id: str | None + rejected_reason: str | None redactions: list[str] outlier: bool @@ -501,7 +500,7 @@ class EventsWorkerStore(SQLBaseStore): get_prev_content: bool = ..., allow_rejected: bool = ..., allow_none: Literal[False] = ..., - check_room_id: Optional[str] = ..., + check_room_id: str | None = ..., ) -> EventBase: ... 
@overload @@ -512,8 +511,8 @@ class EventsWorkerStore(SQLBaseStore): get_prev_content: bool = ..., allow_rejected: bool = ..., allow_none: Literal[True] = ..., - check_room_id: Optional[str] = ..., - ) -> Optional[EventBase]: ... + check_room_id: str | None = ..., + ) -> EventBase | None: ... @cancellable async def get_event( @@ -523,8 +522,8 @@ class EventsWorkerStore(SQLBaseStore): get_prev_content: bool = False, allow_rejected: bool = False, allow_none: bool = False, - check_room_id: Optional[str] = None, - ) -> Optional[EventBase]: + check_room_id: str | None = None, + ) -> EventBase | None: """Get an event from the database by event_id. Events for unknown room versions will also be filtered out. @@ -1090,7 +1089,7 @@ class EventsWorkerStore(SQLBaseStore): self, context: EventContext, state_keys_to_include: StateFilter, - membership_user_id: Optional[str] = None, + membership_user_id: str | None = None, ) -> list[JsonDict]: """ Retrieve the stripped state from a room, given an event context to retrieve state @@ -1403,7 +1402,7 @@ class EventsWorkerStore(SQLBaseStore): room_version_id = row.room_version_id - room_version: Optional[RoomVersion] + room_version: RoomVersion | None if not room_version_id: # this should only happen for out-of-band membership events which # arrived before https://github.com/matrix-org/synapse/issues/6983 @@ -1653,7 +1652,7 @@ class EventsWorkerStore(SQLBaseStore): original_ev: EventBase, redactions: Iterable[str], event_map: dict[str, EventBase], - ) -> Optional[EventBase]: + ) -> EventBase | None: """Given an event object and a list of possible redacting event ids, determine whether to honour any of those redactions and if so return a redacted event. @@ -2131,7 +2130,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_senders_for_event_ids( self, event_ids: Collection[str] - ) -> dict[str, Optional[str]]: + ) -> dict[str, str | None]: """ Given a sequence of event IDs, return the sender associated with each. @@ -2147,7 +2146,7 @@ class EventsWorkerStore(SQLBaseStore): def _get_senders_for_event_ids( txn: LoggingTransaction, - ) -> dict[str, Optional[str]]: + ) -> dict[str, str | None]: rows = self.db_pool.simple_select_many_txn( txn=txn, table="events", @@ -2178,7 +2177,7 @@ class EventsWorkerStore(SQLBaseStore): return int(res[0]), int(res[1]) - async def get_next_event_to_expire(self) -> Optional[tuple[str, int]]: + async def get_next_event_to_expire(self) -> tuple[str, int] | None: """Retrieve the entry with the lowest expiry timestamp in the event_expiry table, or None if there's no more event to expire. @@ -2190,7 +2189,7 @@ class EventsWorkerStore(SQLBaseStore): def get_next_event_to_expire_txn( txn: LoggingTransaction, - ) -> Optional[tuple[str, int]]: + ) -> tuple[str, int] | None: txn.execute( """ SELECT event_id, expiry_ts FROM event_expiry @@ -2198,7 +2197,7 @@ class EventsWorkerStore(SQLBaseStore): """ ) - return cast(Optional[tuple[str, int]], txn.fetchone()) + return cast(tuple[str, int] | None, txn.fetchone()) return await self.db_pool.runInteraction( desc="get_next_event_to_expire", func=get_next_event_to_expire_txn @@ -2206,7 +2205,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_event_id_from_transaction_id_and_device_id( self, room_id: str, user_id: str, device_id: str, txn_id: str - ) -> Optional[str]: + ) -> str | None: """Look up if we have already persisted an event for the transaction ID, returning the event ID if so. 
""" @@ -2427,7 +2426,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_event_id_for_timestamp( self, room_id: str, timestamp: int, direction: Direction - ) -> Optional[str]: + ) -> str | None: """Find the closest event to the given timestamp in the given direction. Args: @@ -2481,7 +2480,7 @@ class EventsWorkerStore(SQLBaseStore): LIMIT 1; """ - def get_event_id_for_timestamp_txn(txn: LoggingTransaction) -> Optional[str]: + def get_event_id_for_timestamp_txn(txn: LoggingTransaction) -> str | None: txn.execute( sql_template, (room_id, timestamp), @@ -2591,7 +2590,7 @@ class EventsWorkerStore(SQLBaseStore): self, txn: LoggingTransaction, event_id: str, - rejection_reason: Optional[str], + rejection_reason: str | None, ) -> None: """Mark an event that was previously accepted as rejected, or vice versa @@ -2640,8 +2639,8 @@ class EventsWorkerStore(SQLBaseStore): self.invalidate_get_event_cache_after_txn(txn, event_id) async def get_events_sent_by_user_in_room( - self, user_id: str, room_id: str, limit: int, filter: Optional[list[str]] = None - ) -> Optional[list[str]]: + self, user_id: str, room_id: str, limit: int, filter: list[str] | None = None + ) -> list[str] | None: """ Get a list of event ids of events sent by the user in the specified room @@ -2656,10 +2655,10 @@ class EventsWorkerStore(SQLBaseStore): txn: LoggingTransaction, user_id: str, room_id: str, - filter: Optional[list[str]], + filter: list[str] | None, batch_size: int, offset: int, - ) -> tuple[Optional[list[str]], int]: + ) -> tuple[list[str] | None, int]: if filter: base_clause, args = make_in_list_sql_clause( txn.database_engine, "type", filter @@ -2767,7 +2766,7 @@ class EventsWorkerStore(SQLBaseStore): @cached(tree=True) async def get_metadata_for_event( self, room_id: str, event_id: str - ) -> Optional[EventMetadata]: + ) -> EventMetadata | None: row = await self.db_pool.simple_select_one( table="events", keyvalues={"room_id": room_id, "event_id": event_id}, diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index 4b3bc69d20..2019ad9904 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -20,7 +20,7 @@ # # -from typing import TYPE_CHECKING, Optional, Union, cast +from typing import TYPE_CHECKING, cast from canonicaljson import encode_canonical_json @@ -72,7 +72,7 @@ class FilteringWorkerStore(SQLBaseStore): lower_bound_id = progress.get("lower_bound_id", "") - def _get_last_id(txn: LoggingTransaction) -> Optional[str]: + def _get_last_id(txn: LoggingTransaction) -> str | None: sql = """ SELECT user_id FROM user_filters WHERE user_id > ? @@ -151,7 +151,7 @@ class FilteringWorkerStore(SQLBaseStore): @cached(num_args=2) async def get_user_filter( - self, user_id: UserID, filter_id: Union[int, str] + self, user_id: UserID, filter_id: int | str ) -> JsonMapping: # filter_id is BIGINT UNSIGNED, so if it isn't a number, fail # with a coherent error message rather than 500 M_UNKNOWN. @@ -187,7 +187,7 @@ class FilteringWorkerStore(SQLBaseStore): sql = "SELECT MAX(filter_id) FROM user_filters WHERE full_user_id = ?" 
txn.execute(sql, (user_id.to_string(),)) - max_id = cast(tuple[Optional[int]], txn.fetchone())[0] + max_id = cast(tuple[int | None], txn.fetchone())[0] if max_id is None: filter_id = 0 else: diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 9833565095..f81257b5a1 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -22,7 +22,7 @@ import itertools import json import logging -from typing import Iterable, Mapping, Optional, Union, cast +from typing import Iterable, Mapping, cast from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes @@ -201,7 +201,7 @@ class KeyStore(CacheInvalidationWorkerStore): self, server_name: str, key_id: str, - ) -> Optional[FetchKeyResultForRemote]: + ) -> FetchKeyResultForRemote | None: raise NotImplementedError() @cachedList( @@ -209,13 +209,13 @@ class KeyStore(CacheInvalidationWorkerStore): ) async def get_server_keys_json_for_remote( self, server_name: str, key_ids: Iterable[str] - ) -> Mapping[str, Optional[FetchKeyResultForRemote]]: + ) -> Mapping[str, FetchKeyResultForRemote | None]: """Fetch the cached keys for the given server/key IDs. If we have multiple entries for a given key ID, returns the most recent. """ rows = cast( - list[tuple[str, str, int, int, Union[bytes, memoryview]]], + list[tuple[str, str, int, int, bytes | memoryview]], await self.db_pool.simple_select_many_batch( table="server_keys_json", column="key_id", @@ -258,7 +258,7 @@ class KeyStore(CacheInvalidationWorkerStore): If we have multiple entries for a given key ID, returns the most recent. """ rows = cast( - list[tuple[str, str, int, int, Union[bytes, memoryview]]], + list[tuple[str, str, int, int, bytes | memoryview]], await self.db_pool.simple_select_list( table="server_keys_json", keyvalues={"server_name": server_name}, diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index 9dd2cae344..51f04acbcb 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -290,7 +290,7 @@ class LockStore(SQLBaseStore): self, lock_names: Collection[tuple[str, str]], write: bool, - ) -> Optional[AsyncExitStack]: + ) -> AsyncExitStack | None: """Try to acquire multiple locks for the given names/keys. Will return an async context manager if the locks are successfully acquired, which *must* be used (otherwise the lock will leak). @@ -402,7 +402,7 @@ class Lock: # We might be called from a non-main thread, so we defer setting up the # looping call. 
- self._looping_call: Optional[LoopingCall] = None + self._looping_call: LoopingCall | None = None reactor.callFromThread(self._setup_looping_call) self._dropped = False @@ -497,9 +497,9 @@ class Lock: async def __aexit__( self, - _exctype: Optional[type[BaseException]], - _excinst: Optional[BaseException], - _exctb: Optional[TracebackType], + _exctype: type[BaseException] | None, + _excinst: BaseException | None, + _exctb: TracebackType | None, ) -> bool: await self.release() diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index b9f882662e..50664d63e5 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -25,8 +25,6 @@ from typing import ( TYPE_CHECKING, Collection, Iterable, - Optional, - Union, cast, ) @@ -57,16 +55,16 @@ logger = logging.getLogger(__name__) class LocalMedia: media_id: str media_type: str - media_length: Optional[int] + media_length: int | None upload_name: str created_ts: int - url_cache: Optional[str] + url_cache: str | None last_access_ts: int - quarantined_by: Optional[str] + quarantined_by: str | None safe_from_quarantine: bool - user_id: Optional[str] - authenticated: Optional[bool] - sha256: Optional[str] + user_id: str | None + authenticated: bool | None + sha256: str | None @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -75,20 +73,20 @@ class RemoteMedia: media_id: str media_type: str media_length: int - upload_name: Optional[str] + upload_name: str | None filesystem_id: str created_ts: int last_access_ts: int - quarantined_by: Optional[str] - authenticated: Optional[bool] - sha256: Optional[str] + quarantined_by: str | None + authenticated: bool | None + sha256: str | None @attr.s(slots=True, frozen=True, auto_attribs=True) class UrlCache: response_code: int expires_ts: int - og: Union[str, bytes] + og: str | bytes class MediaSortOrder(Enum): @@ -183,7 +181,7 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): ) if hs.config.media.can_load_media_repo: - self.unused_expiration_time: Optional[int] = ( + self.unused_expiration_time: int | None = ( hs.config.media.unused_expiration_time ) else: @@ -224,7 +222,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): super().__init__(database, db_conn, hs) self.server_name: str = hs.hostname - async def get_local_media(self, media_id: str) -> Optional[LocalMedia]: + async def get_local_media(self, media_id: str) -> LocalMedia | None: """Get the metadata for a local piece of media Returns: @@ -299,7 +297,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): else: order = "ASC" - args: list[Union[str, int]] = [user_id] + args: list[str | int] = [user_id] sql = """ SELECT COUNT(*) as total_media FROM local_media_repository @@ -472,12 +470,12 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): media_id: str, media_type: str, time_now_ms: int, - upload_name: Optional[str], + upload_name: str | None, media_length: int, user_id: UserID, - url_cache: Optional[str] = None, - sha256: Optional[str] = None, - quarantined_by: Optional[str] = None, + url_cache: str | None = None, + sha256: str | None = None, + quarantined_by: str | None = None, ) -> None: if self.hs.config.media.enable_authenticated_media: authenticated = True @@ -505,12 +503,12 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): self, media_id: str, media_type: str, - upload_name: Optional[str], + upload_name: str | None, media_length: int, 
user_id: UserID, sha256: str, - url_cache: Optional[str] = None, - quarantined_by: Optional[str] = None, + url_cache: str | None = None, + quarantined_by: str | None = None, ) -> None: updatevalues = { "media_type": media_type, @@ -575,13 +573,13 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "get_pending_media", get_pending_media_txn ) - async def get_url_cache(self, url: str, ts: int) -> Optional[UrlCache]: + async def get_url_cache(self, url: str, ts: int) -> UrlCache | None: """Get the media_id and ts for a cached URL as of the given timestamp Returns: None if the URL isn't cached. """ - def get_url_cache_txn(txn: LoggingTransaction) -> Optional[UrlCache]: + def get_url_cache_txn(txn: LoggingTransaction) -> UrlCache | None: # get the most recently cached result (relative to the given ts) sql = """ SELECT response_code, expires_ts, og @@ -615,7 +613,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): self, url: str, response_code: int, - etag: Optional[str], + etag: str | None, expires_ts: int, og: str, media_id: str, @@ -683,7 +681,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): async def get_cached_remote_media( self, origin: str, media_id: str - ) -> Optional[RemoteMedia]: + ) -> RemoteMedia | None: row = await self.db_pool.simple_select_one( "remote_media_cache", {"media_origin": origin, "media_id": media_id}, @@ -724,9 +722,9 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): media_type: str, media_length: int, time_now_ms: int, - upload_name: Optional[str], + upload_name: str | None, filesystem_id: str, - sha256: Optional[str], + sha256: str | None, ) -> None: if self.hs.config.media.enable_authenticated_media: authenticated = True @@ -822,7 +820,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): t_width: int, t_height: int, t_type: str, - ) -> Optional[ThumbnailInfo]: + ) -> ThumbnailInfo | None: """Fetch the thumbnail info of given width, height and type.""" row = await self.db_pool.simple_select_one( diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index bf8e540ffb..b51edd5d0c 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -18,7 +18,7 @@ # # import logging -from typing import TYPE_CHECKING, Mapping, Optional, cast +from typing import TYPE_CHECKING, Mapping, cast from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage.database import ( @@ -129,7 +129,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): ) async def get_monthly_active_users_by_service( - self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None + self, start_timestamp: int | None = None, end_timestamp: int | None = None ) -> list[tuple[str, str]]: """Generates list of monthly active users and their services. 
Please see "get_monthly_active_count_by_service" docstring for more details @@ -194,7 +194,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): return users @cached(num_args=1) - async def user_last_seen_monthly_active(self, user_id: str) -> Optional[int]: + async def user_last_seen_monthly_active(self, user_id: str) -> int | None: """ Checks if a given user is part of the monthly active user group diff --git a/synapse/storage/databases/main/openid.py b/synapse/storage/databases/main/openid.py index 0db7f73730..15c47a2562 100644 --- a/synapse/storage/databases/main/openid.py +++ b/synapse/storage/databases/main/openid.py @@ -19,7 +19,6 @@ # # -from typing import Optional from synapse.storage._base import SQLBaseStore from synapse.storage.database import LoggingTransaction @@ -41,8 +40,8 @@ class OpenIdStore(SQLBaseStore): async def get_user_id_for_open_id_token( self, token: str, ts_now_ms: int - ) -> Optional[str]: - def get_user_id_for_token_txn(txn: LoggingTransaction) -> Optional[str]: + ) -> str | None: + def get_user_id_for_token_txn(txn: LoggingTransaction) -> str | None: sql = ( "SELECT user_id FROM open_id_tokens" " WHERE token = ? AND ? <= ts_valid_until_ms" diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index fec94f4e5a..75ca9e40d7 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -23,8 +23,6 @@ from typing import ( Any, Iterable, Mapping, - Optional, - Union, cast, ) @@ -260,7 +258,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) # TODO All these columns are nullable, but we don't expect that: # https://github.com/matrix-org/synapse/issues/16467 rows = cast( - list[tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], + list[tuple[str, str, int, int, int, str | None, int | bool]], await self.db_pool.simple_select_many_batch( table="presence_stream", column="user_id", @@ -317,7 +315,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) @cached() async def _get_full_presence_stream_token_for_user( self, user_id: str - ) -> Optional[int]: + ) -> int | None: """Get the presence token corresponding to the last full presence update for this user. @@ -399,7 +397,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) # TODO All these columns are nullable, but we don't expect that: # https://github.com/matrix-org/synapse/issues/16467 rows = cast( - list[tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], + list[tuple[str, str, int, int, int, str | None, int | bool]], await self.db_pool.runInteraction( "get_presence_for_all_users", self.db_pool.simple_select_list_paginate_txn, diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 71f01a597b..11ad516eb3 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -19,7 +19,7 @@ # # import json -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, cast from canonicaljson import encode_canonical_json @@ -75,7 +75,7 @@ class ProfileWorkerStore(SQLBaseStore): lower_bound_id = progress.get("lower_bound_id", "") - def _get_last_id(txn: LoggingTransaction) -> Optional[str]: + def _get_last_id(txn: LoggingTransaction) -> str | None: sql = """ SELECT user_id FROM profiles WHERE user_id > ? 
@@ -176,7 +176,7 @@ class ProfileWorkerStore(SQLBaseStore): return ProfileInfo(avatar_url=profile[1], display_name=profile[0]) - async def get_profile_displayname(self, user_id: UserID) -> Optional[str]: + async def get_profile_displayname(self, user_id: UserID) -> str | None: """ Fetch the display name of a user. @@ -193,7 +193,7 @@ class ProfileWorkerStore(SQLBaseStore): desc="get_profile_displayname", ) - async def get_profile_avatar_url(self, user_id: UserID) -> Optional[str]: + async def get_profile_avatar_url(self, user_id: UserID) -> str | None: """ Fetch the avatar URL of a user. @@ -257,9 +257,7 @@ class ProfileWorkerStore(SQLBaseStore): ) # If value_type is None, then the value did not exist. - value_type, value = cast( - tuple[Optional[str], JsonValue], txn.fetchone() - ) + value_type, value = cast(tuple[str | None, JsonValue], txn.fetchone()) if not value_type: raise StoreError(404, "No row found") # If value_type is object or array, then need to deserialize the JSON. @@ -346,7 +344,7 @@ class ProfileWorkerStore(SQLBaseStore): # possible due to the grammar. (f'$."{new_field_name}"', user_id.localpart), ) - row = cast(tuple[Optional[int], Optional[int], Optional[int]], txn.fetchone()) + row = cast(tuple[int | None, int | None, int | None], txn.fetchone()) # The values return null if the column is null. total_bytes = ( @@ -373,7 +371,7 @@ class ProfileWorkerStore(SQLBaseStore): raise StoreError(400, "Profile too large", Codes.PROFILE_TOO_LARGE) async def set_profile_displayname( - self, user_id: UserID, new_displayname: Optional[str] + self, user_id: UserID, new_displayname: str | None ) -> None: """ Set the display name of a user. @@ -406,7 +404,7 @@ class ProfileWorkerStore(SQLBaseStore): ) async def set_profile_avatar_url( - self, user_id: UserID, new_avatar_url: Optional[str] + self, user_id: UserID, new_avatar_url: str | None ) -> None: """ Set the avatar of a user. 
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index ecab19eb2e..d361166cec 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -25,9 +25,7 @@ from typing import ( Collection, Iterable, Mapping, - Optional, Sequence, - Union, cast, ) @@ -231,7 +229,7 @@ class PushRulesWorkerStore( async def get_push_rules_enabled_for_user(self, user_id: str) -> dict[str, bool]: results = cast( - list[tuple[str, Optional[Union[int, bool]]]], + list[tuple[str, int | bool | None]], await self.db_pool.simple_select_list( table="push_rules_enable", keyvalues={"user_name": user_id}, @@ -327,7 +325,7 @@ class PushRulesWorkerStore( results: dict[str, dict[str, bool]] = {user_id: {} for user_id in user_ids} rows = cast( - list[tuple[str, str, Optional[int]]], + list[tuple[str, str, int | None]], await self.db_pool.simple_select_many_batch( table="push_rules_enable", column="user_name", @@ -402,9 +400,9 @@ class PushRulesWorkerStore( rule_id: str, priority_class: int, conditions: Sequence[Mapping[str, str]], - actions: Sequence[Union[Mapping[str, Any], str]], - before: Optional[str] = None, - after: Optional[str] = None, + actions: Sequence[Mapping[str, Any] | str], + before: str | None = None, + after: str | None = None, ) -> None: if not self._is_push_writer: raise Exception("Not a push writer") @@ -791,7 +789,7 @@ class PushRulesWorkerStore( self, user_id: str, rule_id: str, - actions: list[Union[dict, str]], + actions: list[dict | str], is_default_rule: bool, ) -> None: """ @@ -882,7 +880,7 @@ class PushRulesWorkerStore( user_id: str, rule_id: str, op: str, - data: Optional[JsonDict] = None, + data: JsonDict | None = None, ) -> None: if not self._is_push_writer: raise Exception("Not a push writer") diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index c8f049536a..e7ab7f64f9 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -25,7 +25,6 @@ from typing import ( Any, Iterable, Iterator, - Optional, cast, ) @@ -51,7 +50,7 @@ logger = logging.getLogger(__name__) PusherRow = tuple[ int, # id str, # user_name - Optional[int], # access_token + int | None, # access_token str, # profile_tag str, # kind str, # app_id @@ -365,7 +364,7 @@ class PusherWorkerStore(SQLBaseStore): return bool(updated) async def update_pusher_failing_since( - self, app_id: str, pushkey: str, user_id: str, failing_since: Optional[int] + self, app_id: str, pushkey: str, user_id: str, failing_since: int | None ) -> None: await self.db_pool.simple_update( table="pushers", @@ -378,7 +377,7 @@ class PusherWorkerStore(SQLBaseStore): self, pusher_id: int ) -> dict[str, ThrottleParams]: res = cast( - list[tuple[str, Optional[int], Optional[int]]], + list[tuple[str, int | None, int | None]], await self.db_pool.simple_select_list( "pusher_throttle", {"pusher": pusher_id}, @@ -607,7 +606,7 @@ class PusherBackgroundUpdatesStore(SQLBaseStore): (last_pusher_id, batch_size), ) - rows = cast(list[tuple[int, Optional[str], Optional[str]]], txn.fetchall()) + rows = cast(list[tuple[int, str | None, str | None]], txn.fetchall()) if len(rows) == 0: return 0 @@ -666,13 +665,13 @@ class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore): device_display_name: str, pushkey: str, pushkey_ts: int, - lang: Optional[str], - data: Optional[JsonDict], + lang: str | None, + data: JsonDict | None, last_stream_ordering: int, profile_tag: str = "", enabled: 
bool = True, - device_id: Optional[str] = None, - access_token_id: Optional[int] = None, + device_id: str | None = None, + access_token_id: int | None = None, ) -> None: async with self._pushers_id_gen.get_next() as stream_id: await self.db_pool.simple_upsert( diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 63d4e1f68c..ba5e07a051 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -26,7 +26,6 @@ from typing import ( Collection, Iterable, Mapping, - Optional, Sequence, cast, ) @@ -67,7 +66,7 @@ class ReceiptInRoom: receipt_type: str user_id: str event_id: str - thread_id: Optional[str] + thread_id: str | None data: JsonMapping @staticmethod @@ -176,7 +175,7 @@ class ReceiptsWorkerStore(SQLBaseStore): user_id: str, room_id: str, receipt_types: Collection[str], - ) -> Optional[tuple[str, int]]: + ) -> tuple[str, int] | None: """ Fetch the event ID and stream_ordering for the latest unthreaded receipt in a room with one of the given receipt types. @@ -208,7 +207,7 @@ class ReceiptsWorkerStore(SQLBaseStore): args.extend((user_id, room_id)) txn.execute(sql, args) - return cast(Optional[tuple[str, int]], txn.fetchone()) + return cast(tuple[str, int] | None, txn.fetchone()) async def get_receipts_for_user( self, user_id: str, receipt_types: Iterable[str] @@ -311,7 +310,7 @@ class ReceiptsWorkerStore(SQLBaseStore): self, room_ids: Iterable[str], to_key: MultiWriterStreamToken, - from_key: Optional[MultiWriterStreamToken] = None, + from_key: MultiWriterStreamToken | None = None, ) -> list[JsonMapping]: """Get receipts for multiple rooms for sending to clients. @@ -343,7 +342,7 @@ class ReceiptsWorkerStore(SQLBaseStore): self, room_id: str, to_key: MultiWriterStreamToken, - from_key: Optional[MultiWriterStreamToken] = None, + from_key: MultiWriterStreamToken | None = None, ) -> Sequence[JsonMapping]: """Get receipts for a single room for sending to clients. @@ -371,7 +370,7 @@ class ReceiptsWorkerStore(SQLBaseStore): self, room_id: str, to_key: MultiWriterStreamToken, - from_key: Optional[MultiWriterStreamToken] = None, + from_key: MultiWriterStreamToken | None = None, ) -> Sequence[JsonMapping]: """See get_linearized_receipts_for_room""" @@ -425,7 +424,7 @@ class ReceiptsWorkerStore(SQLBaseStore): self, room_ids: Collection[str], to_key: MultiWriterStreamToken, - from_key: Optional[MultiWriterStreamToken] = None, + from_key: MultiWriterStreamToken | None = None, ) -> Mapping[str, Sequence[JsonMapping]]: if not room_ids: return {} @@ -528,7 +527,7 @@ class ReceiptsWorkerStore(SQLBaseStore): def get_linearized_receipts_for_events_txn( txn: LoggingTransaction, room_id_event_id_tuples: Collection[tuple[str, str]], - ) -> list[tuple[str, str, str, str, Optional[str], str]]: + ) -> list[tuple[str, str, str, str, str | None, str]]: clause, args = make_tuple_in_list_sql_clause( self.database_engine, ("room_id", "event_id"), room_id_event_id_tuples ) @@ -578,7 +577,7 @@ class ReceiptsWorkerStore(SQLBaseStore): async def get_linearized_receipts_for_all_rooms( self, to_key: MultiWriterStreamToken, - from_key: Optional[MultiWriterStreamToken] = None, + from_key: MultiWriterStreamToken | None = None, ) -> Mapping[str, JsonMapping]: """Get receipts for all rooms between two stream_ids, up to a limit of the latest 100 read receipts. 
@@ -655,7 +654,7 @@ class ReceiptsWorkerStore(SQLBaseStore): def get_linearized_receipts_for_user_in_rooms_txn( txn: LoggingTransaction, batch_room_ids: StrCollection, - ) -> list[tuple[str, str, str, str, Optional[str], str]]: + ) -> list[tuple[str, str, str, str, str | None, str]]: clause, args = make_in_list_sql_clause( self.database_engine, "room_id", batch_room_ids ) @@ -780,7 +779,7 @@ class ReceiptsWorkerStore(SQLBaseStore): async def get_all_updated_receipts( self, instance_name: str, last_id: int, current_id: int, limit: int ) -> tuple[ - list[tuple[int, tuple[str, str, str, str, Optional[str], JsonDict]]], int, bool + list[tuple[int, tuple[str, str, str, str, str | None, JsonDict]]], int, bool ]: """Get updates for receipts replication stream. @@ -809,7 +808,7 @@ class ReceiptsWorkerStore(SQLBaseStore): def get_all_updated_receipts_txn( txn: LoggingTransaction, ) -> tuple[ - list[tuple[int, tuple[str, str, str, str, Optional[str], JsonDict]]], + list[tuple[int, tuple[str, str, str, str, str | None, JsonDict]]], int, bool, ]: @@ -824,7 +823,7 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql, (last_id, current_id, instance_name, limit)) updates = cast( - list[tuple[int, tuple[str, str, str, str, Optional[str], JsonDict]]], + list[tuple[int, tuple[str, str, str, str, str | None, JsonDict]]], [(r[0], r[1:6] + (db_to_json(r[6]),)) for r in txn], ) @@ -884,10 +883,10 @@ class ReceiptsWorkerStore(SQLBaseStore): receipt_type: str, user_id: str, event_id: str, - thread_id: Optional[str], + thread_id: str | None, data: JsonDict, stream_id: int, - ) -> Optional[int]: + ) -> int | None: """Inserts a receipt into the database if it's newer than the current one. Returns: @@ -1023,9 +1022,9 @@ class ReceiptsWorkerStore(SQLBaseStore): receipt_type: str, user_id: str, event_ids: list[str], - thread_id: Optional[str], + thread_id: str | None, data: dict, - ) -> Optional[PersistedPosition]: + ) -> PersistedPosition | None: """Insert a receipt, either from local client or remote server. 
Automatically does conversion between linearized and graph @@ -1095,7 +1094,7 @@ class ReceiptsWorkerStore(SQLBaseStore): receipt_type: str, user_id: str, event_ids: list[str], - thread_id: Optional[str], + thread_id: str | None, data: JsonDict, ) -> None: assert self._can_write_to_receipts diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index bad2d0b63a..545b0f11c4 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -22,7 +22,7 @@ import logging import random import re -from typing import TYPE_CHECKING, Any, Optional, Union, cast +from typing import TYPE_CHECKING, Any, cast import attr @@ -94,8 +94,8 @@ class TokenLookupResult: token_id: int is_guest: bool = False shadow_banned: bool = False - device_id: Optional[str] = None - valid_until_ms: Optional[int] = None + device_id: str | None = None + valid_until_ms: int | None = None token_owner: str = attr.ib() token_used: bool = False @@ -118,7 +118,7 @@ class RefreshTokenLookupResult: token_id: int """The ID of this refresh token.""" - next_token_id: Optional[int] + next_token_id: int | None """The ID of the refresh token which replaced this one.""" has_next_refresh_token_been_refreshed: bool @@ -127,11 +127,11 @@ class RefreshTokenLookupResult: has_next_access_token_been_used: bool """True if the next access token was already used at least once.""" - expiry_ts: Optional[int] + expiry_ts: int | None """The time at which the refresh token expires and can not be used. If None, the refresh token doesn't expire.""" - ultimate_session_expiry_ts: Optional[int] + ultimate_session_expiry_ts: int | None """The time at which the session comes to an end and can no longer be refreshed. If None, the session can be refreshed indefinitely.""" @@ -144,10 +144,10 @@ class LoginTokenLookupResult: user_id: str """The user this token belongs to.""" - auth_provider_id: Optional[str] + auth_provider_id: str | None """The SSO Identity Provider that the user authenticated with, to get this token.""" - auth_provider_session_id: Optional[str] + auth_provider_session_id: str | None """The session ID advertised by the SSO Identity Provider.""" @@ -171,7 +171,7 @@ class ThreepidValidationSession: """ID of the validation session""" last_send_attempt: int """a number serving to dedupe send attempts for this session""" - validated_at: Optional[int] + validated_at: int | None """timestamp of when this session was validated if so""" @@ -233,13 +233,13 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): async def register_user( self, user_id: str, - password_hash: Optional[str] = None, + password_hash: str | None = None, was_guest: bool = False, make_guest: bool = False, - appservice_id: Optional[str] = None, - create_profile_with_displayname: Optional[str] = None, + appservice_id: str | None = None, + create_profile_with_displayname: str | None = None, admin: bool = False, - user_type: Optional[str] = None, + user_type: str | None = None, shadow_banned: bool = False, approved: bool = False, ) -> None: @@ -286,13 +286,13 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): self, txn: LoggingTransaction, user_id: str, - password_hash: Optional[str], + password_hash: str | None, was_guest: bool, make_guest: bool, - appservice_id: Optional[str], - create_profile_with_displayname: Optional[str], + appservice_id: str | None, + create_profile_with_displayname: str | None, admin: bool, - user_type: Optional[str], 
+ user_type: str | None, shadow_banned: bool, approved: bool, ) -> None: @@ -379,10 +379,10 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) @cached() - async def get_user_by_id(self, user_id: str) -> Optional[UserInfo]: + async def get_user_by_id(self, user_id: str) -> UserInfo | None: """Returns info about the user account, if it exists.""" - def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[UserInfo]: + def get_user_by_id_txn(txn: LoggingTransaction) -> UserInfo | None: # We could technically use simple_select_one here, but it would not perform # the COALESCEs (unless hacked into the column names), which could yield # confusing results. @@ -466,7 +466,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): return is_trial @cached() - async def get_user_by_access_token(self, token: str) -> Optional[TokenLookupResult]: + async def get_user_by_access_token(self, token: str) -> TokenLookupResult | None: """Get a user from the given access token. Args: @@ -479,7 +479,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): ) @cached() - async def get_expiration_ts_for_user(self, user_id: str) -> Optional[int]: + async def get_expiration_ts_for_user(self, user_id: str) -> int | None: """Get the expiration timestamp for the account bearing a given user ID. Args: @@ -515,8 +515,8 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): user_id: str, expiration_ts: int, email_sent: bool, - renewal_token: Optional[str] = None, - token_used_ts: Optional[int] = None, + renewal_token: str | None = None, + token_used_ts: int | None = None, ) -> None: """Updates the account validity properties of the given account, with the given values. @@ -576,7 +576,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): async def get_user_from_renewal_token( self, renewal_token: str - ) -> tuple[str, int, Optional[int]]: + ) -> tuple[str, int, int | None]: """Get a user ID and renewal status from a renewal token. Args: @@ -592,7 +592,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): has not been renewed using the current token yet. """ return cast( - tuple[str, int, Optional[int]], + tuple[str, int, int | None], await self.db_pool.simple_select_one( table="account_validity", keyvalues={"renewal_token": renewal_token}, @@ -745,7 +745,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): await self.db_pool.runInteraction("set_shadow_banned", set_shadow_banned_txn) async def set_user_type( - self, user: UserID, user_type: Optional[Union[UserTypes, str]] + self, user: UserID, user_type: UserTypes | str | None ) -> None: """Sets the user type. 
@@ -766,7 +766,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): def _query_for_auth( self, txn: LoggingTransaction, token: str - ) -> Optional[TokenLookupResult]: + ) -> TokenLookupResult | None: sql = """ SELECT users.name as user_id, users.is_guest, @@ -1027,7 +1027,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): @cached() async def get_user_by_external_id( self, auth_provider: str, external_id: str - ) -> Optional[str]: + ) -> str | None: """Look up a user by their external auth id Args: @@ -1145,7 +1145,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): return str(next_id) - async def get_user_id_by_threepid(self, medium: str, address: str) -> Optional[str]: + async def get_user_id_by_threepid(self, medium: str, address: str) -> str | None: """Returns user id from threepid Args: @@ -1163,7 +1163,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): def get_user_id_by_threepid_txn( self, txn: LoggingTransaction, medium: str, address: str - ) -> Optional[str]: + ) -> str | None: """Returns user id from threepid Args: @@ -1386,12 +1386,12 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): async def get_threepid_validation_session( self, - medium: Optional[str], + medium: str | None, client_secret: str, - address: Optional[str] = None, - sid: Optional[str] = None, - validated: Optional[bool] = True, - ) -> Optional[ThreepidValidationSession]: + address: str | None = None, + sid: str | None = None, + validated: bool | None = True, + ) -> ThreepidValidationSession | None: """Gets a session_id and last_send_attempt (if available) for a combination of validation metadata @@ -1425,7 +1425,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): def get_threepid_validation_session_txn( txn: LoggingTransaction, - ) -> Optional[ThreepidValidationSession]: + ) -> ThreepidValidationSession | None: sql = """ SELECT address, session_id, medium, client_secret, last_send_attempt, validated_at @@ -1555,7 +1555,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): values={"expiration_ts_ms": expiration_ts, "email_sent": False}, ) - async def get_user_pending_deactivation(self) -> Optional[str]: + async def get_user_pending_deactivation(self) -> str | None: """ Gets one user from the table of users waiting to be parted from all the rooms they're in. @@ -1686,7 +1686,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): """ def _use_registration_token_txn(txn: LoggingTransaction) -> None: - # Normally, res is Optional[dict[str, Any]]. + # Normally, res is dict[str, Any] | None. # Override type because the return type is only optional if # allow_none is True, and we don't want mypy throwing errors # about None not being indexable. @@ -1715,8 +1715,8 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): ) async def get_registration_tokens( - self, valid: Optional[bool] = None - ) -> list[tuple[str, Optional[int], int, int, Optional[int]]]: + self, valid: bool | None = None + ) -> list[tuple[str, int | None, int, int, int | None]]: """List all registration tokens. Used by the admin API. 
Args: @@ -1734,8 +1734,8 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): """ def select_registration_tokens_txn( - txn: LoggingTransaction, now: int, valid: Optional[bool] - ) -> list[tuple[str, Optional[int], int, int, Optional[int]]]: + txn: LoggingTransaction, now: int, valid: bool | None + ) -> list[tuple[str, int | None, int, int, int | None]]: if valid is None: # Return all tokens regardless of validity txn.execute( @@ -1765,7 +1765,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): txn.execute(sql, [now]) return cast( - list[tuple[str, Optional[int], int, int, Optional[int]]], txn.fetchall() + list[tuple[str, int | None, int, int, int | None]], txn.fetchall() ) return await self.db_pool.runInteraction( @@ -1775,7 +1775,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): valid, ) - async def get_one_registration_token(self, token: str) -> Optional[dict[str, Any]]: + async def get_one_registration_token(self, token: str) -> dict[str, Any] | None: """Get info about the given registration token. Used by the admin API. Args: @@ -1801,9 +1801,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): "expiry_time": row[4], } - async def generate_registration_token( - self, length: int, chars: str - ) -> Optional[str]: + async def generate_registration_token(self, length: int, chars: str) -> str | None: """Generate a random registration token. Used by the admin API. Args: @@ -1843,7 +1841,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): ) async def create_registration_token( - self, token: str, uses_allowed: Optional[int], expiry_time: Optional[int] + self, token: str, uses_allowed: int | None, expiry_time: int | None ) -> bool: """Create a new registration token. Used by the admin API. @@ -1892,8 +1890,8 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): ) async def update_registration_token( - self, token: str, updatevalues: dict[str, Optional[int]] - ) -> Optional[dict[str, Any]]: + self, token: str, updatevalues: dict[str, int | None] + ) -> dict[str, Any] | None: """Update a registration token. Used by the admin API. Args: @@ -1909,7 +1907,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): def _update_registration_token_txn( txn: LoggingTransaction, - ) -> Optional[dict[str, Any]]: + ) -> dict[str, Any] | None: try: self.db_pool.simple_update_one_txn( txn, @@ -1996,14 +1994,12 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): desc="mark_access_token_as_used", ) - async def lookup_refresh_token( - self, token: str - ) -> Optional[RefreshTokenLookupResult]: + async def lookup_refresh_token(self, token: str) -> RefreshTokenLookupResult | None: """Lookup a refresh token with hints about its validity.""" def _lookup_refresh_token_txn( txn: LoggingTransaction, - ) -> Optional[RefreshTokenLookupResult]: + ) -> RefreshTokenLookupResult | None: txn.execute( """ SELECT @@ -2154,8 +2150,8 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): user_id: str, token: str, expiry_ts: int, - auth_provider_id: Optional[str], - auth_provider_session_id: Optional[str], + auth_provider_id: str | None, + auth_provider_session_id: str | None, ) -> None: """Adds a short-term login token for the given user. 
@@ -2455,9 +2451,9 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): async def user_delete_access_tokens( self, user_id: str, - except_token_id: Optional[int] = None, - device_id: Optional[str] = None, - ) -> list[tuple[str, int, Optional[str]]]: + except_token_id: int | None = None, + device_id: str | None = None, + ) -> list[tuple[str, int, str | None]]: """ Invalidate access and refresh tokens belonging to a user @@ -2471,14 +2467,14 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): A tuple of (token, token id, device id) for each of the deleted tokens """ - def f(txn: LoggingTransaction) -> list[tuple[str, int, Optional[str]]]: + def f(txn: LoggingTransaction) -> list[tuple[str, int, str | None]]: keyvalues = {"user_id": user_id} if device_id is not None: keyvalues["device_id"] = device_id items = keyvalues.items() where_clause = " AND ".join(k + " = ?" for k, _ in items) - values: list[Union[str, int]] = [v for _, v in items] + values: list[str | int] = [v for _, v in items] # Conveniently, refresh_tokens and access_tokens both use the user_id and device_id fields. Only caveat # is the `except_token_id` param that is tricky to get right, so for now we're just using the same where # clause and values before we handle that. This seems to be only used in the "set password" handler. @@ -2517,7 +2513,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): self, user_id: str, device_ids: StrCollection, - ) -> list[tuple[str, int, Optional[str]]]: + ) -> list[tuple[str, int, str | None]]: """ Invalidate access and refresh tokens belonging to a user @@ -2530,7 +2526,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): def user_delete_access_tokens_for_devices_txn( txn: LoggingTransaction, batch_device_ids: StrCollection - ) -> list[tuple[str, int, Optional[str]]]: + ) -> list[tuple[str, int, str | None]]: self.db_pool.simple_delete_many_txn( txn, table="refresh_tokens", @@ -2583,7 +2579,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore): await self.db_pool.runInteraction("delete_access_token", f) async def user_set_password_hash( - self, user_id: str, password_hash: Optional[str] + self, user_id: str, password_hash: str | None ) -> None: """ NB. This does *not* evict any cache because the one use for this @@ -2750,10 +2746,10 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): self, user_id: str, token: str, - device_id: Optional[str], - valid_until_ms: Optional[int], - puppets_user_id: Optional[str] = None, - refresh_token_id: Optional[int] = None, + device_id: str | None, + valid_until_ms: int | None, + puppets_user_id: str | None = None, + refresh_token_id: int | None = None, ) -> int: """Adds an access token for the given user. @@ -2795,9 +2791,9 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): self, user_id: str, token: str, - device_id: Optional[str], - expiry_ts: Optional[int], - ultimate_session_expiry_ts: Optional[int], + device_id: str | None, + expiry_ts: int | None, + ultimate_session_expiry_ts: int | None, ) -> int: """Adds a refresh token for the given user. 
@@ -2889,7 +2885,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): async def validate_threepid_session( self, session_id: str, client_secret: str, token: str, current_ts: int - ) -> Optional[str]: + ) -> str | None: """Attempt to validate a threepid session using a token Args: @@ -2909,7 +2905,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): """ # Insert everything into a transaction in order to run atomically - def validate_threepid_session_txn(txn: LoggingTransaction) -> Optional[str]: + def validate_threepid_session_txn(txn: LoggingTransaction) -> str | None: row = self.db_pool.simple_select_one_txn( txn, table="threepid_validation_session", @@ -2984,7 +2980,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): session_id: str, client_secret: str, send_attempt: int, - next_link: Optional[str], + next_link: str | None, token: str, token_expires: int, ) -> None: diff --git a/synapse/storage/databases/main/rejections.py b/synapse/storage/databases/main/rejections.py index a603258644..c73c3d761d 100644 --- a/synapse/storage/databases/main/rejections.py +++ b/synapse/storage/databases/main/rejections.py @@ -20,7 +20,6 @@ # import logging -from typing import Optional from synapse.storage._base import SQLBaseStore @@ -28,7 +27,7 @@ logger = logging.getLogger(__name__) class RejectionsStore(SQLBaseStore): - async def get_rejection_reason(self, event_id: str) -> Optional[str]: + async def get_rejection_reason(self, event_id: str) -> str | None: return await self.db_pool.simple_select_one_onecol( table="rejections", retcol="reason", diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 529102c245..9d9c37e2a4 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -24,9 +24,7 @@ from typing import ( Collection, Iterable, Mapping, - Optional, Sequence, - Union, cast, ) @@ -167,14 +165,14 @@ class RelationsWorkerStore(SQLBaseStore): room_id: str, event_id: str, event: EventBase, - relation_type: Optional[str] = None, - event_type: Optional[str] = None, + relation_type: str | None = None, + event_type: str | None = None, limit: int = 5, direction: Direction = Direction.BACKWARDS, - from_token: Optional[StreamToken] = None, - to_token: Optional[StreamToken] = None, + from_token: StreamToken | None = None, + to_token: StreamToken | None = None, recurse: bool = False, - ) -> tuple[Sequence[_RelatedEvent], Optional[StreamToken]]: + ) -> tuple[Sequence[_RelatedEvent], StreamToken | None]: """Get a list of relations for an event, ordered by topological ordering. 
Args: @@ -204,7 +202,7 @@ class RelationsWorkerStore(SQLBaseStore): assert limit >= 0 where_clause = ["room_id = ?"] - where_args: list[Union[str, int]] = [room_id] + where_args: list[str | int] = [room_id] is_redacted = event.internal_metadata.is_redacted() if relation_type is not None: @@ -276,7 +274,7 @@ class RelationsWorkerStore(SQLBaseStore): def _get_recent_references_for_event_txn( txn: LoggingTransaction, - ) -> tuple[list[_RelatedEvent], Optional[StreamToken]]: + ) -> tuple[list[_RelatedEvent], StreamToken | None]: txn.execute(sql, [event.event_id] + where_args + [limit + 1]) events = [] @@ -463,7 +461,7 @@ class RelationsWorkerStore(SQLBaseStore): @cachedList(cached_method_name="get_references_for_event", list_name="event_ids") async def get_references_for_events( self, event_ids: Collection[str] - ) -> Mapping[str, Optional[Sequence[_RelatedEvent]]]: + ) -> Mapping[str, Sequence[_RelatedEvent] | None]: """Get a list of references to the given events. Args: @@ -511,14 +509,14 @@ class RelationsWorkerStore(SQLBaseStore): ) @cached() # type: ignore[synapse-@cached-mutable] - def get_applicable_edit(self, event_id: str) -> Optional[EventBase]: + def get_applicable_edit(self, event_id: str) -> EventBase | None: raise NotImplementedError() # TODO: This returns a mutable object, which is generally bad. @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids") # type: ignore[synapse-@cached-mutable] async def get_applicable_edits( self, event_ids: Collection[str] - ) -> Mapping[str, Optional[EventBase]]: + ) -> Mapping[str, EventBase | None]: """Get the most recent edit (if any) that has happened for the given events. @@ -598,14 +596,14 @@ class RelationsWorkerStore(SQLBaseStore): } @cached() # type: ignore[synapse-@cached-mutable] - def get_thread_summary(self, event_id: str) -> Optional[tuple[int, EventBase]]: + def get_thread_summary(self, event_id: str) -> tuple[int, EventBase] | None: raise NotImplementedError() # TODO: This returns a mutable object, which is generally bad. @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") # type: ignore[synapse-@cached-mutable] async def get_thread_summaries( self, event_ids: Collection[str] - ) -> Mapping[str, Optional[tuple[int, EventBase]]]: + ) -> Mapping[str, tuple[int, EventBase] | None]: """Get the number of threaded replies and the latest reply (if any) for the given events. Args: @@ -826,8 +824,8 @@ class RelationsWorkerStore(SQLBaseStore): async def events_have_relations( self, parent_ids: list[str], - relation_senders: Optional[list[str]], - relation_types: Optional[list[str]], + relation_senders: list[str] | None, + relation_types: list[str] | None, ) -> list[str]: """Check which events have a relationship from the given senders of the given types. @@ -930,8 +928,8 @@ class RelationsWorkerStore(SQLBaseStore): self, room_id: str, limit: int = 5, - from_token: Optional[ThreadsNextBatch] = None, - ) -> tuple[Sequence[str], Optional[ThreadsNextBatch]]: + from_token: ThreadsNextBatch | None = None, + ) -> tuple[Sequence[str], ThreadsNextBatch | None]: """Get a list of thread IDs, ordered by topological ordering of their latest reply. 
@@ -971,7 +969,7 @@ class RelationsWorkerStore(SQLBaseStore): def _get_threads_txn( txn: LoggingTransaction, - ) -> tuple[list[str], Optional[ThreadsNextBatch]]: + ) -> tuple[list[str], ThreadsNextBatch | None]: txn.execute(sql, (room_id, *pagination_args, limit + 1)) rows = cast(list[tuple[str, int, int]], txn.fetchall()) diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 7a294de558..633df07736 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -28,8 +28,6 @@ from typing import ( Any, Collection, Mapping, - Optional, - Union, cast, ) @@ -82,24 +80,24 @@ class RatelimitOverride: @attr.s(slots=True, frozen=True, auto_attribs=True) class LargestRoomStats: room_id: str - name: Optional[str] - canonical_alias: Optional[str] + name: str | None + canonical_alias: str | None joined_members: int - join_rules: Optional[str] - guest_access: Optional[str] - history_visibility: Optional[str] + join_rules: str | None + guest_access: str | None + history_visibility: str | None state_events: int - avatar: Optional[str] - topic: Optional[str] - room_type: Optional[str] + avatar: str | None + topic: str | None + room_type: str | None @attr.s(slots=True, frozen=True, auto_attribs=True) class RoomStats(LargestRoomStats): joined_local_members: int - version: Optional[str] - creator: Optional[str] - encryption: Optional[str] + version: str | None + creator: str | None + encryption: str | None federatable: bool public: bool @@ -134,7 +132,7 @@ class RoomSortOrder(Enum): @attr.s(slots=True, frozen=True, auto_attribs=True) class PartialStateResyncInfo: - joined_via: Optional[str] + joined_via: str | None servers_in_room: set[str] = attr.ib(factory=set) @@ -205,7 +203,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): logger.error("store_room with room_id=%s failed: %s", room_id, e) raise StoreError(500, "Problem creating room.") - async def get_room(self, room_id: str) -> Optional[tuple[bool, bool]]: + async def get_room(self, room_id: str) -> tuple[bool, bool] | None: """Retrieve a room. Args: @@ -218,7 +216,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): or None if the room is unknown. """ row = cast( - Optional[tuple[Optional[Union[int, bool]], Optional[Union[int, bool]]]], + tuple[int | bool | None, int | bool | None] | None, await self.db_pool.simple_select_one( table="rooms", keyvalues={"room_id": room_id}, @@ -231,7 +229,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): return row return bool(row[0]), bool(row[1]) - async def get_room_with_stats(self, room_id: str) -> Optional[RoomStats]: + async def get_room_with_stats(self, room_id: str) -> RoomStats | None: """Retrieve room with statistics. 
Args: @@ -242,7 +240,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def get_room_with_stats_txn( txn: LoggingTransaction, room_id: str - ) -> Optional[RoomStats]: + ) -> RoomStats | None: sql = """ SELECT room_id, state.name, state.canonical_alias, curr.joined_members, curr.local_users_in_room AS joined_local_members, rooms.room_version AS version, @@ -292,8 +290,8 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): ) def _construct_room_type_where_clause( - self, room_types: Union[list[Union[str, None]], None] - ) -> tuple[Union[str, None], list]: + self, room_types: list[str | None] | None + ) -> tuple[str | None, list]: if not room_types: return None, [] @@ -320,9 +318,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): async def count_public_rooms( self, - network_tuple: Optional[ThirdPartyInstanceID], + network_tuple: ThirdPartyInstanceID | None, ignore_non_federatable: bool, - search_filter: Optional[dict], + search_filter: dict | None, ) -> int: """Counts the number of public rooms as tracked in the room_stats_current and room_stats_state table. @@ -402,10 +400,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): async def get_largest_public_rooms( self, - network_tuple: Optional[ThirdPartyInstanceID], - search_filter: Optional[dict], - limit: Optional[int], - bounds: Optional[tuple[int, str]], + network_tuple: ThirdPartyInstanceID | None, + search_filter: dict | None, + limit: int | None, + bounds: tuple[int, str] | None, forwards: bool, ignore_non_federatable: bool = False, ) -> list[LargestRoomStats]: @@ -429,7 +427,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): """ where_clauses = [] - query_args: list[Union[str, int]] = [] + query_args: list[str | int] = [] if network_tuple: if network_tuple.appservice_id: @@ -575,7 +573,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): ) @cached(max_entries=10000) - async def is_room_blocked(self, room_id: str) -> Optional[bool]: + async def is_room_blocked(self, room_id: str) -> bool | None: return await self.db_pool.simple_select_one_onecol( table="blocked_rooms", keyvalues={"room_id": room_id}, @@ -584,7 +582,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): desc="is_room_blocked", ) - async def room_is_blocked_by(self, room_id: str) -> Optional[str]: + async def room_is_blocked_by(self, room_id: str) -> str | None: """ Function to retrieve user who has blocked the room. user_id is non-nullable @@ -604,9 +602,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): limit: int, order_by: str, reverse_order: bool, - search_term: Optional[str], - public_rooms: Optional[bool], - empty_rooms: Optional[bool], + search_term: str | None, + public_rooms: bool | None, + empty_rooms: bool | None, ) -> tuple[list[dict[str, Any]], int]: """Function to retrieve a paginated list of rooms as json. 
@@ -800,7 +798,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): ) @cached(max_entries=10000) - async def get_ratelimit_for_user(self, user_id: str) -> Optional[RatelimitOverride]: + async def get_ratelimit_for_user(self, user_id: str) -> RatelimitOverride | None: """Check if there are any overrides for ratelimiting for the given user Args: @@ -905,7 +903,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def get_retention_policy_for_room_txn( txn: LoggingTransaction, - ) -> Optional[tuple[Optional[int], Optional[int]]]: + ) -> tuple[int | None, int | None] | None: txn.execute( """ SELECT min_lifetime, max_lifetime FROM room_retention @@ -915,7 +913,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): (room_id,), ) - return cast(Optional[tuple[Optional[int], Optional[int]]], txn.fetchone()) + return cast(tuple[int | None, int | None] | None, txn.fetchone()) ret = await self.db_pool.runInteraction( "get_retention_policy_for_room", @@ -1058,7 +1056,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): self, server_name: str, media_id: str, - quarantined_by: Optional[str], + quarantined_by: str | None, ) -> int: """quarantines or unquarantines a single local or remote media id @@ -1135,7 +1133,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): txn: LoggingTransaction, hashes: set[str], media_ids: set[str], - quarantined_by: Optional[str], + quarantined_by: str | None, ) -> int: """Quarantine and unquarantine local media items. @@ -1190,7 +1188,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): txn: LoggingTransaction, hashes: set[str], media: set[tuple[str, str]], - quarantined_by: Optional[str], + quarantined_by: str | None, ) -> int: """Quarantine and unquarantine remote items @@ -1238,7 +1236,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): txn: LoggingTransaction, local_mxcs: list[str], remote_mxcs: list[tuple[str, str]], - quarantined_by: Optional[str], + quarantined_by: str | None, ) -> int: """Quarantine and unquarantine local and remote media items @@ -1341,7 +1339,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): ) async def get_rooms_for_retention_period_in_range( - self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False + self, min_ms: int | None, max_ms: int | None, include_null: bool = False ) -> dict[str, RetentionPolicy]: """Retrieves all of the rooms within the given retention range. @@ -1421,7 +1419,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): async def get_partial_state_servers_at_join( self, room_id: str - ) -> Optional[AbstractSet[str]]: + ) -> AbstractSet[str] | None: """Gets the set of servers in a partial state room at the time we joined it. 
Returns: @@ -1682,7 +1680,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): get_un_partial_stated_rooms_from_stream_txn, ) - async def get_event_report(self, report_id: int) -> Optional[dict[str, Any]]: + async def get_event_report(self, report_id: int) -> dict[str, Any] | None: """Retrieve an event report Args: @@ -1694,7 +1692,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_event_report_txn( txn: LoggingTransaction, report_id: int - ) -> Optional[dict[str, Any]]: + ) -> dict[str, Any] | None: sql = """ SELECT er.id, @@ -1748,9 +1746,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): start: int, limit: int, direction: Direction = Direction.BACKWARDS, - user_id: Optional[str] = None, - room_id: Optional[str] = None, - event_sender_user_id: Optional[str] = None, + user_id: str | None = None, + room_id: str | None = None, + event_sender_user_id: str | None = None, ) -> tuple[list[dict[str, Any]], int]: """Retrieve a paginated list of event reports @@ -2602,7 +2600,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): room_id: str, event_id: str, user_id: str, - reason: Optional[str], + reason: str | None, content: JsonDict, received_ts: int, ) -> int: @@ -2696,7 +2694,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): ) return next_id - async def clear_partial_state_room(self, room_id: str) -> Optional[int]: + async def clear_partial_state_room(self, room_id: str) -> int | None: """Clears the partial state flag for a room. Args: diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 1e22ab4e6d..4fb7779d38 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -26,9 +26,7 @@ from typing import ( Collection, Iterable, Mapping, - Optional, Sequence, - Union, cast, ) @@ -446,7 +444,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_invite_for_local_user_in_room( self, user_id: str, room_id: str - ) -> Optional[RoomsForUser]: + ) -> RoomsForUser | None: """Gets the invite for the given *local* user and room. Args: @@ -655,7 +653,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_local_current_membership_for_user_in_room( self, user_id: str, room_id: str - ) -> tuple[Optional[str], Optional[str]]: + ) -> tuple[str | None, str | None]: """Retrieve the current local membership state and event ID for a user in a room. Args: @@ -672,7 +670,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.BAD_JSON) results = cast( - Optional[tuple[str, str]], + tuple[str, str] | None, await self.db_pool.simple_select_one( "local_current_membership", {"room_id": room_id, "user_id": user_id}, @@ -833,7 +831,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) async def _do_users_share_a_room( self, user_id: str, other_user_ids: Collection[str] - ) -> Mapping[str, Optional[bool]]: + ) -> Mapping[str, bool | None]: """Return mapping from user ID to whether they share a room with the given user. 
@@ -896,7 +894,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) async def _do_users_share_a_room_joined_or_invited( self, user_id: str, other_user_ids: Collection[str] - ) -> Mapping[str, Optional[bool]]: + ) -> Mapping[str, bool | None]: """Return mapping from user ID to whether they share a room with the given user via being either joined or invited. @@ -974,7 +972,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): overlapping joined rooms for. cache_context """ - shared_room_ids: Optional[frozenset[str]] = None + shared_room_ids: frozenset[str] | None = None for user_id in user_ids: room_ids = await self.get_rooms_for_user( user_id, on_invalidate=cache_context.invalidate @@ -1045,7 +1043,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) def _get_user_id_from_membership_event_id( self, event_id: str - ) -> Optional[tuple[str, ProfileInfo]]: + ) -> tuple[str, ProfileInfo] | None: raise NotImplementedError() @cachedList( @@ -1054,7 +1052,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) async def _get_user_ids_from_membership_event_ids( self, event_ids: Iterable[str] - ) -> Mapping[str, Optional[str]]: + ) -> Mapping[str, str | None]: """For given set of member event_ids check if they point to a join event. @@ -1229,7 +1227,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def _get_approximate_current_memberships_in_room( self, room_id: str - ) -> Mapping[str, Optional[str]]: + ) -> Mapping[str, str | None]: """Build a map from event id to membership, for all events in the current state. The event ids of non-memberships events (e.g. `m.room.power_levels`) are present @@ -1240,7 +1238,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ rows = cast( - list[tuple[str, Optional[str]]], + list[tuple[str, str | None]], await self.db_pool.simple_select_list( "current_state_events", keyvalues={"room_id": room_id}, @@ -1387,7 +1385,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): @cached(max_entries=5000) async def _get_membership_from_event_id( self, member_event_id: str - ) -> Optional[EventIdMembership]: + ) -> EventIdMembership | None: raise NotImplementedError() @cachedList( @@ -1395,7 +1393,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) async def get_membership_from_event_ids( self, member_event_ids: Iterable[str] - ) -> Mapping[str, Optional[EventIdMembership]]: + ) -> Mapping[str, EventIdMembership | None]: """Get user_id and membership of a set of event IDs. Returns: @@ -1680,12 +1678,12 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): async def get_sliding_sync_room_for_user( self, user_id: str, room_id: str - ) -> Optional[RoomsForUserSlidingSync]: + ) -> RoomsForUserSlidingSync | None: """Get the sliding sync room entry for the given user and room.""" def get_sliding_sync_room_for_user_txn( txn: LoggingTransaction, - ) -> Optional[RoomsForUserSlidingSync]: + ) -> RoomsForUserSlidingSync | None: sql = """ SELECT m.room_id, m.sender, m.membership, m.membership_event_id, r.room_version, @@ -2106,7 +2104,7 @@ class _JoinedHostsCache: # if the instance is newly created or if the state is not based on a state # group. (An object is used as a sentinel value to ensure that it never is # equal to anything else). 
- state_group: Union[object, int] = attr.Factory(object) + state_group: object | int = attr.Factory(object) def __len__(self) -> int: return sum(len(v) for v in self.hosts_to_joined_users.values()) diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 63489f5c27..d6eace5efa 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -28,8 +28,6 @@ from typing import ( Any, Collection, Iterable, - Optional, - Union, cast, ) @@ -60,7 +58,7 @@ class SearchEntry: value: str event_id: str room_id: str - stream_ordering: Optional[int] + stream_ordering: int | None origin_server_ts: int @@ -516,7 +514,7 @@ class SearchStore(SearchBackgroundUpdateStore): # List of tuples of (rank, room_id, event_id). results = cast( - list[tuple[Union[int, float], str, str]], + list[tuple[int | float, str, str]], await self.db_pool.execute("search_msgs", sql, *args), ) @@ -562,7 +560,7 @@ class SearchStore(SearchBackgroundUpdateStore): search_term: str, keys: Iterable[str], limit: int, - pagination_token: Optional[str] = None, + pagination_token: str | None = None, ) -> JsonDict: """Performs a full text search over events with given keys. @@ -683,7 +681,7 @@ class SearchStore(SearchBackgroundUpdateStore): # List of tuples of (rank, room_id, event_id, origin_server_ts, stream_ordering). results = cast( - list[tuple[Union[int, float], str, str, int, int]], + list[tuple[int | float, str, str, int, int]], await self.db_pool.execute("search_rooms", sql, *args), ) @@ -817,7 +815,7 @@ class SearchToken(enum.Enum): And = enum.auto() -Token = Union[str, Phrase, SearchToken] +Token = str | Phrase | SearchToken TokenList = list[Token] diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index 62463c0259..2b67e75ac4 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -14,7 +14,7 @@ import logging -from typing import TYPE_CHECKING, Mapping, Optional, cast +from typing import TYPE_CHECKING, Mapping, cast import attr @@ -79,7 +79,7 @@ class SlidingSyncStore(SQLBaseStore): async def get_latest_bump_stamp_for_room( self, room_id: str, - ) -> Optional[int]: + ) -> int | None: """ Get the `bump_stamp` for the room. 
@@ -99,7 +99,7 @@ class SlidingSyncStore(SQLBaseStore): """ return cast( - Optional[int], + int | None, await self.db_pool.simple_select_one_onecol( table="sliding_sync_joined_rooms", keyvalues={"room_id": room_id}, @@ -121,7 +121,7 @@ class SlidingSyncStore(SQLBaseStore): user_id: str, device_id: str, conn_id: str, - previous_connection_position: Optional[int], + previous_connection_position: int | None, per_connection_state: "MutablePerConnectionState", ) -> int: """Persist updates to the per-connection state for a sliding sync @@ -154,7 +154,7 @@ class SlidingSyncStore(SQLBaseStore): user_id: str, device_id: str, conn_id: str, - previous_connection_position: Optional[int], + previous_connection_position: int | None, per_connection_state: "PerConnectionStateDB", ) -> int: # First we fetch (or create) the connection key associated with the diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index c2c1b62d7e..a0aea4975c 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -28,9 +28,7 @@ from typing import ( Iterable, Mapping, MutableMapping, - Optional, TypeVar, - Union, cast, overload, ) @@ -86,8 +84,8 @@ class EventMetadata: room_id: str event_type: str - state_key: Optional[str] - rejection_reason: Optional[str] + state_key: str | None + rejection_reason: str | None def _retrieve_and_check_room_version(room_id: str, room_version_id: str) -> RoomVersion: @@ -243,7 +241,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return result_map - async def get_room_predecessor(self, room_id: str) -> Optional[JsonMapping]: + async def get_room_predecessor(self, room_id: str) -> JsonMapping | None: """Get the predecessor of an upgraded room if it exists. Otherwise return None. @@ -303,7 +301,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return create_event @cached(max_entries=10000) - async def get_room_type(self, room_id: str) -> Union[Optional[str], Sentinel]: + async def get_room_type(self, room_id: str) -> str | None | Sentinel: """Fetch room type for given room. Since this function is cached, any missing values would be cached as @@ -325,7 +323,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): @cachedList(cached_method_name="get_room_type", list_name="room_ids") async def bulk_get_room_type( self, room_ids: set[str] - ) -> Mapping[str, Union[Optional[str], Sentinel]]: + ) -> Mapping[str, str | None | Sentinel]: """ Bulk fetch room types for the given rooms (via current state). @@ -342,7 +340,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def txn( txn: LoggingTransaction, - ) -> MutableMapping[str, Union[Optional[str], Sentinel]]: + ) -> MutableMapping[str, str | None | Sentinel]: clause, args = make_in_list_sql_clause( txn.database_engine, "room_id", room_ids ) @@ -398,13 +396,13 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return results @cached(max_entries=10000) - async def get_room_encryption(self, room_id: str) -> Optional[str]: + async def get_room_encryption(self, room_id: str) -> str | None: raise NotImplementedError() @cachedList(cached_method_name="get_room_encryption", list_name="room_ids") async def bulk_get_room_encryption( self, room_ids: set[str] - ) -> Mapping[str, Union[Optional[str], Sentinel]]: + ) -> Mapping[str, str | None | Sentinel]: """ Bulk fetch room encryption for the given rooms (via current state). 
@@ -422,7 +420,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def txn( txn: LoggingTransaction, - ) -> MutableMapping[str, Union[Optional[str], Sentinel]]: + ) -> MutableMapping[str, str | None | Sentinel]: clause, args = make_in_list_sql_clause( txn.database_engine, "room_id", room_ids ) @@ -551,7 +549,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # FIXME: how should this be cached? @cancellable async def get_partial_filtered_current_state_ids( - self, room_id: str, state_filter: Optional[StateFilter] = None + self, room_id: str, state_filter: StateFilter | None = None ) -> StateMap[str]: """Get the current state event of a given type for a room based on the current_state_events table. This may not be as up-to-date as the result @@ -604,7 +602,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) @cached(max_entries=50000) - async def _get_state_group_for_event(self, event_id: str) -> Optional[int]: + async def _get_state_group_for_event(self, event_id: str) -> int | None: return await self.db_pool.simple_select_one_onecol( table="event_to_state_groups", keyvalues={"event_id": event_id}, @@ -986,15 +984,13 @@ class StateMapWrapper(dict[StateKey, str]): return super().__getitem__(key) @overload # type: ignore[override] - def get(self, key: StateKey, default: None = None, /) -> Optional[str]: ... + def get(self, key: StateKey, default: None = None, /) -> str | None: ... @overload def get(self, key: StateKey, default: str, /) -> str: ... @overload - def get(self, key: StateKey, default: _T, /) -> Union[str, _T]: ... + def get(self, key: StateKey, default: _T, /) -> str | _T: ... - def get( - self, key: StateKey, default: Union[str, _T, None] = None - ) -> Union[str, _T, None]: + def get(self, key: StateKey, default: str | _T | None = None) -> str | _T | None: if key not in self.state_filter: raise Exception("State map was filtered and doesn't include: %s", key) return super().get(key, default) diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index 3df5c8b6f4..cd8f286d08 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import attr @@ -50,10 +50,10 @@ class StateDelta: event_type: str state_key: str - event_id: Optional[str] + event_id: str | None """new event_id for this state key. None if the state has been deleted.""" - prev_event_id: Optional[str] + prev_event_id: str | None """previous event_id for this state key. None if it's new state.""" @@ -191,8 +191,8 @@ class StateDeltasStore(SQLBaseStore): txn: LoggingTransaction, room_id: str, *, - from_token: Optional[RoomStreamToken], - to_token: Optional[RoomStreamToken], + from_token: RoomStreamToken | None, + to_token: RoomStreamToken | None, ) -> list[StateDelta]: """ Get the state deltas between two tokens. @@ -237,8 +237,8 @@ class StateDeltasStore(SQLBaseStore): self, room_id: str, *, - from_token: Optional[RoomStreamToken], - to_token: Optional[RoomStreamToken], + from_token: RoomStreamToken | None, + to_token: RoomStreamToken | None, ) -> list[StateDelta]: """ Get the state deltas between two tokens. 
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 19e525a3cd..6568e2aa08 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -27,8 +27,6 @@ from typing import ( Any, Counter, Iterable, - Optional, - Union, cast, ) @@ -296,7 +294,7 @@ class StatsStore(StateDeltasStore): @cached() async def get_earliest_token_for_stats( self, stats_type: str, id: str - ) -> Optional[int]: + ) -> int | None: """ Fetch the "earliest token". This is used by the room stats delta processor to ignore deltas that have been processed between the @@ -362,7 +360,7 @@ class StatsStore(StateDeltasStore): stats_id: str, fields: dict[str, int], complete_with_stream_id: int, - absolute_field_overrides: Optional[dict[str, int]] = None, + absolute_field_overrides: dict[str, int] | None = None, ) -> None: """ Updates the statistics for a subject, with a delta (difference/relative @@ -400,7 +398,7 @@ class StatsStore(StateDeltasStore): stats_id: str, fields: dict[str, int], complete_with_stream_id: int, - absolute_field_overrides: Optional[dict[str, int]] = None, + absolute_field_overrides: dict[str, int] | None = None, ) -> None: if absolute_field_overrides is None: absolute_field_overrides = {} @@ -585,7 +583,7 @@ class StatsStore(StateDeltasStore): ) return - room_state: dict[str, Union[None, bool, str]] = { + room_state: dict[str, None | bool | str] = { "join_rules": None, "history_visibility": None, "encryption": None, @@ -680,12 +678,12 @@ class StatsStore(StateDeltasStore): self, start: int, limit: int, - from_ts: Optional[int] = None, - until_ts: Optional[int] = None, - order_by: Optional[str] = UserSortOrder.USER_ID.value, + from_ts: int | None = None, + until_ts: int | None = None, + order_by: str | None = UserSortOrder.USER_ID.value, direction: Direction = Direction.FORWARDS, - search_term: Optional[str] = None, - ) -> tuple[list[tuple[str, Optional[str], int, int]], int]: + search_term: str | None = None, + ) -> tuple[list[tuple[str, str | None, int, int]], int]: """Function to retrieve a paginated list of users and their uploaded local media (size and number). This will return a json list of users and the total number of users matching the filter criteria. @@ -710,7 +708,7 @@ class StatsStore(StateDeltasStore): def get_users_media_usage_paginate_txn( txn: LoggingTransaction, - ) -> tuple[list[tuple[str, Optional[str], int, int]], int]: + ) -> tuple[list[tuple[str, str | None, int, int]], int]: filters = [] args: list = [] @@ -782,7 +780,7 @@ class StatsStore(StateDeltasStore): args += [limit, start] txn.execute(sql, args) - users = cast(list[tuple[str, Optional[str], int, int]], txn.fetchall()) + users = cast(list[tuple[str, str | None, int, int]], txn.fetchall()) return users, count diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index e8ea1e5480..8644ff412e 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -50,7 +50,6 @@ from typing import ( Iterable, Literal, Mapping, - Optional, Protocol, cast, overload, @@ -102,7 +101,7 @@ class PaginateFunction(Protocol): *, room_id: str, from_key: RoomStreamToken, - to_key: Optional[RoomStreamToken] = None, + to_key: RoomStreamToken | None = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, ) -> tuple[list[EventBase], RoomStreamToken, bool]: ... 
@@ -112,7 +111,7 @@ class PaginateFunction(Protocol): @attr.s(slots=True, frozen=True, auto_attribs=True) class _EventDictReturn: event_id: str - topological_ordering: Optional[int] + topological_ordering: int | None stream_ordering: int @@ -139,22 +138,22 @@ class CurrentStateDeltaMembership: room_id: str # Event - event_id: Optional[str] + event_id: str | None event_pos: PersistedEventPosition membership: str - sender: Optional[str] + sender: str | None # Prev event - prev_event_id: Optional[str] - prev_event_pos: Optional[PersistedEventPosition] - prev_membership: Optional[str] - prev_sender: Optional[str] + prev_event_id: str | None + prev_event_pos: PersistedEventPosition | None + prev_membership: str | None + prev_sender: str | None def generate_pagination_where_clause( direction: Direction, column_names: tuple[str, str], - from_token: Optional[tuple[Optional[int], int]], - to_token: Optional[tuple[Optional[int], int]], + from_token: tuple[int | None, int] | None, + to_token: tuple[int | None, int] | None, engine: BaseDatabaseEngine, ) -> str: """Creates an SQL expression to bound the columns by the pagination @@ -218,11 +217,9 @@ def generate_pagination_where_clause( def generate_pagination_bounds( direction: Direction, - from_token: Optional[RoomStreamToken], - to_token: Optional[RoomStreamToken], -) -> tuple[ - str, Optional[tuple[Optional[int], int]], Optional[tuple[Optional[int], int]] -]: + from_token: RoomStreamToken | None, + to_token: RoomStreamToken | None, +) -> tuple[str, tuple[int | None, int] | None, tuple[int | None, int] | None]: """ Generate a start and end point for this page of events. @@ -257,7 +254,7 @@ def generate_pagination_bounds( # by fetching all events between the min stream token and the maximum # stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and # then filtering the results. - from_bound: Optional[tuple[Optional[int], int]] = None + from_bound: tuple[int | None, int] | None = None if from_token: if from_token.topological is not None: from_bound = from_token.as_historical_tuple() @@ -272,7 +269,7 @@ def generate_pagination_bounds( from_token.stream, ) - to_bound: Optional[tuple[Optional[int], int]] = None + to_bound: tuple[int | None, int] | None = None if to_token: if to_token.topological is not None: to_bound = to_token.as_historical_tuple() @@ -291,7 +288,7 @@ def generate_pagination_bounds( def generate_next_token( - direction: Direction, last_topo_ordering: Optional[int], last_stream_ordering: int + direction: Direction, last_topo_ordering: int | None, last_stream_ordering: int ) -> RoomStreamToken: """ Generate the next room stream token based on the currently returned data. 
@@ -317,7 +314,7 @@ def generate_next_token( def _make_generic_sql_bound( bound: str, column_names: tuple[str, str], - values: tuple[Optional[int], int], + values: tuple[int | None, int], engine: BaseDatabaseEngine, ) -> str: """Create an SQL expression that bounds the given column names by the @@ -381,9 +378,9 @@ def _make_generic_sql_bound( def _filter_results( - lower_token: Optional[RoomStreamToken], - upper_token: Optional[RoomStreamToken], - instance_name: Optional[str], + lower_token: RoomStreamToken | None, + upper_token: RoomStreamToken | None, + instance_name: str | None, topological_ordering: int, stream_ordering: int, ) -> bool: @@ -436,9 +433,9 @@ def _filter_results( def _filter_results_by_stream( - lower_token: Optional[RoomStreamToken], - upper_token: Optional[RoomStreamToken], - instance_name: Optional[str], + lower_token: RoomStreamToken | None, + upper_token: RoomStreamToken | None, + instance_name: str | None, stream_ordering: int, ) -> bool: """ @@ -480,7 +477,7 @@ def _filter_results_by_stream( return True -def filter_to_clause(event_filter: Optional[Filter]) -> tuple[str, list[str]]: +def filter_to_clause(event_filter: Filter | None) -> tuple[str, list[str]]: # NB: This may create SQL clauses that don't optimise well (and we don't # have indices on all possible clauses). E.g. it may create # "room_id == X AND room_id != X", which postgres doesn't optimise. @@ -662,7 +659,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): *, room_ids: Collection[str], from_key: RoomStreamToken, - to_key: Optional[RoomStreamToken] = None, + to_key: RoomStreamToken | None = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, ) -> dict[str, tuple[list[EventBase], RoomStreamToken, bool]]: @@ -784,7 +781,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): *, room_id: str, from_key: RoomStreamToken, - to_key: Optional[RoomStreamToken] = None, + to_key: RoomStreamToken | None = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, ) -> tuple[list[EventBase], RoomStreamToken, bool]: @@ -936,7 +933,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): user_id: str, from_key: RoomStreamToken, to_key: RoomStreamToken, - excluded_room_ids: Optional[list[str]] = None, + excluded_room_ids: list[str] | None = None, ) -> list[CurrentStateDeltaMembership]: """ Fetch membership events (and the previous event that was replaced by that one) @@ -1131,7 +1128,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): user_id: str, from_key: RoomStreamToken, to_key: RoomStreamToken, - excluded_room_ids: Optional[AbstractSet[str]] = None, + excluded_room_ids: AbstractSet[str] | None = None, ) -> dict[str, RoomsForUserStateReset]: """ Fetch membership events that result in a meaningful membership change for a @@ -1328,7 +1325,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): user_id: str, from_key: RoomStreamToken, to_key: RoomStreamToken, - excluded_rooms: Optional[list[str]] = None, + excluded_rooms: list[str] | None = None, ) -> list[EventBase]: """Fetch membership events for a given user. 
@@ -1455,7 +1452,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): async def get_room_event_before_stream_ordering( self, room_id: str, stream_ordering: int - ) -> Optional[tuple[int, int, str]]: + ) -> tuple[int, int, str] | None: """Gets details of the first event in a room at or before a stream ordering Args: @@ -1466,7 +1463,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): A tuple of (stream ordering, topological ordering, event_id) """ - def _f(txn: LoggingTransaction) -> Optional[tuple[int, int, str]]: + def _f(txn: LoggingTransaction) -> tuple[int, int, str] | None: sql = """ SELECT stream_ordering, topological_ordering, event_id FROM events @@ -1479,7 +1476,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): LIMIT 1 """ txn.execute(sql, (room_id, stream_ordering)) - return cast(Optional[tuple[int, int, str]], txn.fetchone()) + return cast(tuple[int, int, str] | None, txn.fetchone()) return await self.db_pool.runInteraction( "get_room_event_before_stream_ordering", _f @@ -1489,7 +1486,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self, room_id: str, end_token: RoomStreamToken, - ) -> Optional[str]: + ) -> str | None: """Returns the ID of the last event in a room at or before a stream ordering Args: @@ -1514,8 +1511,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): async def get_last_event_pos_in_room( self, room_id: str, - event_types: Optional[StrCollection] = None, - ) -> Optional[tuple[str, PersistedEventPosition]]: + event_types: StrCollection | None = None, + ) -> tuple[str, PersistedEventPosition] | None: """ Returns the ID and event position of the last event in a room. @@ -1532,7 +1529,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def _get_last_event_pos_in_room_txn( txn: LoggingTransaction, - ) -> Optional[tuple[str, PersistedEventPosition]]: + ) -> tuple[str, PersistedEventPosition] | None: event_type_clause = "" event_type_args: list[str] = [] if event_types is not None and len(event_types) > 0: @@ -1558,7 +1555,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): [room_id] + event_type_args, ) - row = cast(Optional[tuple[str, int, str]], txn.fetchone()) + row = cast(tuple[str, int, str] | None, txn.fetchone()) if row is not None: event_id, stream_ordering, instance_name = row @@ -1580,8 +1577,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self, room_id: str, end_token: RoomStreamToken, - event_types: Optional[StrCollection] = None, - ) -> Optional[tuple[str, PersistedEventPosition]]: + event_types: StrCollection | None = None, + ) -> tuple[str, PersistedEventPosition] | None: """ Returns the ID and event position of the last event in a room at or before a stream ordering. @@ -1598,7 +1595,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def get_last_event_pos_in_room_before_stream_ordering_txn( txn: LoggingTransaction, - ) -> Optional[tuple[str, PersistedEventPosition]]: + ) -> tuple[str, PersistedEventPosition] | None: # We're looking for the closest event at or before the token. 
We need to # handle the fact that the stream token can be a vector clock (with an # `instance_map`) and events can be persisted on different instances @@ -1735,7 +1732,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): @cachedList(cached_method_name="_get_max_event_pos", list_name="room_ids") async def _bulk_get_max_event_pos( self, room_ids: StrCollection - ) -> Mapping[str, Optional[int]]: + ) -> Mapping[str, int | None]: """Fetch the max position of a persisted event in the room.""" # We need to be careful not to return positions ahead of the current @@ -1860,14 +1857,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): txn: LoggingTransaction, event_id: str, allow_none: bool = False, - ) -> Optional[int]: ... + ) -> int | None: ... def get_stream_id_for_event_txn( self, txn: LoggingTransaction, event_id: str, allow_none: bool = False, - ) -> Optional[int]: + ) -> int | None: # Type ignore: we pass keyvalues a Dict[str, str]; the function wants # Dict[str, Any]. I think mypy is unhappy because Dict is invariant? return self.db_pool.simple_select_one_onecol_txn( # type: ignore[call-overload] @@ -1970,7 +1967,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): event_id: str, before_limit: int, after_limit: int, - event_filter: Optional[Filter] = None, + event_filter: Filter | None = None, ) -> _EventsAround: """Retrieve events and pagination tokens around a given event in a room. @@ -2008,7 +2005,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): event_id: str, before_limit: int, after_limit: int, - event_filter: Optional[Filter], + event_filter: Filter | None, ) -> dict: """Retrieves event_ids and pagination tokens around a given event in a room. @@ -2073,7 +2070,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): from_id: int, current_id: int, limit: int, - ) -> tuple[int, dict[str, Optional[int]]]: + ) -> tuple[int, dict[str, int | None]]: """Get all new events Returns all event ids with from_id < stream_ordering <= current_id. @@ -2094,7 +2091,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def get_all_new_event_ids_stream_txn( txn: LoggingTransaction, - ) -> tuple[int, dict[str, Optional[int]]]: + ) -> tuple[int, dict[str, int | None]]: sql = ( "SELECT e.stream_ordering, e.event_id, e.received_ts" " FROM events AS e" @@ -2111,7 +2108,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if len(rows) == limit: upper_bound = rows[-1][0] - event_to_received_ts: dict[str, Optional[int]] = { + event_to_received_ts: dict[str, int | None] = { row[1]: row[2] for row in rows } return upper_bound, event_to_received_ts @@ -2221,10 +2218,10 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): txn: LoggingTransaction, room_id: str, from_token: RoomStreamToken, - to_token: Optional[RoomStreamToken] = None, + to_token: RoomStreamToken | None = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - event_filter: Optional[Filter] = None, + event_filter: Filter | None = None, ) -> tuple[list[_EventDictReturn], RoomStreamToken, bool]: """Returns list of events before or after a given token. 
@@ -2395,10 +2392,10 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): *, room_id: str, from_key: RoomStreamToken, - to_key: Optional[RoomStreamToken] = None, + to_key: RoomStreamToken | None = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - event_filter: Optional[Filter] = None, + event_filter: Filter | None = None, ) -> tuple[list[EventBase], RoomStreamToken, bool]: """ Paginate events by `topological_ordering` (tie-break with `stream_ordering`) in @@ -2525,9 +2522,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): async def get_timeline_gaps( self, room_id: str, - from_token: Optional[RoomStreamToken], + from_token: RoomStreamToken | None, to_token: RoomStreamToken, - ) -> Optional[RoomStreamToken]: + ) -> RoomStreamToken | None: """Check if there is a gap, and return a token that marks the position of the gap in the stream. """ diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py index 7410507255..05ebb57cf3 100644 --- a/synapse/storage/databases/main/task_scheduler.py +++ b/synapse/storage/databases/main/task_scheduler.py @@ -19,7 +19,7 @@ # # -from typing import TYPE_CHECKING, Any, Optional, cast +from typing import TYPE_CHECKING, Any, cast from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -63,11 +63,11 @@ class TaskSchedulerWorkerStore(SQLBaseStore): async def get_scheduled_tasks( self, *, - actions: Optional[list[str]] = None, - resource_id: Optional[str] = None, - statuses: Optional[list[TaskStatus]] = None, - max_timestamp: Optional[int] = None, - limit: Optional[int] = None, + actions: list[str] | None = None, + resource_id: str | None = None, + statuses: list[TaskStatus] | None = None, + max_timestamp: int | None = None, + limit: int | None = None, ) -> list[ScheduledTask]: """Get a list of scheduled tasks from the DB. @@ -152,9 +152,9 @@ class TaskSchedulerWorkerStore(SQLBaseStore): id: str, timestamp: int, *, - status: Optional[TaskStatus] = None, - result: Optional[JsonMapping] = None, - error: Optional[str] = None, + status: TaskStatus | None = None, + result: JsonMapping | None = None, + error: str | None = None, ) -> bool: """Update a scheduled task in the DB with some new value(s). @@ -182,7 +182,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): ) return nb_rows > 0 - async def get_scheduled_task(self, id: str) -> Optional[ScheduledTask]: + async def get_scheduled_task(self, id: str) -> ScheduledTask | None: """Get a specific `ScheduledTask` from its id. 
Args: @@ -191,7 +191,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): Returns: the task if available, `None` otherwise """ row = cast( - Optional[ScheduledTaskRow], + ScheduledTaskRow | None, await self.db_pool.simple_select_one( table="scheduled_tasks", keyvalues={"id": id}, diff --git a/synapse/storage/databases/main/thread_subscriptions.py b/synapse/storage/databases/main/thread_subscriptions.py index 1c02ab1611..e177e67ab1 100644 --- a/synapse/storage/databases/main/thread_subscriptions.py +++ b/synapse/storage/databases/main/thread_subscriptions.py @@ -15,8 +15,6 @@ from typing import ( TYPE_CHECKING, Any, Iterable, - Optional, - Union, cast, ) @@ -162,8 +160,8 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): room_id: str, thread_root_event_id: str, *, - automatic_event_orderings: Optional[EventOrderings], - ) -> Optional[Union[int, AutomaticSubscriptionConflicted]]: + automatic_event_orderings: EventOrderings | None, + ) -> int | AutomaticSubscriptionConflicted | None: """Updates a user's subscription settings for a specific thread root. If no change would be made to the subscription, does not produce any database change. @@ -205,7 +203,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): def _subscribe_user_to_thread_txn( txn: LoggingTransaction, - ) -> Optional[Union[int, AutomaticSubscriptionConflicted]]: + ) -> int | AutomaticSubscriptionConflicted | None: requested_automatic = automatic_event_orderings is not None row = self.db_pool.simple_select_one_txn( @@ -307,7 +305,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): async def unsubscribe_user_from_thread( self, user_id: str, room_id: str, thread_root_event_id: str - ) -> Optional[int]: + ) -> int | None: """Unsubscribes a user from a thread. If no change would be made to the subscription, does not produce any database change. @@ -323,7 +321,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): assert self._can_write_to_thread_subscriptions - def _unsubscribe_user_from_thread_txn(txn: LoggingTransaction) -> Optional[int]: + def _unsubscribe_user_from_thread_txn(txn: LoggingTransaction) -> int | None: already_subscribed = self.db_pool.simple_select_one_onecol_txn( txn, table="thread_subscriptions", @@ -420,7 +418,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): @cached(tree=True) async def get_subscription_for_thread( self, user_id: str, room_id: str, thread_root_event_id: str - ) -> Optional[ThreadSubscription]: + ) -> ThreadSubscription | None: """Get the thread subscription for a specific thread and user. Args: @@ -540,7 +538,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): async def get_latest_updated_thread_subscriptions_for_user( self, user_id: str, *, from_id: int, to_id: int, limit: int - ) -> list[tuple[int, str, str, bool, Optional[bool]]]: + ) -> list[tuple[int, str, str, bool, bool | None]]: """Get the latest updates to thread subscriptions for a specific user. 
Args: @@ -558,7 +556,7 @@ class ThreadSubscriptionsWorkerStore(CacheInvalidationWorkerStore): def get_updated_thread_subscriptions_for_user_txn( txn: LoggingTransaction, - ) -> list[tuple[int, str, str, bool, Optional[bool]]]: + ) -> list[tuple[int, str, str, bool, bool | None]]: sql = """ WITH the_updates AS ( SELECT stream_id, room_id, event_id, subscribed, automatic diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index e0422f7459..70c5b928fd 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -21,7 +21,7 @@ import logging from enum import Enum -from typing import TYPE_CHECKING, Iterable, Mapping, Optional, cast +from typing import TYPE_CHECKING, Iterable, Mapping, cast import attr from canonicaljson import encode_canonical_json @@ -97,7 +97,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): async def get_received_txn_response( self, transaction_id: str, origin: str - ) -> Optional[tuple[int, JsonDict]]: + ) -> tuple[int, JsonDict] | None: """For an incoming transaction from a given origin, check if we have already responded to it. If so, return the response code and response body (as a dict). @@ -120,7 +120,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): def _get_received_txn_response( self, txn: LoggingTransaction, transaction_id: str, origin: str - ) -> Optional[tuple[int, JsonDict]]: + ) -> tuple[int, JsonDict] | None: result = self.db_pool.simple_select_one_txn( txn, table="received_transactions", @@ -169,7 +169,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): async def get_destination_retry_timings( self, destination: str, - ) -> Optional[DestinationRetryTimings]: + ) -> DestinationRetryTimings | None: """Gets the current retry timings (if any) for a given destination. 
Args: @@ -190,7 +190,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): def _get_destination_retry_timings( self, txn: LoggingTransaction, destination: str - ) -> Optional[DestinationRetryTimings]: + ) -> DestinationRetryTimings | None: result = self.db_pool.simple_select_one_txn( txn, table="destinations", @@ -213,9 +213,9 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): ) async def get_destination_retry_timings_batch( self, destinations: StrCollection - ) -> Mapping[str, Optional[DestinationRetryTimings]]: + ) -> Mapping[str, DestinationRetryTimings | None]: rows = cast( - list[tuple[str, Optional[int], Optional[int], Optional[int]]], + list[tuple[str, int | None, int | None, int | None]], await self.db_pool.simple_select_many_batch( table="destinations", iterable=destinations, @@ -241,7 +241,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): async def set_destination_retry_timings( self, destination: str, - failure_ts: Optional[int], + failure_ts: int | None, retry_last_ts: int, retry_interval: int, ) -> None: @@ -269,7 +269,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): self, txn: LoggingTransaction, destination: str, - failure_ts: Optional[int], + failure_ts: int | None, retry_last_ts: int, retry_interval: int, ) -> None: @@ -337,7 +337,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): async def get_destination_last_successful_stream_ordering( self, destination: str - ) -> Optional[int]: + ) -> int | None: """ Gets the stream ordering of the PDU most-recently successfully sent to the specified destination, or None if this information has not been @@ -420,7 +420,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): return event_ids async def get_catch_up_outstanding_destinations( - self, after_destination: Optional[str] + self, after_destination: str | None ) -> list[str]: """ Get a list of destinations we should retry transaction sending to. @@ -449,7 +449,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): @staticmethod def _get_catch_up_outstanding_destinations_txn( - txn: LoggingTransaction, now_time_ms: int, after_destination: Optional[str] + txn: LoggingTransaction, now_time_ms: int, after_destination: str | None ) -> list[str]: # We're looking for destinations which satisfy either of the following # conditions: @@ -537,11 +537,11 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): self, start: int, limit: int, - destination: Optional[str] = None, + destination: str | None = None, order_by: str = DestinationSortOrder.DESTINATION.value, direction: Direction = Direction.FORWARDS, ) -> tuple[ - list[tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]]], + list[tuple[str, int | None, int | None, int | None, int | None]], int, ]: """Function to retrieve a paginated list of destinations. 
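# A minimal sketch (made-up row data) of the cast() call sites rewritten in
# this file: cast()'s first argument is an ordinary expression evaluated at
# runtime, so spelling it as list[tuple[str, int | None]] relies on Python
# 3.10+ constructing a real union object; `from __future__ import annotations`
# would not help here because it only affects annotations.
from typing import cast

rows = [("example.org", 1000), ("matrix.org", None)]
typed_rows = cast(list[tuple[str, int | None]], rows)
assert typed_rows[1][1] is None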
@@ -567,9 +567,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): def get_destinations_paginate_txn( txn: LoggingTransaction, ) -> tuple[ - list[ - tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]] - ], + list[tuple[str, int | None, int | None, int | None, int | None]], int, ]: order_by_column = DestinationSortOrder(order_by).value @@ -599,11 +597,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): """ txn.execute(sql, args + [limit, start]) destinations = cast( - list[ - tuple[ - str, Optional[int], Optional[int], Optional[int], Optional[int] - ] - ], + list[tuple[str, int | None, int | None, int | None, int | None]], txn.fetchall(), ) return destinations, count diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 69a4431f29..e523f0238a 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Optional, Union, cast +from typing import Any, cast import attr @@ -142,7 +142,7 @@ class UIAuthWorkerStore(SQLBaseStore): self, session_id: str, stage_type: str, - result: Union[str, bool, JsonDict], + result: str | bool | JsonDict, ) -> None: """ Mark a session stage as completed. @@ -170,7 +170,7 @@ class UIAuthWorkerStore(SQLBaseStore): async def get_completed_ui_auth_stages( self, session_id: str - ) -> dict[str, Union[str, bool, JsonDict]]: + ) -> dict[str, str | bool | JsonDict]: """ Retrieve the completed stages of a UI authentication session. @@ -262,7 +262,7 @@ class UIAuthWorkerStore(SQLBaseStore): ) async def get_ui_auth_session_data( - self, session_id: str, key: str, default: Optional[Any] = None + self, session_id: str, key: str, default: Any | None = None ) -> Any: """ Retrieve data stored with set_session_data diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 7a57beee71..6c5abc71ae 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -26,7 +26,6 @@ from typing import ( TYPE_CHECKING, Collection, Iterable, - Optional, Sequence, TypedDict, cast, @@ -72,8 +71,8 @@ class _UserDirProfile: user_id: str # If the display name or avatar URL are unexpected types, replace with None - display_name: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none) - avatar_url: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none) + display_name: str | None = attr.ib(default=None, converter=non_null_str_or_none) + avatar_url: str | None = attr.ib(default=None, converter=non_null_str_or_none) class UserDirectoryBackgroundUpdateStore(StateDeltasStore): @@ -206,7 +205,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): def _get_next_batch( txn: LoggingTransaction, - ) -> Optional[Sequence[tuple[str, int]]]: + ) -> Sequence[tuple[str, int]] | None: # Only fetch 250 rooms, so we don't fetch too many at once, even # if those 250 rooms have less than batch_size state events. sql = """ @@ -352,7 +351,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): def _populate_user_directory_process_users_txn( txn: LoggingTransaction, - ) -> Optional[int]: + ) -> int | None: # Note: we use an ORDER BY in the SELECT to force usage of an # index. 
Otherwise, postgres does a sequential scan that is # surprisingly slow (I think due to the fact it will read/skip @@ -397,7 +396,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): # Next fetch their profiles. Note that not all users have profiles. profile_rows = cast( - list[tuple[str, Optional[str], Optional[str]]], + list[tuple[str, str | None, str | None]], self.db_pool.simple_select_many_txn( txn, table="profiles", @@ -492,7 +491,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ] rows = cast( - list[tuple[str, Optional[str]]], + list[tuple[str, str | None]], self.db_pool.simple_select_many_txn( txn, table="users", @@ -646,7 +645,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ) async def update_profile_in_user_dir( - self, user_id: str, display_name: Optional[str], avatar_url: Optional[str] + self, user_id: str, display_name: str | None, avatar_url: str | None ) -> None: """ Update or add a user's profile in the user directory. @@ -812,7 +811,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): async def _get_user_in_directory( self, user_id: str - ) -> Optional[tuple[Optional[str], Optional[str]]]: + ) -> tuple[str | None, str | None] | None: """ Fetch the user information in the user directory. @@ -821,7 +820,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): avatar URL (both of which may be None). """ return cast( - Optional[tuple[Optional[str], Optional[str]]], + tuple[str | None, str | None] | None, await self.db_pool.simple_select_one( table="user_directory", keyvalues={"user_id": user_id}, @@ -831,7 +830,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ), ) - async def update_user_directory_stream_pos(self, stream_id: Optional[int]) -> None: + async def update_user_directory_stream_pos(self, stream_id: int | None) -> None: await self.db_pool.simple_update_one( table="user_directory_stream_pos", keyvalues={}, @@ -971,7 +970,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): users.update(rows) return list(users) - async def get_user_directory_stream_pos(self) -> Optional[int]: + async def get_user_directory_stream_pos(self) -> int | None: """ Get the stream ID of the user directory stream. @@ -1144,7 +1143,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): raise Exception("Unrecognized database engine") results = cast( - list[tuple[str, Optional[str], Optional[str]]], + list[tuple[str, str | None, str | None]], await self.db_pool.execute("search_user_dir", sql, *args), ) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index a0d8667b07..8c505041f0 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -23,8 +23,6 @@ import logging from typing import ( TYPE_CHECKING, Mapping, - Optional, - Union, ) from synapse.logging.opentracing import tag_args, trace @@ -82,7 +80,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): else: # We don't use WITH RECURSIVE on sqlite3 as there are distributions # that ship with an sqlite3 version that doesn't support it (e.g. 
wheezy) - next_group: Optional[int] = state_group + next_group: int | None = state_group count = 0 while next_group: @@ -104,7 +102,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): self, txn: LoggingTransaction, groups: list[int], - state_filter: Optional[StateFilter] = None, + state_filter: StateFilter | None = None, ) -> Mapping[int, StateMap[str]]: """ Given a number of state groups, fetch the latest state for each group. @@ -144,7 +142,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): %s """ - overall_select_query_args: list[Union[int, str]] = [] + overall_select_query_args: list[int | str] = [] # This is an optimization to create a select clause per-condition. This # makes the query planner a lot smarter on what rows should pull out in the @@ -153,7 +151,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): use_condition_optimization = ( not state_filter.include_others and not state_filter.is_full() ) - state_filter_condition_combos: list[tuple[str, Optional[str]]] = [] + state_filter_condition_combos: list[tuple[str, str | None]] = [] # We don't need to caclculate this list if we're not using the condition # optimization if use_condition_optimization: @@ -213,7 +211,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): """ for group in groups: - args: list[Union[int, str]] = [group] + args: list[int | str] = [group] args.extend(overall_select_query_args) txn.execute(sql % (overall_select_clause,), args) @@ -235,7 +233,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): # # We just haven't put in the time to refactor this. for group in groups: - next_group: Optional[int] = group + next_group: int | None = group while next_group: # We did this before by getting the list of group ids, and diff --git a/synapse/storage/databases/state/deletion.py b/synapse/storage/databases/state/deletion.py index 6975690c51..23150e8626 100644 --- a/synapse/storage/databases/state/deletion.py +++ b/synapse/storage/databases/state/deletion.py @@ -20,7 +20,6 @@ from typing import ( AsyncIterator, Collection, Mapping, - Optional, ) from synapse.events.snapshot import EventPersistencePair @@ -506,7 +505,7 @@ class StateDeletionDataStore: async def get_next_state_group_collection_to_delete( self, - ) -> Optional[tuple[str, Mapping[int, int]]]: + ) -> tuple[str, Mapping[int, int]] | None: """Get the next set of state groups to try and delete Returns: @@ -520,7 +519,7 @@ class StateDeletionDataStore: def _get_next_state_group_collection_to_delete_txn( self, txn: LoggingTransaction, - ) -> Optional[tuple[str, Mapping[int, int]]]: + ) -> tuple[str, Mapping[int, int]] | None: """Implementation of `get_next_state_group_collection_to_delete`""" # We want to return chunks of state groups that were marked for deletion diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 6f25e7f0bc..d3ce7a8b55 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -24,7 +24,6 @@ from typing import ( TYPE_CHECKING, Iterable, Mapping, - Optional, cast, ) @@ -69,8 +68,8 @@ class _GetStateGroupDelta: us use the iterable flag when caching """ - prev_group: Optional[int] - delta_ids: Optional[StateMap[str]] + prev_group: int | None + delta_ids: StateMap[str] | None def __len__(self) -> int: return len(self.delta_ids) if self.delta_ids else 0 @@ -279,7 +278,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): @tag_args @cancellable async def _get_state_for_groups( - self, groups: Iterable[int], 
state_filter: Optional[StateFilter] = None + self, groups: Iterable[int], state_filter: StateFilter | None = None ) -> dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -571,9 +570,9 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): self, event_id: str, room_id: str, - prev_group: Optional[int], - delta_ids: Optional[StateMap[str]], - current_state_ids: Optional[StateMap[str]], + prev_group: int | None, + delta_ids: StateMap[str] | None, + current_state_ids: StateMap[str] | None, ) -> int: """Store a new set of state, returning a newly assigned state group. @@ -602,7 +601,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): def insert_delta_group_txn( txn: LoggingTransaction, prev_group: int, delta_ids: StateMap[str] - ) -> Optional[int]: + ) -> int | None: """Try and persist the new group as a delta. Requires that we have the state as a delta from a previous state group. diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index be6981f77c..026b742aad 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -20,7 +20,7 @@ # import abc from enum import IntEnum -from typing import TYPE_CHECKING, Any, Generic, Mapping, Optional, TypeVar +from typing import TYPE_CHECKING, Any, Generic, Mapping, TypeVar from synapse.storage.types import Connection, Cursor, DBAPI2Module @@ -123,7 +123,7 @@ class BaseDatabaseEngine(Generic[ConnectionType, CursorType], metaclass=abc.ABCM @abc.abstractmethod def attempt_to_set_isolation_level( - self, conn: ConnectionType, isolation_level: Optional[int] + self, conn: ConnectionType, isolation_level: int | None ) -> None: """Attempt to set the connections isolation level. diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index b059b924c2..cc7e5508fd 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, cast +from typing import TYPE_CHECKING, Any, Mapping, NoReturn, cast import psycopg2.extensions @@ -60,10 +60,10 @@ class PostgresEngine( # some degenerate query plan has been created and the client has probably # timed out/walked off anyway. # This is in milliseconds. 
- self.statement_timeout: Optional[int] = database_config.get( + self.statement_timeout: int | None = database_config.get( "statement_timeout", 60 * 60 * 1000 ) - self._version: Optional[int] = None # unknown as yet + self._version: int | None = None # unknown as yet self.isolation_level_map: Mapping[int, int] = { IsolationLevel.READ_COMMITTED: psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED, @@ -234,7 +234,7 @@ class PostgresEngine( return conn.set_session(autocommit=autocommit) def attempt_to_set_isolation_level( - self, conn: psycopg2.extensions.connection, isolation_level: Optional[int] + self, conn: psycopg2.extensions.connection, isolation_level: int | None ) -> None: if isolation_level is None: isolation_level = self.default_isolation_level diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index b49d230eed..3b1b19c00e 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -22,7 +22,7 @@ import platform import sqlite3 import struct import threading -from typing import TYPE_CHECKING, Any, Mapping, Optional +from typing import TYPE_CHECKING, Any, Mapping from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines._base import AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER @@ -45,7 +45,7 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): # A connection to a database that has already been prepared, to use as a # base for an in-memory connection. This is used during unit tests to # speed up setting up the DB. - self._prepped_conn: Optional[sqlite3.Connection] = database_config.get( + self._prepped_conn: sqlite3.Connection | None = database_config.get( "_TEST_PREPPED_CONN" ) @@ -141,7 +141,7 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): pass def attempt_to_set_isolation_level( - self, conn: sqlite3.Connection, isolation_level: Optional[int] + self, conn: sqlite3.Connection, isolation_level: int | None ) -> None: # All transactions are SERIALIZABLE by default in sqlite pass diff --git a/synapse/storage/invite_rule.py b/synapse/storage/invite_rule.py index f63390871e..3de77e8c21 100644 --- a/synapse/storage/invite_rule.py +++ b/synapse/storage/invite_rule.py @@ -1,6 +1,6 @@ import logging from enum import Enum -from typing import Optional, Pattern +from typing import Pattern from matrix_common.regex import glob_to_regex @@ -20,7 +20,7 @@ class InviteRule(Enum): class InviteRulesConfig: """Class to determine if a given user permits an invite from another user, and the action to take.""" - def __init__(self, account_data: Optional[JsonMapping]): + def __init__(self, account_data: JsonMapping | None): self.allowed_users: list[Pattern[str]] = [] self.ignored_users: list[Pattern[str]] = [] self.blocked_users: list[Pattern[str]] = [] @@ -30,7 +30,7 @@ class InviteRulesConfig: self.blocked_servers: list[Pattern[str]] = [] def process_field( - values: Optional[list[str]], + values: list[str] | None, ruleset: list[Pattern[str]], rule: InviteRule, ) -> None: diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index d4bd8020e1..2def1e130c 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -28,7 +28,6 @@ from typing import ( Counter as CounterType, Generator, Iterable, - Optional, TextIO, ) @@ -75,7 +74,7 @@ class _SchemaState: current_version: int = attr.ib() """The current schema version of the database""" - compat_version: Optional[int] = attr.ib() + compat_version: int | None 
= attr.ib() """The SCHEMA_VERSION of the oldest version of Synapse for this database If this is None, we have an old version of the database without the necessary @@ -95,7 +94,7 @@ class _SchemaState: def prepare_database( db_conn: LoggingDatabaseConnection, database_engine: BaseDatabaseEngine, - config: Optional[HomeServerConfig], + config: HomeServerConfig | None, databases: Collection[str] = ("main", "state"), ) -> None: """Prepares a physical database for usage. Will either create all necessary tables @@ -307,7 +306,7 @@ def _upgrade_existing_database( cur: LoggingTransaction, current_schema_state: _SchemaState, database_engine: BaseDatabaseEngine, - config: Optional[HomeServerConfig], + config: HomeServerConfig | None, databases: Collection[str], is_empty: bool = False, ) -> None: @@ -683,7 +682,7 @@ def execute_statements_from_stream(cur: Cursor, f: TextIO) -> None: def _get_or_create_schema_state( txn: Cursor, database_engine: BaseDatabaseEngine -) -> Optional[_SchemaState]: +) -> _SchemaState | None: # Bluntly try creating the schema_version tables. sql_path = os.path.join(schema_path, "common", "schema_version.sql") database_engine.execute_script_file(txn, sql_path) @@ -698,7 +697,7 @@ def _get_or_create_schema_state( current_version = int(row[0]) upgraded = bool(row[1]) - compat_version: Optional[int] = None + compat_version: int | None = None txn.execute("SELECT compat_version FROM schema_compat_version") row = txn.fetchone() if row is not None: diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 35da5351f8..4c1ace28e7 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -20,7 +20,6 @@ # import logging -from typing import Optional import attr @@ -42,14 +41,14 @@ class RoomsForUser: @attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) class RoomsForUserSlidingSync: room_id: str - sender: Optional[str] + sender: str | None membership: str - event_id: Optional[str] + event_id: str | None event_pos: PersistedEventPosition room_version_id: str has_known_state: bool - room_type: Optional[str] + room_type: str | None is_encrypted: bool @@ -60,9 +59,9 @@ class RoomsForUserStateReset: without a corresponding event so that information isn't always available.""" room_id: str - sender: Optional[str] + sender: str | None membership: str - event_id: Optional[str] + event_id: str | None event_pos: PersistedEventPosition room_version_id: str @@ -75,8 +74,8 @@ class GetRoomsForUserWithStreamOrdering: @attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) class ProfileInfo: - avatar_url: Optional[str] - display_name: Optional[str] + avatar_url: str | None + display_name: str | None # TODO This is used as a cached value and is mutable. diff --git a/synapse/storage/types.py b/synapse/storage/types.py index fedf10dfc0..ad9e5391e3 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -24,17 +24,15 @@ from typing import ( Callable, Iterator, Mapping, - Optional, Protocol, Sequence, - Union, ) """ Some very basic protocol definitions for the DB-API2 classes specified in PEP-249 """ -SQLQueryParameters = Union[Sequence[Any], Mapping[str, Any]] +SQLQueryParameters = Sequence[Any] | Mapping[str, Any] class Cursor(Protocol): @@ -44,16 +42,16 @@ class Cursor(Protocol): self, sql: str, parameters: Sequence[SQLQueryParameters] ) -> Any: ... - def fetchone(self) -> Optional[tuple]: ... + def fetchone(self) -> tuple | None: ... - def fetchmany(self, size: Optional[int] = ...) -> list[tuple]: ... 
+ def fetchmany(self, size: int | None = ...) -> list[tuple]: ... def fetchall(self) -> list[tuple]: ... @property def description( self, - ) -> Optional[Sequence[Any]]: + ) -> Sequence[Any] | None: # At the time of writing, Synapse only assumes that `column[0]: str` for each # `column in description`. Since this is hard to express in the type system, and # as this is rarely used in Synapse, we deem `column: Any` good enough. @@ -81,10 +79,10 @@ class Connection(Protocol): def __exit__( self, - exc_type: Optional[type[BaseException]], - exc_value: Optional[BaseException], - traceback: Optional[TracebackType], - ) -> Optional[bool]: ... + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> bool | None: ... class DBAPI2Module(Protocol): diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 5bf5c2b4bf..66c993cbd9 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -30,10 +30,8 @@ from typing import ( ContextManager, Generic, Iterable, - Optional, Sequence, TypeVar, - Union, cast, ) @@ -619,7 +617,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): self._unfinished_ids.difference_update(next_ids) self._finished_ids.update(next_ids) - new_cur: Optional[int] = None + new_cur: int | None = None if self._unfinished_ids or self._in_flight_fetches: # If there are unfinished IDs then the new position will be the @@ -844,10 +842,10 @@ class _AsyncCtxManagerWrapper(Generic[T]): async def __aexit__( self, - exc_type: Optional[type[BaseException]], - exc: Optional[BaseException], - tb: Optional[TracebackType], - ) -> Optional[bool]: + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: TracebackType | None, + ) -> bool | None: return self.inner.__exit__(exc_type, exc, tb) @@ -857,10 +855,10 @@ class _MultiWriterCtxManager: id_gen: MultiWriterIdGenerator notifier: "ReplicationNotifier" - multiple_ids: Optional[int] = None + multiple_ids: int | None = None stream_ids: list[int] = attr.Factory(list) - async def __aenter__(self) -> Union[int, list[int]]: + async def __aenter__(self) -> int | list[int]: # It's safe to run this in autocommit mode as fetching values from a # sequence ignores transaction semantics anyway. 
self.stream_ids = await self.id_gen._db.runInteraction( @@ -877,9 +875,9 @@ class _MultiWriterCtxManager: async def __aexit__( self, - exc_type: Optional[type[BaseException]], - exc: Optional[BaseException], - tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: TracebackType | None, ) -> bool: self.id_gen._mark_ids_as_finished(self.stream_ids) diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index e2256aa109..5bee3cf34f 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -21,7 +21,7 @@ import abc import logging import threading -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Callable from synapse.storage.engines import ( BaseDatabaseEngine, @@ -71,7 +71,7 @@ class SequenceGenerator(metaclass=abc.ABCMeta): db_conn: "LoggingDatabaseConnection", table: str, id_column: str, - stream_name: Optional[str] = None, + stream_name: str | None = None, positive: bool = True, ) -> None: """Should be called during start up to test that the current value of @@ -116,7 +116,7 @@ class PostgresSequenceGenerator(SequenceGenerator): db_conn: "LoggingDatabaseConnection", table: str, id_column: str, - stream_name: Optional[str] = None, + stream_name: str | None = None, positive: bool = True, ) -> None: """See SequenceGenerator.check_consistency for docstring.""" @@ -223,10 +223,10 @@ class LocalSequenceGenerator(SequenceGenerator): get_next_id_txn; should return the current maximum id """ # the callback. this is cleared after it is called, so that it can be GCed. - self._callback: Optional[GetFirstCallbackType] = get_first_callback + self._callback: GetFirstCallbackType | None = get_first_callback # The current max value, or None if we haven't looked in the DB yet. 
- self._current_max_id: Optional[int] = None + self._current_max_id: int | None = None self._lock = threading.Lock() def get_next_id_txn(self, txn: Cursor) -> int: @@ -257,7 +257,7 @@ class LocalSequenceGenerator(SequenceGenerator): db_conn: Connection, table: str, id_column: str, - stream_name: Optional[str] = None, + stream_name: str | None = None, positive: bool = True, ) -> None: # There is nothing to do for in memory sequences @@ -278,9 +278,9 @@ def build_sequence_generator( database_engine: BaseDatabaseEngine, get_first_callback: GetFirstCallbackType, sequence_name: str, - table: Optional[str], - id_column: Optional[str], - stream_name: Optional[str] = None, + table: str | None, + id_column: str | None, + stream_name: str | None = None, positive: bool = True, ) -> SequenceGenerator: """Get the best impl of SequenceGenerator available diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py index faf453b8a1..0d386e538e 100644 --- a/synapse/streams/__init__.py +++ b/synapse/streams/__init__.py @@ -19,7 +19,7 @@ # # from abc import ABC, abstractmethod -from typing import Generic, Optional, TypeVar +from typing import Generic, TypeVar from synapse.types import StrCollection, UserID @@ -38,6 +38,6 @@ class EventSource(ABC, Generic[K, R]): limit: int, room_ids: StrCollection, is_guest: bool, - explicit_room_id: Optional[str] = None, + explicit_room_id: str | None = None, ) -> tuple[list[R], K]: raise NotImplementedError() diff --git a/synapse/streams/config.py b/synapse/streams/config.py index 9fee5bfb92..52688a8b6b 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -19,7 +19,6 @@ # # import logging -from typing import Optional import attr @@ -40,8 +39,8 @@ MAX_LIMIT = 1000 class PaginationConfig: """A configuration object which stores pagination parameters.""" - from_token: Optional[StreamToken] - to_token: Optional[StreamToken] + from_token: StreamToken | None + to_token: StreamToken | None direction: Direction limit: int diff --git a/synapse/synapse_rust/events.pyi b/synapse/synapse_rust/events.pyi index 08c976121a..0add391c65 100644 --- a/synapse/synapse_rust/events.pyi +++ b/synapse/synapse_rust/events.pyi @@ -10,16 +10,16 @@ # See the GNU Affero General Public License for more details: # . -from typing import Mapping, Optional +from typing import Mapping from synapse.types import JsonDict class EventInternalMetadata: def __init__(self, internal_metadata_dict: JsonDict): ... - stream_ordering: Optional[int] + stream_ordering: int | None """the stream ordering of this event. None, until it has been persisted.""" - instance_name: Optional[str] + instance_name: str | None """the instance name of the server that persisted this event. None, until it has been persisted.""" outlier: bool @@ -62,7 +62,7 @@ class EventInternalMetadata: (Added in synapse 0.99.0, so may be unreliable for events received before that) """ - def get_send_on_behalf_of(self) -> Optional[str]: + def get_send_on_behalf_of(self) -> str | None: """Whether this server should send the event on behalf of another server. This is used by the federation "send_join" API to forward the initial join event for a server in the room. diff --git a/synapse/synapse_rust/push.pyi b/synapse/synapse_rust/push.pyi index 1e135b8c69..9d8f0389e8 100644 --- a/synapse/synapse_rust/push.pyi +++ b/synapse/synapse_rust/push.pyi @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Collection, Mapping, Optional, Sequence, Union +from typing import Any, Collection, Mapping, Sequence from synapse.types import JsonDict, JsonValue @@ -25,7 +25,7 @@ class PushRule: @property def conditions(self) -> Sequence[Mapping[str, str]]: ... @property - def actions(self) -> Sequence[Union[Mapping[str, Any], str]]: ... + def actions(self) -> Sequence[Mapping[str, Any] | str]: ... @property def default(self) -> bool: ... @property @@ -61,7 +61,7 @@ class PushRuleEvaluator: flattened_keys: Mapping[str, JsonValue], has_mentions: bool, room_member_count: int, - sender_power_level: Optional[int], + sender_power_level: int | None, notification_power_levels: Mapping[str, int], related_events_flattened: Mapping[str, Mapping[str, JsonValue]], related_event_match_enabled: bool, @@ -73,14 +73,14 @@ class PushRuleEvaluator: def run( self, push_rules: FilteredPushRules, - user_id: Optional[str], - display_name: Optional[str], - msc4306_thread_subscription_state: Optional[bool], - ) -> Collection[Union[Mapping, str]]: ... + user_id: str | None, + display_name: str | None, + msc4306_thread_subscription_state: bool | None, + ) -> Collection[Mapping | str]: ... def matches( self, condition: JsonDict, - user_id: Optional[str], - display_name: Optional[str], - msc4306_thread_subscription_state: Optional[bool] = None, + user_id: str | None, + display_name: str | None, + msc4306_thread_subscription_state: bool | None = None, ) -> bool: ... diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 87436459ac..16892b37c0 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -85,8 +85,8 @@ MutableStateMap = MutableMapping[StateKey, T] # JSON types. These could be made stronger, but will do for now. # A "simple" (canonical) JSON value. -SimpleJsonValue = Optional[Union[str, int, bool]] -JsonValue = Union[list[SimpleJsonValue], tuple[SimpleJsonValue, ...], SimpleJsonValue] +SimpleJsonValue = str | int | bool | None +JsonValue = list[SimpleJsonValue] | tuple[SimpleJsonValue, ...] | SimpleJsonValue # A JSON-serialisable dict. JsonDict = dict[str, Any] # A JSON-serialisable mapping; roughly speaking an immutable JSONDict. @@ -101,12 +101,12 @@ JsonSerializable = object # # StrCollection is an unordered collection of strings. If ordering is important, # StrSequence can be used instead. -StrCollection = Union[tuple[str, ...], list[str], AbstractSet[str]] +StrCollection = tuple[str, ...] | list[str] | AbstractSet[str] # Sequence[str] that does not include str itself; str being a Sequence[str] # is very misleading and results in bugs. # # Unlike StrCollection, StrSequence is an ordered collection of strings. -StrSequence = Union[tuple[str, ...], list[str]] +StrSequence = tuple[str, ...] 
| list[str] # Note that this seems to require inheriting *directly* from Interface in order @@ -158,11 +158,11 @@ class Requester: """ user: "UserID" - access_token_id: Optional[int] + access_token_id: int | None is_guest: bool scope: set[str] shadow_banned: bool - device_id: Optional[str] + device_id: str | None app_service: Optional["ApplicationService"] authenticated_entity: str @@ -216,13 +216,13 @@ class Requester: def create_requester( user_id: Union[str, "UserID"], - access_token_id: Optional[int] = None, + access_token_id: int | None = None, is_guest: bool = False, scope: StrCollection = (), shadow_banned: bool = False, - device_id: Optional[str] = None, + device_id: str | None = None, app_service: Optional["ApplicationService"] = None, - authenticated_entity: Optional[str] = None, + authenticated_entity: str | None = None, ) -> Requester: """ Create a new ``Requester`` object @@ -385,7 +385,7 @@ class RoomID: SIGIL = "!" id: str - room_id_with_domain: Optional[RoomIdWithDomain] + room_id_with_domain: RoomIdWithDomain | None @classmethod def is_valid(cls: type["RoomID"], s: str) -> bool: @@ -397,7 +397,7 @@ class RoomID: except Exception: return False - def get_domain(self) -> Optional[str]: + def get_domain(self) -> str | None: if not self.room_id_with_domain: return None return self.room_id_with_domain.domain @@ -419,7 +419,7 @@ class RoomID: Codes.INVALID_PARAM, ) - room_id_with_domain: Optional[RoomIdWithDomain] = None + room_id_with_domain: RoomIdWithDomain | None = None if ":" in s: room_id_with_domain = RoomIdWithDomain.from_string(s) else: @@ -487,7 +487,7 @@ NON_MXID_CHARACTER_PATTERN = re.compile( def map_username_to_mxid_localpart( - username: Union[str, bytes], case_sensitive: bool = False + username: str | bytes, case_sensitive: bool = False ) -> str: """Map a username onto a string suitable for a MXID @@ -744,7 +744,7 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): attributes, must be hashable. """ - topological: Optional[int] = attr.ib( + topological: int | None = attr.ib( validator=attr.validators.optional(attr.validators.instance_of(int)), kw_only=True, default=None, @@ -954,7 +954,7 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): def is_stream_position_in_range( low: Optional["AbstractMultiWriterStreamToken"], high: Optional["AbstractMultiWriterStreamToken"], - instance_name: Optional[str], + instance_name: str | None, pos: int, ) -> bool: """Checks if a given persisted position is between the two given tokens. @@ -1224,11 +1224,11 @@ class StreamToken: @overload def get_field( self, key: StreamKeyType - ) -> Union[int, RoomStreamToken, MultiWriterStreamToken]: ... + ) -> int | RoomStreamToken | MultiWriterStreamToken: ... 
def get_field( self, key: StreamKeyType - ) -> Union[int, RoomStreamToken, MultiWriterStreamToken]: + ) -> int | RoomStreamToken | MultiWriterStreamToken: """Returns the stream ID for the given key.""" return getattr(self, key.value) @@ -1394,8 +1394,8 @@ class PersistedEventPosition(PersistedPosition): @attr.s(slots=True, frozen=True, auto_attribs=True) class ThirdPartyInstanceID: - appservice_id: Optional[str] - network_id: Optional[str] + appservice_id: str | None + network_id: str | None # Deny iteration because it will bite you if you try to create a singleton # set by: @@ -1432,7 +1432,7 @@ class ReadReceipt: receipt_type: str user_id: str event_ids: list[str] - thread_id: Optional[str] + thread_id: str | None data: JsonDict @@ -1507,11 +1507,11 @@ class UserInfo: """ user_id: UserID - appservice_id: Optional[int] - consent_server_notice_sent: Optional[str] - consent_version: Optional[str] - consent_ts: Optional[int] - user_type: Optional[str] + appservice_id: int | None + consent_server_notice_sent: str | None + consent_version: str | None + consent_ts: int | None + user_type: str | None creation_ts: int is_admin: bool is_deactivated: bool @@ -1524,14 +1524,14 @@ class UserInfo: class UserProfile(TypedDict): user_id: str - display_name: Optional[str] - avatar_url: Optional[str] + display_name: str | None + avatar_url: str | None @attr.s(auto_attribs=True, frozen=True, slots=True) class RetentionPolicy: - min_lifetime: Optional[int] = None - max_lifetime: Optional[int] = None + min_lifetime: int | None = None + max_lifetime: int | None = None class TaskStatus(str, Enum): @@ -1563,13 +1563,13 @@ class ScheduledTask: # In milliseconds since epoch in system time timezone, usually UTC. timestamp: int # Optionally bind a task to some resource id for easy retrieval - resource_id: Optional[str] + resource_id: str | None # Optional parameters that will be passed to the function ran by the task - params: Optional[JsonMapping] + params: JsonMapping | None # Optional result that can be updated by the running task - result: Optional[JsonMapping] + result: JsonMapping | None # Optional error that should be assigned a value when the status is FAILED - error: Optional[str] + error: str | None @attr.s(auto_attribs=True, frozen=True, slots=True) diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 80651bb685..b9d1b41a75 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -19,7 +19,7 @@ # -from typing import Optional, TypedDict +from typing import TypedDict from synapse.api.constants import EventTypes @@ -66,10 +66,10 @@ class ShutdownRoomParams(TypedDict): even if there are still users joined to the room. 
""" - requester_user_id: Optional[str] - new_room_user_id: Optional[str] - new_room_name: Optional[str] - message: Optional[str] + requester_user_id: str | None + new_room_user_id: str | None + new_room_name: str | None + message: str | None block: bool purge: bool force_purge: bool @@ -90,4 +90,4 @@ class ShutdownRoomResponse(TypedDict): kicked_users: list[str] failed_to_kick_users: list[str] local_aliases: list[str] - new_room_id: Optional[str] + new_room_id: str | None diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index c83b534e00..494e3570d0 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -25,7 +25,6 @@ from typing import ( Generic, Mapping, MutableMapping, - Optional, Sequence, TypeVar, cast, @@ -166,12 +165,12 @@ class SlidingSyncResult: @attr.s(slots=True, frozen=True, auto_attribs=True) class StrippedHero: user_id: str - display_name: Optional[str] - avatar_url: Optional[str] + display_name: str | None + avatar_url: str | None - name: Optional[str] - avatar: Optional[str] - heroes: Optional[list[StrippedHero]] + name: str | None + avatar: str | None + heroes: list[StrippedHero] | None is_dm: bool initial: bool unstable_expanded_timeline: bool @@ -179,18 +178,18 @@ class SlidingSyncResult: required_state: list[EventBase] # Should be empty for invite/knock rooms with `stripped_state` timeline_events: list[EventBase] - bundled_aggregations: Optional[dict[str, "BundledAggregations"]] + bundled_aggregations: dict[str, "BundledAggregations"] | None # Optional because it's only relevant to invite/knock rooms stripped_state: list[JsonDict] # Only optional because it won't be included for invite/knock rooms with `stripped_state` - prev_batch: Optional[StreamToken] + prev_batch: StreamToken | None # Only optional because it won't be included for invite/knock rooms with `stripped_state` - limited: Optional[bool] + limited: bool | None # Only optional because it won't be included for invite/knock rooms with `stripped_state` - num_live: Optional[int] - bump_stamp: Optional[int] - joined_count: Optional[int] - invited_count: Optional[int] + num_live: int | None + bump_stamp: int | None + joined_count: int | None + invited_count: int | None notification_count: int highlight_count: int @@ -281,7 +280,7 @@ class SlidingSyncResult: """ # Only present on incremental syncs - device_list_updates: Optional[DeviceListUpdates] + device_list_updates: DeviceListUpdates | None device_one_time_keys_count: Mapping[str, int] device_unused_fallback_key_types: Sequence[str] @@ -364,7 +363,7 @@ class SlidingSyncResult: @attr.s(slots=True, frozen=True, auto_attribs=True) class ThreadSubscription: # always present when `subscribed` - automatic: Optional[bool] + automatic: bool | None # the same as our stream_id; useful for clients to resolve # race conditions locally @@ -377,10 +376,10 @@ class SlidingSyncResult: bump_stamp: int # room_id -> event_id (of thread root) -> the subscription change - subscribed: Optional[Mapping[str, Mapping[str, ThreadSubscription]]] + subscribed: Mapping[str, Mapping[str, ThreadSubscription]] | None # room_id -> event_id (of thread root) -> the unsubscription - unsubscribed: Optional[Mapping[str, Mapping[str, ThreadUnsubscription]]] - prev_batch: Optional[ThreadSubscriptionsToken] + unsubscribed: Mapping[str, Mapping[str, ThreadUnsubscription]] | None + prev_batch: ThreadSubscriptionsToken | None def __bool__(self) -> bool: return ( @@ -389,12 +388,12 @@ class SlidingSyncResult: or 
bool(self.prev_batch) ) - to_device: Optional[ToDeviceExtension] = None - e2ee: Optional[E2eeExtension] = None - account_data: Optional[AccountDataExtension] = None - receipts: Optional[ReceiptsExtension] = None - typing: Optional[TypingExtension] = None - thread_subscriptions: Optional[ThreadSubscriptionsExtension] = None + to_device: ToDeviceExtension | None = None + e2ee: E2eeExtension | None = None + account_data: AccountDataExtension | None = None + receipts: ReceiptsExtension | None = None + typing: TypingExtension | None = None + thread_subscriptions: ThreadSubscriptionsExtension | None = None def __bool__(self) -> bool: return bool( @@ -730,7 +729,7 @@ class HaveSentRoom(Generic[T]): """ status: HaveSentRoomFlag - last_token: Optional[T] + last_token: T | None @staticmethod def live() -> "HaveSentRoom[T]": diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 865c2ba532..49782b5234 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -18,8 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional, Union - from pydantic import ( ConfigDict, Field, @@ -49,8 +47,8 @@ class AuthenticationData(RequestBodyModel): model_config = ConfigDict(extra="allow") - session: Optional[StrictStr] = None - type: Optional[StrictStr] = None + session: StrictStr | None = None + type: StrictStr | None = None # See also assert_valid_client_secret() @@ -67,9 +65,9 @@ ClientSecretStr = Annotated[ class ThreepidRequestTokenBody(RequestBodyModel): client_secret: ClientSecretStr - id_server: Optional[StrictStr] = None - id_access_token: Optional[StrictStr] = None - next_link: Optional[StrictStr] = None + id_server: StrictStr | None = None + id_access_token: StrictStr | None = None + next_link: StrictStr | None = None send_attempt: StrictInt @model_validator(mode="after") @@ -246,17 +244,17 @@ class SlidingSyncBody(RequestBodyModel): list of favourite rooms again. 
""" - is_dm: Optional[StrictBool] = None - spaces: Optional[list[StrictStr]] = None - is_encrypted: Optional[StrictBool] = None - is_invite: Optional[StrictBool] = None - room_types: Optional[list[Union[StrictStr, None]]] = None - not_room_types: Optional[list[Union[StrictStr, None]]] = None - room_name_like: Optional[StrictStr] = None - tags: Optional[list[StrictStr]] = None - not_tags: Optional[list[StrictStr]] = None + is_dm: StrictBool | None = None + spaces: list[StrictStr] | None = None + is_encrypted: StrictBool | None = None + is_invite: StrictBool | None = None + room_types: list[StrictStr | None] | None = None + not_room_types: list[StrictStr | None] | None = None + room_name_like: StrictStr | None = None + tags: list[StrictStr] | None = None + not_tags: list[StrictStr] | None = None - ranges: Optional[ + ranges: ( list[ Annotated[ tuple[ @@ -266,9 +264,10 @@ class SlidingSyncBody(RequestBodyModel): Field(strict=False), ] ] - ] = None - slow_get_all_rooms: Optional[StrictBool] = False - filters: Optional[Filters] = None + | None + ) = None + slow_get_all_rooms: StrictBool | None = False + filters: Filters | None = None class RoomSubscription(CommonRoomParameters): pass @@ -291,15 +290,13 @@ class SlidingSyncBody(RequestBodyModel): since: The `next_batch` from the previous sync response """ - enabled: Optional[StrictBool] = False + enabled: StrictBool | None = False limit: StrictInt = 100 - since: Optional[StrictStr] = None + since: StrictStr | None = None @field_validator("since") @classmethod - def since_token_check( - cls, value: Optional[StrictStr] - ) -> Optional[StrictStr]: + def since_token_check(cls, value: StrictStr | None) -> StrictStr | None: # `since` comes in as an opaque string token but we know that it's just # an integer representing the position in the device inbox stream. We # want to pre-validate it to make sure it works fine in downstream code. @@ -322,7 +319,7 @@ class SlidingSyncBody(RequestBodyModel): enabled """ - enabled: Optional[StrictBool] = False + enabled: StrictBool | None = False class AccountDataExtension(RequestBodyModel): """The Account Data extension (MSC3959) @@ -335,11 +332,11 @@ class SlidingSyncBody(RequestBodyModel): extension to. """ - enabled: Optional[StrictBool] = False + enabled: StrictBool | None = False # Process all lists defined in the Sliding Window API. (This is the default.) - lists: Optional[list[StrictStr]] = ["*"] + lists: list[StrictStr] | None = ["*"] # Process all room subscriptions defined in the Room Subscription API. (This is the default.) - rooms: Optional[list[StrictStr]] = ["*"] + rooms: list[StrictStr] | None = ["*"] class ReceiptsExtension(RequestBodyModel): """The Receipts extension (MSC3960) @@ -352,11 +349,11 @@ class SlidingSyncBody(RequestBodyModel): extension to. """ - enabled: Optional[StrictBool] = False + enabled: StrictBool | None = False # Process all lists defined in the Sliding Window API. (This is the default.) - lists: Optional[list[StrictStr]] = ["*"] + lists: list[StrictStr] | None = ["*"] # Process all room subscriptions defined in the Room Subscription API. (This is the default.) - rooms: Optional[list[StrictStr]] = ["*"] + rooms: list[StrictStr] | None = ["*"] class TypingExtension(RequestBodyModel): """The Typing Notification extension (MSC3961) @@ -369,11 +366,11 @@ class SlidingSyncBody(RequestBodyModel): extension to. """ - enabled: Optional[StrictBool] = False + enabled: StrictBool | None = False # Process all lists defined in the Sliding Window API. (This is the default.) 
- lists: Optional[list[StrictStr]] = ["*"] + lists: list[StrictStr] | None = ["*"] # Process all room subscriptions defined in the Room Subscription API. (This is the default.) - rooms: Optional[list[StrictStr]] = ["*"] + rooms: list[StrictStr] | None = ["*"] class ThreadSubscriptionsExtension(RequestBodyModel): """The Thread Subscriptions extension (MSC4308) @@ -383,33 +380,34 @@ class SlidingSyncBody(RequestBodyModel): limit: maximum number of subscription changes to return (default 100) """ - enabled: Optional[StrictBool] = False + enabled: StrictBool | None = False limit: StrictInt = 100 - to_device: Optional[ToDeviceExtension] = None - e2ee: Optional[E2eeExtension] = None - account_data: Optional[AccountDataExtension] = None - receipts: Optional[ReceiptsExtension] = None - typing: Optional[TypingExtension] = None - thread_subscriptions: Optional[ThreadSubscriptionsExtension] = Field( + to_device: ToDeviceExtension | None = None + e2ee: E2eeExtension | None = None + account_data: AccountDataExtension | None = None + receipts: ReceiptsExtension | None = None + typing: TypingExtension | None = None + thread_subscriptions: ThreadSubscriptionsExtension | None = Field( None, alias="io.element.msc4308.thread_subscriptions" ) - conn_id: Optional[StrictStr] = None - lists: Optional[ + conn_id: StrictStr | None = None + lists: ( dict[ Annotated[str, StringConstraints(max_length=64, strict=True)], SlidingSyncList, ] - ] = None - room_subscriptions: Optional[dict[StrictStr, RoomSubscription]] = None - extensions: Optional[Extensions] = None + | None + ) = None + room_subscriptions: dict[StrictStr, RoomSubscription] | None = None + extensions: Extensions | None = None @field_validator("lists") @classmethod def lists_length_check( - cls, value: Optional[dict[str, SlidingSyncList]] - ) -> Optional[dict[str, SlidingSyncList]]: + cls, value: dict[str, SlidingSyncList] | None + ) -> dict[str, SlidingSyncList] | None: if value is not None: assert len(value) <= 100, f"Max lists: 100 but saw {len(value)}" return value diff --git a/synapse/types/state.py b/synapse/types/state.py index 1b4de61d3e..ab619a7fb8 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py @@ -27,7 +27,6 @@ from typing import ( Collection, Iterable, Mapping, - Optional, TypeVar, ) @@ -60,7 +59,7 @@ class StateFilter: appear in `types`. """ - types: "immutabledict[str, Optional[frozenset[str]]]" + types: "immutabledict[str, frozenset[str] | None]" include_others: bool = False def __attrs_post_init__(self) -> None: @@ -101,7 +100,7 @@ class StateFilter: return _NONE_STATE_FILTER @staticmethod - def from_types(types: Iterable[tuple[str, Optional[str]]]) -> "StateFilter": + def from_types(types: Iterable[tuple[str, str | None]]) -> "StateFilter": """Creates a filter that only fetches the given types Args: @@ -111,7 +110,7 @@ class StateFilter: Returns: The new state filter. 
""" - type_dict: dict[str, Optional[set[str]]] = {} + type_dict: dict[str, set[str] | None] = {} for typ, s in types: if typ in type_dict: if type_dict[typ] is None: @@ -130,7 +129,7 @@ class StateFilter: ) ) - def to_types(self) -> Iterable[tuple[str, Optional[str]]]: + def to_types(self) -> Iterable[tuple[str, str | None]]: """The inverse to `from_types`.""" for event_type, state_keys in self.types.items(): if state_keys is None: @@ -157,13 +156,13 @@ class StateFilter: @staticmethod def freeze( - types: Mapping[str, Optional[Collection[str]]], include_others: bool + types: Mapping[str, Collection[str] | None], include_others: bool ) -> "StateFilter": """ Returns a (frozen) StateFilter with the same contents as the parameters specified here, which can be made of mutable types. """ - types_with_frozen_values: dict[str, Optional[frozenset[str]]] = {} + types_with_frozen_values: dict[str, frozenset[str] | None] = {} for state_types, state_keys in types.items(): if state_keys is not None: types_with_frozen_values[state_types] = frozenset(state_keys) @@ -289,7 +288,7 @@ class StateFilter: return where_clause, where_args - def max_entries_returned(self) -> Optional[int]: + def max_entries_returned(self) -> int | None: """Returns the maximum number of entries this filter will return if known, otherwise returns None. @@ -450,7 +449,7 @@ class StateFilter: # {state type -> set of state keys OR None for wildcard} # (The same structure as that of a StateFilter.) - new_types: dict[str, Optional[set[str]]] = {} + new_types: dict[str, set[str] | None] = {} # if we start with all, insert the excluded statetypes as empty sets # to prevent them from being included diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 0d3b7ca740..f937080f9e 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -25,7 +25,6 @@ import typing from typing import ( Iterator, Mapping, - Optional, Sequence, TypeVar, ) @@ -61,7 +60,7 @@ def unwrapFirstError(failure: Failure) -> Failure: def log_failure( failure: Failure, msg: str, consumeErrors: bool = True -) -> Optional[Failure]: +) -> Failure | None: """Creates a function suitable for passing to `Deferred.addErrback` that logs any failures that occur. 
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 8322a1bb33..825fb10acf 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -40,9 +40,7 @@ from typing import ( Hashable, Iterable, Literal, - Optional, TypeVar, - Union, overload, ) @@ -104,8 +102,8 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): __slots__ = ["_deferred", "_observers", "_result"] _deferred: "defer.Deferred[_T]" - _observers: Union[list["defer.Deferred[_T]"], tuple[()]] - _result: Union[None, tuple[Literal[True], _T], tuple[Literal[False], Failure]] + _observers: list["defer.Deferred[_T]"] | tuple[()] + _result: None | tuple[Literal[True], _T] | tuple[Literal[False], Failure] def __init__(self, deferred: "defer.Deferred[_T]", consumeErrors: bool = False): object.__setattr__(self, "_deferred", deferred) @@ -132,7 +130,7 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): ) return r - def errback(f: Failure) -> Optional[Failure]: + def errback(f: Failure) -> Failure | None: object.__setattr__(self, "_result", (False, f)) # once we have set _result, no more entries will be added to _observers, @@ -187,7 +185,7 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): def has_succeeded(self) -> bool: return self._result is not None and self._result[0] is True - def get_result(self) -> Union[_T, Failure]: + def get_result(self) -> _T | Failure: if self._result is None: raise ValueError(f"{self!r} has no result yet") return self._result[1] @@ -402,80 +400,78 @@ def gather_results( # type: ignore[misc] @overload async def gather_optional_coroutines( - *coroutines: Unpack[tuple[Optional[Coroutine[Any, Any, T1]]]], -) -> tuple[Optional[T1]]: ... + *coroutines: Unpack[tuple[Coroutine[Any, Any, T1] | None]], +) -> tuple[T1 | None]: ... @overload async def gather_optional_coroutines( *coroutines: Unpack[ tuple[ - Optional[Coroutine[Any, Any, T1]], - Optional[Coroutine[Any, Any, T2]], + Coroutine[Any, Any, T1] | None, + Coroutine[Any, Any, T2] | None, ] ], -) -> tuple[Optional[T1], Optional[T2]]: ... +) -> tuple[T1 | None, T2 | None]: ... @overload async def gather_optional_coroutines( *coroutines: Unpack[ tuple[ - Optional[Coroutine[Any, Any, T1]], - Optional[Coroutine[Any, Any, T2]], - Optional[Coroutine[Any, Any, T3]], + Coroutine[Any, Any, T1] | None, + Coroutine[Any, Any, T2] | None, + Coroutine[Any, Any, T3] | None, ] ], -) -> tuple[Optional[T1], Optional[T2], Optional[T3]]: ... +) -> tuple[T1 | None, T2 | None, T3 | None]: ... @overload async def gather_optional_coroutines( *coroutines: Unpack[ tuple[ - Optional[Coroutine[Any, Any, T1]], - Optional[Coroutine[Any, Any, T2]], - Optional[Coroutine[Any, Any, T3]], - Optional[Coroutine[Any, Any, T4]], + Coroutine[Any, Any, T1] | None, + Coroutine[Any, Any, T2] | None, + Coroutine[Any, Any, T3] | None, + Coroutine[Any, Any, T4] | None, ] ], -) -> tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4]]: ... +) -> tuple[T1 | None, T2 | None, T3 | None, T4 | None]: ... 
@overload async def gather_optional_coroutines( *coroutines: Unpack[ tuple[ - Optional[Coroutine[Any, Any, T1]], - Optional[Coroutine[Any, Any, T2]], - Optional[Coroutine[Any, Any, T3]], - Optional[Coroutine[Any, Any, T4]], - Optional[Coroutine[Any, Any, T5]], + Coroutine[Any, Any, T1] | None, + Coroutine[Any, Any, T2] | None, + Coroutine[Any, Any, T3] | None, + Coroutine[Any, Any, T4] | None, + Coroutine[Any, Any, T5] | None, ] ], -) -> tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4], Optional[T5]]: ... +) -> tuple[T1 | None, T2 | None, T3 | None, T4 | None, T5 | None]: ... @overload async def gather_optional_coroutines( *coroutines: Unpack[ tuple[ - Optional[Coroutine[Any, Any, T1]], - Optional[Coroutine[Any, Any, T2]], - Optional[Coroutine[Any, Any, T3]], - Optional[Coroutine[Any, Any, T4]], - Optional[Coroutine[Any, Any, T5]], - Optional[Coroutine[Any, Any, T6]], + Coroutine[Any, Any, T1] | None, + Coroutine[Any, Any, T2] | None, + Coroutine[Any, Any, T3] | None, + Coroutine[Any, Any, T4] | None, + Coroutine[Any, Any, T5] | None, + Coroutine[Any, Any, T6] | None, ] ], -) -> tuple[ - Optional[T1], Optional[T2], Optional[T3], Optional[T4], Optional[T5], Optional[T6] -]: ... +) -> tuple[T1 | None, T2 | None, T3 | None, T4 | None, T5 | None, T6 | None]: ... async def gather_optional_coroutines( - *coroutines: Unpack[tuple[Optional[Coroutine[Any, Any, T1]], ...]], -) -> tuple[Optional[T1], ...]: + *coroutines: Unpack[tuple[Coroutine[Any, Any, T1] | None, ...]], +) -> tuple[T1 | None, ...]: """Helper function that allows waiting on multiple coroutines at once. The return value is a tuple of the return values of the coroutines in order. @@ -866,7 +862,7 @@ class DoneAwaitable(Awaitable[R]): return self.value -def maybe_awaitable(value: Union[Awaitable[R], R]) -> Awaitable[R]: +def maybe_awaitable(value: Awaitable[R] | R) -> Awaitable[R]: """Convert a value to an awaitable if not already an awaitable.""" if inspect.isawaitable(value): return value diff --git a/synapse/util/background_queue.py b/synapse/util/background_queue.py index 7e4c322662..93ffd9f271 100644 --- a/synapse/util/background_queue.py +++ b/synapse/util/background_queue.py @@ -21,7 +21,6 @@ from typing import ( Awaitable, Callable, Generic, - Optional, TypeVar, ) @@ -76,7 +75,7 @@ class BackgroundQueue(Generic[T]): # Indicates if a background process is running, and if so whether there # is new data in the queue. Used to signal to an existing background # process that there is new data added to the queue. 
- self._wakeup_event: Optional[DeferredEvent] = None + self._wakeup_event: DeferredEvent | None = None def add(self, item: T) -> None: """Add an item into the queue.""" diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index c799fca550..a65ab7f57d 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -24,7 +24,7 @@ import logging import typing from enum import Enum, auto from sys import intern -from typing import Any, Callable, Optional, Sized, TypeVar +from typing import Any, Callable, Sized, TypeVar import attr from prometheus_client import REGISTRY @@ -129,7 +129,7 @@ class CacheMetric: _cache: Sized _cache_type: str _cache_name: str - _collect_callback: Optional[Callable] + _collect_callback: Callable | None _server_name: str hits: int = 0 @@ -137,7 +137,7 @@ class CacheMetric: eviction_size_by_reason: typing.Counter[EvictionReason] = attr.ib( factory=collections.Counter ) - memory_usage: Optional[int] = None + memory_usage: int | None = None def inc_hits(self) -> None: self.hits += 1 @@ -208,9 +208,9 @@ def register_cache( cache_name: str, cache: Sized, server_name: str, - collect_callback: Optional[Callable] = None, + collect_callback: Callable | None = None, resizable: bool = True, - resize_callback: Optional[Callable] = None, + resize_callback: Callable | None = None, ) -> CacheMetric: """Register a cache object for metric collection and resizing. @@ -269,7 +269,7 @@ KNOWN_KEYS = { ) } -T = TypeVar("T", Optional[str], str) +T = TypeVar("T", str | None, str) def intern_string(string: T) -> T: diff --git a/synapse/util/caches/cached_call.py b/synapse/util/caches/cached_call.py index 9b86017cd9..491e7e52a1 100644 --- a/synapse/util/caches/cached_call.py +++ b/synapse/util/caches/cached_call.py @@ -19,7 +19,7 @@ # # import enum -from typing import Awaitable, Callable, Generic, Optional, TypeVar, Union +from typing import Awaitable, Callable, Generic, TypeVar from twisted.internet.defer import Deferred from twisted.python.failure import Failure @@ -74,9 +74,9 @@ class CachedCall(Generic[TV]): f: The underlying function. Only one call to this function will be alive at once (per instance of CachedCall) """ - self._callable: Optional[Callable[[], Awaitable[TV]]] = f - self._deferred: Optional[Deferred] = None - self._result: Union[_Sentinel, TV, Failure] = _Sentinel.sentinel + self._callable: Callable[[], Awaitable[TV]] | None = f + self._deferred: Deferred | None = None + self._result: _Sentinel | TV | Failure = _Sentinel.sentinel async def get(self) -> TV: """Kick off the call if necessary, and return the result""" @@ -93,7 +93,7 @@ class CachedCall(Generic[TV]): # result in the deferred, since `awaiting` a deferred destroys its result. # (Also, if it's a Failure, GCing the deferred would log a critical error # about unhandled Failures) - def got_result(r: Union[TV, Failure]) -> None: + def got_result(r: TV | Failure) -> None: self._result = r self._deferred.addBoth(got_result) diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 380f2a78ca..a1601cd4e9 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -31,7 +31,6 @@ from typing import ( Optional, Sized, TypeVar, - Union, cast, ) @@ -107,9 +106,9 @@ class DeferredCache(Generic[KT, VT]): cache_type = TreeCache if tree else dict # _pending_deferred_cache maps from the key value to a `CacheEntry` object. 
- self._pending_deferred_cache: Union[ - TreeCache, "MutableMapping[KT, CacheEntry[KT, VT]]" - ] = cache_type() + self._pending_deferred_cache: ( + TreeCache | "MutableMapping[KT, CacheEntry[KT, VT]]" + ) = cache_type() def metrics_cb() -> None: cache_pending_metric.labels( @@ -136,7 +135,7 @@ class DeferredCache(Generic[KT, VT]): prune_unread_entries=prune_unread_entries, ) - self.thread: Optional[threading.Thread] = None + self.thread: threading.Thread | None = None @property def max_entries(self) -> int: @@ -155,7 +154,7 @@ class DeferredCache(Generic[KT, VT]): def get( self, key: KT, - callback: Optional[Callable[[], None]] = None, + callback: Callable[[], None] | None = None, update_metrics: bool = True, ) -> defer.Deferred: """Looks the key up in the caches. @@ -199,7 +198,7 @@ class DeferredCache(Generic[KT, VT]): def get_bulk( self, keys: Collection[KT], - callback: Optional[Callable[[], None]] = None, + callback: Callable[[], None] | None = None, ) -> tuple[dict[KT, VT], Optional["defer.Deferred[dict[KT, VT]]"], Collection[KT]]: """Bulk lookup of items in the cache. @@ -263,9 +262,7 @@ class DeferredCache(Generic[KT, VT]): return (cached, pending_deferred, missing) - def get_immediate( - self, key: KT, default: T, update_metrics: bool = True - ) -> Union[VT, T]: + def get_immediate(self, key: KT, default: T, update_metrics: bool = True) -> VT | T: """If we have a *completed* cached value, return it.""" return self.cache.get(key, default, update_metrics=update_metrics) @@ -273,7 +270,7 @@ class DeferredCache(Generic[KT, VT]): self, key: KT, value: "defer.Deferred[VT]", - callback: Optional[Callable[[], None]] = None, + callback: Callable[[], None] | None = None, ) -> defer.Deferred: """Adds a new entry to the cache (or updates an existing one). @@ -328,7 +325,7 @@ class DeferredCache(Generic[KT, VT]): def start_bulk_input( self, keys: Collection[KT], - callback: Optional[Callable[[], None]] = None, + callback: Callable[[], None] | None = None, ) -> "CacheMultipleEntries[KT, VT]": """Bulk set API for use when fetching multiple keys at once from the DB. @@ -382,7 +379,7 @@ class DeferredCache(Generic[KT, VT]): return failure def prefill( - self, key: KT, value: VT, callback: Optional[Callable[[], None]] = None + self, key: KT, value: VT, callback: Callable[[], None] | None = None ) -> None: callbacks = (callback,) if callback else () self.cache.set(key, value, callbacks=callbacks) @@ -435,7 +432,7 @@ class CacheEntry(Generic[KT, VT], metaclass=abc.ABCMeta): @abc.abstractmethod def add_invalidation_callback( - self, key: KT, callback: Optional[Callable[[], None]] + self, key: KT, callback: Callable[[], None] | None ) -> None: """Add an invalidation callback""" ... 
@@ -461,7 +458,7 @@ class CacheEntrySingle(CacheEntry[KT, VT]): return self._deferred.observe() def add_invalidation_callback( - self, key: KT, callback: Optional[Callable[[], None]] + self, key: KT, callback: Callable[[], None] | None ) -> None: if callback is None: return @@ -478,7 +475,7 @@ class CacheMultipleEntries(CacheEntry[KT, VT]): __slots__ = ["_deferred", "_callbacks", "_global_callbacks"] def __init__(self) -> None: - self._deferred: Optional[ObservableDeferred[dict[KT, VT]]] = None + self._deferred: ObservableDeferred[dict[KT, VT]] | None = None self._callbacks: dict[KT, set[Callable[[], None]]] = {} self._global_callbacks: set[Callable[[], None]] = set() @@ -488,7 +485,7 @@ class CacheMultipleEntries(CacheEntry[KT, VT]): return self._deferred.observe().addCallback(lambda res: res[key]) def add_invalidation_callback( - self, key: KT, callback: Optional[Callable[[], None]] + self, key: KT, callback: Callable[[], None] | None ) -> None: if callback is None: return @@ -499,7 +496,7 @@ class CacheMultipleEntries(CacheEntry[KT, VT]): return self._callbacks.get(key, set()) | self._global_callbacks def add_global_invalidation_callback( - self, callback: Optional[Callable[[], None]] + self, callback: Callable[[], None] | None ) -> None: """Add a callback for when any keys get invalidated.""" if callback is None: diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 7cc83bad37..fd931cac89 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -30,11 +30,9 @@ from typing import ( Hashable, Iterable, Mapping, - Optional, Protocol, Sequence, TypeVar, - Union, cast, ) from weakref import WeakValueDictionary @@ -53,7 +51,7 @@ from synapse.util.clock import Clock logger = logging.getLogger(__name__) -CacheKey = Union[tuple, Any] +CacheKey = tuple | Any F = TypeVar("F", bound=Callable[..., Any]) @@ -76,10 +74,10 @@ class _CacheDescriptorBase: def __init__( self, orig: Callable[..., Any], - num_args: Optional[int], - uncached_args: Optional[Collection[str]] = None, + num_args: int | None, + uncached_args: Collection[str] | None = None, cache_context: bool = False, - name: Optional[str] = None, + name: str | None = None, ): self.orig = orig self.name = name or orig.__name__ @@ -216,13 +214,13 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): *, orig: Callable[..., Any], max_entries: int = 1000, - num_args: Optional[int] = None, - uncached_args: Optional[Collection[str]] = None, + num_args: int | None = None, + uncached_args: Collection[str] | None = None, tree: bool = False, cache_context: bool = False, iterable: bool = False, prune_unread_entries: bool = True, - name: Optional[str] = None, + name: str | None = None, ): super().__init__( orig, @@ -243,7 +241,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): self.prune_unread_entries = prune_unread_entries def __get__( - self, obj: Optional[HasServerNameAndClock], owner: Optional[type] + self, obj: HasServerNameAndClock | None, owner: type | None ) -> Callable[..., "defer.Deferred[Any]"]: # We need access to instance-level `obj.server_name` attribute assert obj is not None, ( @@ -331,8 +329,8 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): orig: Callable[..., Awaitable[dict]], cached_method_name: str, list_name: str, - num_args: Optional[int] = None, - name: Optional[str] = None, + num_args: int | None = None, + name: str | None = None, ): """ Args: @@ -359,7 +357,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): ) def __get__( - self, 
obj: Optional[Any], objtype: Optional[type] = None + self, obj: Any | None, objtype: type | None = None ) -> Callable[..., "defer.Deferred[dict[Hashable, Any]]"]: cached_method = getattr(obj, self.cached_method_name) cache: DeferredCache[CacheKey, Any] = cached_method.cache @@ -471,7 +469,7 @@ class _CacheContext: on a lower level. """ - Cache = Union[DeferredCache, LruCache] + Cache = DeferredCache | LruCache _cache_context_objects: """WeakValueDictionary[ tuple["_CacheContext.Cache", CacheKey], "_CacheContext" @@ -508,13 +506,13 @@ class _CachedFunctionDescriptor: plugin.""" max_entries: int - num_args: Optional[int] - uncached_args: Optional[Collection[str]] + num_args: int | None + uncached_args: Collection[str] | None tree: bool cache_context: bool iterable: bool prune_unread_entries: bool - name: Optional[str] + name: str | None def __call__(self, orig: F) -> CachedFunction[F]: d = DeferredCacheDescriptor( @@ -534,13 +532,13 @@ class _CachedFunctionDescriptor: def cached( *, max_entries: int = 1000, - num_args: Optional[int] = None, - uncached_args: Optional[Collection[str]] = None, + num_args: int | None = None, + uncached_args: Collection[str] | None = None, tree: bool = False, cache_context: bool = False, iterable: bool = False, prune_unread_entries: bool = True, - name: Optional[str] = None, + name: str | None = None, ) -> _CachedFunctionDescriptor: return _CachedFunctionDescriptor( max_entries=max_entries, @@ -561,8 +559,8 @@ class _CachedListFunctionDescriptor: cached_method_name: str list_name: str - num_args: Optional[int] = None - name: Optional[str] = None + num_args: int | None = None + name: str | None = None def __call__(self, orig: F) -> CachedFunction[F]: d = DeferredCacheListDescriptor( @@ -579,8 +577,8 @@ def cachedList( *, cached_method_name: str, list_name: str, - num_args: Optional[int] = None, - name: Optional[str] = None, + num_args: int | None = None, + name: str | None = None, ) -> _CachedListFunctionDescriptor: """Creates a descriptor that wraps a function in a `DeferredCacheListDescriptor`. 
diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index dd6f413e79..4289e327af 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -25,9 +25,7 @@ from typing import ( Generic, Iterable, Literal, - Optional, TypeVar, - Union, ) import attr @@ -88,7 +86,7 @@ class _PerKeyValue(Generic[DV]): __slots__ = ["value"] - def __init__(self, value: Union[DV, Literal[_Sentinel.sentinel]]) -> None: + def __init__(self, value: DV | Literal[_Sentinel.sentinel]) -> None: self.value = value def __len__(self) -> int: @@ -157,8 +155,8 @@ class DictionaryCache(Generic[KT, DKT, DV]): # * A key of `(KT, DKT)` has a value of `_PerKeyValue` # * A key of `(KT, _FullCacheKey.KEY)` has a value of `Dict[DKT, DV]` self.cache: LruCache[ - tuple[KT, Union[DKT, Literal[_FullCacheKey.KEY]]], - Union[_PerKeyValue, dict[DKT, DV]], + tuple[KT, DKT | Literal[_FullCacheKey.KEY]], + _PerKeyValue | dict[DKT, DV], ] = LruCache( max_size=max_entries, clock=clock, @@ -170,7 +168,7 @@ class DictionaryCache(Generic[KT, DKT, DV]): self.name = name self.sequence = 0 - self.thread: Optional[threading.Thread] = None + self.thread: threading.Thread | None = None def check_thread(self) -> None: expected_thread = self.thread @@ -182,9 +180,7 @@ class DictionaryCache(Generic[KT, DKT, DV]): "Cache objects can only be accessed from the main thread" ) - def get( - self, key: KT, dict_keys: Optional[Iterable[DKT]] = None - ) -> DictionaryEntry: + def get(self, key: KT, dict_keys: Iterable[DKT] | None = None) -> DictionaryEntry: """Fetch an entry out of the cache Args: @@ -295,7 +291,7 @@ class DictionaryCache(Generic[KT, DKT, DV]): sequence: int, key: KT, value: dict[DKT, DV], - fetched_keys: Optional[Iterable[DKT]] = None, + fetched_keys: Iterable[DKT] | None = None, ) -> None: """Updates the entry in the cache. diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 29ce6c0a77..528e4bb852 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -27,9 +27,7 @@ from typing import ( Generic, Iterable, Literal, - Optional, TypeVar, - Union, overload, ) @@ -146,7 +144,7 @@ class ExpiringCache(Generic[KT, VT]): return entry.value - def pop(self, key: KT, default: T = SENTINEL) -> Union[VT, T]: + def pop(self, key: KT, default: T = SENTINEL) -> VT | T: """Removes and returns the value with the given key from the cache. If the key isn't in the cache then `default` will be returned if @@ -173,12 +171,12 @@ class ExpiringCache(Generic[KT, VT]): return key in self._cache @overload - def get(self, key: KT, default: Literal[None] = None) -> Optional[VT]: ... + def get(self, key: KT, default: Literal[None] = None) -> VT | None: ... @overload - def get(self, key: KT, default: T) -> Union[VT, T]: ... + def get(self, key: KT, default: T) -> VT | T: ... 
- def get(self, key: KT, default: Optional[T] = None) -> Union[VT, Optional[T]]: + def get(self, key: KT, default: T | None = None) -> VT | T | None: try: return self[key] except KeyError: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 04549ab65f..d304e804e9 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -33,9 +33,7 @@ from typing import ( Generic, Iterable, Literal, - Optional, TypeVar, - Union, cast, overload, ) @@ -117,14 +115,14 @@ def _expire_old_entries( hs: "HomeServer", clock: Clock, expiry_seconds: float, - autotune_config: Optional[dict], + autotune_config: dict | None, ) -> "defer.Deferred[None]": """Walks the global cache list to find cache entries that haven't been accessed in the given number of seconds, or if a given memory threshold has been breached. """ async def _internal_expire_old_entries( - clock: Clock, expiry_seconds: float, autotune_config: Optional[dict] + clock: Clock, expiry_seconds: float, autotune_config: dict | None ) -> None: if autotune_config: max_cache_memory_usage = autotune_config["max_cache_memory_usage"] @@ -281,7 +279,7 @@ class _Node(Generic[KT, VT]): prune_unread_entries: bool = True, ): self._list_node = ListNode.insert_after(self, root) - self._global_list_node: Optional[_TimedListNode] = None + self._global_list_node: _TimedListNode | None = None if USE_GLOBAL_LIST and prune_unread_entries: self._global_list_node = _TimedListNode.insert_after(self, GLOBAL_ROOT) self._global_list_node.update_last_access(clock) @@ -303,7 +301,7 @@ class _Node(Generic[KT, VT]): # footprint down. Storing `None` is free as its a singleton, while empty # lists are 56 bytes (and empty sets are 216 bytes, if we did the naive # thing and used sets). - self.callbacks: Optional[list[Callable[[], None]]] = None + self.callbacks: list[Callable[[], None]] | None = None self.add_callbacks(callbacks) @@ -399,12 +397,12 @@ class LruCache(Generic[KT, VT]): clock: Clock, server_name: str, cache_name: str, - cache_type: type[Union[dict, TreeCache]] = dict, - size_callback: Optional[Callable[[VT], int]] = None, - metrics_collection_callback: Optional[Callable[[], None]] = None, + cache_type: type[dict | TreeCache] = dict, + size_callback: Callable[[VT], int] | None = None, + metrics_collection_callback: Callable[[], None] | None = None, apply_cache_factor_from_config: bool = True, prune_unread_entries: bool = True, - extra_index_cb: Optional[Callable[[KT, VT], KT]] = None, + extra_index_cb: Callable[[KT, VT], KT] | None = None, ): ... @overload @@ -415,12 +413,12 @@ class LruCache(Generic[KT, VT]): clock: Clock, server_name: str, cache_name: Literal[None] = None, - cache_type: type[Union[dict, TreeCache]] = dict, - size_callback: Optional[Callable[[VT], int]] = None, - metrics_collection_callback: Optional[Callable[[], None]] = None, + cache_type: type[dict | TreeCache] = dict, + size_callback: Callable[[VT], int] | None = None, + metrics_collection_callback: Callable[[], None] | None = None, apply_cache_factor_from_config: bool = True, prune_unread_entries: bool = True, - extra_index_cb: Optional[Callable[[KT, VT], KT]] = None, + extra_index_cb: Callable[[KT, VT], KT] | None = None, ): ... 
def __init__( @@ -429,13 +427,13 @@ class LruCache(Generic[KT, VT]): max_size: int, clock: Clock, server_name: str, - cache_name: Optional[str] = None, - cache_type: type[Union[dict, TreeCache]] = dict, - size_callback: Optional[Callable[[VT], int]] = None, - metrics_collection_callback: Optional[Callable[[], None]] = None, + cache_name: str | None = None, + cache_type: type[dict | TreeCache] = dict, + size_callback: Callable[[VT], int] | None = None, + metrics_collection_callback: Callable[[], None] | None = None, apply_cache_factor_from_config: bool = True, prune_unread_entries: bool = True, - extra_index_cb: Optional[Callable[[KT, VT], KT]] = None, + extra_index_cb: Callable[[KT, VT], KT] | None = None, ): """ Args: @@ -484,7 +482,7 @@ class LruCache(Generic[KT, VT]): Note: The new key does not have to be unique. """ - cache: Union[dict[KT, _Node[KT, VT]], TreeCache] = cache_type() + cache: dict[KT, _Node[KT, VT]] | TreeCache = cache_type() self.cache = cache # Used for introspection. self.apply_cache_factor_from_config = apply_cache_factor_from_config @@ -500,10 +498,10 @@ class LruCache(Generic[KT, VT]): # register_cache might call our "set_cache_factor" callback; there's nothing to # do yet when we get resized. - self._on_resize: Optional[Callable[[], None]] = None + self._on_resize: Callable[[], None] | None = None if cache_name is not None and server_name is not None: - metrics: Optional[CacheMetric] = register_cache( + metrics: CacheMetric | None = register_cache( cache_type="lru_cache", cache_name=cache_name, cache=self, @@ -625,7 +623,7 @@ class LruCache(Generic[KT, VT]): callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., update_last_access: bool = ..., - ) -> Optional[VT]: ... + ) -> VT | None: ... @overload def cache_get( @@ -634,16 +632,16 @@ class LruCache(Generic[KT, VT]): callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., update_last_access: bool = ..., - ) -> Union[T, VT]: ... + ) -> T | VT: ... @synchronized def cache_get( key: KT, - default: Optional[T] = None, + default: T | None = None, callbacks: Collection[Callable[[], None]] = (), update_metrics: bool = True, update_last_access: bool = True, - ) -> Union[None, T, VT]: + ) -> None | T | VT: """Look up a key in the cache Args: @@ -677,21 +675,21 @@ class LruCache(Generic[KT, VT]): key: tuple, default: Literal[None] = None, update_metrics: bool = True, - ) -> Union[None, Iterable[tuple[KT, VT]]]: ... + ) -> None | Iterable[tuple[KT, VT]]: ... @overload def cache_get_multi( key: tuple, default: T, update_metrics: bool = True, - ) -> Union[T, Iterable[tuple[KT, VT]]]: ... + ) -> T | Iterable[tuple[KT, VT]]: ... @synchronized def cache_get_multi( key: tuple, - default: Optional[T] = None, + default: T | None = None, update_metrics: bool = True, - ) -> Union[None, T, Iterable[tuple[KT, VT]]]: + ) -> None | T | Iterable[tuple[KT, VT]]: """Returns a generator yielding all entries under the given key. Can only be used if backed by a tree cache. @@ -769,13 +767,13 @@ class LruCache(Generic[KT, VT]): return value @overload - def cache_pop(key: KT, default: Literal[None] = None) -> Optional[VT]: ... + def cache_pop(key: KT, default: Literal[None] = None) -> VT | None: ... @overload - def cache_pop(key: KT, default: T) -> Union[T, VT]: ... + def cache_pop(key: KT, default: T) -> T | VT: ... 
@synchronized - def cache_pop(key: KT, default: Optional[T] = None) -> Union[None, T, VT]: + def cache_pop(key: KT, default: T | None = None) -> None | T | VT: node = cache.get(key, None) if node: evicted_len = delete_node(node) @@ -925,22 +923,22 @@ class AsyncLruCache(Generic[KT, VT]): self._lru_cache: LruCache[KT, VT] = LruCache(*args, **kwargs) async def get( - self, key: KT, default: Optional[T] = None, update_metrics: bool = True - ) -> Optional[VT]: + self, key: KT, default: T | None = None, update_metrics: bool = True + ) -> VT | None: return self._lru_cache.get(key, update_metrics=update_metrics) async def get_external( self, key: KT, - default: Optional[T] = None, + default: T | None = None, update_metrics: bool = True, - ) -> Optional[VT]: + ) -> VT | None: # This method should fetch from any configured external cache, in this case noop. return None def get_local( - self, key: KT, default: Optional[T] = None, update_metrics: bool = True - ) -> Optional[VT]: + self, key: KT, default: T | None = None, update_metrics: bool = True + ) -> VT | None: return self._lru_cache.get(key, update_metrics=update_metrics) async def set(self, key: KT, value: VT) -> None: diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index e82036d7e0..b1cdc81dda 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -26,7 +26,6 @@ from typing import ( Callable, Generic, Iterable, - Optional, TypeVar, ) @@ -88,7 +87,7 @@ class ResponseCacheEntry: easier to cache Failure results. """ - opentracing_span_context: "Optional[opentracing.SpanContext]" + opentracing_span_context: "opentracing.SpanContext | None" """The opentracing span which generated/is generating the result""" @@ -150,7 +149,7 @@ class ResponseCache(Generic[KV]): """ return self._result_cache.keys() - def _get(self, key: KV) -> Optional[ResponseCacheEntry]: + def _get(self, key: KV) -> ResponseCacheEntry | None: """Look up the given key. Args: @@ -171,7 +170,7 @@ class ResponseCache(Generic[KV]): self, context: ResponseCacheContext[KV], deferred: "defer.Deferred[RV]", - opentracing_span_context: "Optional[opentracing.SpanContext]", + opentracing_span_context: "opentracing.SpanContext | None", ) -> ResponseCacheEntry: """Set the entry for the given key to the given deferred. @@ -289,7 +288,7 @@ class ResponseCache(Generic[KV]): if cache_context: kwargs["cache_context"] = context - span_context: Optional[opentracing.SpanContext] = None + span_context: opentracing.SpanContext | None = None async def cb() -> RV: # NB it is important that we do not `await` before setting span_context! diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 552570fbb9..7c6c9bc572 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -21,7 +21,7 @@ import logging import math -from typing import Collection, Mapping, Optional, Union +from typing import Collection, Mapping import attr from sortedcontainers import SortedDict @@ -45,7 +45,7 @@ class AllEntitiesChangedResult: that callers do the correct checks. 
""" - _entities: Optional[list[EntityType]] + _entities: list[EntityType] | None @property def hit(self) -> bool: @@ -78,7 +78,7 @@ class StreamChangeCache: server_name: str, current_stream_pos: int, max_size: int = 10000, - prefilled_cache: Optional[Mapping[EntityType, int]] = None, + prefilled_cache: Mapping[EntityType, int] | None = None, ) -> None: """ Args: @@ -182,7 +182,7 @@ class StreamChangeCache: def get_entities_changed( self, entities: Collection[EntityType], stream_pos: int, _perf_factor: int = 1 - ) -> Union[set[EntityType], frozenset[EntityType]]: + ) -> set[EntityType] | frozenset[EntityType]: """ Returns the subset of the given entities that have had changes after the given position. @@ -352,7 +352,7 @@ class StreamChangeCache: for entity in r: self._entity_to_key.pop(entity, None) - def get_max_pos_of_last_change(self, entity: EntityType) -> Optional[int]: + def get_max_pos_of_last_change(self, entity: EntityType) -> int | None: """Returns an upper bound of the stream id of the last change to an entity. diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index 2be9463d6a..25b87832d8 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -21,7 +21,7 @@ import logging import time -from typing import Any, Callable, Generic, TypeVar, Union +from typing import Any, Callable, Generic, TypeVar import attr from sortedcontainers import SortedList @@ -91,7 +91,7 @@ class TTLCache(Generic[KT, VT]): self._data[key] = entry self._expiry_list.add(entry) - def get(self, key: KT, default: T = SENTINEL) -> Union[VT, T]: + def get(self, key: KT, default: T = SENTINEL) -> VT | T: """Get a value from the cache Args: @@ -134,7 +134,7 @@ class TTLCache(Generic[KT, VT]): self._metrics.inc_hits() return e.value, e.expiry_time, e.ttl - def pop(self, key: KT, default: T = SENTINEL) -> Union[VT, T]: + def pop(self, key: KT, default: T = SENTINEL) -> VT | T: """Remove a value from the cache If key is in the cache, remove it and return its value, else return default. diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 715240c8ce..7e92b55592 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -28,7 +28,7 @@ require. But this is probably just symptomatic of Python's package management. 
import logging from importlib import metadata -from typing import Any, Iterable, NamedTuple, Optional, Sequence, cast +from typing import Any, Iterable, NamedTuple, Sequence, cast from packaging.markers import Marker, Value, Variable, default_environment from packaging.requirements import Requirement @@ -153,7 +153,7 @@ def _values_from_marker_value(value: Value) -> set[str]: return {str(raw)} -def _extras_from_marker(marker: Optional[Marker]) -> set[str]: +def _extras_from_marker(marker: Marker | None) -> set[str]: """Return every `extra` referenced in the supplied marker tree.""" extras: set[str] = set() @@ -214,7 +214,7 @@ def _marker_applies_for_any_extra(requirement: Requirement, extras: set[str]) -> ) -def _not_installed(requirement: Requirement, extra: Optional[str] = None) -> str: +def _not_installed(requirement: Requirement, extra: str | None = None) -> str: if extra: return ( f"Synapse {VERSION} needs {requirement.name} for {extra}, " @@ -225,7 +225,7 @@ def _not_installed(requirement: Requirement, extra: Optional[str] = None) -> str def _incorrect_version( - requirement: Requirement, got: str, extra: Optional[str] = None + requirement: Requirement, got: str, extra: str | None = None ) -> str: if extra: return ( @@ -238,7 +238,7 @@ def _incorrect_version( ) -def _no_reported_version(requirement: Requirement, extra: Optional[str] = None) -> str: +def _no_reported_version(requirement: Requirement, extra: str | None = None) -> str: if extra: return ( f"Synapse {VERSION} needs {requirement} for {extra}, " @@ -251,7 +251,7 @@ def _no_reported_version(requirement: Requirement, extra: Optional[str] = None) ) -def check_requirements(extra: Optional[str] = None) -> None: +def check_requirements(extra: str | None = None) -> None: """Check Synapse's dependencies are present and correctly versioned. If provided, `extra` must be the name of an packaging extra (e.g. "saml2" in diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py index 411b47f939..63e0571a78 100644 --- a/synapse/util/daemonize.py +++ b/synapse/util/daemonize.py @@ -27,7 +27,7 @@ import os import signal import sys from types import FrameType, TracebackType -from typing import NoReturn, Optional +from typing import NoReturn from synapse.logging.context import ( LoggingContext, @@ -121,7 +121,7 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") - def excepthook( type_: type[BaseException], value: BaseException, - traceback: Optional[TracebackType], + traceback: TracebackType | None, ) -> None: logger.critical("Unhanded exception", exc_info=(type_, value, traceback)) @@ -144,7 +144,7 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") - sys.exit(1) # write a log line on SIGTERM. - def sigterm(signum: int, frame: Optional[FrameType]) -> NoReturn: + def sigterm(signum: int, frame: FrameType | None) -> NoReturn: logger.warning("Caught signal %s. 
Stopping daemon.", signum) sys.exit(0) diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index e8df5399cd..23ef67c752 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -25,9 +25,7 @@ from typing import ( Awaitable, Callable, Generic, - Optional, TypeVar, - Union, ) from typing_extensions import ParamSpec @@ -137,7 +135,7 @@ class Signal(Generic[P]): Returns a Deferred that will complete when all the observers have completed.""" - async def do(observer: Callable[P, Union[R, Awaitable[R]]]) -> Optional[R]: + async def do(observer: Callable[P, R | Awaitable[R]]) -> R | None: try: return await maybe_awaitable(observer(*args, **kwargs)) except Exception as e: diff --git a/synapse/util/events.py b/synapse/util/events.py index 4a1aa28ce4..19eca1c1ae 100644 --- a/synapse/util/events.py +++ b/synapse/util/events.py @@ -13,7 +13,7 @@ # # -from typing import Any, Optional +from typing import Any from pydantic import Field, StrictStr, ValidationError, field_validator @@ -41,7 +41,7 @@ class MTextRepresentation(ParseModel): """ body: StrictStr - mimetype: Optional[StrictStr] = None + mimetype: StrictStr | None = None class MTopic(ParseModel): @@ -53,7 +53,7 @@ class MTopic(ParseModel): See `TopicContentBlock` in the Matrix specification. """ - m_text: Optional[list[MTextRepresentation]] = Field(None, alias="m.text") + m_text: list[MTextRepresentation] | None = Field(None, alias="m.text") """ An ordered array of textual representations in different mimetypes. """ @@ -65,7 +65,7 @@ class MTopic(ParseModel): @classmethod def ignore_invalid_representations( cls, m_text: Any - ) -> Optional[list[MTextRepresentation]]: + ) -> list[MTextRepresentation] | None: if not isinstance(m_text, (list, tuple)): raise ValueError("m.text must be a list or a tuple") representations = [] @@ -87,7 +87,7 @@ class TopicContent(ParseModel): The topic in plain text. """ - m_topic: Optional[MTopic] = Field(None, alias="m.topic") + m_topic: MTopic | None = Field(None, alias="m.topic") """ Textual representation of the room topic in different mimetypes. """ @@ -96,14 +96,14 @@ class TopicContent(ParseModel): # `topic` field. @field_validator("m_topic", mode="before") @classmethod - def ignore_invalid_m_topic(cls, m_topic: Any) -> Optional[MTopic]: + def ignore_invalid_m_topic(cls, m_topic: Any) -> MTopic | None: try: return MTopic.model_validate(m_topic) except ValidationError: return None -def get_plain_text_topic_from_event_content(content: JsonDict) -> Optional[str]: +def get_plain_text_topic_from_event_content(content: JsonDict) -> str | None: """ Given the `content` of an `m.room.topic` event, returns the plain-text topic representation. 
Prefers pulling plain-text from the newer `m.topic` field if diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 9fa8d40234..8d64684084 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -19,7 +19,7 @@ # import queue -from typing import Any, BinaryIO, Optional, Union, cast +from typing import Any, BinaryIO, cast from twisted.internet import threads from twisted.internet.defer import Deferred @@ -50,7 +50,7 @@ class BackgroundFileConsumer: self._reactor: ISynapseReactor = reactor # Producer we're registered with - self._producer: Optional[Union[IPushProducer, IPullProducer]] = None + self._producer: IPushProducer | IPullProducer | None = None # True if PushProducer, false if PullProducer self.streaming = False @@ -61,18 +61,18 @@ class BackgroundFileConsumer: # Queue of slices of bytes to be written. When producer calls # unregister a final None is sent. - self._bytes_queue: queue.Queue[Optional[bytes]] = queue.Queue() + self._bytes_queue: queue.Queue[bytes | None] = queue.Queue() # Deferred that is resolved when finished writing # # This is really Deferred[None], but mypy doesn't seem to like that. - self._finished_deferred: Optional[Deferred[Any]] = None + self._finished_deferred: Deferred[Any] | None = None # If the _writer thread throws an exception it gets stored here. - self._write_exception: Optional[Exception] = None + self._write_exception: Exception | None = None def registerProducer( - self, producer: Union[IPushProducer, IPullProducer], streaming: bool + self, producer: IPushProducer | IPullProducer, streaming: bool ) -> None: """Part of IConsumer interface diff --git a/synapse/util/gai_resolver.py b/synapse/util/gai_resolver.py index e07003f1af..f40de8dcc2 100644 --- a/synapse/util/gai_resolver.py +++ b/synapse/util/gai_resolver.py @@ -18,9 +18,7 @@ from typing import ( TYPE_CHECKING, Callable, NoReturn, - Optional, Sequence, - Union, ) from zope.interface import implementer @@ -94,7 +92,7 @@ _GETADDRINFO_RESULT = list[ SocketKind, int, str, - Union[tuple[str, int], tuple[str, int, int, int], tuple[int, bytes]], + tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], ] ] @@ -109,7 +107,7 @@ class GAIResolver: def __init__( self, reactor: IReactorThreads, - getThreadPool: Optional[Callable[[], "ThreadPool"]] = None, + getThreadPool: Callable[[], "ThreadPool"] | None = None, getaddrinfo: Callable[[str, int, int, int], _GETADDRINFO_RESULT] = getaddrinfo, ): """ @@ -138,7 +136,7 @@ class GAIResolver: resolutionReceiver: IResolutionReceiver, hostName: str, portNumber: int = 0, - addressTypes: Optional[Sequence[type[IAddress]]] = None, + addressTypes: Sequence[type[IAddress]] | None = None, transportSemantics: str = "TCP", ) -> IHostResolution: """ diff --git a/synapse/util/linked_list.py b/synapse/util/linked_list.py index 052863fdd6..c7a164d02e 100644 --- a/synapse/util/linked_list.py +++ b/synapse/util/linked_list.py @@ -22,7 +22,7 @@ """A circular doubly linked list implementation.""" import threading -from typing import Generic, Optional, TypeVar +from typing import Generic, TypeVar P = TypeVar("P") LN = TypeVar("LN", bound="ListNode") @@ -47,10 +47,10 @@ class ListNode(Generic[P]): "next_node", ] - def __init__(self, cache_entry: Optional[P] = None) -> None: + def __init__(self, cache_entry: P | None = None) -> None: self.cache_entry = cache_entry - self.prev_node: Optional[ListNode[P]] = None - self.next_node: Optional[ListNode[P]] = None + self.prev_node: ListNode[P] | None = None + self.next_node: 
ListNode[P] | None = None @classmethod def create_root_node(cls: type["ListNode[P]"]) -> "ListNode[P]": @@ -149,7 +149,7 @@ class ListNode(Generic[P]): prev_node.next_node = self next_node.prev_node = self - def get_cache_entry(self) -> Optional[P]: + def get_cache_entry(self) -> P | None: """Get the cache entry, returns None if this is the root node (i.e. cache_entry is None) or if the entry has been dropped. """ diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py index d683a57ab1..178b6fa377 100644 --- a/synapse/util/macaroons.py +++ b/synapse/util/macaroons.py @@ -22,7 +22,7 @@ """Utilities for manipulating macaroons""" -from typing import Callable, Literal, Optional +from typing import Callable, Literal import attr import pymacaroons @@ -52,7 +52,7 @@ def get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str: caveat in the macaroon, or if the caveat was not found in the macaroon. """ prefix = key + " = " - result: Optional[str] = None + result: str | None = None for caveat in macaroon.caveats: if not caveat.caveat_id.startswith(prefix): continue diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index dbf444e015..859e9a9072 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -21,7 +21,7 @@ import inspect import sys import traceback -from typing import Any, Optional +from typing import Any from twisted.conch import manhole_ssh from twisted.conch.insults import insults @@ -130,7 +130,7 @@ class SynapseManhole(ColoredManhole): class SynapseManholeInterpreter(ManholeInterpreter): - def showsyntaxerror(self, filename: Optional[str] = None) -> None: + def showsyntaxerror(self, filename: str | None = None) -> None: """Display the syntax error that just occurred. Overrides the base implementation, ignoring sys.excepthook. We always want diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 6d1adf1131..3daba79124 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -26,7 +26,6 @@ from typing import ( Awaitable, Callable, Generator, - Optional, Protocol, TypeVar, ) @@ -136,7 +135,7 @@ class HasClockAndServerName(Protocol): def measure_func( - name: Optional[str] = None, + name: str | None = None, ) -> Callable[[Callable[P, Awaitable[R]]], Callable[P, Awaitable[R]]]: """Decorate an async method with a `Measure` context manager. 
@@ -220,7 +219,7 @@ class Measure: server_name=self.server_name, parent_context=parent_context, ) - self.start: Optional[float] = None + self.start: float | None = None def __enter__(self) -> "Measure": if self.start is not None: @@ -236,9 +235,9 @@ class Measure: def __exit__( self, - exc_type: Optional[type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, ) -> None: if self.start is None: raise RuntimeError("Measure() block exited without being entered") diff --git a/synapse/util/pydantic_models.py b/synapse/util/pydantic_models.py index e1e2d8b99f..f1d35a35ec 100644 --- a/synapse/util/pydantic_models.py +++ b/synapse/util/pydantic_models.py @@ -13,7 +13,7 @@ # # -from typing import Annotated, Union +from typing import Annotated from pydantic import AfterValidator, BaseModel, ConfigDict, StrictStr, StringConstraints @@ -53,4 +53,4 @@ EventIdV1And2 = Annotated[StrictStr, AfterValidator(validate_event_id_v1_and_2)] EventIdV3Plus = Annotated[ StrictStr, StringConstraints(pattern=r"^\$([a-zA-Z0-9-_]{43}|[a-zA-Z0-9+/]{43})$") ] -AnyEventId = Union[EventIdV1And2, EventIdV3Plus] +AnyEventId = EventIdV1And2 | EventIdV3Plus diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 37d2e4505d..024706d9cf 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -31,7 +31,6 @@ from typing import ( Iterator, Mapping, MutableSet, - Optional, ) from weakref import WeakSet @@ -164,7 +163,7 @@ class FederationRateLimiter: our_server_name: str, clock: Clock, config: FederationRatelimitSettings, - metrics_name: Optional[str] = None, + metrics_name: str | None = None, ): """ Args: @@ -217,7 +216,7 @@ class _PerHostRatelimiter: our_server_name: str, clock: Clock, config: FederationRatelimitSettings, - metrics_name: Optional[str] = None, + metrics_name: str | None = None, ): """ Args: diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index ce747c3f19..8a5aab50f1 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -168,7 +168,7 @@ class RetryDestinationLimiter: hs: "HomeServer", clock: Clock, store: DataStore, - failure_ts: Optional[int], + failure_ts: int | None, retry_interval: int, backoff_on_404: bool = False, backoff_on_failure: bool = True, @@ -230,9 +230,9 @@ class RetryDestinationLimiter: def __exit__( self, - exc_type: Optional[type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, ) -> None: success = exc_type is None valid_err_code = False diff --git a/synapse/util/rust.py b/synapse/util/rust.py index 37f43459f1..63b53b917f 100644 --- a/synapse/util/rust.py +++ b/synapse/util/rust.py @@ -24,7 +24,6 @@ import os import urllib.parse from hashlib import blake2b from importlib.metadata import Distribution, PackageNotFoundError -from typing import Optional import synapse from synapse.synapse_rust import get_rust_file_digest @@ -80,7 +79,7 @@ def _hash_rust_files_in_directory(directory: str) -> str: return hasher.hexdigest() -def get_synapse_source_directory() -> Optional[str]: +def get_synapse_source_directory() -> str | None: """Try and find the source directory of synapse for editable installs (like those used in development). 
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 0dadafbc78..cc26c5181b 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -24,7 +24,7 @@ import random import re import secrets import string -from typing import Any, Iterable, Optional +from typing import Any, Iterable from netaddr import valid_ipv6 @@ -109,7 +109,7 @@ def assert_valid_client_secret(client_secret: str) -> None: ) -def parse_server_name(server_name: str) -> tuple[str, Optional[int]]: +def parse_server_name(server_name: str) -> tuple[str, int | None]: """Split a server name into host/port parts. Args: @@ -140,7 +140,7 @@ def parse_server_name(server_name: str) -> tuple[str, Optional[int]]: VALID_HOST_REGEX = re.compile("\\A[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*\\Z") -def parse_and_validate_server_name(server_name: str) -> tuple[str, Optional[int]]: +def parse_and_validate_server_name(server_name: str) -> tuple[str, int | None]: """Split a server name into host/port parts and do some basic validation. Args: @@ -207,7 +207,7 @@ def valid_id_server_location(id_server: str) -> bool: return "#" not in path and "?" not in path -def parse_and_validate_mxc_uri(mxc: str) -> tuple[str, Optional[int], str]: +def parse_and_validate_mxc_uri(mxc: str) -> tuple[str, int | None, str]: """Parse the given string as an MXC URI Checks that the "server name" part is a valid server name @@ -285,7 +285,7 @@ def base62_encode(num: int, minwidth: int = 1) -> str: return pad + res -def non_null_str_or_none(val: Any) -> Optional[str]: +def non_null_str_or_none(val: Any) -> str | None: """Check that the arg is a string containing no null (U+0000) codepoints. If so, returns the given string unmodified; otherwise, returns None. diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 22b3bf8c15..3b4423a1ff 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Optional +from typing import TYPE_CHECKING, Awaitable, Callable from twisted.python.failure import Failure @@ -116,7 +116,7 @@ class TaskScheduler: str, Callable[ [ScheduledTask], - Awaitable[tuple[TaskStatus, Optional[JsonMapping], Optional[str]]], + Awaitable[tuple[TaskStatus, JsonMapping | None, str | None]], ], ] = {} self._run_background_tasks = hs.config.worker.run_background_tasks @@ -143,7 +143,7 @@ class TaskScheduler: self, function: Callable[ [ScheduledTask], - Awaitable[tuple[TaskStatus, Optional[JsonMapping], Optional[str]]], + Awaitable[tuple[TaskStatus, JsonMapping | None, str | None]], ], action_name: str, ) -> None: @@ -167,9 +167,9 @@ class TaskScheduler: self, action: str, *, - resource_id: Optional[str] = None, - timestamp: Optional[int] = None, - params: Optional[JsonMapping] = None, + resource_id: str | None = None, + timestamp: int | None = None, + params: JsonMapping | None = None, ) -> str: """Schedule a new potentially resumable task. A function matching the specified `action` should've been registered with `register_action` before the task is run. @@ -220,10 +220,10 @@ class TaskScheduler: self, id: str, *, - timestamp: Optional[int] = None, - status: Optional[TaskStatus] = None, - result: Optional[JsonMapping] = None, - error: Optional[str] = None, + timestamp: int | None = None, + status: TaskStatus | None = None, + result: JsonMapping | None = None, + error: str | None = None, ) -> bool: """Update some task-associated values. 
This is exposed publicly so it can be used inside task functions, mainly to update the result or resume @@ -263,7 +263,7 @@ class TaskScheduler: error=error, ) - async def get_task(self, id: str) -> Optional[ScheduledTask]: + async def get_task(self, id: str) -> ScheduledTask | None: """Get a specific task description by id. Args: @@ -278,11 +278,11 @@ class TaskScheduler: async def get_tasks( self, *, - actions: Optional[list[str]] = None, - resource_id: Optional[str] = None, - statuses: Optional[list[TaskStatus]] = None, - max_timestamp: Optional[int] = None, - limit: Optional[int] = None, + actions: list[str] | None = None, + resource_id: str | None = None, + statuses: list[TaskStatus] | None = None, + max_timestamp: int | None = None, + limit: int | None = None, ) -> list[ScheduledTask]: """Get a list of tasks. Returns all the tasks if no args are provided. diff --git a/synapse/util/templates.py b/synapse/util/templates.py index fc5dbc069c..d399b167c1 100644 --- a/synapse/util/templates.py +++ b/synapse/util/templates.py @@ -23,7 +23,7 @@ import time import urllib.parse -from typing import TYPE_CHECKING, Callable, Optional, Sequence, Union +from typing import TYPE_CHECKING, Callable, Sequence import jinja2 @@ -34,7 +34,7 @@ if TYPE_CHECKING: def build_jinja_env( template_search_directories: Sequence[str], config: "HomeServerConfig", - autoescape: Union[bool, Callable[[Optional[str]], bool], None] = None, + autoescape: bool | Callable[[str | None], bool] | None = None, ) -> jinja2.Environment: """Set up a Jinja2 environment to load templates from the given search path @@ -82,7 +82,7 @@ def build_jinja_env( def _create_mxc_to_http_filter( - public_baseurl: Optional[str], + public_baseurl: str | None, ) -> Callable[[str, int, int, str], str]: """Create and return a jinja2 filter that converts MXC urls to HTTP diff --git a/synapse/visibility.py b/synapse/visibility.py index 41b6198af0..16b39e6200 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -24,7 +24,6 @@ from enum import Enum, auto from typing import ( Collection, Final, - Optional, Sequence, ) @@ -162,7 +161,7 @@ async def filter_events_for_client( room_id ] = await storage.main.get_retention_policy_for_room(room_id) - def allowed(event: EventBase) -> Optional[EventBase]: + def allowed(event: EventBase) -> EventBase | None: state_after_event = event_id_to_state.get(event.event_id) filtered = _check_client_allowed_to_see_event( user_id=user_id, @@ -185,7 +184,7 @@ async def filter_events_for_client( # we won't have such a state. The only outliers that are returned here are the # user's own membership event, so we can just inspect that. 
- user_membership_event: Optional[EventBase] + user_membership_event: EventBase | None if event.type == EventTypes.Member and event.state_key == user_id: user_membership_event = event elif state_after_event is not None: @@ -349,9 +348,9 @@ def _check_client_allowed_to_see_event( always_include_ids: frozenset[str], sender_ignored: bool, retention_policy: RetentionPolicy, - state: Optional[StateMap[EventBase]], + state: StateMap[EventBase] | None, sender_erased: bool, -) -> Optional[EventBase]: +) -> EventBase | None: """Check with the given user is allowed to see the given event See `filter_events_for_client` for details about args diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index db77484f4c..d89f487d3d 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -22,7 +22,6 @@ import logging import logging.config import warnings from io import StringIO -from typing import Optional from unittest.mock import Mock from pyperf import perf_counter @@ -58,7 +57,7 @@ class LineCounter(LineOnlyReceiver): class Factory(ServerFactory): protocol = LineCounter wait_for: int - on_done: Optional[Deferred] + on_done: Deferred | None async def main(reactor: ISynapseReactor, loops: int) -> float: diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index 34369a8746..0ef537841d 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -1,5 +1,3 @@ -from typing import Optional - from synapse.api.ratelimiting import LimitExceededError, Ratelimiter from synapse.appservice import ApplicationService from synapse.config.ratelimiting import RatelimitSettings @@ -489,7 +487,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): # and limiter name. async def get_ratelimit_override_for_user( user_id: str, limiter_name: str - ) -> Optional[RatelimitOverride]: + ) -> RatelimitOverride | None: if user_id == test_user_id: return RatelimitOverride( per_second=0.1, diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 1943292a8f..bf55f261bb 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Mapping, Optional, Sequence, Union +from typing import Any, Mapping, Sequence from unittest.mock import Mock from twisted.internet.testing import MemoryReactor @@ -80,7 +80,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): async def get_json( url: str, args: Mapping[Any, Any], - headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], + headers: Mapping[str | bytes, Sequence[str | bytes]], ) -> list[JsonDict]: # Ensure the access token is passed as a header. if not headers or not headers.get(b"Authorization"): @@ -154,9 +154,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): async def get_json( url: str, args: Mapping[Any, Any], - headers: Optional[ - Mapping[Union[str, bytes], Sequence[Union[str, bytes]]] - ] = None, + headers: Mapping[str | bytes, Sequence[str | bytes]] | None = None, ) -> list[JsonDict]: # Ensure the access token is passed as a both a query param and in the headers. 
if not args.get(b"access_token"): @@ -216,7 +214,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): async def post_json_get_json( uri: str, post_json: Any, - headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], + headers: Mapping[str | bytes, Sequence[str | bytes]], ) -> JsonDict: # Ensure the access token is passed as both a header and query arg. if not headers.get(b"Authorization"): diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index f17957c206..3caf006386 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional, Sequence +from typing import Sequence from unittest.mock import AsyncMock, Mock from typing_extensions import TypeAlias @@ -190,9 +190,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.HomeserverTestCase): # return one txn to send, then no more old txns txns = [txn, None] - def take_txn( - *args: object, **kwargs: object - ) -> "defer.Deferred[Optional[Mock]]": + def take_txn(*args: object, **kwargs: object) -> "defer.Deferred[Mock | None]": return defer.succeed(txns.pop(0)) self.store.get_oldest_unsent_txn = Mock(side_effect=take_txn) @@ -216,9 +214,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.HomeserverTestCase): txns = [txn, None] pop_txn = False - def take_txn( - *args: object, **kwargs: object - ) -> "defer.Deferred[Optional[Mock]]": + def take_txn(*args: object, **kwargs: object) -> "defer.Deferred[Mock | None]": if pop_txn: return defer.succeed(txns.pop(0)) else: @@ -254,9 +250,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.HomeserverTestCase): txns = [txn, None] pop_txn = False - def take_txn( - *args: object, **kwargs: object - ) -> "defer.Deferred[Optional[Mock]]": + def take_txn(*args: object, **kwargs: object) -> "defer.Deferred[Mock | None]": if pop_txn: return defer.succeed(txns.pop(0)) else: @@ -291,11 +285,11 @@ defer.Deferred[ tuple[ ApplicationService, Sequence[EventBase], - Optional[list[JsonDict]], - Optional[list[JsonDict]], - Optional[TransactionOneTimeKeysCount], - Optional[TransactionUnusedFallbackKeys], - Optional[DeviceListUpdates], + list[JsonDict] | None, + list[JsonDict] | None, + TransactionOneTimeKeysCount | None, + TransactionUnusedFallbackKeys | None, + DeviceListUpdates | None, ] ] """ diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py index 3a21975b89..55439a502c 100644 --- a/tests/config/test_workers.py +++ b/tests/config/test_workers.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Mapping, Optional +from typing import Any, Mapping from unittest.mock import Mock from immutabledict import immutabledict @@ -35,7 +35,7 @@ class WorkerDutyConfigTestCase(TestCase): def _make_worker_config( self, worker_app: str, - worker_name: Optional[str], + worker_name: str | None, extras: Mapping[str, Any] = _EMPTY_IMMUTABLEDICT, ) -> WorkerConfig: root_config = Mock() diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 2eaf77e9dc..d3e8da97f8 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -19,7 +19,7 @@ # # import time -from typing import Any, Optional, cast +from typing import Any, cast from unittest.mock import Mock import attr @@ -87,7 +87,7 @@ class FakeRequest: @logcontext_clean class KeyringTestCase(unittest.HomeserverTestCase): def 
check_context( - self, val: ContextRequest, expected: Optional[ContextRequest] + self, val: ContextRequest, expected: ContextRequest | None ) -> ContextRequest: self.assertEqual(getattr(current_context(), "request", None), expected) return val diff --git a/tests/events/test_auto_accept_invites.py b/tests/events/test_auto_accept_invites.py index 623ec67ed6..72ade45758 100644 --- a/tests/events/test_auto_accept_invites.py +++ b/tests/events/test_auto_accept_invites.py @@ -20,7 +20,7 @@ # import asyncio from http import HTTPStatus -from typing import Any, Optional, TypeVar, cast +from typing import Any, TypeVar, cast from unittest.mock import Mock import attr @@ -525,7 +525,7 @@ def generate_request_key() -> SyncRequestKey: def sync_join( testcase: HomeserverTestCase, user_id: str, - since_token: Optional[StreamToken] = None, + since_token: StreamToken | None = None, ) -> tuple[list[JoinedSyncResult], StreamToken]: """Perform a sync request for the given user and return the user join updates they've received, as well as the next_batch token. @@ -766,7 +766,7 @@ class MockEvent: type: str content: dict[str, Any] room_id: str = "!someroom" - state_key: Optional[str] = None + state_key: str | None = None def is_state(self) -> bool: """Checks if the event is a state event by checking if it has a state key.""" @@ -793,7 +793,7 @@ async def make_awaitable(value: T) -> T: def create_module( - config_override: Optional[dict[str, Any]] = None, worker_name: Optional[str] = None + config_override: dict[str, Any] | None = None, worker_name: str | None = None ) -> InviteAutoAccepter: # Create a mock based on the ModuleApi spec, but override some mocked functions # because some capabilities are needed for running the tests. diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index aa8d7454c0..4132050647 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Iterable, Optional, Union +from typing import Iterable from unittest.mock import AsyncMock, Mock import attr @@ -63,7 +63,7 @@ class LegacyPresenceRouterTestModule: } return users_to_state - async def get_interested_users(self, user_id: str) -> Union[set[str], str]: + async def get_interested_users(self, user_id: str) -> set[str] | str: if user_id in self._config.users_who_should_receive_all_presence: return PresenceRouter.ALL_USERS @@ -113,7 +113,7 @@ class PresenceRouterTestModule: } return users_to_state - async def get_interested_users(self, user_id: str) -> Union[set[str], str]: + async def get_interested_users(self, user_id: str) -> set[str] | str: if user_id in self._config.users_who_should_receive_all_presence: return PresenceRouter.ALL_USERS @@ -482,7 +482,7 @@ def send_presence_update( user_id: str, access_token: str, presence_state: str, - status_message: Optional[str] = None, + status_message: str | None = None, ) -> JsonDict: # Build the presence body body = {"presence": presence_state} @@ -510,7 +510,7 @@ def generate_request_key() -> SyncRequestKey: def sync_presence( testcase: HomeserverTestCase, user_id: str, - since_token: Optional[StreamToken] = None, + since_token: StreamToken | None = None, ) -> tuple[list[UserPresenceState], StreamToken]: """Perform a sync request for the given user and return the user presence updates they've received, as well as the next_batch token. 
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 9d41067844..9ea015e138 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -20,7 +20,7 @@ # import unittest as stdlib_unittest -from typing import Any, Mapping, Optional +from typing import Any, Mapping import attr from parameterized import parameterized @@ -648,7 +648,7 @@ class SerializeEventTestCase(stdlib_unittest.TestCase): def serialize( self, ev: EventBase, - fields: Optional[list[str]], + fields: list[str] | None, include_admin_metadata: bool = False, ) -> JsonDict: return serialize_event( diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 34b552b9ed..fd1ef043bb 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -1,4 +1,4 @@ -from typing import Callable, Collection, Optional +from typing import Callable, Collection from unittest import mock from unittest.mock import AsyncMock, Mock @@ -72,7 +72,7 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): return config async def record_transaction( - self, txn: Transaction, json_cb: Optional[Callable[[], JsonDict]] + self, txn: Transaction, json_cb: Callable[[], JsonDict] | None ) -> JsonDict: if json_cb is None: # The tests seem to expect that this method raises in this situation. diff --git a/tests/federation/test_federation_out_of_band_membership.py b/tests/federation/test_federation_out_of_band_membership.py index 905f9e6580..a1ab72b7a1 100644 --- a/tests/federation/test_federation_out_of_band_membership.py +++ b/tests/federation/test_federation_out_of_band_membership.py @@ -23,7 +23,7 @@ import logging import time import urllib.parse from http import HTTPStatus -from typing import Any, Callable, Optional, TypeVar, Union +from typing import Any, Callable, TypeVar from unittest.mock import Mock import attr @@ -146,7 +146,7 @@ class OutOfBandMembershipTests(unittest.FederatingHomeserverTestCase): self.storage_controllers = hs.get_storage_controllers() def do_sync( - self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str + self, sync_body: JsonDict, *, since: str | None = None, tok: str ) -> tuple[JsonDict, str]: """Do a sliding sync request with given body. 
@@ -326,13 +326,13 @@ class OutOfBandMembershipTests(unittest.FederatingHomeserverTestCase): async def get_json( destination: str, path: str, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser[T]] = None, - ) -> Union[JsonDict, T]: + parser: ByteParser[T] | None = None, + ) -> JsonDict | T: if ( path == f"/_matrix/federation/v1/make_join/{urllib.parse.quote_plus(remote_room_id)}/{urllib.parse.quote_plus(local_user1_id)}" @@ -355,17 +355,17 @@ class OutOfBandMembershipTests(unittest.FederatingHomeserverTestCase): async def put_json( destination: str, path: str, - args: Optional[QueryParams] = None, - data: Optional[JsonDict] = None, - json_data_callback: Optional[Callable[[], JsonDict]] = None, + args: QueryParams | None = None, + data: JsonDict | None = None, + json_data_callback: Callable[[], JsonDict] | None = None, long_retries: bool = False, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser[T]] = None, + parser: ByteParser[T] | None = None, backoff_on_all_error_codes: bool = False, - ) -> Union[JsonDict, T, SendJoinResponse]: + ) -> JsonDict | T | SendJoinResponse: if ( path.startswith( f"/_matrix/federation/v2/send_join/{urllib.parse.quote_plus(remote_room_id)}/" @@ -508,17 +508,17 @@ class OutOfBandMembershipTests(unittest.FederatingHomeserverTestCase): async def put_json( destination: str, path: str, - args: Optional[QueryParams] = None, - data: Optional[JsonDict] = None, - json_data_callback: Optional[Callable[[], JsonDict]] = None, + args: QueryParams | None = None, + data: JsonDict | None = None, + json_data_callback: Callable[[], JsonDict] | None = None, long_retries: bool = False, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser[T]] = None, + parser: ByteParser[T] | None = None, backoff_on_all_error_codes: bool = False, - ) -> Union[JsonDict, T]: + ) -> JsonDict | T: if path.startswith("/_matrix/federation/v1/send/") and data is not None: for pdu in data.get("pdus", []): event = event_from_pdu_json(pdu, room_version) diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 20b67e3a73..ced98a8b00 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -17,7 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Callable, Optional +from typing import Callable from unittest.mock import AsyncMock, Mock from signedjson import key, sign @@ -510,7 +510,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): ) async def record_transaction( - self, txn: Transaction, json_cb: Optional[Callable[[], JsonDict]] = None + self, txn: Transaction, json_cb: Callable[[], JsonDict] | None = None ) -> JsonDict: assert json_cb is not None data = json_cb() @@ -592,7 +592,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # expect two edus self.assertEqual(len(self.edus), 2) - stream_id: Optional[int] = None + stream_id: int | None = None stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", stream_id) stream_id = 
self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id) @@ -754,7 +754,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # for each device, there should be a single update self.assertEqual(len(self.edus), 3) - stream_id: Optional[int] = None + stream_id: int | None = None for edu in self.edus: self.assertEqual(edu["edu_type"], EduTypes.DEVICE_LIST_UPDATE) c = edu["content"] @@ -876,7 +876,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): edu: JsonDict, user_id: str, device_id: str, - prev_stream_id: Optional[int], + prev_stream_id: int | None, ) -> int: """Check that the given EDU is an update for the given device Returns the stream_id. diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index b1371d0ac7..0d74791290 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -20,7 +20,6 @@ # import logging from http import HTTPStatus -from typing import Optional, Union from unittest.mock import Mock from parameterized import parameterized @@ -192,12 +191,12 @@ class MessageAcceptTests(unittest.FederatingHomeserverTestCase): async def post_json( destination: str, path: str, - data: Optional[JsonDict] = None, + data: JsonDict | None = None, long_retries: bool = False, - timeout: Optional[int] = None, + timeout: int | None = None, ignore_backoff: bool = False, - args: Optional[QueryParams] = None, - ) -> Union[JsonDict, list]: + args: QueryParams | None = None, + ) -> JsonDict | list: # If it asks us for new missing events, give them NOTHING if path.startswith("/_matrix/federation/v1/get_missing_events/"): return {"events": []} diff --git a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py index f538b67e41..9a6bbabd35 100644 --- a/tests/federation/transport/test_client.py +++ b/tests/federation/transport/test_client.py @@ -20,7 +20,6 @@ # import json -from typing import Optional from unittest.mock import Mock import ijson.common @@ -98,7 +97,7 @@ class SendJoinParserTestCase(TestCase): def test_servers_in_room(self) -> None: """Check that the servers_in_room field is correctly parsed""" - def parse(response: JsonDict) -> Optional[list[str]]: + def parse(response: JsonDict) -> list[str] | None: parser = SendJoinParser(RoomVersions.V1, False) serialised_response = json.dumps(response).encode() diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index 9e92b06d91..ec705676cc 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -19,7 +19,7 @@ # # from collections import OrderedDict -from typing import Any, Optional +from typing import Any from twisted.internet.testing import MemoryReactor @@ -232,7 +232,7 @@ class FederationKnockingTestCase( # Have this homeserver skip event auth checks. This is necessary due to # event auth checks ensuring that events were signed by the sender's homeserver. 
async def _check_event_auth( - origin: Optional[str], event: EventBase, context: EventContext + origin: str | None, event: EventBase, context: EventContext ) -> None: pass diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 7d6bd35a9a..6336edb108 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -25,7 +25,6 @@ from typing import ( Awaitable, Callable, Iterable, - Optional, TypeVar, ) from unittest.mock import AsyncMock, Mock @@ -81,10 +80,10 @@ class AppServiceHandlerTestCase(unittest.TestCase): def test_run_as_background_process( desc: "LiteralString", - func: Callable[..., Awaitable[Optional[R]]], + func: Callable[..., Awaitable[R | None]], *args: Any, **kwargs: Any, - ) -> "defer.Deferred[Optional[R]]": + ) -> "defer.Deferred[R | None]": # Ignore linter error as this is used only for testing purposes (i.e. outside of Synapse). return run_as_background_process(desc, "test_server", func, *args, **kwargs) # type: ignore[untracked-background-process] @@ -293,7 +292,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): async def get_3pe_protocol( service: ApplicationService, protocol: str - ) -> Optional[JsonDict]: + ) -> JsonDict | None: if service == service_one: return { "x-protocol-data": 42, @@ -385,7 +384,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): ) def _mkservice( - self, is_interested_in_event: bool, protocols: Optional[Iterable] = None + self, is_interested_in_event: bool, protocols: Iterable | None = None ) -> Mock: """ Create a new mock representing an ApplicationService. @@ -1021,7 +1020,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): def _register_application_service( self, - namespaces: Optional[dict[str, Iterable[dict]]] = None, + namespaces: dict[str, Iterable[dict]] | None = None, ) -> ApplicationService: """ Register a new application service, with the given namespaces of interest. @@ -1316,8 +1315,8 @@ class ApplicationServicesHandlerOtkCountsTestCase(unittest.HomeserverTestCase): # Capture what was sent as an AS transaction. 
self.send_mock.assert_called() last_args, _last_kwargs = self.send_mock.call_args - otks: Optional[TransactionOneTimeKeysCount] = last_args[self.ARG_OTK_COUNTS] - unused_fallbacks: Optional[TransactionUnusedFallbackKeys] = last_args[ + otks: TransactionOneTimeKeysCount | None = last_args[self.ARG_OTK_COUNTS] + unused_fallbacks: TransactionUnusedFallbackKeys | None = last_args[ self.ARG_FALLBACK_KEYS ] diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index acefd707f5..648be7e7e7 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from unittest.mock import AsyncMock import pymacaroons @@ -55,7 +54,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.user1 = self.register_user("a_user", "pass") - def token_login(self, token: str) -> Optional[str]: + def token_login(self, token: str) -> str | None: body = { "type": "m.login.token", "token": token, diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 5b04da8640..acd37a1c71 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -20,7 +20,6 @@ # # -from typing import Optional from unittest import mock from twisted.internet.defer import ensureDeferred @@ -312,8 +311,8 @@ class DeviceTestCase(unittest.HomeserverTestCase): user_id: str, device_id: str, display_name: str, - access_token: Optional[str] = None, - ip: Optional[str] = None, + access_token: str | None = None, + ip: str | None = None, ) -> None: device_id = self.get_success( self.handler.check_device_registered( diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index c9ece68729..7085531548 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -19,7 +19,7 @@ # # import logging -from typing import Collection, Optional, cast +from typing import Collection, cast from unittest import TestCase from unittest.mock import AsyncMock, Mock, patch @@ -689,7 +689,7 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase): return is_partial_state async def sync_partial_state_room( - initial_destination: Optional[str], + initial_destination: str | None, other_destinations: Collection[str], room_id: str, ) -> None: @@ -744,7 +744,7 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase): return is_partial_state async def sync_partial_state_room( - initial_destination: Optional[str], + initial_destination: str | None, other_destinations: Collection[str], room_id: str, ) -> None: diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index 5771699a62..3d856b9346 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from unittest import mock from twisted.internet.testing import MemoryReactor @@ -183,7 +182,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): else: async def get_event( - destination: str, event_id: str, timeout: Optional[int] = None + destination: str, event_id: str, timeout: int | None = None ) -> JsonDict: self.assertEqual(destination, self.OTHER_SERVER_NAME) self.assertEqual(event_id, prev_event.event_id) @@ -585,7 +584,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): room_state_endpoint_requested_count = 0 async def get_event( - 
destination: str, event_id: str, timeout: Optional[int] = None + destination: str, event_id: str, timeout: int | None = None ) -> None: nonlocal event_endpoint_requested_count event_endpoint_requested_count += 1 @@ -1115,7 +1114,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): ): async def get_event( - destination: str, event_id: str, timeout: Optional[int] = None + destination: str, event_id: str, timeout: int | None = None ) -> JsonDict: self.assertEqual(destination, self.OTHER_SERVER_NAME) self.assertEqual(event_id, missing_event.event_id) diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 43004bfc69..c0a197874e 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -25,7 +25,7 @@ import time from http import HTTPStatus from http.server import BaseHTTPRequestHandler, HTTPServer from io import BytesIO -from typing import Any, ClassVar, Coroutine, Generator, Optional, TypeVar, Union +from typing import Any, ClassVar, Coroutine, Generator, TypeVar, Union from unittest.mock import ANY, AsyncMock, Mock from urllib.parse import parse_qs @@ -759,7 +759,7 @@ class FakeMasServer(HTTPServer): secret: str = "verysecret" """The shared secret used to authenticate the introspection endpoint.""" - last_token_seen: Optional[str] = None + last_token_seen: str | None = None """What is the last access token seen by the introspection endpoint.""" calls: int = 0 @@ -1110,7 +1110,7 @@ class DisabledEndpointsTestCase(HomeserverTestCase): return config def expect_unauthorized( - self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" + self, method: str, path: str, content: bytes | str | JsonDict = "" ) -> None: channel = self.make_request(method, path, content, shorthand=False) @@ -1120,7 +1120,7 @@ class DisabledEndpointsTestCase(HomeserverTestCase): self, method: str, path: str, - content: Union[bytes, str, JsonDict] = "", + content: bytes | str | JsonDict = "", auth: bool = False, ) -> None: channel = self.make_request( @@ -1133,7 +1133,7 @@ class DisabledEndpointsTestCase(HomeserverTestCase): ) def expect_forbidden( - self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" + self, method: str, path: str, content: bytes | str | JsonDict = "" ) -> None: channel = self.make_request(method, path, content) diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 3180969e7b..4583afb625 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -19,7 +19,7 @@ # # import os -from typing import Any, Awaitable, ContextManager, Optional +from typing import Any, Awaitable, ContextManager from unittest.mock import ANY, AsyncMock, Mock, patch from urllib.parse import parse_qs, urlparse @@ -221,7 +221,7 @@ class OidcHandlerTestCase(HomeserverTestCase): return _build_callback_request(code, state, session), grant def assertRenderedError( - self, error: str, error_description: Optional[str] = None + self, error: str, error_description: str | None = None ) -> tuple[Any, ...]: self.render_error.assert_called_once() args = self.render_error.call_args[0] diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index faa269bd35..573ba58c4f 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -22,7 +22,7 @@ """Tests for the password_auth_provider interface""" from http import HTTPStatus -from typing import Any, Optional, Union +from typing 
import Any from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -707,7 +707,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.called = False async def on_logged_out( - user_id: str, device_id: Optional[str], access_token: str + user_id: str, device_id: str | None, access_token: str ) -> None: self.called = True @@ -978,7 +978,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self, access_token: str, device: str, - body: Union[JsonDict, bytes] = b"", + body: JsonDict | bytes = b"", ) -> FakeChannel: """Delete an individual device.""" channel = self.make_request( diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index de1bc90c67..44f1e6432d 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -19,7 +19,7 @@ # # import itertools -from typing import Optional, cast +from typing import cast from unittest.mock import Mock, call from parameterized import parameterized @@ -1650,7 +1650,7 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): self.assertEqual(state.state, PresenceState.ONLINE) def _set_presencestate_with_status_msg( - self, state: str, status_msg: Optional[str] + self, state: str, status_msg: str | None ) -> None: """Set a PresenceState and status_msg and check the result. diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 20c2554e25..0db7f30b1f 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -19,7 +19,7 @@ # # -from typing import Any, Collection, Optional +from typing import Any, Collection from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -63,10 +63,10 @@ class TestSpamChecker: async def check_registration_for_spam( self, - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[tuple[str, str]], - auth_provider_id: Optional[str], + auth_provider_id: str | None, ) -> RegistrationBehaviour: return RegistrationBehaviour.ALLOW @@ -74,10 +74,10 @@ class TestSpamChecker: class DenyAll(TestSpamChecker): async def check_registration_for_spam( self, - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[tuple[str, str]], - auth_provider_id: Optional[str], + auth_provider_id: str | None, ) -> RegistrationBehaviour: return RegistrationBehaviour.DENY @@ -85,10 +85,10 @@ class DenyAll(TestSpamChecker): class BanAll(TestSpamChecker): async def check_registration_for_spam( self, - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[tuple[str, str]], - auth_provider_id: Optional[str], + auth_provider_id: str | None, ) -> RegistrationBehaviour: return RegistrationBehaviour.SHADOW_BAN @@ -96,10 +96,10 @@ class BanAll(TestSpamChecker): class BanBadIdPUser(TestSpamChecker): async def check_registration_for_spam( self, - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[tuple[str, str]], - auth_provider_id: Optional[str] = None, + auth_provider_id: str | None = None, ) -> RegistrationBehaviour: # Reject any user coming from CAS and whose username contains profanity if auth_provider_id == "cas" and username and "flimflob" in username: @@ -113,8 +113,8 @@ class TestLegacyRegistrationSpamChecker: 
async def check_registration_for_spam( self, - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[tuple[str, str]], ) -> RegistrationBehaviour: return RegistrationBehaviour.ALLOW @@ -123,8 +123,8 @@ class TestLegacyRegistrationSpamChecker: class LegacyAllowAll(TestLegacyRegistrationSpamChecker): async def check_registration_for_spam( self, - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[tuple[str, str]], ) -> RegistrationBehaviour: return RegistrationBehaviour.ALLOW @@ -133,8 +133,8 @@ class LegacyAllowAll(TestLegacyRegistrationSpamChecker): class LegacyDenyAll(TestLegacyRegistrationSpamChecker): async def check_registration_for_spam( self, - email_threepid: Optional[dict], - username: Optional[str], + email_threepid: dict | None, + username: str | None, request_info: Collection[tuple[str, str]], ) -> RegistrationBehaviour: return RegistrationBehaviour.DENY @@ -777,8 +777,8 @@ class RegistrationTestCase(unittest.HomeserverTestCase): self, requester: Requester, localpart: str, - displayname: Optional[str], - password_hash: Optional[str] = None, + displayname: str | None, + password_hash: str | None = None, ) -> tuple[str, str]: """Creates a new user if the user does not exist, else revokes all previous access tokens and generates a new one. diff --git a/tests/handlers/test_room_list.py b/tests/handlers/test_room_list.py index f6e9309f1f..e7c4436d1d 100644 --- a/tests/handlers/test_room_list.py +++ b/tests/handlers/test_room_list.py @@ -1,5 +1,4 @@ from http import HTTPStatus -from typing import Optional from synapse.rest import admin from synapse.rest.client import directory, login, room @@ -18,7 +17,7 @@ class RoomListHandlerTestCase(unittest.HomeserverTestCase): ] def _create_published_room( - self, tok: str, extra_content: Optional[JsonDict] = None + self, tok: str, extra_content: JsonDict | None = None ) -> str: room_id = self.helper.create_room_as(tok=tok, extra_content=extra_content) channel = self.make_request( diff --git a/tests/handlers/test_room_policy.py b/tests/handlers/test_room_policy.py index 00da1d942f..ff212ab06e 100644 --- a/tests/handlers/test_room_policy.py +++ b/tests/handlers/test_room_policy.py @@ -12,7 +12,6 @@ # . 
# # -from typing import Optional from unittest import mock import signedjson @@ -113,7 +112,7 @@ class RoomPolicyTestCase(unittest.FederatingHomeserverTestCase): async def get_policy_recommendation_for_pdu( destination: str, pdu: EventBase, - timeout: Optional[int] = None, + timeout: int | None = None, ) -> JsonDict: self.call_count += 1 self.assertEqual(destination, self.OTHER_SERVER_NAME) @@ -128,8 +127,8 @@ class RoomPolicyTestCase(unittest.FederatingHomeserverTestCase): # Mock policy server actions on signing events async def policy_server_signs_event( - destination: str, pdu: EventBase, timeout: Optional[int] = None - ) -> Optional[JsonDict]: + destination: str, pdu: EventBase, timeout: int | None = None + ) -> JsonDict | None: sigs = compute_event_signature( pdu.room_version, pdu.get_dict(), @@ -139,8 +138,8 @@ class RoomPolicyTestCase(unittest.FederatingHomeserverTestCase): return sigs async def policy_server_signs_event_with_wrong_key( - destination: str, pdu: EventBase, timeout: Optional[int] = None - ) -> Optional[JsonDict]: + destination: str, pdu: EventBase, timeout: int | None = None + ) -> JsonDict | None: sk = signedjson.key.generate_signing_key("policy_server") sigs = compute_event_signature( pdu.room_version, @@ -151,13 +150,13 @@ class RoomPolicyTestCase(unittest.FederatingHomeserverTestCase): return sigs async def policy_server_refuses_to_sign_event( - destination: str, pdu: EventBase, timeout: Optional[int] = None - ) -> Optional[JsonDict]: + destination: str, pdu: EventBase, timeout: int | None = None + ) -> JsonDict | None: return {} async def policy_server_event_sign_error( - destination: str, pdu: EventBase, timeout: Optional[int] = None - ) -> Optional[JsonDict]: + destination: str, pdu: EventBase, timeout: int | None = None + ) -> JsonDict | None: return None self.policy_server_signs_event = policy_server_signs_event @@ -167,7 +166,7 @@ class RoomPolicyTestCase(unittest.FederatingHomeserverTestCase): policy_server_signs_event_with_wrong_key ) - def _add_policy_server_to_room(self, public_key: Optional[str] = None) -> None: + def _add_policy_server_to_room(self, public_key: str | None = None) -> None: # Inject a member event into the room policy_user_id = f"@policy:{self.OTHER_SERVER_NAME}" self.get_success( @@ -442,7 +441,7 @@ class RoomPolicyTestCase(unittest.FederatingHomeserverTestCase): f"event did not include policy server signature, signature block = {ev.get('signatures', None)}", ) - def _fetch_federation_event(self, event_id: str) -> Optional[JsonDict]: + def _fetch_federation_event(self, event_id: str) -> JsonDict | None: # Request federation events to see the signatures channel = self.make_request( "POST", diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index 3c8c483921..ee65cb1afb 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Iterable, Optional +from typing import Any, Iterable from unittest import mock from twisted.internet.defer import ensureDeferred @@ -49,7 +49,7 @@ from tests.unittest import override_config def _create_event( - room_id: str, order: Optional[Any] = None, origin_server_ts: int = 0 + room_id: str, order: Any | None = None, origin_server_ts: int = 0 ) -> mock.Mock: result = mock.Mock(name=room_id) result.room_id = room_id @@ -151,8 +151,8 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): space_id: str, room_id: str, token: str, - 
order: Optional[str] = None, - via: Optional[list[str]] = None, + order: str | None = None, + via: list[str] | None = None, ) -> None: """Add a child room to a space.""" if via is None: @@ -393,7 +393,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): self._assert_hierarchy(result2, [(self.space, [self.room])]) def _create_room_with_join_rule( - self, join_rule: str, room_version: Optional[str] = None, **extra_content: Any + self, join_rule: str, room_version: str | None = None, **extra_content: Any ) -> str: """Create a room with the given join rule and add it to the space.""" room_id = self.helper.create_room_as( @@ -740,7 +740,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: + ) -> tuple[_RoomEntry | None, dict[str, JsonDict], set[str]]: return requested_room_entry, {subroom: child_room}, set() # Add a room to the space which is on another server. @@ -793,7 +793,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: + ) -> tuple[_RoomEntry | None, dict[str, JsonDict], set[str]]: return requested_room_entry, {fed_subroom: child_room}, set() expected = [ @@ -921,7 +921,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: + ) -> tuple[_RoomEntry | None, dict[str, JsonDict], set[str]]: return subspace_room_entry, dict(children_rooms), set() # Add a room to the space which is on another server. @@ -985,7 +985,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: + ) -> tuple[_RoomEntry | None, dict[str, JsonDict], set[str]]: return fed_room_entry, {}, set() # Add a room to the space which is on another server. 
@@ -1120,7 +1120,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: + ) -> tuple[_RoomEntry | None, dict[str, JsonDict], set[str]]: return requested_room_entry, {fed_subroom: child_room}, set() expected = [ @@ -1233,7 +1233,7 @@ class RoomSummaryTestCase(unittest.HomeserverTestCase): async def summarize_remote_room_hierarchy( _self: Any, room: Any, suggested_only: bool - ) -> tuple[Optional[_RoomEntry], dict[str, JsonDict], set[str]]: + ) -> tuple[_RoomEntry | None, dict[str, JsonDict], set[str]]: return requested_room_entry, {}, set() with mock.patch( diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 28159abbcb..c2aeab5f7e 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -19,7 +19,7 @@ # # -from typing import Any, Optional +from typing import Any from unittest.mock import AsyncMock, Mock import attr @@ -61,7 +61,7 @@ BASE_URL = "https://synapse/" class FakeAuthnResponse: ava = attr.ib(type=dict) assertions = attr.ib(type=list, factory=list) - in_response_to = attr.ib(type=Optional[str], default=None) + in_response_to = attr.ib(type=(str | None), default=None) class TestMappingProvider: diff --git a/tests/handlers/test_send_email.py b/tests/handlers/test_send_email.py index d033ed3a1c..eea88cd136 100644 --- a/tests/handlers/test_send_email.py +++ b/tests/handlers/test_send_email.py @@ -20,7 +20,7 @@ # -from typing import Callable, Union +from typing import Callable from unittest.mock import patch from zope.interface import implementer @@ -104,7 +104,7 @@ class _DummyMessage: class SendEmailHandlerTestCaseIPv4(HomeserverTestCase): - ip_class: Union[type[IPv4Address], type[IPv6Address]] = IPv4Address + ip_class: type[IPv4Address] | type[IPv6Address] = IPv4Address def setUp(self) -> None: super().setUp() diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index a35910e4dd..4582906441 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -18,7 +18,7 @@ # # import logging -from typing import AbstractSet, Mapping, Optional +from typing import AbstractSet, Mapping from unittest.mock import patch import attr @@ -62,7 +62,7 @@ class RoomSyncConfigTestCase(TestCase): self, actual: RoomSyncConfig, expected: RoomSyncConfig, - message_prefix: Optional[str] = None, + message_prefix: str | None = None, ) -> None: self.assertEqual(actual.timeline_limit, expected.timeline_limit, message_prefix) @@ -3277,7 +3277,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): self, user: UserID, to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: StreamToken | None, ) -> tuple[dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Get the rooms the user should be syncing with @@ -3614,7 +3614,7 @@ class SortRoomsTestCase(HomeserverTestCase): self, user: UserID, to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: StreamToken | None, ) -> tuple[dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Get the rooms the user should be syncing with @@ -3828,10 +3828,10 @@ class RequiredStateChangesTestParameters: request_required_state_map: dict[str, set[str]] state_deltas: StateMap[str] expected_with_state_deltas: tuple[ - Optional[Mapping[str, AbstractSet[str]]], StateFilter + Mapping[str, AbstractSet[str]] | None, StateFilter ] 
expected_without_state_deltas: tuple[ - Optional[Mapping[str, AbstractSet[str]]], StateFilter + Mapping[str, AbstractSet[str]] | None, StateFilter ] diff --git a/tests/handlers/test_sso.py b/tests/handlers/test_sso.py index 5ac088f601..95595b8ff9 100644 --- a/tests/handlers/test_sso.py +++ b/tests/handlers/test_sso.py @@ -18,7 +18,7 @@ # # from http import HTTPStatus -from typing import BinaryIO, Callable, Optional +from typing import BinaryIO, Callable from unittest.mock import Mock from twisted.internet.testing import MemoryReactor @@ -117,9 +117,9 @@ class TestSSOHandler(unittest.HomeserverTestCase): async def mock_get_file( url: str, output_stream: BinaryIO, - max_size: Optional[int] = None, - headers: Optional[RawHeaders] = None, - is_allowed_content_type: Optional[Callable[[str], bool]] = None, + max_size: int | None = None, + headers: RawHeaders | None = None, + is_allowed_content_type: Callable[[str], bool] | None = None, ) -> tuple[int, dict[bytes, list[bytes]], str, int]: fake_response = FakeResponse(code=404) if url == "http://my.server/me.png": diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index 94f5e472ca..0072327044 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -18,7 +18,7 @@ # # -from typing import Any, Optional, cast +from typing import Any, cast from twisted.internet.testing import MemoryReactor @@ -74,9 +74,9 @@ class StatsRoomTests(unittest.HomeserverTestCase): ) ) - async def get_all_room_state(self) -> list[Optional[str]]: + async def get_all_room_state(self) -> list[str | None]: rows = cast( - list[tuple[Optional[str]]], + list[tuple[str | None]], await self.store.db_pool.simple_select_list( "room_stats_state", None, retcols=("topic",) ), @@ -85,7 +85,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): def _get_current_stats( self, stats_type: str, stat_id: str - ) -> Optional[dict[str, Any]]: + ) -> dict[str, Any] | None: table, id_col = stats.TYPE_TO_TABLE[stats_type] cols = list(stats.ABSOLUTE_STATS_FIELDS[stats_type]) diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 140dd4a0ba..18ec2ca6b6 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -18,7 +18,7 @@ # # from http import HTTPStatus -from typing import Collection, ContextManager, Optional +from typing import Collection, ContextManager from unittest.mock import AsyncMock, Mock, patch from parameterized import parameterized, parameterized_class @@ -893,7 +893,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): federation_event_handler = self.hs.get_federation_event_handler() async def _check_event_auth( - origin: Optional[str], event: EventBase, context: EventContext + origin: str | None, event: EventBase, context: EventContext ) -> None: pass @@ -1117,8 +1117,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): def generate_sync_config( user_id: str, - device_id: Optional[str] = "device_id", - filter_collection: Optional[FilterCollection] = None, + device_id: str | None = "device_id", + filter_collection: FilterCollection | None = None, use_state_after: bool = False, ) -> SyncConfig: """Generate a sync config (with a unique request key). 
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 949564fcc7..49ecaa30ff 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -20,7 +20,7 @@ import base64 import logging import os -from typing import Generator, Optional, cast +from typing import Generator, cast from unittest.mock import AsyncMock, call, patch import treq @@ -85,7 +85,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.tls_factory = FederationPolicyForHTTPS(config) - self.well_known_cache: TTLCache[bytes, Optional[bytes]] = TTLCache( + self.well_known_cache: TTLCache[bytes, bytes | None] = TTLCache( cache_name="test_cache", server_name="test_server", timer=self.reactor.seconds, @@ -109,8 +109,8 @@ class MatrixFederationAgentTests(unittest.TestCase): self, client_factory: IProtocolFactory, ssl: bool = True, - expected_sni: Optional[bytes] = None, - tls_sanlist: Optional[list[bytes]] = None, + expected_sni: bytes | None = None, + tls_sanlist: list[bytes] | None = None, ) -> HTTPChannel: """Builds a test server, and completes the outgoing client connection Args: @@ -228,7 +228,7 @@ class MatrixFederationAgentTests(unittest.TestCase): client_factory: IProtocolFactory, expected_sni: bytes, content: bytes, - response_headers: Optional[dict] = None, + response_headers: dict | None = None, ) -> HTTPChannel: """Handle an outgoing HTTPs connection: wire it up to a server, check that the request is for a .well-known, and send the response. @@ -257,7 +257,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self, request: Request, content: bytes, - headers: Optional[dict] = None, + headers: dict | None = None, ) -> None: """Check that an incoming request looks like a valid .well-known request, and send back the response. @@ -397,7 +397,7 @@ class MatrixFederationAgentTests(unittest.TestCase): def _do_get_via_proxy( self, expect_proxy_ssl: bool = False, - expected_auth_credentials: Optional[bytes] = None, + expected_auth_credentials: bytes | None = None, ) -> None: """Send a https federation request via an agent and check that it is correctly received at the proxy and client. The proxy can use either http or https. diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index cc9b5fd6e1..afa69d1b7b 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -27,9 +27,7 @@ from typing import ( Callable, ContextManager, Generator, - Optional, TypeVar, - Union, ) from unittest import mock from unittest.mock import Mock @@ -65,8 +63,8 @@ def test_disconnect( reactor: MemoryReactorClock, channel: FakeChannel, expect_cancellation: bool, - expected_body: Union[bytes, JsonDict], - expected_code: Optional[int] = None, + expected_body: bytes | JsonDict, + expected_code: int | None = None, ) -> None: """Disconnects an in-flight request and checks the response. @@ -146,9 +144,9 @@ def make_request_with_cancellation_test( site: Site, method: str, path: str, - content: Union[bytes, str, JsonDict] = b"", + content: bytes | str | JsonDict = b"", *, - token: Optional[str] = None, + token: str | None = None, ) -> FakeChannel: """Performs a request repeatedly, disconnecting at successive `await`s, until one completes. @@ -361,7 +359,7 @@ class Deferred__await__Patch: # unresolved `Deferred` and return it out of `Deferred.__await__` / # `coroutine.send()`. 
We have to resolve it later, in case the `await`ing # coroutine is part of some shared processing, such as `@cached`. - self._to_unblock: dict[Deferred, Union[object, Failure]] = {} + self._to_unblock: dict[Deferred, object | Failure] = {} # The last stack we logged. self._previous_stack: list[inspect.FrameInfo] = [] diff --git a/tests/http/test_client.py b/tests/http/test_client.py index d9eaa78a39..5c8c1220e4 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -20,7 +20,6 @@ # from io import BytesIO -from typing import Union from unittest.mock import Mock from netaddr import IPSet @@ -58,7 +57,7 @@ class ReadMultipartResponseTests(TestCase): redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: https://cdn.example.org/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n" def _build_multipart_response( - self, response_length: Union[int, str], max_length: int + self, response_length: int | str, max_length: int ) -> tuple[ BytesIO, "Deferred[MultipartResponse]", @@ -208,7 +207,7 @@ class ReadMultipartResponseTests(TestCase): class ReadBodyWithMaxSizeTests(TestCase): def _build_response( - self, length: Union[int, str] = UNKNOWN_LENGTH + self, length: int | str = UNKNOWN_LENGTH ) -> tuple[ BytesIO, "Deferred[int]", diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index a9b4f3d956..c65115b3e5 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -21,7 +21,6 @@ import base64 import logging import os -from typing import Optional from unittest.mock import patch import treq @@ -195,7 +194,7 @@ class ProxyParserTests(TestCase): expected_scheme: bytes, expected_hostname: bytes, expected_port: int, - expected_credentials: Optional[bytes], + expected_credentials: bytes | None, ) -> None: """ Tests that a given proxy URL will be broken into the components. @@ -251,8 +250,8 @@ class ProxyAgentTests(TestCase): client_factory: IProtocolFactory, server_factory: IProtocolFactory, ssl: bool = False, - expected_sni: Optional[bytes] = None, - tls_sanlist: Optional[list[bytes]] = None, + expected_sni: bytes | None = None, + tls_sanlist: list[bytes] | None = None, ) -> IProtocol: """Builds a test server, and completes the outgoing client connection @@ -602,7 +601,7 @@ class ProxyAgentTests(TestCase): self, proxy_config: ProxyConfig, expect_proxy_ssl: bool = False, - expected_auth_credentials: Optional[bytes] = None, + expected_auth_credentials: bytes | None = None, ) -> None: """Send a http request via an agent and check that it is correctly received at the proxy. The proxy can use either http or https. @@ -682,7 +681,7 @@ class ProxyAgentTests(TestCase): self, proxy_config: ProxyConfig, expect_proxy_ssl: bool = False, - expected_auth_credentials: Optional[bytes] = None, + expected_auth_credentials: bytes | None = None, ) -> None: """Send a https request via an agent and check that it is correctly received at the proxy and client. The proxy can use either http or https. 
diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index 087191b220..5bf8305d05 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -21,7 +21,6 @@ import json from http import HTTPStatus from io import BytesIO -from typing import Union from unittest.mock import Mock from synapse.api.errors import Codes, SynapseError @@ -40,7 +39,7 @@ from tests import unittest from tests.http.server._base import test_disconnect -def make_request(content: Union[bytes, JsonDict]) -> Mock: +def make_request(content: bytes | JsonDict) -> Mock: """Make an object that acts enough like a request.""" request = Mock(spec=["method", "uri", "content"]) diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index 2f389f7f44..3aaa743265 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -19,7 +19,7 @@ # # -from typing import Awaitable, Optional, cast +from typing import Awaitable, cast from twisted.internet import defer from twisted.internet.testing import MemoryReactorClock @@ -329,7 +329,7 @@ class LogContextScopeManagerTestCase(TestCase): reactor, clock = get_clock() callback_finished = False - active_span_in_callback: Optional[jaeger_client.Span] = None + active_span_in_callback: jaeger_client.Span | None = None async def bg_task() -> None: nonlocal callback_finished, active_span_in_callback @@ -391,7 +391,7 @@ class LogContextScopeManagerTestCase(TestCase): reactor, clock = get_clock() callback_finished = False - active_span_in_callback: Optional[jaeger_client.Span] = None + active_span_in_callback: jaeger_client.Span | None = None async def bg_task() -> None: nonlocal callback_finished, active_span_in_callback @@ -461,7 +461,7 @@ class LogContextScopeManagerTestCase(TestCase): span.span_id: span.operation_name for span in self._reporter.get_spans() } - def get_span_friendly_name(span_id: Optional[int]) -> str: + def get_span_friendly_name(span_id: int | None) -> str: if span_id is None: return "None" diff --git a/tests/media/test_media_retention.py b/tests/media/test_media_retention.py index 6dba214514..f27a9ed685 100644 --- a/tests/media/test_media_retention.py +++ b/tests/media/test_media_retention.py @@ -20,7 +20,7 @@ # import io -from typing import Iterable, Optional +from typing import Iterable from matrix_common.types.mxc_uri import MXCUri @@ -63,9 +63,9 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase): media_repository = hs.get_media_repository() def _create_media_and_set_attributes( - last_accessed_ms: Optional[int], - is_quarantined: Optional[bool] = False, - is_protected: Optional[bool] = False, + last_accessed_ms: int | None, + is_quarantined: bool | None = False, + is_protected: bool | None = False, ) -> MXCUri: # "Upload" some media to the local media store # If the meda @@ -113,8 +113,8 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase): def _cache_remote_media_and_set_attributes( media_id: str, - last_accessed_ms: Optional[int], - is_quarantined: Optional[bool] = False, + last_accessed_ms: int | None, + is_quarantined: bool | None = False, ) -> MXCUri: # Pretend to cache some remote media self.get_success( diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index d584ea951c..e56354e0b3 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -23,7 +23,7 @@ import shutil import tempfile from binascii import unhexlify from io import BytesIO -from typing import Any, BinaryIO, ClassVar, Literal, Optional, Union 
+from typing import Any, BinaryIO, ClassVar, Literal from unittest.mock import MagicMock, Mock, patch from urllib import parse @@ -150,8 +150,8 @@ class TestImage: data: bytes content_type: bytes extension: bytes - expected_cropped: Optional[bytes] = None - expected_scaled: Optional[bytes] = None + expected_cropped: bytes | None = None + expected_scaled: bytes | None = None expected_found: bool = True unable_to_thumbnail: bool = False is_inline: bool = True @@ -302,7 +302,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): "Deferred[tuple[bytes, tuple[int, dict[bytes, list[bytes]]]]]", str, str, - Optional[QueryParams], + QueryParams | None, ] ] = [] @@ -313,7 +313,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): download_ratelimiter: Ratelimiter, ip_address: Any, max_size: int, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, ignore_backoff: bool = False, follow_redirects: bool = False, @@ -376,7 +376,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): return resources def _req( - self, content_disposition: Optional[bytes], include_content_type: bool = True + self, content_disposition: bytes | None, include_content_type: bool = True ) -> FakeChannel: channel = self.make_request( "GET", @@ -654,7 +654,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): def _test_thumbnail( self, method: str, - expected_body: Optional[bytes], + expected_body: bytes | None, expected_found: bool, unable_to_thumbnail: bool = False, ) -> None: @@ -868,7 +868,7 @@ class TestSpamCheckerLegacy: def parse_config(config: dict[str, Any]) -> dict[str, Any]: return config - async def check_event_for_spam(self, event: EventBase) -> Union[bool, str]: + async def check_event_for_spam(self, event: EventBase) -> bool | str: return False # allow all events async def user_may_invite( @@ -972,7 +972,7 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo - ) -> Union[Codes, Literal["NOT_SPAM"], tuple[Codes, JsonDict]]: + ) -> Codes | Literal["NOT_SPAM"] | tuple[Codes, JsonDict]: buf = BytesIO() await file_wrapper.write_chunks_to(buf.write) @@ -1259,7 +1259,7 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): def read_body( - response: IResponse, stream: ByteWriteable, max_size: Optional[int] + response: IResponse, stream: ByteWriteable, max_size: int | None ) -> Deferred: d: Deferred = defer.Deferred() stream.write(SMALL_PNG) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index b768a913d7..12c8942bc8 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Optional +from typing import Any from unittest.mock import AsyncMock, Mock from twisted.internet import defer @@ -803,10 +803,10 @@ class ModuleApiTestCase(BaseModuleApiTestCase): ) # Setup a callback counting the number of pushers. 
- number_of_pushers_in_callback: Optional[int] = None + number_of_pushers_in_callback: int | None = None async def _on_logged_out_mock( - user_id: str, device_id: Optional[str], access_token: str + user_id: str, device_id: str | None, access_token: str ) -> None: nonlocal number_of_pushers_in_callback number_of_pushers_in_callback = len( diff --git a/tests/module_api/test_spamchecker.py b/tests/module_api/test_spamchecker.py index d461d6cea2..42ef969ce0 100644 --- a/tests/module_api/test_spamchecker.py +++ b/tests/module_api/test_spamchecker.py @@ -12,7 +12,7 @@ # . # # -from typing import Literal, Union +from typing import Literal from twisted.internet.testing import MemoryReactor @@ -59,7 +59,7 @@ class SpamCheckerTestCase(HomeserverTestCase): async def user_may_create_room( user_id: str, room_config: JsonDict - ) -> Union[Literal["NOT_SPAM"], Codes]: + ) -> Literal["NOT_SPAM"] | Codes: self.last_room_config = room_config self.last_user_id = user_id return "NOT_SPAM" @@ -82,7 +82,7 @@ class SpamCheckerTestCase(HomeserverTestCase): async def user_may_create_room( user_id: str, room_config: JsonDict - ) -> Union[Literal["NOT_SPAM"], Codes]: + ) -> Literal["NOT_SPAM"] | Codes: self.last_room_config = room_config self.last_user_id = user_id return "NOT_SPAM" @@ -117,7 +117,7 @@ class SpamCheckerTestCase(HomeserverTestCase): async def user_may_create_room( user_id: str, room_config: JsonDict - ) -> Union[Literal["NOT_SPAM"], Codes]: + ) -> Literal["NOT_SPAM"] | Codes: self.last_room_config = room_config self.last_user_id = user_id return "NOT_SPAM" @@ -156,7 +156,7 @@ class SpamCheckerTestCase(HomeserverTestCase): async def user_may_create_room( user_id: str, room_config: JsonDict - ) -> Union[Literal["NOT_SPAM"], Codes]: + ) -> Literal["NOT_SPAM"] | Codes: self.last_room_config = room_config self.last_user_id = user_id return Codes.UNAUTHORIZED @@ -181,7 +181,7 @@ class SpamCheckerTestCase(HomeserverTestCase): async def user_may_create_room( user_id: str, - ) -> Union[Literal["NOT_SPAM"], Codes]: + ) -> Literal["NOT_SPAM"] | Codes: self.last_user_id = user_id return "NOT_SPAM" @@ -205,7 +205,7 @@ class SpamCheckerTestCase(HomeserverTestCase): event_type: str, state_key: str, content: JsonDict, - ) -> Union[Literal["NOT_SPAM"], Codes]: + ) -> Literal["NOT_SPAM"] | Codes: self.last_user_id = user_id self.last_room_id = room_id self.last_event_type = event_type @@ -255,7 +255,7 @@ class SpamCheckerTestCase(HomeserverTestCase): event_type: str, state_key: str, content: JsonDict, - ) -> Union[Literal["NOT_SPAM"], Codes]: + ) -> Literal["NOT_SPAM"] | Codes: return Codes.FORBIDDEN self._module_api.register_spam_checker_callbacks( diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 560d7234ec..137bbe24b2 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -20,7 +20,7 @@ # from http import HTTPStatus -from typing import Any, Optional +from typing import Any from unittest.mock import AsyncMock, patch from parameterized import parameterized @@ -210,7 +210,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): def _create_and_process( self, bulk_evaluator: BulkPushRuleEvaluator, - content: Optional[JsonDict] = None, + content: JsonDict | None = None, type: str = "test", ) -> bool: """Returns true iff the `mentions` trigger an event push action.""" diff --git a/tests/push/test_presentable_names.py b/tests/push/test_presentable_names.py index 4982a80cce..2558f2c0b2 100644 --- 
a/tests/push/test_presentable_names.py +++ b/tests/push/test_presentable_names.py @@ -19,7 +19,7 @@ # # -from typing import Iterable, Optional, cast +from typing import Iterable, cast from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions @@ -59,7 +59,7 @@ class MockDataStore: async def get_event( self, event_id: str, allow_none: bool = False - ) -> Optional[FrozenEvent]: + ) -> FrozenEvent | None: assert allow_none, "Mock not configured for allow_none = False" # Decode the state key from the event ID. @@ -81,7 +81,7 @@ class PresentableNamesTestCase(unittest.HomeserverTestCase): user_id: str = "", fallback_to_members: bool = True, fallback_to_single_member: bool = True, - ) -> Optional[str]: + ) -> str | None: # Encode the state key into the event ID. room_state_ids = {k[0]: "|".join(k[0]) for k in events} diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index b1f7ba6973..a786d74bf1 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -19,7 +19,7 @@ # # -from typing import Any, Optional, Union, cast +from typing import Any, cast from twisted.internet.testing import MemoryReactor @@ -148,7 +148,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): self, content: JsonMapping, *, - related_events: Optional[JsonDict] = None, + related_events: JsonDict | None = None, msc4210: bool = False, msc4306: bool = False, ) -> PushRuleEvaluator: @@ -165,7 +165,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): ) room_member_count = 0 sender_power_level = 0 - power_levels: dict[str, Union[int, dict[str, int]]] = {} + power_levels: dict[str, int | dict[str, int]] = {} return PushRuleEvaluator( _flatten_dict(event), False, @@ -205,13 +205,13 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) def _assert_matches( - self, condition: JsonDict, content: JsonMapping, msg: Optional[str] = None + self, condition: JsonDict, content: JsonMapping, msg: str | None = None ) -> None: evaluator = self._get_evaluator(content) self.assertTrue(evaluator.matches(condition, "@user:test", "display_name"), msg) def _assert_not_matches( - self, condition: JsonDict, content: JsonDict, msg: Optional[str] = None + self, condition: JsonDict, content: JsonDict, msg: str | None = None ) -> None: evaluator = self._get_evaluator(content) self.assertFalse( @@ -588,7 +588,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): This tests the behaviour of tweaks_for_actions. 
""" - actions: list[Union[dict[str, str], str]] = [ + actions: list[dict[str, str] | str] = [ {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}, "notify", diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 84bdc84ce9..b23696668f 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -19,7 +19,7 @@ # import logging from collections import defaultdict -from typing import Any, Optional +from typing import Any from twisted.internet.address import IPv4Address from twisted.internet.protocol import Protocol, connectionDone @@ -105,8 +105,8 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): repl_handler, ) - self._client_transport: Optional[FakeTransport] = None - self._server_transport: Optional[FakeTransport] = None + self._client_transport: FakeTransport | None = None + self._server_transport: FakeTransport | None = None def create_resource_dict(self) -> dict[str, Resource]: d = super().create_resource_dict() @@ -325,7 +325,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): return resource def make_worker_hs( - self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any + self, worker_app: str, extra_config: dict | None = None, **kwargs: Any ) -> HomeServer: """Make a new worker HS instance, correctly connecting replication stream to the master HS. @@ -534,7 +534,7 @@ class FakeRedisPubSubServer: class FakeRedisPubSubProtocol(Protocol): """A connection from a client talking to the fake Redis server.""" - transport: Optional[FakeTransport] = None + transport: FakeTransport | None = None def __init__(self, server: FakeRedisPubSubServer): self._server = server diff --git a/tests/replication/storage/_base.py b/tests/replication/storage/_base.py index fb99cb2335..7b757e9e9e 100644 --- a/tests/replication/storage/_base.py +++ b/tests/replication/storage/_base.py @@ -19,7 +19,7 @@ # # -from typing import Any, Callable, Iterable, Optional +from typing import Any, Callable, Iterable from unittest.mock import Mock from twisted.internet.testing import MemoryReactor @@ -56,8 +56,8 @@ class BaseWorkerStoreTestCase(BaseStreamTestCase): self, method: str, args: Iterable[Any], - expected_result: Optional[Any] = None, - asserter: Optional[Callable[[Any, Any, Optional[Any]], None]] = None, + expected_result: Any | None = None, + asserter: Callable[[Any, Any, Any | None], None] | None = None, ) -> None: if asserter is None: asserter = self.assertEqual diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py index 1398689c2d..28bfb8b8ea 100644 --- a/tests/replication/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -19,7 +19,7 @@ # # import logging -from typing import Any, Iterable, Optional +from typing import Any, Iterable from canonicaljson import encode_canonical_json from parameterized import parameterized @@ -66,7 +66,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): ) def assertEventsEqual( - self, first: EventBase, second: EventBase, msg: Optional[Any] = None + self, first: EventBase, second: EventBase, msg: Any | None = None ) -> None: self.assertEqual( encode_canonical_json(first.get_pdu_json()), @@ -241,13 +241,13 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): sender: str = USER_ID, room_id: str = ROOM_ID, type: str = "m.room.message", - key: Optional[str] = None, - internal: Optional[dict] = None, - depth: Optional[int] = None, - prev_events: Optional[list[tuple[str, dict]]] = None, - auth_events: 
Optional[list[str]] = None, - prev_state: Optional[list[str]] = None, - redacts: Optional[str] = None, + key: str | None = None, + internal: dict | None = None, + depth: int | None = None, + prev_events: list[tuple[str, dict]] | None = None, + auth_events: list[str] | None = None, + prev_state: list[str] | None = None, + redacts: str | None = None, push_actions: Iterable = frozenset(), **content: object, ) -> tuple[EventBase, EventContext]: diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 9607c03224..484fd6b6db 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -18,7 +18,7 @@ # # -from typing import Any, Optional +from typing import Any from parameterized import parameterized @@ -517,7 +517,7 @@ class EventsStreamTestCase(BaseStreamTestCase): event_count = 0 def _inject_test_event( - self, body: Optional[str] = None, sender: Optional[str] = None, **kwargs: Any + self, body: str | None = None, sender: str | None = None, **kwargs: Any ) -> EventBase: if sender is None: sender = self.user_id @@ -539,9 +539,9 @@ class EventsStreamTestCase(BaseStreamTestCase): def _inject_state_event( self, - body: Optional[str] = None, - state_key: Optional[str] = None, - sender: Optional[str] = None, + body: str | None = None, + state_key: str | None = None, + sender: str | None = None, ) -> EventBase: if sender is None: sender = self.user_id diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index 193c6c0198..8dbb989850 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -20,7 +20,7 @@ # import logging import os -from typing import Any, Optional +from typing import Any from twisted.internet.protocol import Factory from twisted.internet.testing import MemoryReactor @@ -44,7 +44,7 @@ from tests.unittest import override_config logger = logging.getLogger(__name__) -test_server_connection_factory: Optional[TestServerTLSConnectionFactory] = None +test_server_connection_factory: TestServerTLSConnectionFactory | None = None class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): @@ -67,7 +67,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): return conf def make_worker_hs( - self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any + self, worker_app: str, extra_config: dict | None = None, **kwargs: Any ) -> HomeServer: worker_hs = super().make_worker_hs(worker_app, extra_config, **kwargs) # Force the media paths onto the replication resource. @@ -282,7 +282,7 @@ class AuthenticatedMediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): return conf def make_worker_hs( - self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any + self, worker_app: str, extra_config: dict | None = None, **kwargs: Any ) -> HomeServer: worker_hs = super().make_worker_hs(worker_app, extra_config, **kwargs) # Force the media paths onto the replication resource. 
diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index 5586bb47e1..561566de76 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from parameterized import parameterized @@ -273,8 +272,8 @@ class FederationTestCase(unittest.HomeserverTestCase): def _order_test( expected_destination_list: list[str], - order_by: Optional[str], - dir: Optional[str] = None, + order_by: str | None, + dir: str | None = None, ) -> None: """Request the list of destinations in a certain order. Assert that order is what we expect @@ -366,7 +365,7 @@ class FederationTestCase(unittest.HomeserverTestCase): """Test that searching for a destination works correctly""" def _search_test( - expected_destination: Optional[str], + expected_destination: str | None, search_term: str, ) -> None: """Search for a destination and check that the returned destinationis a match @@ -484,10 +483,10 @@ class FederationTestCase(unittest.HomeserverTestCase): def _create_destination( self, destination: str, - failure_ts: Optional[int] = None, + failure_ts: int | None = None, retry_last_ts: int = 0, retry_interval: int = 0, - last_successful_stream_ordering: Optional[int] = None, + last_successful_stream_ordering: int | None = None, ) -> None: """Create one specific destination @@ -819,7 +818,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): def _create_destination_rooms( self, number_rooms: int, - destination: Optional[str] = None, + destination: str | None = None, ) -> list[str]: """ Create the given number of rooms. The given `destination` homeserver will diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py index 9afe86b724..447e1098e5 100644 --- a/tests/rest/admin/test_registration_tokens.py +++ b/tests/rest/admin/test_registration_tokens.py @@ -20,7 +20,6 @@ # import random import string -from typing import Optional from twisted.internet.testing import MemoryReactor @@ -51,11 +50,11 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): def _new_token( self, - token: Optional[str] = None, - uses_allowed: Optional[int] = None, + token: str | None = None, + uses_allowed: int | None = None, pending: int = 0, completed: int = 0, - expiry_time: Optional[int] = None, + expiry_time: int | None = None, ) -> str: """Helper function to create a token.""" if token is None: diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 40b34f4433..7daf13ad22 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -22,7 +22,6 @@ import json import time import urllib.parse from http import HTTPStatus -from typing import Optional from unittest.mock import AsyncMock, Mock from parameterized import parameterized @@ -2074,7 +2073,7 @@ class RoomTestCase(unittest.HomeserverTestCase): self._set_canonical_alias(room_id_1, "#Room_Alias1:test", self.admin_user_tok) def _search_test( - expected_room_id: Optional[str], + expected_room_id: str | None, search_term: str, expected_http_code: int = 200, ) -> None: diff --git a/tests/rest/admin/test_scheduled_tasks.py b/tests/rest/admin/test_scheduled_tasks.py index 264c62e2de..fb275f6d55 100644 --- a/tests/rest/admin/test_scheduled_tasks.py +++ b/tests/rest/admin/test_scheduled_tasks.py @@ -13,7 +13,7 @@ # # # -from typing import Mapping, Optional +from typing import Mapping from 
twisted.internet.testing import MemoryReactor @@ -42,17 +42,17 @@ class ScheduledTasksAdminApiTestCase(unittest.HomeserverTestCase): # create and schedule a few tasks async def _test_task( task: ScheduledTask, - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: return TaskStatus.ACTIVE, None, None async def _finished_test_task( task: ScheduledTask, - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: return TaskStatus.COMPLETE, None, None async def _failed_test_task( task: ScheduledTask, - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: return TaskStatus.FAILED, None, "Everything failed" self._task_scheduler.register_action(_test_task, "test_task") diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index a18952983e..3dc7e5dc97 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -19,7 +19,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from twisted.internet.testing import MemoryReactor from twisted.web.resource import Resource @@ -497,7 +496,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): self.assertIn("media_length", c) def _order_test( - self, order_type: str, expected_user_list: list[str], dir: Optional[str] = None + self, order_type: str, expected_user_list: list[str], dir: str | None = None ) -> None: """Request the list of users in a certain order. Assert that order is what we expect diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 040b21d471..6d0584fa63 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -27,7 +27,6 @@ import time import urllib.parse from binascii import unhexlify from http import HTTPStatus -from typing import Optional from unittest.mock import AsyncMock, Mock, patch from parameterized import parameterized, parameterized_class @@ -643,10 +642,10 @@ class UsersListTestCase(unittest.HomeserverTestCase): """Test that searching for a users works correctly""" def _search_test( - expected_user_id: Optional[str], + expected_user_id: str | None, search_term: str, - search_field: Optional[str] = "name", - expected_http_code: Optional[int] = 200, + search_field: str | None = "name", + expected_http_code: int | None = 200, ) -> None: """Search for a user and check that the returned user's id is a match @@ -1185,7 +1184,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): ) def test_user_type( - expected_user_ids: list[str], not_user_types: Optional[list[str]] = None + expected_user_ids: list[str], not_user_types: list[str] | None = None ) -> None: """Runs a test for the not_user_types param Args: @@ -1262,7 +1261,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): ) def test_user_type( - expected_user_ids: list[str], not_user_types: Optional[list[str]] = None + expected_user_ids: list[str], not_user_types: list[str] | None = None ) -> None: """Runs a test for the not_user_types param Args: @@ -1374,8 +1373,8 @@ class UsersListTestCase(unittest.HomeserverTestCase): def _order_test( self, expected_user_list: list[str], - order_by: Optional[str], - dir: Optional[str] = None, + order_by: str | None, + dir: str | None = None, ) -> None: """Request the list of users in a certain order. 
Assert that order is what we expect @@ -3116,7 +3115,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual("@user:test", channel.json_body["name"]) self.assertTrue(channel.json_body["admin"]) - def set_user_type(self, user_type: Optional[str]) -> None: + def set_user_type(self, user_type: str | None) -> None: # Set to user_type channel = self.make_request( "PUT", @@ -4213,8 +4212,8 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): def _order_test( self, expected_media_list: list[str], - order_by: Optional[str], - dir: Optional[str] = None, + order_by: str | None, + dir: str | None = None, ) -> None: """Request the list of media in a certain order. Assert that order is what we expect diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py index b2c1d7ac0a..c3091ce412 100644 --- a/tests/rest/admin/test_username_available.py +++ b/tests/rest/admin/test_username_available.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from twisted.internet.testing import MemoryReactor @@ -44,8 +43,8 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase): async def check_username( localpart: str, - guest_access_token: Optional[str] = None, - assigned_user_id: Optional[str] = None, + guest_access_token: str | None = None, + assigned_user_id: str | None = None, inhibit_user_in_use_error: bool = False, ) -> None: if localpart == "allowed": diff --git a/tests/rest/client/sliding_sync/test_extension_thread_subscriptions.py b/tests/rest/client/sliding_sync/test_extension_thread_subscriptions.py index de76334f64..aa251bd78b 100644 --- a/tests/rest/client/sliding_sync/test_extension_thread_subscriptions.py +++ b/tests/rest/client/sliding_sync/test_extension_thread_subscriptions.py @@ -13,7 +13,7 @@ # import logging from http import HTTPStatus -from typing import Optional, cast +from typing import cast from twisted.test.proto_helpers import MemoryReactor @@ -455,7 +455,7 @@ class SlidingSyncThreadSubscriptionsExtensionTestCase(SlidingSyncBase): def _do_backpaginate( self, *, from_tok: str, to_tok: str, limit: int, access_token: str - ) -> tuple[JsonDict, Optional[str]]: + ) -> tuple[JsonDict, str | None]: channel = self.make_request( "GET", "/_matrix/client/unstable/io.element.msc4308/thread_subscriptions" @@ -465,7 +465,7 @@ class SlidingSyncThreadSubscriptionsExtensionTestCase(SlidingSyncBase): self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) body = channel.json_body - return body, cast(Optional[str], body.get("end")) + return body, cast(str | None, body.get("end")) def _subscribe_to_thread( self, user_id: str, room_id: str, thread_root_id: str diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py index 04a9cd5382..bc23776326 100644 --- a/tests/rest/client/sliding_sync/test_rooms_timeline.py +++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py @@ -12,7 +12,6 @@ # . # import logging -from typing import Optional from parameterized import parameterized_class @@ -66,7 +65,7 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): self, actual_items: StrSequence, expected_items: StrSequence, - message: Optional[str] = None, + message: str | None = None, ) -> None: """ Like `self.assertListEqual(...)` but with an actually understandable diff message. 
@@ -103,7 +102,7 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): room_id: str, actual_event_ids: list[str], expected_event_ids: list[str], - message: Optional[str] = None, + message: str | None = None, ) -> None: """ Like `self.assertListEqual(...)` for event IDs in a room but will give a nicer diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index 9f4c6bad05..c27a712088 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -12,7 +12,7 @@ # . # import logging -from typing import Any, Iterable, Literal, Optional +from typing import Any, Iterable, Literal from unittest.mock import AsyncMock from parameterized import parameterized, parameterized_class @@ -81,7 +81,7 @@ class SlidingSyncBase(unittest.HomeserverTestCase): return config def do_sync( - self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str + self, sync_body: JsonDict, *, since: str | None = None, tok: str ) -> tuple[JsonDict, str]: """Do a sliding sync request with given body. @@ -239,8 +239,8 @@ class SlidingSyncBase(unittest.HomeserverTestCase): def _create_remote_invite_room_for_user( self, invitee_user_id: str, - unsigned_invite_room_state: Optional[list[StrippedStateEvent]], - invite_room_id: Optional[str] = None, + unsigned_invite_room_state: list[StrippedStateEvent] | None, + invite_room_id: str | None = None, ) -> str: """ Create a fake invite for a remote room and persist it. diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 03474d7400..ffa96c7840 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -23,7 +23,7 @@ import os import re from email.parser import Parser from http import HTTPStatus -from typing import Any, Optional, Union +from typing import Any from unittest.mock import Mock from twisted.internet.interfaces import IReactorTCP @@ -363,7 +363,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): email: str, client_secret: str, ip: str = "127.0.0.1", - next_link: Optional[str] = None, + next_link: str | None = None, ) -> str: body = {"client_secret": client_secret, "email": email, "send_attempt": 1} if next_link is not None: @@ -384,7 +384,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): return channel.json_body["sid"] - def _validate_token(self, link: str, next_link: Optional[str] = None) -> None: + def _validate_token(self, link: str, next_link: str | None = None) -> None: # Remove the host path = link.replace("https://example.com", "") @@ -1152,9 +1152,9 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): self, email: str, client_secret: str, - next_link: Optional[str] = None, + next_link: str | None = None, expect_code: int = HTTPStatus.OK, - ) -> Optional[str]: + ) -> str | None: """Request a validation token to add an email address to a user's account Args: @@ -1394,10 +1394,10 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): async def post_json( destination: str, path: str, - data: Optional[JsonDict] = None, + data: JsonDict | None = None, *a: Any, **kwa: Any, - ) -> Union[JsonDict, list]: + ) -> JsonDict | list: if destination == "remote": return { "account_statuses": { @@ -1503,11 +1503,11 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): def _test_status( self, - users: Optional[list[str]], + users: list[str] | None, expected_status_code: int = HTTPStatus.OK, - expected_statuses: Optional[dict[str, 
dict[str, bool]]] = None, - expected_failures: Optional[list[str]] = None, - expected_errcode: Optional[str] = None, + expected_statuses: dict[str, dict[str, bool]] | None = None, + expected_failures: list[str] | None = None, + expected_errcode: str | None = None, ) -> None: """Send a request to the account status endpoint and check that the response matches with what's expected. diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index 5955d4b7a2..ffaf0e5a32 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -20,7 +20,7 @@ # import re from http import HTTPStatus -from typing import Any, Optional, Union +from typing import Any from twisted.internet.defer import succeed from twisted.internet.testing import MemoryReactor @@ -90,7 +90,7 @@ class FallbackAuthTests(unittest.HomeserverTestCase): self, session: str, expected_post_response: int, - post_session: Optional[str] = None, + post_session: str | None = None, ) -> None: """Get and respond to a fallback recaptcha. Returns the second request.""" if post_session is None: @@ -220,7 +220,7 @@ class UIAuthTests(unittest.HomeserverTestCase): access_token: str, device: str, expected_response: int, - body: Union[bytes, JsonDict] = b"", + body: bytes | JsonDict = b"", ) -> FakeChannel: """Delete an individual device.""" channel = self.make_request( diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 1ebd59b42a..d599351df7 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -26,8 +26,6 @@ from typing import ( Callable, Collection, Literal, - Optional, - Union, ) from unittest.mock import Mock from urllib.parse import urlencode @@ -141,14 +139,11 @@ class TestSpamChecker: async def check_login_for_spam( self, user_id: str, - device_id: Optional[str], - initial_display_name: Optional[str], - request_info: Collection[tuple[Optional[str], str]], - auth_provider_id: Optional[str] = None, - ) -> Union[ - Literal["NOT_SPAM"], - tuple["synapse.module_api.errors.Codes", JsonDict], - ]: + device_id: str | None, + initial_display_name: str | None, + request_info: Collection[tuple[str | None, str]], + auth_provider_id: str | None = None, + ) -> Literal["NOT_SPAM"] | tuple["synapse.module_api.errors.Codes", JsonDict]: return "NOT_SPAM" @@ -165,14 +160,11 @@ class DenyAllSpamChecker: async def check_login_for_spam( self, user_id: str, - device_id: Optional[str], - initial_display_name: Optional[str], - request_info: Collection[tuple[Optional[str], str]], - auth_provider_id: Optional[str] = None, - ) -> Union[ - Literal["NOT_SPAM"], - tuple["synapse.module_api.errors.Codes", JsonDict], - ]: + device_id: str | None, + initial_display_name: str | None, + request_info: Collection[tuple[str | None, str]], + auth_provider_id: str | None = None, + ) -> Literal["NOT_SPAM"] | tuple["synapse.module_api.errors.Codes", JsonDict]: # Return an odd set of values to ensure that they get correctly passed # to the client. return Codes.LIMIT_EXCEEDED, {"extra": "value"} @@ -984,7 +976,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): # it should redirect us to the auth page of the OIDC server self.assertEqual(oidc_uri_path, fake_oidc_server.authorization_endpoint) - def _make_sso_redirect_request(self, idp_prov: Optional[str] = None) -> FakeChannel: + def _make_sso_redirect_request(self, idp_prov: str | None = None) -> FakeChannel: """Send a request to /_matrix/client/r0/login/sso/redirect ... 
possibly specifying an IDP provider @@ -1888,8 +1880,8 @@ class UsernamePickerTestCase(HomeserverTestCase): async def mock_get_file( url: str, output_stream: BinaryIO, - max_size: Optional[int] = None, - headers: Optional[RawHeaders] = None, - is_allowed_content_type: Optional[Callable[[str], bool]] = None, + max_size: int | None = None, + headers: RawHeaders | None = None, + is_allowed_content_type: Callable[[str], bool] | None = None, ) -> tuple[int, dict[bytes, list[bytes]], str, int]: return 0, {b"Content-Type": [b"image/png"]}, "", 200 diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py index 79f70db8a3..33172f930e 100644 --- a/tests/rest/client/test_media.py +++ b/tests/rest/client/test_media.py @@ -24,7 +24,7 @@ import json import os import re import shutil -from typing import Any, BinaryIO, ClassVar, Optional, Sequence +from typing import Any, BinaryIO, ClassVar, Sequence from unittest.mock import MagicMock, Mock, patch from urllib import parse from urllib.parse import quote, urlencode @@ -273,7 +273,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): resolutionReceiver: IResolutionReceiver, hostName: str, portNumber: int = 0, - addressTypes: Optional[Sequence[type[IAddress]]] = None, + addressTypes: Sequence[type[IAddress]] | None = None, transportSemantics: str = "TCP", ) -> IResolutionReceiver: resolution = HostResolution(hostName) @@ -1661,7 +1661,7 @@ class MediaConfigModuleCallbackTestCase(unittest.HomeserverTestCase): async def get_media_config_for_user( self, user_id: str, - ) -> Optional[JsonDict]: + ) -> JsonDict | None: # We echo back the user_id and set a custom upload size. return {"m.upload.size": 1024, "user_id": user_id} @@ -1999,7 +1999,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): "Deferred[Any]", str, str, - Optional[QueryParams], + QueryParams | None, ] ] = [] @@ -2010,7 +2010,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): download_ratelimiter: Ratelimiter, ip_address: Any, max_size: int, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, ignore_backoff: bool = False, follow_redirects: bool = False, @@ -2044,7 +2044,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): download_ratelimiter: Ratelimiter, ip_address: Any, max_size: int, - args: Optional[QueryParams] = None, + args: QueryParams | None = None, retry_on_dns_fail: bool = True, ignore_backoff: bool = False, follow_redirects: bool = False, @@ -2107,7 +2107,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): self.tok = self.login("user", "pass") def _req( - self, content_disposition: Optional[bytes], include_content_type: bool = True + self, content_disposition: bytes | None, include_content_type: bool = True ) -> FakeChannel: channel = self.make_request( "GET", @@ -2418,7 +2418,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): def _test_thumbnail( self, method: str, - expected_body: Optional[bytes], + expected_body: bytes | None, expected_found: bool, unable_to_thumbnail: bool = False, ) -> None: @@ -3012,7 +3012,7 @@ class MediaUploadLimitsModuleOverrides(unittest.HomeserverTestCase): async def _get_media_upload_limits_for_user( self, user_id: str, - ) -> Optional[list[MediaUploadLimit]]: + ) -> list[MediaUploadLimit] | None: # user1 has custom limits if user_id == self.user1: # n.b. 
we return these in increasing duration order and Synapse will need to sort them correctly @@ -3037,7 +3037,7 @@ class MediaUploadLimitsModuleOverrides(unittest.HomeserverTestCase): sent_bytes: int, attempted_bytes: int, ) -> None: - self.last_media_upload_limit_exceeded: Optional[dict[str, object]] = { + self.last_media_upload_limit_exceeded: dict[str, object] | None = { "user_id": user_id, "limit": limit, "sent_bytes": sent_bytes, diff --git a/tests/rest/client/test_notifications.py b/tests/rest/client/test_notifications.py index 7e2a63955c..17f8da3e8b 100644 --- a/tests/rest/client/test_notifications.py +++ b/tests/rest/client/test_notifications.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -155,7 +154,7 @@ class HTTPPusherTests(HomeserverTestCase): self.assertEqual(notification_event_ids, sent_event_ids[2:]) def _request_notifications( - self, from_token: Optional[str], limit: int, expected_count: int + self, from_token: str | None, limit: int, expected_count: int ) -> tuple[list[str], str]: """ Make a request to /notifications to get the latest events to be notified about. diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index aa9b72c65e..023a376ed1 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -24,7 +24,7 @@ import logging import urllib.parse from http import HTTPStatus -from typing import Any, Optional +from typing import Any from canonicaljson import encode_canonical_json @@ -177,7 +177,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 400, channel.result) - def _get_displayname(self, name: Optional[str] = None) -> Optional[str]: + def _get_displayname(self, name: str | None = None) -> str | None: channel = self.make_request( "GET", "/profile/%s/displayname" % (name or self.owner,) ) @@ -187,7 +187,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): # https://github.com/matrix-org/synapse/issues/13137. 
return channel.json_body.get("displayname") - def _get_avatar_url(self, name: Optional[str] = None) -> Optional[str]: + def _get_avatar_url(self, name: str | None = None) -> str | None: channel = self.make_request( "GET", "/profile/%s/avatar_url" % (name or self.owner,) ) @@ -846,7 +846,7 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): self.try_fetch_profile(200, self.requester_tok) def try_fetch_profile( - self, expected_code: int, access_token: Optional[str] = None + self, expected_code: int, access_token: str | None = None ) -> None: self.request_profile(expected_code, access_token=access_token) @@ -862,7 +862,7 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): self, expected_code: int, url_suffix: str = "", - access_token: Optional[str] = None, + access_token: str | None = None, ) -> None: channel = self.make_request( "GET", self.profile_url + url_suffix, access_token=access_token diff --git a/tests/rest/client/test_receipts.py b/tests/rest/client/test_receipts.py index 0c1b631b8e..3a6a869c54 100644 --- a/tests/rest/client/test_receipts.py +++ b/tests/rest/client/test_receipts.py @@ -19,7 +19,6 @@ # # from http import HTTPStatus -from typing import Optional from twisted.internet.testing import MemoryReactor @@ -259,7 +258,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST) self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON", channel.json_body) - def _get_read_receipt(self) -> Optional[JsonDict]: + def _get_read_receipt(self) -> JsonDict | None: """Syncs and returns the read receipt.""" # Checks if event is a read receipt diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index 88be8748ee..997ca5f9ca 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from parameterized import parameterized @@ -85,8 +84,8 @@ class RedactionsTestCase(HomeserverTestCase): room_id: str, event_id: str, expect_code: int = 200, - with_relations: Optional[list[str]] = None, - content: Optional[JsonDict] = None, + with_relations: list[str] | None = None, + content: JsonDict | None = None, ) -> JsonDict: """Helper function to send a redaction event. 
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 3912a3c772..2d8ba77a77 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -20,7 +20,7 @@ # import urllib.parse -from typing import Any, Callable, Optional +from typing import Any, Callable from unittest.mock import AsyncMock, patch from twisted.internet.testing import MemoryReactor @@ -79,10 +79,10 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): self, relation_type: str, event_type: str, - key: Optional[str] = None, - content: Optional[dict] = None, - access_token: Optional[str] = None, - parent_id: Optional[str] = None, + key: str | None = None, + content: dict | None = None, + access_token: str | None = None, + parent_id: str | None = None, expected_response_code: int = 200, ) -> FakeChannel: """Helper function to send a relation pointing at `self.parent_id` @@ -845,7 +845,7 @@ class RelationPaginationTestCase(BaseRelationsTestCase): ) expected_event_ids.append(channel.json_body["event_id"]) - prev_token: Optional[str] = "" + prev_token: str | None = "" found_event_ids: list[str] = [] for _ in range(20): from_token = "" @@ -1085,7 +1085,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): relation_type: str, assertion_callable: Callable[[JsonDict], None], expected_db_txn_for_event: int, - access_token: Optional[str] = None, + access_token: str | None = None, ) -> None: """ Makes requests to various endpoints which should include bundled aggregations diff --git a/tests/rest/client/test_reporting.py b/tests/rest/client/test_reporting.py index 0fd02f65a6..96697b96d5 100644 --- a/tests/rest/client/test_reporting.py +++ b/tests/rest/client/test_reporting.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from twisted.internet.testing import MemoryReactor @@ -311,7 +310,7 @@ class ReportUserTestCase(unittest.HomeserverTestCase): self.assertEqual(len(rows), 0) def _assert_status( - self, response_status: int, data: JsonDict, user_id: Optional[str] = None + self, response_status: int, data: JsonDict, user_id: str | None = None ) -> None: if user_id is None: user_id = self.target_user_id diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 4142aed363..68e09afc54 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -25,7 +25,7 @@ import json from http import HTTPStatus -from typing import Any, Iterable, Literal, Optional, Union +from typing import Any, Iterable, Literal from unittest.mock import AsyncMock, Mock, call, patch from urllib import parse as urlparse @@ -74,7 +74,7 @@ PATH_PREFIX = b"/_matrix/client/api/v1" class RoomBase(unittest.HomeserverTestCase): - rmcreator_id: Optional[str] = None + rmcreator_id: str | None = None servlets = [room.register_servlets, room.register_deprecated_servlets] @@ -959,7 +959,7 @@ class RoomsCreateTestCase(RoomBase): """Tests that the user_may_join_room spam checker callback is correctly bypassed when creating a new room. - In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`. + In this test, we use the more recent API in which callbacks return a `Codes | Literal["NOT_SPAM"]`. """ async def user_may_join_room_codes( @@ -1351,7 +1351,7 @@ class RoomJoinTestCase(RoomBase): """ # Register a dummy callback. Make it allow all room joins for now. 
- return_value: Union[Literal["NOT_SPAM"], tuple[Codes, dict], Codes] = ( + return_value: Literal["NOT_SPAM"] | tuple[Codes, dict] | Codes = ( synapse.module_api.NOT_SPAM ) @@ -1359,7 +1359,7 @@ class RoomJoinTestCase(RoomBase): userid: str, room_id: str, is_invited: bool, - ) -> Union[Literal["NOT_SPAM"], tuple[Codes, dict], Codes]: + ) -> Literal["NOT_SPAM"] | tuple[Codes, dict] | Codes: return return_value # `spec` argument is needed for this function mock to have `__qualname__`, which @@ -1848,20 +1848,20 @@ class RoomMessagesTestCase(RoomBase): def test_spam_checker_check_event_for_spam( self, name: str, - value: Union[str, bool, Codes, tuple[Codes, JsonDict]], + value: str | bool | Codes | tuple[Codes, JsonDict], expected_code: int, expected_fields: dict, ) -> None: class SpamCheck: - mock_return_value: Union[str, bool, Codes, tuple[Codes, JsonDict], bool] = ( + mock_return_value: str | bool | Codes | tuple[Codes, JsonDict] | bool = ( "NOT_SPAM" ) - mock_content: Optional[JsonDict] = None + mock_content: JsonDict | None = None async def check_event_for_spam( self, event: synapse.events.EventBase, - ) -> Union[str, Codes, tuple[Codes, JsonDict], bool]: + ) -> str | Codes | tuple[Codes, JsonDict] | bool: self.mock_content = event.content return self.mock_return_value @@ -2707,8 +2707,8 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): def make_public_rooms_request( self, - room_types: Optional[list[Union[str, None]]], - instance_id: Optional[str] = None, + room_types: list[str | None] | None, + instance_id: str | None = None, ) -> tuple[list[dict[str, Any]], int]: body: JsonDict = {"filter": {PublicRoomsFilterFields.ROOM_TYPES: room_types}} if instance_id: @@ -3968,7 +3968,7 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): """ Test allowing/blocking threepid invites with a spam-check module. - In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`. + In this test, we use the more recent API in which callbacks return a `Codes | Literal["NOT_SPAM"]`. """ # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. 
We keep the mock for make_and_store_3pid_invite around so we @@ -4532,7 +4532,7 @@ class MSC4293RedactOnBanKickTestCase(unittest.FederatingHomeserverTestCase): original_events: list[EventBase], pulled_events: list[JsonDict], expect_redaction: bool, - reason: Optional[str] = None, + reason: str | None = None, ) -> None: """ Checks a set of original events against a second set of the same events, pulled diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 78fa8f4e1c..0d319dff7e 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -19,7 +19,7 @@ # # import threading -from typing import TYPE_CHECKING, Any, Optional, Union +from typing import TYPE_CHECKING, Any from unittest.mock import AsyncMock, Mock from twisted.internet.testing import MemoryReactor @@ -61,7 +61,7 @@ class LegacyThirdPartyRulesTestModule: async def check_event_allowed( self, event: EventBase, state: StateMap[EventBase] - ) -> Union[bool, dict]: + ) -> bool | dict: return True @staticmethod @@ -150,7 +150,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # types async def check( ev: EventBase, state: StateMap[EventBase] - ) -> tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, JsonDict | None]: return ev.type != "foo.bar.forbidden", None callback = Mock(spec=[], side_effect=check) @@ -195,7 +195,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ class NastyHackException(SynapseError): - def error_dict(self, config: Optional[HomeServerConfig]) -> JsonDict: + def error_dict(self, config: HomeServerConfig | None) -> JsonDict: """ This overrides SynapseError's `error_dict` to nastily inject JSON into the error response. @@ -207,7 +207,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # add a callback that will raise our hacky exception async def check( ev: EventBase, state: StateMap[EventBase] - ) -> tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, JsonDict | None]: raise NastyHackException(429, "message") self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ @@ -235,7 +235,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # first patch the event checker so that it will try to modify the event async def check( ev: EventBase, state: StateMap[EventBase] - ) -> tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, JsonDict | None]: ev.content = {"x": "y"} return True, None @@ -260,7 +260,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # first patch the event checker so that it will modify the event async def check( ev: EventBase, state: StateMap[EventBase] - ) -> tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, JsonDict | None]: d = ev.get_dict() d["content"] = {"x": "y"} return True, d @@ -295,7 +295,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # first patch the event checker so that it will modify the event async def check( ev: EventBase, state: StateMap[EventBase] - ) -> tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, JsonDict | None]: d = ev.get_dict() d["content"] = { "msgtype": "m.text", @@ -443,7 +443,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Define a callback that sends a custom event on power levels update. 
async def test_fn( event: EventBase, state_events: StateMap[EventBase] - ) -> tuple[bool, Optional[JsonDict]]: + ) -> tuple[bool, JsonDict | None]: if event.is_state() and event.type == EventTypes.PowerLevels: await api.create_and_send_event_into_room( { diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py index da114e505d..ee26492909 100644 --- a/tests/rest/client/test_upgrade_room.py +++ b/tests/rest/client/test_upgrade_room.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from unittest.mock import patch from twisted.internet.testing import MemoryReactor @@ -56,8 +55,8 @@ class UpgradeRoomTest(unittest.HomeserverTestCase): def _upgrade_room( self, - token: Optional[str] = None, - room_id: Optional[str] = None, + token: str | None = None, + room_id: str | None = None, expire_cache: bool = True, ) -> FakeChannel: if expire_cache: diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index d5c824b291..613c317b8a 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -34,7 +34,6 @@ from typing import ( Literal, Mapping, MutableMapping, - Optional, Sequence, overload, ) @@ -75,42 +74,42 @@ class RestHelper: hs: HomeServer reactor: MemoryReactorClock site: Site - auth_user_id: Optional[str] + auth_user_id: str | None @overload def create_room_as( self, - room_creator: Optional[str] = ..., - is_public: Optional[bool] = ..., - room_version: Optional[str] = ..., - tok: Optional[str] = ..., + room_creator: str | None = ..., + is_public: bool | None = ..., + room_version: str | None = ..., + tok: str | None = ..., expect_code: Literal[200] = ..., - extra_content: Optional[dict] = ..., - custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = ..., + extra_content: dict | None = ..., + custom_headers: Iterable[tuple[AnyStr, AnyStr]] | None = ..., ) -> str: ... @overload def create_room_as( self, - room_creator: Optional[str] = ..., - is_public: Optional[bool] = ..., - room_version: Optional[str] = ..., - tok: Optional[str] = ..., + room_creator: str | None = ..., + is_public: bool | None = ..., + room_version: str | None = ..., + tok: str | None = ..., expect_code: int = ..., - extra_content: Optional[dict] = ..., - custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = ..., - ) -> Optional[str]: ... + extra_content: dict | None = ..., + custom_headers: Iterable[tuple[AnyStr, AnyStr]] | None = ..., + ) -> str | None: ... def create_room_as( self, - room_creator: Optional[str] = None, - is_public: Optional[bool] = True, - room_version: Optional[str] = None, - tok: Optional[str] = None, + room_creator: str | None = None, + is_public: bool | None = True, + room_version: str | None = None, + tok: str | None = None, expect_code: int = HTTPStatus.OK, - extra_content: Optional[dict] = None, - custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = None, - ) -> Optional[str]: + extra_content: dict | None = None, + custom_headers: Iterable[tuple[AnyStr, AnyStr]] | None = None, + ) -> str | None: """ Create a room. 
@@ -166,11 +165,11 @@ class RestHelper: def invite( self, room: str, - src: Optional[str] = None, - targ: Optional[str] = None, + src: str | None = None, + targ: str | None = None, expect_code: int = HTTPStatus.OK, - tok: Optional[str] = None, - extra_data: Optional[dict] = None, + tok: str | None = None, + extra_data: dict | None = None, ) -> JsonDict: return self.change_membership( room=room, @@ -187,10 +186,10 @@ class RestHelper: room: str, user: str, expect_code: int = HTTPStatus.OK, - tok: Optional[str] = None, - appservice_user_id: Optional[str] = None, - expect_errcode: Optional[Codes] = None, - expect_additional_fields: Optional[dict] = None, + tok: str | None = None, + appservice_user_id: str | None = None, + expect_errcode: Codes | None = None, + expect_additional_fields: dict | None = None, ) -> JsonDict: return self.change_membership( room=room, @@ -206,11 +205,11 @@ class RestHelper: def knock( self, - room: Optional[str] = None, - user: Optional[str] = None, - reason: Optional[str] = None, + room: str | None = None, + user: str | None = None, + reason: str | None = None, expect_code: int = HTTPStatus.OK, - tok: Optional[str] = None, + tok: str | None = None, ) -> None: temp_id = self.auth_user_id self.auth_user_id = user @@ -241,9 +240,9 @@ class RestHelper: def leave( self, room: str, - user: Optional[str] = None, + user: str | None = None, expect_code: int = HTTPStatus.OK, - tok: Optional[str] = None, + tok: str | None = None, ) -> JsonDict: return self.change_membership( room=room, @@ -260,7 +259,7 @@ class RestHelper: src: str, targ: str, expect_code: int = HTTPStatus.OK, - tok: Optional[str] = None, + tok: str | None = None, ) -> JsonDict: """A convenience helper: `change_membership` with `membership` preset to "ban".""" return self.change_membership( @@ -275,15 +274,15 @@ class RestHelper: def change_membership( self, room: str, - src: Optional[str], - targ: Optional[str], + src: str | None, + targ: str | None, membership: str, - extra_data: Optional[dict] = None, - tok: Optional[str] = None, - appservice_user_id: Optional[str] = None, + extra_data: dict | None = None, + tok: str | None = None, + appservice_user_id: str | None = None, expect_code: int = HTTPStatus.OK, - expect_errcode: Optional[str] = None, - expect_additional_fields: Optional[dict] = None, + expect_errcode: str | None = None, + expect_additional_fields: dict | None = None, ) -> JsonDict: """ Send a membership state event into a room. @@ -372,11 +371,11 @@ class RestHelper: def send( self, room_id: str, - body: Optional[str] = None, - txn_id: Optional[str] = None, - tok: Optional[str] = None, + body: str | None = None, + txn_id: str | None = None, + tok: str | None = None, expect_code: int = HTTPStatus.OK, - custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = None, + custom_headers: Iterable[tuple[AnyStr, AnyStr]] | None = None, type: str = "m.room.message", ) -> JsonDict: if body is None: @@ -402,7 +401,7 @@ class RestHelper: "msgtype": "m.text", "body": f"Test event {idx}", }, - tok: Optional[str] = None, + tok: str | None = None, ) -> Sequence[str]: """ Helper to send a handful of sequential events and return their event IDs as a sequence. 
@@ -424,11 +423,11 @@ class RestHelper: self, room_id: str, type: str, - content: Optional[dict] = None, - txn_id: Optional[str] = None, - tok: Optional[str] = None, + content: dict | None = None, + txn_id: str | None = None, + tok: str | None = None, expect_code: int = HTTPStatus.OK, - custom_headers: Optional[Iterable[tuple[AnyStr, AnyStr]]] = None, + custom_headers: Iterable[tuple[AnyStr, AnyStr]] | None = None, ) -> JsonDict: if txn_id is None: txn_id = "m%s" % (str(time.time())) @@ -458,7 +457,7 @@ class RestHelper: self, room_id: str, event_id: str, - tok: Optional[str] = None, + tok: str | None = None, expect_code: int = HTTPStatus.OK, ) -> JsonDict: """Request a specific event from the server. @@ -495,8 +494,8 @@ class RestHelper: self, room_id: str, event_type: str, - body: Optional[dict[str, Any]], - tok: Optional[str], + body: dict[str, Any] | None, + tok: str | None, expect_code: int = HTTPStatus.OK, state_key: str = "", method: str = "GET", @@ -574,7 +573,7 @@ class RestHelper: room_id: str, event_type: str, body: dict[str, Any], - tok: Optional[str] = None, + tok: str | None = None, expect_code: int = HTTPStatus.OK, state_key: str = "", ) -> JsonDict: @@ -680,7 +679,7 @@ class RestHelper: fake_server: FakeOidcServer, remote_user_id: str, with_sid: bool = False, - idp_id: Optional[str] = None, + idp_id: str | None = None, expected_status: int = 200, ) -> tuple[JsonDict, FakeAuthorizationGrant]: """Log in (as a new user) via OIDC @@ -751,10 +750,10 @@ class RestHelper: self, fake_server: FakeOidcServer, user_info_dict: JsonDict, - client_redirect_url: Optional[str] = None, - ui_auth_session_id: Optional[str] = None, + client_redirect_url: str | None = None, + ui_auth_session_id: str | None = None, with_sid: bool = False, - idp_id: Optional[str] = None, + idp_id: str | None = None, ) -> tuple[FakeChannel, FakeAuthorizationGrant]: """Perform an OIDC authentication flow via a mock OIDC provider. 
@@ -878,9 +877,9 @@ class RestHelper: def initiate_sso_login( self, - client_redirect_url: Optional[str], + client_redirect_url: str | None, cookies: MutableMapping[str, str], - idp_id: Optional[str] = None, + idp_id: str | None = None, ) -> str: """Make a request to the login-via-sso redirect endpoint, and return the target diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index c412a19f9b..aaf39e70e4 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -19,7 +19,7 @@ # # from io import BytesIO, StringIO -from typing import Any, Optional, Union +from typing import Any from unittest.mock import Mock import signedjson.key @@ -67,7 +67,7 @@ class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase): path: str, ignore_backoff: bool = False, **kwargs: Any, - ) -> Union[JsonDict, list]: + ) -> JsonDict | list: self.assertTrue(ignore_backoff) self.assertEqual(destination, server_name) key_id = "%s:%s" % (signing_key.alg, signing_key.version) @@ -191,8 +191,8 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): # wire up outbound POST /key/v2/query requests from hs2 so that they # will be forwarded to hs1 async def post_json( - destination: str, path: str, data: Optional[JsonDict] = None - ) -> Union[JsonDict, list]: + destination: str, path: str, data: JsonDict | None = None + ) -> JsonDict | list: self.assertEqual(destination, self.hs.hostname) self.assertEqual( path, diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index 5af2e79f45..32e78fc12a 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -22,7 +22,7 @@ import base64 import json import os import re -from typing import Any, Optional, Sequence +from typing import Any, Sequence from urllib.parse import quote, urlencode from twisted.internet._resolver import HostResolution @@ -135,7 +135,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): resolutionReceiver: IResolutionReceiver, hostName: str, portNumber: int = 0, - addressTypes: Optional[Sequence[type[IAddress]]] = None, + addressTypes: Sequence[type[IAddress]] | None = None, transportSemantics: str = "TCP", ) -> IResolutionReceiver: resolution = HostResolution(hostName) diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py index 2e71e2a797..0e697427bb 100644 --- a/tests/scripts/test_new_matrix_user.py +++ b/tests/scripts/test_new_matrix_user.py @@ -18,7 +18,6 @@ # # -from typing import Optional from unittest.mock import Mock, patch from synapse._scripts.register_new_matrix_user import request_registration @@ -34,14 +33,14 @@ class RegisterTestCase(TestCase): post that MAC. """ - def get(url: str, verify: Optional[bool] = None) -> Mock: + def get(url: str, verify: bool | None = None) -> Mock: r = Mock() r.status_code = 200 r.json = lambda: {"nonce": "a"} return r def post( - url: str, json: Optional[JsonDict] = None, verify: Optional[bool] = None + url: str, json: JsonDict | None = None, verify: bool | None = None ) -> Mock: # Make sure we are sent the correct info assert json is not None @@ -85,7 +84,7 @@ class RegisterTestCase(TestCase): If the script fails to fetch a nonce, it throws an error and quits. 
""" - def get(url: str, verify: Optional[bool] = None) -> Mock: + def get(url: str, verify: bool | None = None) -> Mock: r = Mock() r.status_code = 404 r.reason = "Not Found" @@ -123,14 +122,14 @@ class RegisterTestCase(TestCase): report an error and quit. """ - def get(url: str, verify: Optional[bool] = None) -> Mock: + def get(url: str, verify: bool | None = None) -> Mock: r = Mock() r.status_code = 200 r.json = lambda: {"nonce": "a"} return r def post( - url: str, json: Optional[JsonDict] = None, verify: Optional[bool] = None + url: str, json: JsonDict | None = None, verify: bool | None = None ) -> Mock: # Make sure we are sent the correct info assert json is not None diff --git a/tests/server.py b/tests/server.py index ff5c606180..30337f3e38 100644 --- a/tests/server.py +++ b/tests/server.py @@ -119,11 +119,11 @@ R = TypeVar("R") P = ParamSpec("P") # the type of thing that can be passed into `make_request` in the headers list -CustomHeaderType = tuple[Union[str, bytes], Union[str, bytes]] +CustomHeaderType = tuple[str | bytes, str | bytes] # A pre-prepared SQLite DB that is used as a template when creating new SQLite # DB each test run. This dramatically speeds up test set up when using SQLite. -PREPPED_SQLITE_DB_CONN: Optional[LoggingDatabaseConnection] = None +PREPPED_SQLITE_DB_CONN: LoggingDatabaseConnection | None = None class TimedOutException(Exception): @@ -146,9 +146,9 @@ class FakeChannel: _reactor: MemoryReactorClock result: dict = attr.Factory(dict) _ip: str = "127.0.0.1" - _producer: Optional[Union[IPullProducer, IPushProducer]] = None - resource_usage: Optional[ContextResourceUsage] = None - _request: Optional[Request] = None + _producer: IPullProducer | IPushProducer | None = None + resource_usage: ContextResourceUsage | None = None + _request: Request | None = None @property def request(self) -> Request: @@ -206,7 +206,7 @@ class FakeChannel: version: bytes, code: bytes, reason: bytes, - headers: Union[Headers, list[tuple[bytes, bytes]]], + headers: Headers | list[tuple[bytes, bytes]], ) -> None: self.result["version"] = version self.result["code"] = code @@ -248,7 +248,7 @@ class FakeChannel: # TODO This should ensure that the IProducer is an IPushProducer or # IPullProducer, unfortunately twisted.protocols.basic.FileSender does # implement those, but doesn't declare it. 
- self._producer = cast(Union[IPushProducer, IPullProducer], producer) + self._producer = cast(IPushProducer | IPullProducer, producer) self.producerStreaming = streaming def _produce() -> None: @@ -357,18 +357,18 @@ class FakeSite: def make_request( reactor: MemoryReactorClock, - site: Union[Site, FakeSite], - method: Union[bytes, str], - path: Union[bytes, str], - content: Union[bytes, str, JsonDict] = b"", - access_token: Optional[str] = None, + site: Site | FakeSite, + method: bytes | str, + path: bytes | str, + content: bytes | str | JsonDict = b"", + access_token: str | None = None, request: type[Request] = SynapseRequest, shorthand: bool = True, - federation_auth_origin: Optional[bytes] = None, - content_type: Optional[bytes] = None, + federation_auth_origin: bytes | None = None, + content_type: bytes | None = None, content_is_form: bool = False, await_result: bool = True, - custom_headers: Optional[Iterable[CustomHeaderType]] = None, + custom_headers: Iterable[CustomHeaderType] | None = None, client_ip: str = "127.0.0.1", ) -> FakeChannel: """ @@ -497,7 +497,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): @implementer(IResolverSimple) class FakeResolver: def getHostByName( - self, name: str, timeout: Optional[Sequence[int]] = None + self, name: str, timeout: Sequence[int] | None = None ) -> "Deferred[str]": if name not in lookups: return fail(DNSLookupError("OH NO: unknown %s" % (name,))) @@ -617,7 +617,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): port: int, factory: ClientFactory, timeout: float = 30, - bindAddress: Optional[tuple[str, int]] = None, + bindAddress: tuple[str, int] | None = None, ) -> IConnector: """Fake L{IReactorTCP.connectTCP}.""" @@ -788,7 +788,7 @@ class ThreadPool: def callInThreadWithCallback( self, - onResult: Callable[[bool, Union[Failure, R]], None], + onResult: Callable[[bool, Failure | R], None], function: Callable[P, R], *args: P.args, **kwargs: P.kwargs, @@ -841,17 +841,17 @@ class FakeTransport: """Test reactor """ - _protocol: Optional[IProtocol] = None + _protocol: IProtocol | None = None """The Protocol which is producing data for this transport. Optional, but if set will get called back for connectionLost() notifications etc. """ - _peer_address: Union[IPv4Address, IPv6Address] = attr.Factory( + _peer_address: IPv4Address | IPv6Address = attr.Factory( lambda: address.IPv4Address("TCP", "127.0.0.1", 5678) ) """The value to be returned by getPeer""" - _host_address: Union[IPv4Address, IPv6Address] = attr.Factory( + _host_address: IPv4Address | IPv6Address = attr.Factory( lambda: address.IPv4Address("TCP", "127.0.0.1", 1234) ) """The value to be returned by getHost""" @@ -860,13 +860,13 @@ class FakeTransport: disconnected = False connected = True buffer: bytes = b"" - producer: Optional[IPushProducer] = None + producer: IPushProducer | None = None autoflush: bool = True - def getPeer(self) -> Union[IPv4Address, IPv6Address]: + def getPeer(self) -> IPv4Address | IPv6Address: return self._peer_address - def getHost(self) -> Union[IPv4Address, IPv6Address]: + def getHost(self) -> IPv4Address | IPv6Address: return self._host_address def loseConnection(self) -> None: @@ -955,7 +955,7 @@ class FakeTransport: for x in seq: self.write(x) - def flush(self, maxbytes: Optional[int] = None) -> None: + def flush(self, maxbytes: int | None = None) -> None: if not self.buffer: # nothing to do. 
Don't write empty buffers: it upsets the # TLSMemoryBIOProtocol @@ -1061,10 +1061,10 @@ def setup_test_homeserver( *, cleanup_func: Callable[[Callable[[], Optional["Deferred[None]"]]], None], server_name: str = "test", - config: Optional[HomeServerConfig] = None, - reactor: Optional[ISynapseReactor] = None, + config: HomeServerConfig | None = None, + reactor: ISynapseReactor | None = None, homeserver_to_use: type[HomeServer] = TestHomeServer, - db_txn_limit: Optional[int] = None, + db_txn_limit: int | None = None, **extra_homeserver_attributes: Any, ) -> HomeServer: """ diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 2cf411e30b..7db710846d 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -23,7 +23,6 @@ from typing import ( Collection, Iterable, Mapping, - Optional, TypeVar, ) @@ -79,7 +78,7 @@ class FakeEvent: id: str, sender: str, type: str, - state_key: Optional[str], + state_key: str | None, content: Mapping[str, object], ): self.node_id = id @@ -525,7 +524,7 @@ class StateTestCase(unittest.TestCase): # EventBuilder. But this is Hard because the relevant attributes are # DictProperty[T] descriptors on EventBase but normal Ts on FakeEvent. # 2. Define a `GenericEvent` Protocol describing `FakeEvent` only, and - # change this function to accept Union[Event, EventBase, EventBuilder]. + # change this function to accept Event | EventBase | EventBuilder. # This seems reasonable to me, but mypy isn't happy. I think that's # a mypy bug, see https://github.com/python/mypy/issues/5570 # Instead, resort to a type-ignore. @@ -1082,8 +1081,8 @@ class TestStateResolutionStore: self, room_id: str, auth_sets: list[set[str]], - conflicted_state: Optional[set[str]], - additional_backwards_reachable_conflicted_events: Optional[set[str]], + conflicted_state: set[str] | None, + additional_backwards_reachable_conflicted_events: set[str] | None, ) -> "defer.Deferred[StateDifference]": chains = [frozenset(self._get_auth_chain(a)) for a in auth_sets] diff --git a/tests/state/test_v21.py b/tests/state/test_v21.py index 7bef3decf0..b17773fb56 100644 --- a/tests/state/test_v21.py +++ b/tests/state/test_v21.py @@ -18,7 +18,7 @@ # # import itertools -from typing import Optional, Sequence +from typing import Sequence from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor @@ -357,11 +357,11 @@ class StateResV21TestCase(unittest.HomeserverTestCase): self, room_id: str, state_maps: Sequence[StateMap[str]], - event_map: Optional[dict[str, EventBase]], + event_map: dict[str, EventBase] | None, state_res_store: StateResolutionStoreInterface, ) -> set[str]: _, conflicted_state = _seperate(state_maps) - conflicted_set: Optional[set[str]] = set( + conflicted_set: set[str] | None = set( itertools.chain.from_iterable(conflicted_state.values()) ) if event_map is None: @@ -458,7 +458,7 @@ class StateResV21TestCase(unittest.HomeserverTestCase): resolve_and_check() def persist_event( - self, event: EventBase, state: Optional[StateMap[str]] = None + self, event: EventBase, state: StateMap[str] | None = None ) -> None: """Persist the event, with optional state""" context = self.get_success( @@ -473,12 +473,12 @@ class StateResV21TestCase(unittest.HomeserverTestCase): def create_event( self, event_type: str, - state_key: Optional[str], + state_key: str | None, sender: str, content: dict, auth_events: list[str], - prev_events: Optional[list[str]] = None, - room_id: Optional[str] = None, + prev_events: list[str] | None = None, + room_id: str | None = None, ) -> 
EventBase: """Short-hand for event_from_pdu_json for fields we typically care about. Tests can override by just calling event_from_pdu_json directly.""" diff --git a/tests/storage/databases/main/test_end_to_end_keys.py b/tests/storage/databases/main/test_end_to_end_keys.py index 35e1e15d66..d21dac6024 100644 --- a/tests/storage/databases/main/test_end_to_end_keys.py +++ b/tests/storage/databases/main/test_end_to_end_keys.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from twisted.internet.testing import MemoryReactor @@ -99,7 +98,7 @@ class EndToEndKeyWorkerStoreTestCase(HomeserverTestCase): def check_timestamp_column( txn: LoggingTransaction, - ) -> list[tuple[JsonDict, Optional[int]]]: + ) -> list[tuple[JsonDict, int | None]]: """Fetch all rows for Alice's keys.""" txn.execute( """ diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py index 2d63b52aca..be29e0a7f4 100644 --- a/tests/storage/databases/main/test_receipts.py +++ b/tests/storage/databases/main/test_receipts.py @@ -19,7 +19,7 @@ # # -from typing import Any, Optional, Sequence +from typing import Any, Sequence from twisted.internet.testing import MemoryReactor @@ -52,7 +52,7 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase): index_name: str, table: str, receipts: dict[tuple[str, str, str], Sequence[dict[str, Any]]], - expected_unique_receipts: dict[tuple[str, str, str], Optional[dict[str, Any]]], + expected_unique_receipts: dict[tuple[str, str, str], dict[str, Any] | None], ) -> None: """Test that the background update to uniqueify non-thread receipts in the given receipts table works properly. diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py index d9307154da..c91aad097d 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py @@ -19,7 +19,7 @@ # # -from typing import Iterable, Optional +from typing import Iterable from twisted.internet.testing import MemoryReactor @@ -37,7 +37,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): self.user = "@user:test" def _update_ignore_list( - self, *ignored_user_ids: Iterable[str], ignorer_user_id: Optional[str] = None + self, *ignored_user_ids: Iterable[str], ignorer_user_id: str | None = None ) -> None: """Update the account data to block the given users.""" if ignorer_user_id is None: @@ -167,7 +167,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): """Test that ignoring users updates the latest stream ID for the ignored user list account data.""" - def get_latest_ignore_streampos(user_id: str) -> Optional[int]: + def get_latest_ignore_streampos(user_id: str) -> int | None: return self.get_success( self.store.get_latest_stream_id_for_global_account_data_by_type_for_user( user_id, AccountDataTypes.IGNORED_USER_LIST diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 2c1ba9d6c2..bd68f2aaa1 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -19,7 +19,7 @@ # # -from typing import Any, Optional, cast +from typing import Any, cast from unittest.mock import AsyncMock from parameterized import parameterized @@ -104,7 +104,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.pump(0) result = cast( - list[tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, str | None, int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -135,7 +135,7 @@ 
class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.pump(0) result = cast( - list[tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, str | None, int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -184,7 +184,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): else: # Check that the new IP and user agent has not been stored yet db_result = cast( - list[tuple[str, Optional[str], Optional[str], str, Optional[int]]], + list[tuple[str, str | None, str | None, str, int | None]], self.get_success( self.store.db_pool.simple_select_list( table="devices", @@ -266,7 +266,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # Check that the new IP and user agent has not been stored yet db_result = cast( - list[tuple[str, Optional[str], Optional[str], str, Optional[int]]], + list[tuple[str, str | None, str | None, str, int | None]], self.get_success( self.store.db_pool.simple_select_list( table="devices", @@ -589,7 +589,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # We should see that in the DB result = cast( - list[tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, str | None, int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -616,7 +616,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # We should get no results. result = cast( - list[tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, str | None, int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", @@ -695,7 +695,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): # We should see that in the DB result = cast( - list[tuple[str, str, str, Optional[str], int]], + list[tuple[str, str, str, str | None, int]], self.get_success( self.store.db_pool.simple_select_list( table="user_ips", diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index d8c6a1cd04..508f82de4f 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -25,7 +25,6 @@ from typing import ( Mapping, NamedTuple, TypeVar, - Union, cast, ) @@ -931,7 +930,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): room_id = "some_room_id" - def prev_event_format(prev_event_id: str) -> Union[tuple[str, dict], str]: + def prev_event_format(prev_event_id: str) -> tuple[str, dict] | str: """Account for differences in prev_events format across room versions""" if room_version.event_format == EventFormatVersions.ROOM_V1_V2: return prev_event_id, {} diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index ef6c0f2465..d5ed947094 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -19,7 +19,6 @@ # # -from typing import Optional from twisted.internet.testing import MemoryReactor @@ -345,9 +344,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): aggregate_counts[room_id], notif_count + thread_notif_count ) - def _create_event( - highlight: bool = False, thread_id: Optional[str] = None - ) -> str: + def _create_event(highlight: bool = False, thread_id: str | None = None) -> str: content: JsonDict = { "msgtype": "m.text", "body": user_id if highlight else "msg", @@ -527,9 +524,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): aggregate_counts[room_id], notif_count + thread_notif_count ) - def _create_event( - highlight: bool = False, thread_id: Optional[str] = 
None - ) -> str: + def _create_event(highlight: bool = False, thread_id: str | None = None) -> str: content: JsonDict = { "msgtype": "m.text", "body": user_id if highlight else "msg", @@ -553,7 +548,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): def _rotate() -> None: self.get_success(self.store._rotate_notifs()) - def _mark_read(event_id: str, thread_id: Optional[str] = None) -> None: + def _mark_read(event_id: str, thread_id: str | None = None) -> None: self.get_success( self.store.insert_receipt( room_id, diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index 5c7f814078..7d1c96f97f 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -20,7 +20,6 @@ # import logging -from typing import Optional from twisted.internet.testing import MemoryReactor @@ -168,7 +167,7 @@ class ExtremPruneTestCase(HomeserverTestCase): self.assert_extremities([self.remote_event_1.event_id]) def persist_event( - self, event: EventBase, state: Optional[StateMap[str]] = None + self, event: EventBase, state: StateMap[str] | None = None ) -> None: """Persist the event, with optional state""" context = self.get_success( diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 4846e8cac3..051c5de44d 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from twisted.internet.testing import MemoryReactor @@ -76,7 +75,7 @@ class MultiWriterIdGeneratorBase(HomeserverTestCase): def _create_id_generator( self, instance_name: str = "master", - writers: Optional[list[str]] = None, + writers: list[str] | None = None, ) -> MultiWriterIdGenerator: def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator: return MultiWriterIdGenerator( @@ -113,7 +112,7 @@ class MultiWriterIdGeneratorBase(HomeserverTestCase): self._replicate(instance_name) def _insert_row( - self, instance_name: str, stream_id: int, table: Optional[str] = None + self, instance_name: str, stream_id: int, table: str | None = None ) -> None: """Insert one row as the given instance with given stream_id.""" @@ -144,7 +143,7 @@ class MultiWriterIdGeneratorBase(HomeserverTestCase): self, instance_name: str, number: int, - table: Optional[str] = None, + table: str | None = None, update_stream_table: bool = True, ) -> None: """Insert N rows as the given instance, inserting with stream IDs pulled diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 9ea2fa5311..4f97d89f78 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -101,7 +101,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): # Test each of the registered users is marked as active timestamp = self.get_success(self.store.user_last_seen_monthly_active(user1)) - # Mypy notes that one shouldn't compare Optional[int] to 0 with assertGreater. + # Mypy notes that one shouldn't compare int | None to 0 with assertGreater. # Check that timestamp really is an int. 
assert timestamp is not None self.assertGreater(timestamp, 0) diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py index 10ded391f4..27875dcebb 100644 --- a/tests/storage/test_receipts.py +++ b/tests/storage/test_receipts.py @@ -19,7 +19,7 @@ # # -from typing import Collection, Optional +from typing import Collection from twisted.internet.testing import MemoryReactor @@ -101,8 +101,8 @@ class ReceiptTestCase(HomeserverTestCase): ) def get_last_unthreaded_receipt( - self, receipt_types: Collection[str], room_id: Optional[str] = None - ) -> Optional[str]: + self, receipt_types: Collection[str], room_id: str | None = None + ) -> str | None: """ Fetch the event ID for the latest unthreaded receipt in the test room for the test user. diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 2c188b8046..92eb99f1d5 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional, cast +from typing import cast from canonicaljson import json @@ -67,7 +67,7 @@ class RedactionTestCase(unittest.HomeserverTestCase): room: RoomID, user: UserID, membership: str, - extra_content: Optional[JsonDict] = None, + extra_content: JsonDict | None = None, ) -> EventBase: content = {"membership": membership} content.update(extra_content or {}) @@ -248,8 +248,8 @@ class RedactionTestCase(unittest.HomeserverTestCase): async def build( self, prev_event_ids: list[str], - auth_event_ids: Optional[list[str]], - depth: Optional[int] = None, + auth_event_ids: list[str] | None, + depth: int | None = None, ) -> EventBase: built_event = await self._base_builder.build( prev_event_ids=prev_event_ids, auth_event_ids=auth_event_ids diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index c5487d81e6..f8d64e8ce6 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -20,7 +20,7 @@ # # import logging -from typing import Optional, cast +from typing import cast from twisted.internet.testing import MemoryReactor @@ -133,7 +133,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) res = cast( - list[tuple[Optional[str], str]], + list[tuple[str | None, str]], self.get_success( self.store.db_pool.simple_select_list( "room_memberships", @@ -165,7 +165,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): ) res2 = cast( - list[tuple[Optional[str], str]], + list[tuple[str | None, str]], self.get_success( self.store.db_pool.simple_select_list( "room_memberships", @@ -410,7 +410,7 @@ class RoomSummaryTestCase(unittest.HomeserverTestCase): actual_member_summary: MemberSummary, expected_member_list: list[str], *, - expected_member_count: Optional[int] = None, + expected_member_count: int | None = None, ) -> None: """ Assert that the `MemberSummary` object has the expected members. 
diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py index 5cfc1a9c29..db31348a8c 100644 --- a/tests/storage/test_sliding_sync_tables.py +++ b/tests/storage/test_sliding_sync_tables.py @@ -18,7 +18,7 @@ # # import logging -from typing import Optional, cast +from typing import cast import attr from parameterized import parameterized @@ -55,12 +55,12 @@ class _SlidingSyncJoinedRoomResult: # `event.internal_metadata.stream_ordering` is marked optional because it only # exists for persisted events but in the context of these tests, we're only working # with persisted events and we're making comparisons so we will find any mismatch. - event_stream_ordering: Optional[int] - bump_stamp: Optional[int] - room_type: Optional[str] - room_name: Optional[str] + event_stream_ordering: int | None + bump_stamp: int | None + room_type: str | None + room_name: str | None is_encrypted: bool - tombstone_successor_room_id: Optional[str] + tombstone_successor_room_id: str | None @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -75,12 +75,12 @@ class _SlidingSyncMembershipSnapshotResult: # `event.internal_metadata.stream_ordering` is marked optional because it only # exists for persisted events but in the context of these tests, we're only working # with persisted events and we're making comparisons so we will find any mismatch. - event_stream_ordering: Optional[int] + event_stream_ordering: int | None has_known_state: bool - room_type: Optional[str] - room_name: Optional[str] + room_type: str | None + room_name: str | None is_encrypted: bool - tombstone_successor_room_id: Optional[str] + tombstone_successor_room_id: str | None # Make this default to "not forgotten" because it doesn't apply to many tests and we # don't want to force all of the tests to deal with it. forgotten: bool = False @@ -207,7 +207,7 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): def _create_remote_invite_room_for_user( self, invitee_user_id: str, - unsigned_invite_room_state: Optional[list[StrippedStateEvent]], + unsigned_invite_room_state: list[StrippedStateEvent] | None, ) -> tuple[str, EventBase]: """ Create a fake invite for a remote room and persist it. @@ -2246,7 +2246,7 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase): ] ) def test_non_join_remote_invite_no_stripped_state( - self, _description: str, stripped_state: Optional[list[StrippedStateEvent]] + self, _description: str, stripped_state: list[StrippedStateEvent] | None ) -> None: """ Test remote invite with no stripped state provided shows up in diff --git a/tests/storage/test_thread_subscriptions.py b/tests/storage/test_thread_subscriptions.py index 3f78308e45..ec6f8c5bfb 100644 --- a/tests/storage/test_thread_subscriptions.py +++ b/tests/storage/test_thread_subscriptions.py @@ -12,7 +12,6 @@ # . 
# -from typing import Optional, Union from twisted.internet.testing import MemoryReactor @@ -102,10 +101,10 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): self, thread_root_id: str, *, - automatic_event_orderings: Optional[EventOrderings], - room_id: Optional[str] = None, - user_id: Optional[str] = None, - ) -> Optional[Union[int, AutomaticSubscriptionConflicted]]: + automatic_event_orderings: EventOrderings | None, + room_id: str | None = None, + user_id: str | None = None, + ) -> int | AutomaticSubscriptionConflicted | None: if user_id is None: user_id = self.user_id @@ -124,9 +123,9 @@ class ThreadSubscriptionsTestCase(unittest.HomeserverTestCase): def _unsubscribe( self, thread_root_id: str, - room_id: Optional[str] = None, - user_id: Optional[str] = None, - ) -> Optional[int]: + room_id: str | None = None, + user_id: str | None = None, + ) -> int | None: if user_id is None: user_id = self.user_id diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 83d3357c65..7b4acd985c 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -19,7 +19,7 @@ # # import re -from typing import Any, Optional, cast +from typing import Any, cast from unittest import mock from unittest.mock import Mock, patch @@ -110,7 +110,7 @@ class GetUserDirectoryTables: thing missing is an unused room_id column. """ rows = cast( - list[tuple[str, Optional[str], Optional[str]]], + list[tuple[str, str | None, str | None]], await self.store.db_pool.simple_select_list( "user_directory", None, diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 7737101967..934a2fd307 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -20,7 +20,7 @@ # import unittest -from typing import Any, Collection, Iterable, Optional +from typing import Any, Collection, Iterable from parameterized import parameterized @@ -797,8 +797,8 @@ def _member_event( room_version: RoomVersion, user_id: str, membership: str, - sender: Optional[str] = None, - additional_content: Optional[dict] = None, + sender: str | None = None, + additional_content: dict | None = None, ) -> EventBase: return make_event_from_dict( { @@ -818,7 +818,7 @@ def _member_event( def _join_event( room_version: RoomVersion, user_id: str, - additional_content: Optional[dict] = None, + additional_content: dict | None = None, ) -> EventBase: return _member_event( room_version, @@ -871,7 +871,7 @@ def _build_auth_dict_for_room_version( def _random_state_event( room_version: RoomVersion, sender: str, - auth_events: Optional[Iterable[EventBase]] = None, + auth_events: Iterable[EventBase] | None = None, ) -> EventBase: if auth_events is None: auth_events = [] diff --git a/tests/test_mau.py b/tests/test_mau.py index e535e7dc2e..2d5c4c5d1c 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -20,8 +20,6 @@ """Tests REST events for /rooms paths.""" -from typing import Optional - from twisted.internet.testing import MemoryReactor from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType @@ -313,7 +311,7 @@ class TestMauLimit(unittest.HomeserverTestCase): ) def create_user( - self, localpart: str, token: Optional[str] = None, appservice: bool = False + self, localpart: str, token: str | None = None, appservice: bool = False ) -> str: request_data = { "username": localpart, diff --git a/tests/test_server.py b/tests/test_server.py index e7d3febe3f..2df6bdfa44 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -20,7 +20,7 @@ import 
re from http import HTTPStatus -from typing import Awaitable, Callable, NoReturn, Optional +from typing import Awaitable, Callable, NoReturn from twisted.internet.defer import Deferred from twisted.web.resource import Resource @@ -309,7 +309,7 @@ class OptionsResourceTests(unittest.TestCase): class WrapHtmlRequestHandlerTests(unittest.TestCase): class TestResource(DirectServeHtmlResource): - callback: Optional[Callable[..., Awaitable[None]]] + callback: Callable[..., Awaitable[None]] | None async def _async_render_GET(self, request: SynapseRequest) -> None: assert self.callback is not None diff --git a/tests/test_state.py b/tests/test_state.py index 6e5a6d845d..7df95ebf8b 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -24,7 +24,6 @@ from typing import ( Generator, Iterable, Iterator, - Optional, ) from unittest.mock import AsyncMock, Mock @@ -48,12 +47,12 @@ _next_event_id = 1000 def create_event( - name: Optional[str] = None, - type: Optional[str] = None, - state_key: Optional[str] = None, + name: str | None = None, + type: str | None = None, + state_key: str | None = None, depth: int = 2, - event_id: Optional[str] = None, - prev_events: Optional[list[tuple[str, dict]]] = None, + event_id: str | None = None, + prev_events: list[tuple[str, dict]] | None = None, **kwargs: Any, ) -> EventBase: global _next_event_id @@ -106,7 +105,7 @@ class _DummyStore: return groups async def get_state_ids_for_group( - self, state_group: int, state_filter: Optional[StateFilter] = None + self, state_group: int, state_filter: StateFilter | None = None ) -> MutableStateMap[str]: return self._group_to_state[state_group] @@ -114,9 +113,9 @@ class _DummyStore: self, event_id: str, room_id: str, - prev_group: Optional[int], - delta_ids: Optional[StateMap[str]], - current_state_ids: Optional[StateMap[str]], + prev_group: int | None, + delta_ids: StateMap[str] | None, + current_state_ids: StateMap[str] | None, ) -> int: state_group = self._next_group self._next_group += 1 @@ -147,7 +146,7 @@ class _DummyStore: async def get_state_group_delta( self, name: str - ) -> tuple[Optional[int], Optional[StateMap[str]]]: + ) -> tuple[int | None, StateMap[str] | None]: return None, None def register_events(self, events: Iterable[EventBase]) -> None: diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index 9cdb456b1b..a90fc5884d 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, Optional +from typing import Any import synapse.server from synapse.api.constants import EventTypes @@ -36,8 +36,8 @@ async def inject_member_event( room_id: str, sender: str, membership: str, - target: Optional[str] = None, - extra_content: Optional[dict] = None, + target: str | None = None, + extra_content: dict | None = None, **kwargs: Any, ) -> EventBase: """Inject a membership event into a room.""" @@ -61,8 +61,8 @@ async def inject_member_event( async def inject_event( hs: synapse.server.HomeServer, - room_version: Optional[str] = None, - prev_event_ids: Optional[list[str]] = None, + room_version: str | None = None, + prev_event_ids: list[str] | None = None, **kwargs: Any, ) -> EventBase: """Inject a generic event into a room @@ -86,8 +86,8 @@ async def inject_event( async def create_event( hs: synapse.server.HomeServer, - room_version: Optional[str] = None, - prev_event_ids: Optional[list[str]] = None, + room_version: str | None = None, + 
prev_event_ids: list[str] | None = None, **kwargs: Any, ) -> tuple[EventBase, EventContext]: if room_version is None: diff --git a/tests/test_utils/html_parsers.py b/tests/test_utils/html_parsers.py index aff1626295..a9a4b98df2 100644 --- a/tests/test_utils/html_parsers.py +++ b/tests/test_utils/html_parsers.py @@ -20,7 +20,7 @@ # from html.parser import HTMLParser -from typing import Iterable, NoReturn, Optional +from typing import Iterable, NoReturn class TestHtmlParser(HTMLParser): @@ -33,13 +33,13 @@ class TestHtmlParser(HTMLParser): self.links: list[str] = [] # the values of any hidden s: map from name to value - self.hiddens: dict[str, Optional[str]] = {} + self.hiddens: dict[str, str | None] = {} # the values of any radio buttons: map from name to list of values - self.radios: dict[str, list[Optional[str]]] = {} + self.radios: dict[str, list[str | None]] = {} def handle_starttag( - self, tag: str, attrs: Iterable[tuple[str, Optional[str]]] + self, tag: str, attrs: Iterable[tuple[str, str | None]] ) -> None: attr_dict = dict(attrs) if tag == "a": diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py index c2d6af029a..837a04077c 100644 --- a/tests/test_utils/oidc.py +++ b/tests/test_utils/oidc.py @@ -23,7 +23,7 @@ import base64 import json from hashlib import sha256 -from typing import Any, ContextManager, Optional +from typing import Any, ContextManager from unittest.mock import Mock, patch from urllib.parse import parse_qs @@ -45,8 +45,8 @@ class FakeAuthorizationGrant: client_id: str redirect_uri: str scope: str - nonce: Optional[str] - sid: Optional[str] + nonce: str | None + sid: str | None class FakeOidcServer: @@ -140,7 +140,7 @@ class FakeOidcServer: def get_jwks(self) -> dict: return self._jwks.as_dict() - def get_userinfo(self, access_token: str) -> Optional[dict]: + def get_userinfo(self, access_token: str) -> dict | None: """Given an access token, get the userinfo of the associated session.""" session = self._sessions.get(access_token, None) if session is None: @@ -220,7 +220,7 @@ class FakeOidcServer: scope: str, redirect_uri: str, userinfo: dict, - nonce: Optional[str] = None, + nonce: str | None = None, with_sid: bool = False, ) -> tuple[str, FakeAuthorizationGrant]: """Start an authorization request, and get back the code to use on the authorization endpoint.""" @@ -242,7 +242,7 @@ class FakeOidcServer: return code, grant - def exchange_code(self, code: str) -> Optional[dict[str, Any]]: + def exchange_code(self, code: str) -> dict[str, Any] | None: grant = self._authorization_grants.pop(code, None) if grant is None: return None @@ -296,11 +296,11 @@ class FakeOidcServer: self, method: str, uri: str, - data: Optional[bytes] = None, - headers: Optional[Headers] = None, + data: bytes | None = None, + headers: Headers | None = None, ) -> IResponse: """The override of the SimpleHttpClient#request() method""" - access_token: Optional[str] = None + access_token: str | None = None if headers is None: headers = Headers() @@ -346,7 +346,7 @@ class FakeOidcServer: """Handles requests to the OIDC well-known document.""" return FakeResponse.json(payload=self.get_metadata()) - def _get_userinfo_handler(self, access_token: Optional[str]) -> IResponse: + def _get_userinfo_handler(self, access_token: str | None) -> IResponse: """Handles requests to the userinfo endpoint.""" if access_token is None: return FakeResponse(code=401) diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 9a8cad6454..06598c29de 100644 --- a/tests/test_visibility.py +++ 
b/tests/test_visibility.py @@ -18,7 +18,6 @@ # # import logging -from typing import Optional from unittest.mock import patch from twisted.test.proto_helpers import MemoryReactor @@ -693,9 +692,9 @@ async def inject_message_event( hs: HomeServer, room_id: str, sender: str, - body: Optional[str] = "testytest", - soft_failed: Optional[bool] = False, - policy_server_spammy: Optional[bool] = False, + body: str | None = "testytest", + soft_failed: bool | None = False, + policy_server_spammy: bool | None = False, ) -> EventBase: return await inject_event( hs, diff --git a/tests/unittest.py b/tests/unittest.py index 049a92caaa..7ea29364db 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -37,10 +37,8 @@ from typing import ( Iterable, Mapping, NoReturn, - Optional, Protocol, TypeVar, - Union, ) from unittest.mock import Mock, patch @@ -274,7 +272,7 @@ class TestCase(unittest.TestCase): actual_items: AbstractSet[TV], expected_items: AbstractSet[TV], exact: bool = False, - message: Optional[str] = None, + message: str | None = None, ) -> None: """ Assert that all of the `expected_items` are included in the `actual_items`. @@ -573,17 +571,17 @@ class HomeserverTestCase(TestCase): def make_request( self, - method: Union[bytes, str], - path: Union[bytes, str], - content: Union[bytes, str, JsonDict] = b"", - access_token: Optional[str] = None, + method: bytes | str, + path: bytes | str, + content: bytes | str | JsonDict = b"", + access_token: str | None = None, request: type[Request] = SynapseRequest, shorthand: bool = True, - federation_auth_origin: Optional[bytes] = None, - content_type: Optional[bytes] = None, + federation_auth_origin: bytes | None = None, + content_type: bytes | None = None, content_is_form: bool = False, await_result: bool = True, - custom_headers: Optional[Iterable[CustomHeaderType]] = None, + custom_headers: Iterable[CustomHeaderType] | None = None, client_ip: str = "127.0.0.1", ) -> FakeChannel: """ @@ -636,10 +634,10 @@ class HomeserverTestCase(TestCase): def setup_test_homeserver( self, - server_name: Optional[str] = None, - config: Optional[JsonDict] = None, - reactor: Optional[ISynapseReactor] = None, - clock: Optional[Clock] = None, + server_name: str | None = None, + config: JsonDict | None = None, + reactor: ISynapseReactor | None = None, + clock: Clock | None = None, **extra_homeserver_attributes: Any, ) -> HomeServer: """ @@ -746,8 +744,8 @@ class HomeserverTestCase(TestCase): self, username: str, password: str, - admin: Optional[bool] = False, - displayname: Optional[str] = None, + admin: bool | None = False, + displayname: str | None = None, ) -> str: """ Register a user. Requires the Admin API be registered. @@ -798,7 +796,7 @@ class HomeserverTestCase(TestCase): username: str, appservice_token: str, inhibit_login: bool = False, - ) -> tuple[str, Optional[str]]: + ) -> tuple[str, str | None]: """Register an appservice user as an application service. Requires the client-facing registration API be registered. @@ -829,9 +827,9 @@ class HomeserverTestCase(TestCase): self, username: str, password: str, - device_id: Optional[str] = None, - additional_request_fields: Optional[dict[str, str]] = None, - custom_headers: Optional[Iterable[CustomHeaderType]] = None, + device_id: str | None = None, + additional_request_fields: dict[str, str] | None = None, + custom_headers: Iterable[CustomHeaderType] | None = None, ) -> str: """ Log in a user, and get an access token. Requires the Login API be registered. 
@@ -870,7 +868,7 @@ class HomeserverTestCase(TestCase): room_id: str, user: UserID, soft_failed: bool = False, - prev_event_ids: Optional[list[str]] = None, + prev_event_ids: list[str] | None = None, ) -> str: """ Create and send an event. @@ -971,9 +969,9 @@ class FederatingHomeserverTestCase(HomeserverTestCase): self, method: str, path: str, - content: Optional[JsonDict] = None, + content: JsonDict | None = None, await_result: bool = True, - custom_headers: Optional[Iterable[CustomHeaderType]] = None, + custom_headers: Iterable[CustomHeaderType] | None = None, client_ip: str = "127.0.0.1", ) -> FakeChannel: """Make an inbound signed federation request to this server @@ -1038,7 +1036,7 @@ def _auth_header_for_request( signing_key: signedjson.key.SigningKey, method: str, path: str, - content: Optional[JsonDict], + content: JsonDict | None, ) -> str: """Build a suitable Authorization header for an outgoing federation request""" request_description: JsonDict = { diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index e27f84fa6d..3fab6c4c57 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -25,7 +25,6 @@ from typing import ( Iterable, Mapping, NoReturn, - Optional, cast, ) from unittest import mock @@ -242,7 +241,7 @@ class DescriptorTestCase(unittest.TestCase): """The wrapped function returns a failure""" class Cls: - result: Optional[Deferred] = None + result: Deferred | None = None call_count = 0 server_name = "test_server" # nb must be called this for @cached _, clock = get_clock() # nb must be called this for @cached diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index 8fbee12fb9..a6b7ddf485 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -19,7 +19,7 @@ # import logging import traceback -from typing import Any, Coroutine, NoReturn, Optional, TypeVar +from typing import Any, Coroutine, NoReturn, TypeVar from parameterized import parameterized_class @@ -71,7 +71,7 @@ class ObservableDeferredTest(TestCase): observer1.addBoth(check_called_first) # store the results - results: list[Optional[int]] = [None, None] + results: list[int | None] = [None, None] def check_val(res: int, idx: int) -> int: results[idx] = res @@ -102,7 +102,7 @@ class ObservableDeferredTest(TestCase): observer1.addBoth(check_called_first) # store the results - results: list[Optional[Failure]] = [None, None] + results: list[Failure | None] = [None, None] def check_failure(res: Failure, idx: int) -> None: results[idx] = res diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index ab2e2f6291..b7a23dcd9d 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -22,7 +22,7 @@ from contextlib import contextmanager from os import PathLike from pathlib import Path -from typing import Generator, Optional, Union, cast +from typing import Generator, cast from unittest.mock import patch from packaging.markers import default_environment as packaging_default_environment @@ -44,7 +44,7 @@ class DummyDistribution(metadata.Distribution): def version(self) -> str: return self._version - def locate_file(self, path: Union[str, PathLike]) -> Path: + def locate_file(self, path: str | PathLike) -> Path: raise NotImplementedError() def read_text(self, filename: str) -> None: @@ -63,7 +63,7 @@ distribution_with_no_version = DummyDistribution(None) # type: ignore[arg-type] class 
TestDependencyChecker(TestCase): @contextmanager def mock_installed_package( - self, distribution: Optional[DummyDistribution] + self, distribution: DummyDistribution | None ) -> Generator[None, None, None]: """Pretend that looking up any package yields the given `distribution`. diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py index ab0143e605..e6d54ddb8d 100644 --- a/tests/util/test_file_consumer.py +++ b/tests/util/test_file_consumer.py @@ -20,7 +20,7 @@ import threading from io import BytesIO -from typing import BinaryIO, Generator, Optional, cast +from typing import BinaryIO, Generator, cast from unittest.mock import NonCallableMock from zope.interface import implementer @@ -127,7 +127,7 @@ class FileConsumerTests(unittest.TestCase): @implementer(IPullProducer) class DummyPullProducer: def __init__(self) -> None: - self.consumer: Optional[BackgroundFileConsumer] = None + self.consumer: BackgroundFileConsumer | None = None self.deferred: "defer.Deferred[object]" = defer.Deferred() def resumeProducing(self) -> None: @@ -159,7 +159,7 @@ class BlockingBytesWrite: self.closed = False self.write_lock = threading.Lock() - self._notify_write_deferred: Optional[defer.Deferred] = None + self._notify_write_deferred: defer.Deferred | None = None self._number_of_writes = 0 def write(self, write_bytes: bytes) -> None: diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py index 20281d04fe..d3b123c778 100644 --- a/tests/util/test_ratelimitutils.py +++ b/tests/util/test_ratelimitutils.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from twisted.internet import defer from twisted.internet.defer import Deferred @@ -139,7 +138,7 @@ def _await_resolution(reactor: ThreadedMemoryReactorClock, d: Deferred) -> float return (reactor.seconds() - start_time) * 1000 -def build_rc_config(settings: Optional[dict] = None) -> FederationRatelimitSettings: +def build_rc_config(settings: dict | None = None) -> FederationRatelimitSettings: config_dict = default_config("test") config_dict.update(settings or {}) config = HomeServerConfig() diff --git a/tests/util/test_task_scheduler.py b/tests/util/test_task_scheduler.py index de9e381489..e33ded8a7f 100644 --- a/tests/util/test_task_scheduler.py +++ b/tests/util/test_task_scheduler.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Optional from twisted.internet.defer import Deferred from twisted.internet.testing import MemoryReactor @@ -43,7 +42,7 @@ class TestTaskScheduler(HomeserverTestCase): async def _test_task( self, task: ScheduledTask - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: # This test task will copy the parameters to the result result = None if task.params: @@ -86,7 +85,7 @@ class TestTaskScheduler(HomeserverTestCase): async def _sleeping_task( self, task: ScheduledTask - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: # Sleep for a second await self.hs.get_clock().sleep(1) return TaskStatus.COMPLETE, None, None @@ -152,7 +151,7 @@ class TestTaskScheduler(HomeserverTestCase): async def _raising_task( self, task: ScheduledTask - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: raise Exception("raising") def test_schedule_raising_task(self) -> None: @@ -166,7 
+165,7 @@ class TestTaskScheduler(HomeserverTestCase): async def _resumable_task( self, task: ScheduledTask - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: if task.result and "in_progress" in task.result: return TaskStatus.COMPLETE, {"success": True}, None else: @@ -204,7 +203,7 @@ class TestTaskSchedulerWithBackgroundWorker(BaseMultiWorkerStreamTestCase): async def _test_task( self, task: ScheduledTask - ) -> tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + ) -> tuple[TaskStatus, JsonMapping | None, str | None]: return (TaskStatus.COMPLETE, None, None) @override_config({"run_background_tasks_on": "worker1"}) diff --git a/tests/utils.py b/tests/utils.py index b3d59a0ebe..4052c9a4fb 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -25,9 +25,7 @@ import signal from types import FrameType, TracebackType from typing import ( Literal, - Optional, TypeVar, - Union, overload, ) @@ -141,7 +139,7 @@ def default_config(server_name: str, parse: Literal[True]) -> HomeServerConfig: def default_config( server_name: str, parse: bool = False -) -> Union[dict[str, object], HomeServerConfig]: +) -> dict[str, object] | HomeServerConfig: """ Create a reasonable test config. @@ -320,13 +318,13 @@ class test_timeout: ``` """ - def __init__(self, seconds: int, error_message: Optional[str] = None) -> None: + def __init__(self, seconds: int, error_message: str | None = None) -> None: self.error_message = f"Test timed out after {seconds}s" if error_message is not None: self.error_message += f": {error_message}" self.seconds = seconds - def handle_timeout(self, signum: int, frame: Optional[FrameType]) -> None: + def handle_timeout(self, signum: int, frame: FrameType | None) -> None: raise TestTimeout(self.error_message) def __enter__(self) -> None: @@ -335,8 +333,8 @@ class test_timeout: def __exit__( self, - exc_type: Optional[type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, ) -> None: signal.alarm(0) From 18f1d28a498c8ac18a7f8b3fdb778af2f0954fc4 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 7 Nov 2025 10:41:05 +0000 Subject: [PATCH 137/149] 1.142.0rc1 regression fix: Allow coercing a `str` to a `FilePath` in `MasConfigModel` (#19144) --- changelog.d/19144.bugfix | 1 + synapse/config/mas.py | 3 ++- synapse/util/pydantic_models.py | 7 +++++++ 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19144.bugfix diff --git a/changelog.d/19144.bugfix b/changelog.d/19144.bugfix new file mode 100644 index 0000000000..3efec8080b --- /dev/null +++ b/changelog.d/19144.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.142.0rc1 where any attempt to configure `matrix_authentication_service.secret_path` would prevent the homeserver from starting up. \ No newline at end of file diff --git a/synapse/config/mas.py b/synapse/config/mas.py index 53cf500e95..c3e2630f2c 100644 --- a/synapse/config/mas.py +++ b/synapse/config/mas.py @@ -37,7 +37,8 @@ class MasConfigModel(ParseModel): enabled: StrictBool = False endpoint: AnyHttpUrl = AnyHttpUrl("http://localhost:8080") secret: Optional[StrictStr] = Field(default=None) - secret_path: Optional[FilePath] = Field(default=None) + # We set `strict=False` to allow `str` instances. 
+ secret_path: Optional[FilePath] = Field(default=None, strict=False) @model_validator(mode="after") def verify_secret(self) -> Self: diff --git a/synapse/util/pydantic_models.py b/synapse/util/pydantic_models.py index e1e2d8b99f..506063d1a1 100644 --- a/synapse/util/pydantic_models.py +++ b/synapse/util/pydantic_models.py @@ -30,6 +30,13 @@ class ParseModel(BaseModel): but otherwise uses Pydantic's default behaviour. + Strict mode can adversely affect some types of fields, and should be disabled + for a field if: + + - the field's type is a `Path` or `FilePath`. Strict mode will refuse to + coerce from `str` (likely what the yaml parser will produce) to `FilePath`, + raising a `ValidationError`. + For now, ignore unknown fields. In the future, we could change this so that unknown config values cause a ValidationError, provided the error messages are meaningful to server operators. From 5d4a73149919b52a6f286e8964121ccb2467620c Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 7 Nov 2025 10:54:55 +0000 Subject: [PATCH 138/149] 1.142.0rc4 --- CHANGES.md | 9 +++++++++ changelog.d/19144.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/19144.bugfix diff --git a/CHANGES.md b/CHANGES.md index ab9b72e2a8..7bb0ece7e7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +# Synapse 1.142.0rc4 (2025-11-07) + +## Bugfixes + +- Fix a bug introduced in 1.142.0rc1 where any attempt to configure `matrix_authentication_service.secret_path` would prevent the homeserver from starting up. ([\#19144](https://github.com/element-hq/synapse/issues/19144)) + + + + # Synapse 1.142.0rc3 (2025-11-04) ## Dropped support for Python 3.9 diff --git a/changelog.d/19144.bugfix b/changelog.d/19144.bugfix deleted file mode 100644 index 3efec8080b..0000000000 --- a/changelog.d/19144.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.142.0rc1 where any attempt to configure `matrix_authentication_service.secret_path` would prevent the homeserver from starting up. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 0dae012858..6531a59569 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.142.0~rc4) stable; urgency=medium + + * New Synapse release 1.142.0rc4. + + -- Synapse Packaging team Fri, 07 Nov 2025 10:54:42 +0000 + matrix-synapse-py3 (1.142.0~rc3) stable; urgency=medium * New Synapse release 1.142.0rc3. 
diff --git a/pyproject.toml b/pyproject.toml index 991cb3e7f3..f81c1e6baf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -107,7 +107,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.142.0rc3" +version = "1.142.0rc4" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial" From 72073d82ae339700a07486d32f2d2c29c67cce4c Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 7 Nov 2025 11:20:20 +0000 Subject: [PATCH 139/149] Move important messages to the top of the changelog --- CHANGES.md | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 7bb0ece7e7..8fbd50f20e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,14 +1,5 @@ # Synapse 1.142.0rc4 (2025-11-07) -## Bugfixes - -- Fix a bug introduced in 1.142.0rc1 where any attempt to configure `matrix_authentication_service.secret_path` would prevent the homeserver from starting up. ([\#19144](https://github.com/element-hq/synapse/issues/19144)) - - - - -# Synapse 1.142.0rc3 (2025-11-04) - ## Dropped support for Python 3.9 This release drops support for Python 3.9, in line with our [dependency @@ -38,6 +29,14 @@ of these wheels downstream, please reach out to us in [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org). We'd love to hear from you! +## Bugfixes + +- Fix a bug introduced in 1.142.0rc1 where any attempt to configure `matrix_authentication_service.secret_path` would prevent the homeserver from starting up. ([\#19144](https://github.com/element-hq/synapse/issues/19144)) + + + + +# Synapse 1.142.0rc3 (2025-11-04) ## Internal Changes From dedd6e35e6f7d99785ecb5f1e844f69ee4ca3473 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sat, 8 Nov 2025 09:12:37 -0700 Subject: [PATCH 140/149] Rejig thread updates to use room lists --- synapse/handlers/sliding_sync/__init__.py | 1 + synapse/handlers/sliding_sync/extensions.py | 157 ++++++++++++++++-- synapse/storage/databases/main/relations.py | 145 ++++++---------- .../sliding_sync/test_extension_threads.py | 71 ++++++++ 4 files changed, 262 insertions(+), 112 deletions(-) diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 255a041d0e..c15c9e1de6 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -305,6 +305,7 @@ class SlidingSyncHandler: # account data, read receipts, typing indicators, to-device messages, etc). actual_room_ids=set(relevant_room_map.keys()), actual_room_response_map=rooms, + room_membership_for_user_at_to_token_map=room_membership_for_user_map, from_token=from_token, to_token=to_token, ) diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 66663c5e68..44a1cf7058 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -12,6 +12,7 @@ # . 
# +from collections import defaultdict import itertools import logging from typing import ( @@ -19,6 +20,7 @@ from typing import ( AbstractSet, ChainMap, Dict, + List, Mapping, MutableMapping, Optional, @@ -37,14 +39,18 @@ from synapse.api.constants import ( RelationTypes, ) from synapse.handlers.receipts import ReceiptEventSource +from synapse.handlers.sliding_sync.room_lists import RoomsForUserType from synapse.logging.opentracing import trace from synapse.storage.databases.main.receipts import ReceiptInRoom +from synapse.storage.databases.main.relations import ThreadUpdateInfo from synapse.types import ( DeviceListUpdates, JsonMapping, MultiWriterStreamToken, + RoomStreamToken, SlidingSyncStreamToken, StrCollection, + StreamKeyType, StreamToken, ThreadSubscriptionsToken, ) @@ -60,6 +66,7 @@ from synapse.util.async_helpers import ( concurrently_execute, gather_optional_coroutines, ) +from synapse.visibility import filter_events_for_client _ThreadSubscription: TypeAlias = ( SlidingSyncResult.Extensions.ThreadSubscriptionsExtension.ThreadSubscription @@ -84,6 +91,7 @@ class SlidingSyncExtensionHandler: self.device_handler = hs.get_device_handler() self.push_rules_handler = hs.get_push_rules_handler() self.relations_handler = hs.get_relations_handler() + self._storage_controllers = hs.get_storage_controllers() self._enable_thread_subscriptions = hs.config.experimental.msc4306_enabled self._enable_threads_ext = hs.config.experimental.msc4360_enabled @@ -96,6 +104,7 @@ class SlidingSyncExtensionHandler: actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], actual_room_ids: Set[str], actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], + room_membership_for_user_at_to_token_map: Mapping[str, RoomsForUserType], to_token: StreamToken, from_token: Optional[SlidingSyncStreamToken], ) -> SlidingSyncResult.Extensions: @@ -111,6 +120,8 @@ class SlidingSyncExtensionHandler: actual_room_ids: The actual room IDs in the the Sliding Sync response. actual_room_response_map: A map of room ID to room results in the the Sliding Sync response. + room_membership_for_user_at_to_token_map: A map of room ID to the membership + information for the user in the room at the time of `to_token`. to_token: The latest point in the stream to sync up to. from_token: The point in the stream to sync from. """ @@ -191,7 +202,9 @@ class SlidingSyncExtensionHandler: threads_coro = self.get_threads_extension_response( sync_config=sync_config, threads_request=sync_config.extensions.threads, + actual_room_ids=actual_room_ids, actual_room_response_map=actual_room_response_map, + room_membership_for_user_at_to_token_map=room_membership_for_user_at_to_token_map, to_token=to_token, from_token=from_token, ) @@ -997,7 +1010,9 @@ class SlidingSyncExtensionHandler: self, sync_config: SlidingSyncConfig, threads_request: SlidingSyncConfig.Extensions.ThreadsExtension, + actual_room_ids: Set[str], actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], + room_membership_for_user_at_to_token_map: Mapping[str, RoomsForUserType], to_token: StreamToken, from_token: Optional[SlidingSyncStreamToken], ) -> Optional[SlidingSyncResult.Extensions.ThreadsExtension]: @@ -1009,6 +1024,8 @@ class SlidingSyncExtensionHandler: actual_room_response_map: A map of room ID to room results in the sliding sync response. Used to determine which threads already have events in the room timeline. 
+ room_membership_for_user_at_to_token_map: A map of room ID to the membership + information for the user in the room at the time of `to_token`. to_token: The point in the stream to sync up to. from_token: The point in the stream to sync from. @@ -1018,23 +1035,113 @@ class SlidingSyncExtensionHandler: if not threads_request.enabled: return None + + # if ( + # # No timeline for invite/knock rooms + # room_membership_for_user_at_to_token.membership + # not in (Membership.INVITE, Membership.KNOCK) + # ): + # limited = False + # # We want to start off using the `to_token` (vs `from_token`) because we look + # # backwards from the `to_token` up to the `timeline_limit` and we might not + # # reach the `from_token` before we hit the limit. We will update the room stream + # # position once we've fetched the events to point to the earliest event fetched. + # prev_batch_token = to_token + # + # # We're going to paginate backwards from the `to_token` + # to_bound = to_token.room_key + # # People shouldn't see past their leave/ban event + # if room_membership_for_user_at_to_token.membership in ( + # Membership.LEAVE, + # Membership.BAN, + # ): + # to_bound = room_membership_for_user_at_to_token.event_pos.to_room_stream_token() + # + # # For initial `/sync` (and other historical scenarios mentioned above), we + # # want to view a historical section of the timeline; to fetch events by + # # `topological_ordering` (best representation of the room DAG as others were + # # seeing it at the time). This also aligns with the order that `/messages` + # # returns events in. + # # + # # For incremental `/sync`, we want to get all updates for rooms since + # # the last `/sync` (regardless if those updates arrived late or happened + # # a while ago in the past); to fetch events by `stream_ordering` (in the + # # order they were received by the server). + # # + # # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917 + # # + # # FIXME: Using workaround for mypy, + # # https://github.com/python/mypy/issues/10740#issuecomment-1997047277 and + # # https://github.com/python/mypy/issues/17479 + # paginate_room_events_by_topological_ordering: PaginateFunction = ( + # self.store.paginate_room_events_by_topological_ordering + # ) + # paginate_room_events_by_stream_ordering: PaginateFunction = ( + # self.store.paginate_room_events_by_stream_ordering + # ) + # pagination_method: PaginateFunction = ( + # # Use `topographical_ordering` for historical events + # paginate_room_events_by_topological_ordering + # if timeline_from_bound is None + # # Use `stream_ordering` for updates + # else paginate_room_events_by_stream_ordering + # ) + # timeline_events, new_room_key, limited = await pagination_method( + # room_id=room_id, + # # The bounds are reversed so we can paginate backwards + # # (from newer to older events) starting at to_bound. + # # This ensures we fill the `limit` with the newest events first, + # from_key=to_bound, + # to_key=timeline_from_bound, + # direction=Direction.BACKWARDS, + # limit=room_sync_config.timeline_limit, + # ) + # + # # Make sure we don't expose any events that the client shouldn't see + # timeline_events = await filter_events_for_client( + # self.storage_controllers, + # user.to_string(), + # timeline_events, + # is_peeking=room_membership_for_user_at_to_token.membership + # != Membership.JOIN, + # filter_send_to_client=True, + # ) + + # TODO: Improve by doing subqueries for rooms where user membership is changed + # Fetch thread updates globally across all joined rooms. 
# The database layer returns a StreamToken (exclusive) for prev_batch if there # are more results. ( all_thread_updates, prev_batch_token, - ) = await self.store.get_thread_updates_for_user( - user_id=sync_config.user.to_string(), + ) = await self.store.get_thread_updates_for_rooms( + room_ids=actual_room_ids, from_token=from_token.stream_token.room_key if from_token else None, to_token=to_token.room_key, limit=threads_request.limit, - include_thread_roots=threads_request.include_roots, ) if len(all_thread_updates) == 0: return None + all_event_ids = {update.event_id for updates in all_thread_updates.values() for update in updates} + all_events = await self.store.get_events_as_list(all_event_ids) + filtered_events = await filter_events_for_client(self._storage_controllers, sync_config.user.to_string(), all_events) + + filtered_updates: Dict[str, List[ThreadUpdateInfo]] = defaultdict(list) + for thread_id, updates in all_thread_updates.items(): + for update in updates: + for ev in filtered_events: + if update.event_id == ev.event_id: + filtered_updates[thread_id].append(update) + + # Optionally fetch thread root events + thread_root_event_map = {} + if threads_request.include_roots: + thread_root_events = await self.store.get_events_as_list(filtered_updates.keys()) + thread_root_event_map = {e.event_id: e for e in thread_root_events} + # Identify which threads already have events in the room timelines. # If include_roots=False, we'll omit these threads from the extension response # since the client already sees the thread activity in the timeline. @@ -1061,12 +1168,11 @@ class SlidingSyncExtensionHandler: # Collect thread root events and get bundled aggregations. # Only fetch bundled aggregations if we have thread root events to attach them to. thread_root_events = [ - update.thread_root_event - for update in all_thread_updates + root_event + for root_event in thread_root_event_map.values() # Don't fetch bundled aggregations for threads with events already in the # timeline response since they will get filtered out later anyway. - if update.thread_root_event - and update.thread_root_event.event_id not in threads_in_timeline + if root_event.event_id not in threads_in_timeline ] aggregations_map = {} if thread_root_events: @@ -1076,23 +1182,38 @@ class SlidingSyncExtensionHandler: ) thread_updates: Dict[str, Dict[str, _ThreadUpdate]] = {} - for update in all_thread_updates: + for thread_root, updates in filtered_updates.items(): # Skip this thread if it already has events in the room timeline # (unless include_roots=True, in which case we always include it) - if update.thread_id in threads_in_timeline: + if thread_root in threads_in_timeline: continue - # Only look up bundled aggregations if we have a thread root event - bundled_aggs = ( - aggregations_map.get(update.thread_id) - if update.thread_root_event - else None - ) + # We only care about the latest update for the thread. + # Since values were obtained from the db in DESC order, the first event in + # the list will be the latest event. + update = updates[0] - thread_updates.setdefault(update.room_id, {})[update.thread_id] = ( + # # Generate prev_batch token if this thread has more than one update. + per_thread_prev_batch = None + if len(updates) > 1: + # Create a token pointing to one position before the latest event's + # stream position. + # This makes it exclusive - /relations with dir=b won't return the + # latest event again. + # Use StreamToken.START as base (all other streams at 0) since only room + # position matters. 
+ per_thread_prev_batch = StreamToken.START.copy_and_replace( + StreamKeyType.ROOM, RoomStreamToken(stream=update.stream_ordering - 1) + ) + + # Only look up bundled aggregations if we have a thread root event + bundled_aggs = (aggregations_map.get(thread_root)) + thread_root_event = thread_root_event_map.get(thread_root) + + thread_updates.setdefault(update.room_id, {})[thread_root] = ( _ThreadUpdate( - thread_root=update.thread_root_event, - prev_batch=update.prev_batch, + thread_root=thread_root_event, + prev_batch=per_thread_prev_batch, bundled_aggregations=bundled_aggs, ) ) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 5d412d38b3..96be8e7ffc 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -18,6 +18,7 @@ # # +from collections import defaultdict import logging from typing import ( TYPE_CHECKING, @@ -37,7 +38,7 @@ from typing import ( import attr -from synapse.api.constants import MAIN_TIMELINE, Direction, Membership, RelationTypes +from synapse.api.constants import MAIN_TIMELINE, Direction, RelationTypes from synapse.api.errors import SynapseError from synapse.events import EventBase from synapse.storage._base import SQLBaseStore @@ -117,10 +118,9 @@ class ThreadUpdateInfo: re-receiving the latest event that was already included in the sliding sync response. """ - thread_id: str + event_id: str room_id: str - thread_root_event: Optional[EventBase] - prev_batch: Optional[StreamToken] + stream_ordering: int class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): @@ -1150,15 +1150,14 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): "get_related_thread_id", _get_related_thread_id ) - async def get_thread_updates_for_user( + async def get_thread_updates_for_rooms( self, *, - user_id: str, + room_ids: Collection[str], from_token: Optional[RoomStreamToken] = None, to_token: Optional[RoomStreamToken] = None, limit: int = 5, - include_thread_roots: bool = False, - ) -> Tuple[Sequence[ThreadUpdateInfo], Optional[StreamToken]]: + ) -> Tuple[Dict[str, List[ThreadUpdateInfo]], Optional[StreamToken]]: """Get a list of updated threads, ordered by stream ordering of their latest reply, filtered to only include threads in rooms where the user is currently joined. @@ -1184,74 +1183,53 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): # Ensure bad limits aren't being passed in. assert limit > 0 - # Generate the pagination clause, if necessary. - # - # Find any threads where the latest reply is between the stream ordering bounds. - pagination_clause = "" - pagination_args: List[str] = [] - if from_token: - from_bound = from_token.stream - pagination_clause += " AND stream_ordering > ?" - pagination_args.append(str(from_bound)) - - if to_token: - to_bound = to_token.stream - pagination_clause += " AND stream_ordering <= ?" - pagination_args.append(str(to_bound)) - - # Build the update count clause - count events in the thread within the sync window - update_count_clause = "" - update_count_args: List[str] = [] - update_count_clause = f""" - (SELECT COUNT(*) - FROM event_relations AS er - INNER JOIN events AS e ON er.event_id = e.event_id - WHERE er.relates_to_id = threads.thread_id - AND er.relation_type = '{RelationTypes.THREAD}'""" - if from_token: - update_count_clause += " AND e.stream_ordering > ?" - update_count_args.append(str(from_token.stream)) - if to_token: - update_count_clause += " AND e.stream_ordering <= ?" 
- update_count_args.append(str(to_token.stream)) - update_count_clause += ")" - - # Filter threads to only those in rooms where the user is currently joined. - sql = f""" - SELECT thread_id, room_id, stream_ordering, {update_count_clause} AS update_count - FROM threads - WHERE EXISTS ( - SELECT 1 - FROM local_current_membership AS lcm - WHERE lcm.room_id = threads.room_id - AND lcm.user_id = ? - AND lcm.membership = ? - ) - {pagination_clause} - ORDER BY stream_ordering DESC - LIMIT ? - """ + if len(room_ids) == 0: + return ({}), None def _get_thread_updates_for_user_txn( txn: LoggingTransaction, - ) -> Tuple[List[Tuple[str, str, int, int]], Optional[int]]: - # Add 1 to the limit as a free way of determining if there are more results - # than the limit amount. If `limit + 1` results are returned, then there are - # more results. Otherwise we would need to do a separate query to determine - # if this was true when exactly `limit` results are returned. + ) -> Tuple[List[Tuple[str, str, str, int]], Optional[int]]: + room_clause, room_id_values = make_in_list_sql_clause(txn.database_engine, "e.room_id", room_ids) + + # Generate the pagination clause, if necessary. + pagination_clause = "" + pagination_args: List[str] = [] + if from_token: + from_bound = from_token.stream + pagination_clause += " AND stream_ordering > ?" + pagination_args.append(str(from_bound)) + + if to_token: + to_bound = to_token.stream + pagination_clause += " AND stream_ordering <= ?" + pagination_args.append(str(to_bound)) + + # TODO: improve the fact that multiple hits for the same thread means we + # won't get as many updates for the sss response + + # Find any thread events between the stream ordering bounds. + sql = f""" + SELECT e.event_id, er.relates_to_id, e.room_id, e.stream_ordering + FROM event_relations AS er + INNER JOIN events AS e ON er.event_id = e.event_id + WHERE er.relation_type = '{RelationTypes.THREAD}' + AND {room_clause} + {pagination_clause} + ORDER BY stream_ordering DESC + LIMIT ? + """ + txn.execute( sql, ( - *update_count_args, - user_id, - Membership.JOIN, + *room_id_values, *pagination_args, - limit + 1, + limit, ), ) - # SQL returns: thread_id, room_id, stream_ordering, update_count - rows = cast(List[Tuple[str, str, int, int]], txn.fetchall()) + # SQL returns: event_id, thread_id, room_id, stream_ordering + rows = cast(List[Tuple[str, str, str, int]], txn.fetchall()) # If there are more events, generate the next pagination key from the # last thread which will be returned. @@ -1261,7 +1239,7 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): # that will be the last row we return from this function. # This works as an exclusive bound that can be backpaginated from. # Use the stream_ordering field (index 2 in original rows) - next_token = rows[-2][2] + next_token = rows[-2][3] return rows[:limit], next_token @@ -1280,35 +1258,14 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): StreamKeyType.ROOM, RoomStreamToken(stream=next_token_int - 1) ) - # Optionally fetch thread root events - event_map = {} - if include_thread_roots and thread_infos: - thread_root_ids = [thread_id for thread_id, _, _, _ in thread_infos] - thread_root_events = await self.get_events_as_list(thread_root_ids) - event_map = {e.event_id: e for e in thread_root_events} - - # Build ThreadUpdateInfo objects with per-thread prev_batch tokens. 
- thread_update_infos = [] - for thread_id, room_id, stream_ordering, update_count in thread_infos: - # Generate prev_batch token if this thread has more than one update. - per_thread_prev_batch = None - if update_count > 1: - # Create a token pointing to one position before the latest event's - # stream position. - # This makes it exclusive - /relations with dir=b won't return the - # latest event again. - # Use StreamToken.START as base (all other streams at 0) since only room - # position matters. - per_thread_prev_batch = StreamToken.START.copy_and_replace( - StreamKeyType.ROOM, RoomStreamToken(stream=stream_ordering - 1) - ) - - thread_update_infos.append( + # Build ThreadUpdateInfo objects. + thread_update_infos: Dict[str, List[ThreadUpdateInfo]] = defaultdict(list) + for event_id, thread_id, room_id, stream_ordering in thread_infos: + thread_update_infos[thread_id].append( ThreadUpdateInfo( - thread_id=thread_id, + event_id=event_id, room_id=room_id, - thread_root_event=event_map.get(thread_id), - prev_batch=per_thread_prev_batch, + stream_ordering=stream_ordering, ) ) diff --git a/tests/rest/client/sliding_sync/test_extension_threads.py b/tests/rest/client/sliding_sync/test_extension_threads.py index 3855b03672..4a10313a93 100644 --- a/tests/rest/client/sliding_sync/test_extension_threads.py +++ b/tests/rest/client/sliding_sync/test_extension_threads.py @@ -134,6 +134,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # base = self.store.get_max_thread_subscriptions_stream_id() sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, # Set to 0, otherwise events will be in timeline, not extension + } + }, "extensions": { EXT_NAME: { "enabled": True, @@ -150,6 +157,7 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): {"updates": {room_id: {thread_root_id: {}}}}, ) + def test_threads_incremental_sync(self) -> None: """ Test new thread updates appear in incremental sync response. 
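For reference, a hedged sketch of the request and response shapes these test hunks exercise; the placeholder values below stand in for the constants and IDs the test file creates at runtime, and the `timeline_limit: 0` trick is exactly what the hunks above add so that thread replies surface only through the extension:

# Hedged sketch only; placeholders stand in for the test file's real constants.
EXT_NAME = "org.example.threads"  # placeholder: the tests import the real extension name
room_id = "!room:test"            # placeholder room ID
thread_root_id = "$thread_root"   # placeholder thread root event ID

sync_body = {
    "lists": {
        "foo-list": {
            # timeline_limit=0 keeps thread replies out of the room timeline,
            # so they can only arrive via the threads extension response.
            "ranges": [[0, 1]],
            "required_state": [],
            "timeline_limit": 0,
        }
    },
    "extensions": {EXT_NAME: {"enabled": True}},
}

# Expected extension payload shape, per the assertions in these tests:
# updates keyed by room ID, then by thread root event ID.
expected = {"updates": {room_id: {thread_root_id: {}}}}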
@@ -158,6 +166,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): user1_tok = self.login(user1_id, "pass") room_id = self.helper.create_room_as(user1_id, tok=user1_tok) sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + }, "extensions": { EXT_NAME: { "enabled": True, @@ -256,6 +271,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # User2 syncs with threads extension enabled sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + }, "extensions": { EXT_NAME: { "enabled": True, @@ -296,6 +318,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # Initial sync for user2 sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + }, "extensions": { EXT_NAME: { "enabled": True, @@ -380,6 +409,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # Sync with include_roots=True sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + }, "extensions": { EXT_NAME: { "enabled": True, @@ -434,6 +470,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # Sync with include_roots=False (explicitly) sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + }, "extensions": { EXT_NAME: { "enabled": True, @@ -451,6 +494,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # Also test with include_roots omitted (should behave the same) sync_body_no_param = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + }, "extensions": { EXT_NAME: { "enabled": True, @@ -478,6 +528,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # Initial sync to establish baseline sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + }, "extensions": { EXT_NAME: { "enabled": True, @@ -529,6 +586,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # Initial sync to establish baseline sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + }, "extensions": { EXT_NAME: { "enabled": True, @@ -668,6 +732,13 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # Initial sync (no from_token) sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + }, "extensions": { EXT_NAME: { "enabled": True, From 46e3f6756c88a422db2388f4f66bd75d21170539 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sat, 8 Nov 2025 10:07:46 -0700 Subject: [PATCH 141/149] Cleanup logic --- synapse/handlers/sliding_sync/extensions.py | 179 +++++++++++--------- synapse/storage/databases/main/relations.py | 22 ++- 2 files changed, 117 insertions(+), 84 deletions(-) diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 44a1cf7058..f9d2a34a31 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -38,6 +38,7 @@ from synapse.api.constants import ( MRelatesToFields, RelationTypes, ) +from synapse.events import EventBase from synapse.handlers.receipts import ReceiptEventSource from synapse.handlers.sliding_sync.room_lists import RoomsForUserType from synapse.logging.opentracing import trace @@ -1006,6 +1007,44 
@@ class SlidingSyncExtensionHandler: prev_batch=prev_batch, ) + def _extract_thread_id_from_event( + self, event: EventBase + ) -> Optional[str]: + """Extract thread ID from event if it's a thread reply. + + Args: + event: The event to check. + + Returns: + The thread ID if the event is a thread reply, None otherwise. + """ + relates_to = event.content.get(EventContentFields.RELATIONS) + if isinstance(relates_to, dict): + if relates_to.get(MRelatesToFields.REL_TYPE) == RelationTypes.THREAD: + return relates_to.get(MRelatesToFields.EVENT_ID) + return None + + def _find_threads_in_timeline( + self, + actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], + ) -> Set[str]: + """Find all thread IDs that have events in room timelines. + + Args: + actual_room_response_map: A map of room ID to room results. + + Returns: + A set of thread IDs (thread root event IDs) that appear in timelines. + """ + threads_in_timeline: Set[str] = set() + for room_result in actual_room_response_map.values(): + if room_result.timeline_events: + for event in room_result.timeline_events: + thread_id = self._extract_thread_id_from_event(event) + if thread_id: + threads_in_timeline.add(thread_id) + return threads_in_timeline + async def get_threads_extension_response( self, sync_config: SlidingSyncConfig, @@ -1109,9 +1148,18 @@ class SlidingSyncExtensionHandler: # TODO: Improve by doing subqueries for rooms where user membership is changed + # Identify which threads already have events in the room timelines. + # If include_roots=False, we'll exclude these threads from the DB query + # since the client already sees the thread activity in the timeline. + # If include_roots=True, we fetch all threads regardless, because the client + # wants the thread root events. + threads_to_exclude: Optional[Set[str]] = None + if not threads_request.include_roots: + threads_to_exclude = self._find_threads_in_timeline(actual_room_response_map) + # Fetch thread updates globally across all joined rooms. - # The database layer returns a StreamToken (exclusive) for prev_batch if there - # are more results. + # The database layer filters out excluded threads and returns a StreamToken + # (exclusive) for prev_batch if there are more results. ( all_thread_updates, prev_batch_token, @@ -1120,108 +1168,81 @@ class SlidingSyncExtensionHandler: from_token=from_token.stream_token.room_key if from_token else None, to_token=to_token.room_key, limit=threads_request.limit, + exclude_thread_ids=threads_to_exclude, ) + # Early return: no thread updates found if len(all_thread_updates) == 0: return None - all_event_ids = {update.event_id for updates in all_thread_updates.values() for update in updates} - all_events = await self.store.get_events_as_list(all_event_ids) - filtered_events = await filter_events_for_client(self._storage_controllers, sync_config.user.to_string(), all_events) - - filtered_updates: Dict[str, List[ThreadUpdateInfo]] = defaultdict(list) + # Build a mapping of event_id -> (thread_id, update) for efficient lookup + # during visibility filtering. 
+ event_to_thread_map: Dict[str, tuple[str, ThreadUpdateInfo]] = {} for thread_id, updates in all_thread_updates.items(): for update in updates: - for ev in filtered_events: - if update.event_id == ev.event_id: - filtered_updates[thread_id].append(update) + event_to_thread_map[update.event_id] = (thread_id, update) - # Optionally fetch thread root events + # Fetch and filter events for visibility + all_events = await self.store.get_events_as_list(event_to_thread_map.keys()) + filtered_events = await filter_events_for_client( + self._storage_controllers, sync_config.user.to_string(), all_events + ) + + # Rebuild thread updates from filtered events + filtered_updates: Dict[str, List[ThreadUpdateInfo]] = defaultdict(list) + for event in filtered_events: + if event.event_id in event_to_thread_map: + thread_id, update = event_to_thread_map[event.event_id] + filtered_updates[thread_id].append(update) + + # Early return: no visible thread updates after filtering + if not filtered_updates: + return None + + # Sort updates for each thread by stream_ordering DESC to ensure updates[0] is the latest. + # This is critical because the prev_batch token generation below assumes DESC order. + for updates in filtered_updates.values(): + updates.sort(key=lambda u: u.stream_ordering, reverse=True) + + # Optionally fetch thread root events and their bundled aggregations thread_root_event_map = {} + aggregations_map = {} if threads_request.include_roots: thread_root_events = await self.store.get_events_as_list(filtered_updates.keys()) thread_root_event_map = {e.event_id: e for e in thread_root_events} - # Identify which threads already have events in the room timelines. - # If include_roots=False, we'll omit these threads from the extension response - # since the client already sees the thread activity in the timeline. - # If include_roots=True, we include all threads regardless, because the client - # wants the thread root events. - threads_in_timeline: Set[str] = set() # thread_id - if not threads_request.include_roots: - for _, room_result in actual_room_response_map.items(): - if room_result.timeline_events: - for event in room_result.timeline_events: - # Check if this event is part of a thread - relates_to = event.content.get(EventContentFields.RELATIONS) - if not isinstance(relates_to, dict): - continue - - rel_type = relates_to.get(MRelatesToFields.REL_TYPE) - - # If this is a thread reply, track the thread - if rel_type == RelationTypes.THREAD: - thread_id = relates_to.get(MRelatesToFields.EVENT_ID) - if thread_id: - threads_in_timeline.add(thread_id) - - # Collect thread root events and get bundled aggregations. - # Only fetch bundled aggregations if we have thread root events to attach them to. - thread_root_events = [ - root_event - for root_event in thread_root_event_map.values() - # Don't fetch bundled aggregations for threads with events already in the - # timeline response since they will get filtered out later anyway. 
- if root_event.event_id not in threads_in_timeline - ] - aggregations_map = {} - if thread_root_events: - aggregations_map = await self.relations_handler.get_bundled_aggregations( - thread_root_events, - sync_config.user.to_string(), - ) + if thread_root_event_map: + aggregations_map = await self.relations_handler.get_bundled_aggregations( + thread_root_event_map.values(), + sync_config.user.to_string(), + ) thread_updates: Dict[str, Dict[str, _ThreadUpdate]] = {} for thread_root, updates in filtered_updates.items(): - # Skip this thread if it already has events in the room timeline - # (unless include_roots=True, in which case we always include it) - if thread_root in threads_in_timeline: - continue - # We only care about the latest update for the thread. - # Since values were obtained from the db in DESC order, the first event in - # the list will be the latest event. - update = updates[0] + # After sorting above, updates[0] is guaranteed to be the latest (highest stream_ordering). + latest_update = updates[0] - # # Generate prev_batch token if this thread has more than one update. + # TODO: What if we were limited in the amount of events we fetched from the + # db? Then how can we know for sure if we missed out on additional updates + # to this thread? + + # Generate per-thread prev_batch token if this thread has multiple visible updates. per_thread_prev_batch = None if len(updates) > 1: - # Create a token pointing to one position before the latest event's - # stream position. - # This makes it exclusive - /relations with dir=b won't return the - # latest event again. - # Use StreamToken.START as base (all other streams at 0) since only room - # position matters. + # Create a token pointing to one position before the latest event's stream position. + # This makes it exclusive - /relations with dir=b won't return the latest event again. + # Use StreamToken.START as base (all other streams at 0) since only room position matters. 
per_thread_prev_batch = StreamToken.START.copy_and_replace( - StreamKeyType.ROOM, RoomStreamToken(stream=update.stream_ordering - 1) + StreamKeyType.ROOM, RoomStreamToken(stream=latest_update.stream_ordering - 1) ) - # Only look up bundled aggregations if we have a thread root event - bundled_aggs = (aggregations_map.get(thread_root)) - thread_root_event = thread_root_event_map.get(thread_root) - - thread_updates.setdefault(update.room_id, {})[thread_root] = ( - _ThreadUpdate( - thread_root=thread_root_event, - prev_batch=per_thread_prev_batch, - bundled_aggregations=bundled_aggs, - ) + thread_updates.setdefault(latest_update.room_id, {})[thread_root] = _ThreadUpdate( + thread_root=thread_root_event_map.get(thread_root), + prev_batch=per_thread_prev_batch, + bundled_aggregations=aggregations_map.get(thread_root), ) - # If after filtering we have no thread updates, return None to omit the extension - if not thread_updates: - return None - return SlidingSyncResult.Extensions.ThreadsExtension( updates=thread_updates, prev_batch=prev_batch_token, diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 96be8e7ffc..6cc4f512a4 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -1157,25 +1157,26 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): from_token: Optional[RoomStreamToken] = None, to_token: Optional[RoomStreamToken] = None, limit: int = 5, + exclude_thread_ids: Optional[Collection[str]] = None, ) -> Tuple[Dict[str, List[ThreadUpdateInfo]], Optional[StreamToken]]: """Get a list of updated threads, ordered by stream ordering of their latest reply, filtered to only include threads in rooms where the user is currently joined. Args: - user_id: The user ID to fetch thread updates for. Only threads in rooms - where this user is currently joined will be returned. + room_ids: The room IDs to fetch thread updates for. from_token: The lower bound (exclusive) for thread updates. If None, fetch from the start of the room timeline. to_token: The upper bound (inclusive) for thread updates. If None, fetch up to the current position in the room timeline. limit: Maximum number of thread updates to return. - include_thread_roots: If True, fetch and return the thread root EventBase - objects. If False, return None for the thread_root_event field. + exclude_thread_ids: Optional collection of thread root event IDs to exclude + from the results. Useful for filtering out threads already visible + in the room timeline. Returns: A tuple of: - A list of ThreadUpdateInfo objects containing thread update information, + A dict mapping thread_id to list of ThreadUpdateInfo objects, ordered by stream_ordering descending (most recent first). A prev_batch StreamToken (exclusive) if there are more results available, None otherwise. @@ -1204,6 +1205,15 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): pagination_clause += " AND stream_ordering <= ?" pagination_args.append(str(to_bound)) + # Generate the exclusion clause for thread IDs, if necessary. 
+ exclusion_clause = "" + exclusion_args: List[str] = [] + if exclude_thread_ids: + exclusion_clause, exclusion_args = make_in_list_sql_clause( + txn.database_engine, "er.relates_to_id", exclude_thread_ids, negative=True, + ) + exclusion_clause = f" AND {exclusion_clause}" + # TODO: improve the fact that multiple hits for the same thread means we # won't get as many updates for the sss response @@ -1214,6 +1224,7 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): INNER JOIN events AS e ON er.event_id = e.event_id WHERE er.relation_type = '{RelationTypes.THREAD}' AND {room_clause} + {exclusion_clause} {pagination_clause} ORDER BY stream_ordering DESC LIMIT ? @@ -1223,6 +1234,7 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): sql, ( *room_id_values, + *exclusion_args, *pagination_args, limit, ), From 0c0ece96124ef7194714318c325b351636917cf2 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sun, 9 Nov 2025 08:29:49 -0700 Subject: [PATCH 142/149] Fix next_token logic --- synapse/handlers/sliding_sync/extensions.py | 11 ++++++----- synapse/storage/databases/main/relations.py | 6 +++++- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index f9d2a34a31..80453516b1 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -1223,13 +1223,14 @@ class SlidingSyncExtensionHandler: # After sorting above, updates[0] is guaranteed to be the latest (highest stream_ordering). latest_update = updates[0] - # TODO: What if we were limited in the amount of events we fetched from the - # db? Then how can we know for sure if we missed out on additional updates - # to this thread? - # Generate per-thread prev_batch token if this thread has multiple visible updates. + # When we hit the global limit, we generate prev_batch tokens for all threads, even if + # we only saw 1 update for them. This is to cover the case where we only saw + # a single update for a given thread, but the global limit prevent us from + # obtaining other updates which would have otherwise been included in the + # range. per_thread_prev_batch = None - if len(updates) > 1: + if len(updates) > 1 or prev_batch_token is not None: # Create a token pointing to one position before the latest event's stream position. # This makes it exclusive - /relations with dir=b won't return the latest event again. # Use StreamToken.START as base (all other streams at 0) since only room position matters. diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 6cc4f512a4..52946cd36c 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -1230,13 +1230,17 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): LIMIT ? """ + # Fetch `limit + 1` rows as a way to detect if there are more results beyond + # what we're returning. If we get exactly `limit + 1` rows back, we know there + # are more results available and we can set `next_token`. We only return the + # first `limit` rows to the caller. This avoids needing a separate COUNT query. 
txn.execute( sql, ( *room_id_values, *exclusion_args, *pagination_args, - limit, + limit + 1, ), ) From cb82a4a687e78985fb468fb9632f6cd1e039ffb9 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sun, 9 Nov 2025 08:45:52 -0700 Subject: [PATCH 143/149] Handle user leave/ban rooms to prevent leaking data --- synapse/handlers/sliding_sync/extensions.py | 179 +++++++++--------- .../sliding_sync/test_extension_threads.py | 38 +++- 2 files changed, 119 insertions(+), 98 deletions(-) diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 80453516b1..3bbfc95c46 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -35,6 +35,7 @@ from synapse.api.constants import ( AccountDataTypes, EduTypes, EventContentFields, + Membership, MRelatesToFields, RelationTypes, ) @@ -1074,80 +1075,6 @@ class SlidingSyncExtensionHandler: if not threads_request.enabled: return None - - # if ( - # # No timeline for invite/knock rooms - # room_membership_for_user_at_to_token.membership - # not in (Membership.INVITE, Membership.KNOCK) - # ): - # limited = False - # # We want to start off using the `to_token` (vs `from_token`) because we look - # # backwards from the `to_token` up to the `timeline_limit` and we might not - # # reach the `from_token` before we hit the limit. We will update the room stream - # # position once we've fetched the events to point to the earliest event fetched. - # prev_batch_token = to_token - # - # # We're going to paginate backwards from the `to_token` - # to_bound = to_token.room_key - # # People shouldn't see past their leave/ban event - # if room_membership_for_user_at_to_token.membership in ( - # Membership.LEAVE, - # Membership.BAN, - # ): - # to_bound = room_membership_for_user_at_to_token.event_pos.to_room_stream_token() - # - # # For initial `/sync` (and other historical scenarios mentioned above), we - # # want to view a historical section of the timeline; to fetch events by - # # `topological_ordering` (best representation of the room DAG as others were - # # seeing it at the time). This also aligns with the order that `/messages` - # # returns events in. - # # - # # For incremental `/sync`, we want to get all updates for rooms since - # # the last `/sync` (regardless if those updates arrived late or happened - # # a while ago in the past); to fetch events by `stream_ordering` (in the - # # order they were received by the server). - # # - # # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917 - # # - # # FIXME: Using workaround for mypy, - # # https://github.com/python/mypy/issues/10740#issuecomment-1997047277 and - # # https://github.com/python/mypy/issues/17479 - # paginate_room_events_by_topological_ordering: PaginateFunction = ( - # self.store.paginate_room_events_by_topological_ordering - # ) - # paginate_room_events_by_stream_ordering: PaginateFunction = ( - # self.store.paginate_room_events_by_stream_ordering - # ) - # pagination_method: PaginateFunction = ( - # # Use `topographical_ordering` for historical events - # paginate_room_events_by_topological_ordering - # if timeline_from_bound is None - # # Use `stream_ordering` for updates - # else paginate_room_events_by_stream_ordering - # ) - # timeline_events, new_room_key, limited = await pagination_method( - # room_id=room_id, - # # The bounds are reversed so we can paginate backwards - # # (from newer to older events) starting at to_bound. 
- # # This ensures we fill the `limit` with the newest events first, - # from_key=to_bound, - # to_key=timeline_from_bound, - # direction=Direction.BACKWARDS, - # limit=room_sync_config.timeline_limit, - # ) - # - # # Make sure we don't expose any events that the client shouldn't see - # timeline_events = await filter_events_for_client( - # self.storage_controllers, - # user.to_string(), - # timeline_events, - # is_peeking=room_membership_for_user_at_to_token.membership - # != Membership.JOIN, - # filter_send_to_client=True, - # ) - - # TODO: Improve by doing subqueries for rooms where user membership is changed - # Identify which threads already have events in the room timelines. # If include_roots=False, we'll exclude these threads from the DB query # since the client already sees the thread activity in the timeline. @@ -1157,19 +1084,95 @@ class SlidingSyncExtensionHandler: if not threads_request.include_roots: threads_to_exclude = self._find_threads_in_timeline(actual_room_response_map) - # Fetch thread updates globally across all joined rooms. - # The database layer filters out excluded threads and returns a StreamToken - # (exclusive) for prev_batch if there are more results. - ( - all_thread_updates, - prev_batch_token, - ) = await self.store.get_thread_updates_for_rooms( - room_ids=actual_room_ids, - from_token=from_token.stream_token.room_key if from_token else None, - to_token=to_token.room_key, - limit=threads_request.limit, - exclude_thread_ids=threads_to_exclude, - ) + # Separate rooms into groups based on membership status. + # For LEAVE/BAN rooms, we need to bound the to_token to prevent leaking events + # that occurred after the user left/was banned. + leave_ban_rooms: Set[str] = set() + other_rooms: Set[str] = set() + + for room_id in actual_room_ids: + membership_info = room_membership_for_user_at_to_token_map.get(room_id) + if membership_info and membership_info.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + leave_ban_rooms.add(room_id) + else: + other_rooms.add(room_id) + + # Fetch thread updates, handling LEAVE/BAN rooms separately to avoid data leaks. + all_thread_updates: Dict[str, List[ThreadUpdateInfo]] = {} + prev_batch_token: Optional[StreamToken] = None + remaining_limit = threads_request.limit + + # Query for rooms where the user has left or been banned, using their leave/ban + # event position as the upper bound to prevent seeing events after they left. + if leave_ban_rooms: + for room_id in leave_ban_rooms: + if remaining_limit <= 0: + # We've already fetched enough updates, but we still need to set + # prev_batch to indicate there are more results. + prev_batch_token = to_token + break + + membership_info = room_membership_for_user_at_to_token_map[room_id] + bounded_to_token = membership_info.event_pos.to_room_stream_token() + + ( + room_thread_updates, + room_prev_batch, + ) = await self.store.get_thread_updates_for_rooms( + room_ids={room_id}, + from_token=from_token.stream_token.room_key if from_token else None, + to_token=bounded_to_token, + limit=remaining_limit, + exclude_thread_ids=threads_to_exclude, + ) + + # Count how many updates we fetched and reduce the remaining limit + num_updates = sum(len(updates) for updates in room_thread_updates.values()) + remaining_limit -= num_updates + + # Merge results + for thread_id, updates in room_thread_updates.items(): + all_thread_updates.setdefault(thread_id, []).extend(updates) + + # If any room has a prev_batch, we should set the global prev_batch. 
+ # We use the maximum (latest) prev_batch token for backwards pagination. + if room_prev_batch is not None: + if prev_batch_token is None: + prev_batch_token = room_prev_batch + else: + # Take the maximum (latest) prev_batch token for backwards pagination + if room_prev_batch.room_key.stream > prev_batch_token.room_key.stream: + prev_batch_token = room_prev_batch + + # Query for rooms where the user is joined, invited, or knocking, using the + # normal to_token as the upper bound. + if other_rooms and remaining_limit > 0: + ( + other_thread_updates, + other_prev_batch, + ) = await self.store.get_thread_updates_for_rooms( + room_ids=other_rooms, + from_token=from_token.stream_token.room_key if from_token else None, + to_token=to_token.room_key, + limit=remaining_limit, + exclude_thread_ids=threads_to_exclude, + ) + + # Merge results + for thread_id, updates in other_thread_updates.items(): + all_thread_updates.setdefault(thread_id, []).extend(updates) + + # Merge prev_batch tokens + if other_prev_batch is not None: + if prev_batch_token is None: + prev_batch_token = other_prev_batch + else: + # Take the maximum (latest) prev_batch token for backwards pagination + if other_prev_batch.room_key.stream > prev_batch_token.room_key.stream: + prev_batch_token = other_prev_batch # Early return: no thread updates found if len(all_thread_updates) == 0: @@ -1226,7 +1229,7 @@ class SlidingSyncExtensionHandler: # Generate per-thread prev_batch token if this thread has multiple visible updates. # When we hit the global limit, we generate prev_batch tokens for all threads, even if # we only saw 1 update for them. This is to cover the case where we only saw - # a single update for a given thread, but the global limit prevent us from + # a single update for a given thread, but the global limit prevents us from # obtaining other updates which would have otherwise been included in the # range. per_thread_prev_batch = None diff --git a/tests/rest/client/sliding_sync/test_extension_threads.py b/tests/rest/client/sliding_sync/test_extension_threads.py index 4a10313a93..1e84e60046 100644 --- a/tests/rest/client/sliding_sync/test_extension_threads.py +++ b/tests/rest/client/sliding_sync/test_extension_threads.py @@ -295,12 +295,10 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): def test_threads_not_returned_after_leaving_room(self) -> None: """ - Test that thread updates are not returned after a user leaves the room, - even if the thread was updated while they were joined. + Test that thread updates are properly bounded when a user leaves a room. - This tests the known limitation: if a thread has multiple updates and the - user leaves between them, they won't see any updates (even earlier ones - while joined). + Users should see thread updates that occurred up to the point they left, + but NOT updates that occurred after they left. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -369,13 +367,33 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): # User2 incremental sync response_body, _ = self.do_sync(sync_body, tok=user2_tok, since=sync_pos) - # Assert: User2 should NOT see the thread update (they left before latest update) - # Note: This also demonstrates that only currently joined rooms are returned - user2 - # won't see the thread even though there was an update while they were joined (Reply 1) - self.assertNotIn( + # Assert: User2 SHOULD see Reply 1 (happened while joined) but NOT Reply 2 (after leaving) + self.assertIn( EXT_NAME, response_body["extensions"], - "User2 should not see thread updates after leaving the room", + "User2 should see thread updates up to the point they left", + ) + self.assertIn( + room_id, + response_body["extensions"][EXT_NAME]["updates"], + "Thread updates should include the room user2 left", + ) + self.assertIn( + thread_root, + response_body["extensions"][EXT_NAME]["updates"][room_id], + "Thread root should be in the updates", + ) + + # Verify that only a single update was seen (Reply 1) by checking that there's + # no prev_batch token. If Reply 2 was also included, there would be multiple + # updates and a prev_batch token would be present. + thread_update = response_body["extensions"][EXT_NAME]["updates"][room_id][ + thread_root + ] + self.assertNotIn( + "prev_batch", + thread_update, + "No prev_batch should be present since only one update (Reply 1) is visible", ) def test_threads_with_include_roots_true(self) -> None: From a3b34dfafdfc4f4cb8632ff4cb7aab871fab4d71 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sun, 9 Nov 2025 09:30:44 -0700 Subject: [PATCH 144/149] Run linter --- synapse/handlers/sliding_sync/extensions.py | 49 ++++++++++++------- synapse/storage/databases/main/relations.py | 11 +++-- .../sliding_sync/test_extension_threads.py | 3 +- 3 files changed, 41 insertions(+), 22 deletions(-) diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 3bbfc95c46..de50560583 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -12,9 +12,9 @@ # . # -from collections import defaultdict import itertools import logging +from collections import defaultdict from typing import ( TYPE_CHECKING, AbstractSet, @@ -1008,9 +1008,7 @@ class SlidingSyncExtensionHandler: prev_batch=prev_batch, ) - def _extract_thread_id_from_event( - self, event: EventBase - ) -> Optional[str]: + def _extract_thread_id_from_event(self, event: EventBase) -> Optional[str]: """Extract thread ID from event if it's a thread reply. Args: @@ -1082,7 +1080,9 @@ class SlidingSyncExtensionHandler: # wants the thread root events. threads_to_exclude: Optional[Set[str]] = None if not threads_request.include_roots: - threads_to_exclude = self._find_threads_in_timeline(actual_room_response_map) + threads_to_exclude = self._find_threads_in_timeline( + actual_room_response_map + ) # Separate rooms into groups based on membership status. 
# For LEAVE/BAN rooms, we need to bound the to_token to prevent leaking events @@ -1130,7 +1130,9 @@ class SlidingSyncExtensionHandler: ) # Count how many updates we fetched and reduce the remaining limit - num_updates = sum(len(updates) for updates in room_thread_updates.values()) + num_updates = sum( + len(updates) for updates in room_thread_updates.values() + ) remaining_limit -= num_updates # Merge results @@ -1144,7 +1146,10 @@ class SlidingSyncExtensionHandler: prev_batch_token = room_prev_batch else: # Take the maximum (latest) prev_batch token for backwards pagination - if room_prev_batch.room_key.stream > prev_batch_token.room_key.stream: + if ( + room_prev_batch.room_key.stream + > prev_batch_token.room_key.stream + ): prev_batch_token = room_prev_batch # Query for rooms where the user is joined, invited, or knocking, using the @@ -1171,7 +1176,10 @@ class SlidingSyncExtensionHandler: prev_batch_token = other_prev_batch else: # Take the maximum (latest) prev_batch token for backwards pagination - if other_prev_batch.room_key.stream > prev_batch_token.room_key.stream: + if ( + other_prev_batch.room_key.stream + > prev_batch_token.room_key.stream + ): prev_batch_token = other_prev_batch # Early return: no thread updates found @@ -1211,13 +1219,17 @@ class SlidingSyncExtensionHandler: thread_root_event_map = {} aggregations_map = {} if threads_request.include_roots: - thread_root_events = await self.store.get_events_as_list(filtered_updates.keys()) + thread_root_events = await self.store.get_events_as_list( + filtered_updates.keys() + ) thread_root_event_map = {e.event_id: e for e in thread_root_events} if thread_root_event_map: - aggregations_map = await self.relations_handler.get_bundled_aggregations( - thread_root_event_map.values(), - sync_config.user.to_string(), + aggregations_map = ( + await self.relations_handler.get_bundled_aggregations( + thread_root_event_map.values(), + sync_config.user.to_string(), + ) ) thread_updates: Dict[str, Dict[str, _ThreadUpdate]] = {} @@ -1238,13 +1250,16 @@ class SlidingSyncExtensionHandler: # This makes it exclusive - /relations with dir=b won't return the latest event again. # Use StreamToken.START as base (all other streams at 0) since only room position matters. 
per_thread_prev_batch = StreamToken.START.copy_and_replace( - StreamKeyType.ROOM, RoomStreamToken(stream=latest_update.stream_ordering - 1) + StreamKeyType.ROOM, + RoomStreamToken(stream=latest_update.stream_ordering - 1), ) - thread_updates.setdefault(latest_update.room_id, {})[thread_root] = _ThreadUpdate( - thread_root=thread_root_event_map.get(thread_root), - prev_batch=per_thread_prev_batch, - bundled_aggregations=aggregations_map.get(thread_root), + thread_updates.setdefault(latest_update.room_id, {})[thread_root] = ( + _ThreadUpdate( + thread_root=thread_root_event_map.get(thread_root), + prev_batch=per_thread_prev_batch, + bundled_aggregations=aggregations_map.get(thread_root), + ) ) return SlidingSyncResult.Extensions.ThreadsExtension( diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 52946cd36c..90c2206e60 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -18,8 +18,8 @@ # # -from collections import defaultdict import logging +from collections import defaultdict from typing import ( TYPE_CHECKING, Collection, @@ -1190,7 +1190,9 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): def _get_thread_updates_for_user_txn( txn: LoggingTransaction, ) -> Tuple[List[Tuple[str, str, str, int]], Optional[int]]: - room_clause, room_id_values = make_in_list_sql_clause(txn.database_engine, "e.room_id", room_ids) + room_clause, room_id_values = make_in_list_sql_clause( + txn.database_engine, "e.room_id", room_ids + ) # Generate the pagination clause, if necessary. pagination_clause = "" @@ -1210,7 +1212,10 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): exclusion_args: List[str] = [] if exclude_thread_ids: exclusion_clause, exclusion_args = make_in_list_sql_clause( - txn.database_engine, "er.relates_to_id", exclude_thread_ids, negative=True, + txn.database_engine, + "er.relates_to_id", + exclude_thread_ids, + negative=True, ) exclusion_clause = f" AND {exclusion_clause}" diff --git a/tests/rest/client/sliding_sync/test_extension_threads.py b/tests/rest/client/sliding_sync/test_extension_threads.py index 1e84e60046..fdb422a629 100644 --- a/tests/rest/client/sliding_sync/test_extension_threads.py +++ b/tests/rest/client/sliding_sync/test_extension_threads.py @@ -138,7 +138,7 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): "foo-list": { "ranges": [[0, 1]], "required_state": [], - "timeline_limit": 0, # Set to 0, otherwise events will be in timeline, not extension + "timeline_limit": 0, # Set to 0, otherwise events will be in timeline, not extension } }, "extensions": { @@ -157,7 +157,6 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): {"updates": {room_id: {thread_root_id: {}}}}, ) - def test_threads_incremental_sync(self) -> None: """ Test new thread updates appear in incremental sync response. 
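As background for the per-thread `prev_batch` tokens generated above, a hedged sketch of how a client might consume one: the token is exclusive and feeds the existing `/relations` endpoint with the standard `dir`/`from`/`limit` parameters (nothing here is added by this series; `requests` is only used for brevity):

# Hedged sketch: backwards pagination of a single thread using the per-thread
# prev_batch token from the threads extension. Because the token is exclusive,
# the latest update already delivered in the sliding sync response is not
# returned again.
import urllib.parse
import requests

def fetch_older_thread_events(homeserver, access_token, room_id, thread_root_id, prev_batch, limit=20):
    path = (
        f"/_matrix/client/v1/rooms/{urllib.parse.quote(room_id)}"
        f"/relations/{urllib.parse.quote(thread_root_id)}/m.thread"
    )
    resp = requests.get(
        homeserver + path,
        params={"dir": "b", "from": prev_batch, "limit": str(limit)},
        headers={"Authorization": f"Bearer {access_token}"},
        timeout=10,
    )
    resp.raise_for_status()
    body = resp.json()
    # "chunk" holds the older thread events; "next_batch" continues pagination.
    return body["chunk"], body.get("next_batch")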
From f59419377d4ef34952ee7ab898668ca94cd5d6d4 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sun, 9 Nov 2025 09:35:11 -0700 Subject: [PATCH 145/149] Refactor for clarity --- synapse/handlers/sliding_sync/extensions.py | 79 +++++++++++++-------- 1 file changed, 49 insertions(+), 30 deletions(-) diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index de50560583..c2dffa4bf6 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -1044,6 +1044,43 @@ class SlidingSyncExtensionHandler: threads_in_timeline.add(thread_id) return threads_in_timeline + def _merge_prev_batch_token( + self, + current_token: Optional[StreamToken], + new_token: Optional[StreamToken], + ) -> Optional[StreamToken]: + """Merge two prev_batch tokens, taking the maximum (latest) for backwards pagination. + + Args: + current_token: The current prev_batch token (may be None) + new_token: The new prev_batch token to merge (may be None) + + Returns: + The merged token (maximum of the two, or None if both are None) + """ + if new_token is None: + return current_token + if current_token is None: + return new_token + # Take the maximum (latest) prev_batch token for backwards pagination + if new_token.room_key.stream > current_token.room_key.stream: + return new_token + return current_token + + def _merge_thread_updates( + self, + target: Dict[str, List[ThreadUpdateInfo]], + source: Dict[str, List[ThreadUpdateInfo]], + ) -> None: + """Merge thread updates from source into target. + + Args: + target: The target dict to merge into (modified in place) + source: The source dict to merge from + """ + for thread_id, updates in source.items(): + target.setdefault(thread_id, []).extend(updates) + async def get_threads_extension_response( self, sync_config: SlidingSyncConfig, @@ -1136,21 +1173,12 @@ class SlidingSyncExtensionHandler: remaining_limit -= num_updates # Merge results - for thread_id, updates in room_thread_updates.items(): - all_thread_updates.setdefault(thread_id, []).extend(updates) + self._merge_thread_updates(all_thread_updates, room_thread_updates) - # If any room has a prev_batch, we should set the global prev_batch. - # We use the maximum (latest) prev_batch token for backwards pagination. - if room_prev_batch is not None: - if prev_batch_token is None: - prev_batch_token = room_prev_batch - else: - # Take the maximum (latest) prev_batch token for backwards pagination - if ( - room_prev_batch.room_key.stream - > prev_batch_token.room_key.stream - ): - prev_batch_token = room_prev_batch + # Merge prev_batch tokens + prev_batch_token = self._merge_prev_batch_token( + prev_batch_token, room_prev_batch + ) # Query for rooms where the user is joined, invited, or knocking, using the # normal to_token as the upper bound. 
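To make the merge semantics of the new `_merge_prev_batch_token` / `_merge_thread_updates` helpers concrete, a small illustrative sketch with plain integers standing in for `StreamToken` room positions (not part of the patch):

# Illustrative only: mirrors the helpers' behaviour with plain data.
# The merged prev_batch keeps the maximum (latest) position so that backwards
# pagination from it cannot skip events returned by either query.
from typing import Dict, List, Optional

def merge_prev_batch(current: Optional[int], new: Optional[int]) -> Optional[int]:
    if new is None:
        return current
    if current is None:
        return new
    return max(current, new)

def merge_thread_updates(target: Dict[str, List[int]], source: Dict[str, List[int]]) -> None:
    # Per-thread update lists are simply concatenated; each thread's updates
    # come from a single query, already ordered by stream_ordering DESC.
    for thread_id, updates in source.items():
        target.setdefault(thread_id, []).extend(updates)

assert merge_prev_batch(None, 7) == 7
assert merge_prev_batch(5, 7) == 7
assert merge_prev_batch(5, None) == 5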
@@ -1167,20 +1195,12 @@ class SlidingSyncExtensionHandler: ) # Merge results - for thread_id, updates in other_thread_updates.items(): - all_thread_updates.setdefault(thread_id, []).extend(updates) + self._merge_thread_updates(all_thread_updates, other_thread_updates) # Merge prev_batch tokens - if other_prev_batch is not None: - if prev_batch_token is None: - prev_batch_token = other_prev_batch - else: - # Take the maximum (latest) prev_batch token for backwards pagination - if ( - other_prev_batch.room_key.stream - > prev_batch_token.room_key.stream - ): - prev_batch_token = other_prev_batch + prev_batch_token = self._merge_prev_batch_token( + prev_batch_token, other_prev_batch + ) # Early return: no thread updates found if len(all_thread_updates) == 0: @@ -1210,10 +1230,9 @@ class SlidingSyncExtensionHandler: if not filtered_updates: return None - # Sort updates for each thread by stream_ordering DESC to ensure updates[0] is the latest. - # This is critical because the prev_batch token generation below assumes DESC order. - for updates in filtered_updates.values(): - updates.sort(key=lambda u: u.stream_ordering, reverse=True) + # Note: Updates are already sorted by stream_ordering DESC from the database query, + # and filter_events_for_client preserves order, so updates[0] is guaranteed to be + # the latest event for each thread. # Optionally fetch thread root events and their bundled aggregations thread_root_event_map = {} From 78e8ec61616079c8e48445f6df6d4e737d46d5d6 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sun, 9 Nov 2025 09:44:52 -0700 Subject: [PATCH 146/149] Add test for room list filtering --- .../sliding_sync/test_extension_threads.py | 121 ++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/tests/rest/client/sliding_sync/test_extension_threads.py b/tests/rest/client/sliding_sync/test_extension_threads.py index fdb422a629..a08eff3cc6 100644 --- a/tests/rest/client/sliding_sync/test_extension_threads.py +++ b/tests/rest/client/sliding_sync/test_extension_threads.py @@ -922,3 +922,124 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): ) # Verify the thread root event is present self.assertIn("thread_root", thread_updates[thread_root_id]) + + def test_threads_only_from_rooms_in_list(self) -> None: + """ + Test that thread updates are only returned for rooms that are in the + sliding sync response, not from all rooms the user is joined to. + + This tests the scenario where a user is joined to multiple rooms but + the room list range/limit means only some rooms are in the response. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create three rooms + room_a_id = self.helper.create_room_as(user1_id, tok=user1_tok) + room_b_id = self.helper.create_room_as(user1_id, tok=user1_tok) + room_c_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create threads in all three rooms + thread_a_root = self.helper.send(room_a_id, body="Thread A", tok=user1_tok)[ + "event_id" + ] + thread_b_root = self.helper.send(room_b_id, body="Thread B", tok=user1_tok)[ + "event_id" + ] + thread_c_root = self.helper.send(room_c_id, body="Thread C", tok=user1_tok)[ + "event_id" + ] + + # Do an initial sync to get the sync position and see room ordering + initial_sync_body = { + "lists": { + "all-rooms": { + "ranges": [[0, 2]], + "required_state": [], + "timeline_limit": 0, + } + }, + } + response_body, sync_pos = self.do_sync(initial_sync_body, tok=user1_tok) + + # Add replies to all threads after the initial sync + self.helper.send_event( + room_a_id, + type="m.room.message", + content={ + "msgtype": "m.text", + "body": "Reply to A", + "m.relates_to": { + "rel_type": RelationTypes.THREAD, + "event_id": thread_a_root, + }, + }, + tok=user1_tok, + ) + self.helper.send_event( + room_b_id, + type="m.room.message", + content={ + "msgtype": "m.text", + "body": "Reply to B", + "m.relates_to": { + "rel_type": RelationTypes.THREAD, + "event_id": thread_b_root, + }, + }, + tok=user1_tok, + ) + self.helper.send_event( + room_c_id, + type="m.room.message", + content={ + "msgtype": "m.text", + "body": "Reply to C", + "m.relates_to": { + "rel_type": RelationTypes.THREAD, + "event_id": thread_c_root, + }, + }, + tok=user1_tok, + ) + + # Now do a sync with a limited range that excludes the last room + sync_body = { + "lists": { + "limited-list": { + "ranges": [[0, 1]], # Only include first 2 rooms + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + EXT_NAME: { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok, since=sync_pos) + + # Get which rooms were included in this limited response + included_rooms = set( + response_body["lists"]["limited-list"]["ops"][0]["room_ids"] + ) + excluded_room = ({room_a_id, room_b_id, room_c_id} - included_rooms).pop() + + # Assert: Only threads from rooms in the response should be included + thread_updates = response_body["extensions"][EXT_NAME]["updates"] + + # Check that included rooms have thread updates + for room_id in included_rooms: + self.assertIn( + room_id, + thread_updates, + f"Room {room_id} should have thread updates since it's in the room list", + ) + + # Check that the excluded room is NOT present + self.assertNotIn( + excluded_room, + thread_updates, + f"Room {excluded_room} should NOT have thread updates since it's excluded from the room list", + ) From 934f99a694b8a55b4ece1a5a2a318acb6075225d Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sun, 9 Nov 2025 12:09:56 -0700 Subject: [PATCH 147/149] Add wait_for_new_data tests --- .../sliding_sync/test_extension_threads.py | 122 +++++++++++++++++- 1 file changed, 121 insertions(+), 1 deletion(-) diff --git a/tests/rest/client/sliding_sync/test_extension_threads.py b/tests/rest/client/sliding_sync/test_extension_threads.py index a08eff3cc6..cfbc3a2155 100644 --- a/tests/rest/client/sliding_sync/test_extension_threads.py +++ b/tests/rest/client/sliding_sync/test_extension_threads.py @@ -19,10 +19,11 @@ import synapse.rest.admin from synapse.api.constants import RelationTypes from 
synapse.rest.client import login, relations, room, sync from synapse.server import HomeServer -from synapse.types import JsonDict +from synapse.types import JsonDict, StreamKeyType from synapse.util.clock import Clock from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase +from tests.server import TimedOutException logger = logging.getLogger(__name__) @@ -1043,3 +1044,122 @@ class SlidingSyncThreadsExtensionTestCase(SlidingSyncBase): thread_updates, f"Room {excluded_room} should NOT have thread updates since it's excluded from the room list", ) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. + + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create a thread + thread_root = self.helper.send(room_id, body="Thread root", tok=user1_tok)[ + "event_id" + ] + + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id: { + "required_state": [], + "timeline_limit": 0, + }, + }, + "extensions": { + EXT_NAME: { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make an incremental Sliding Sync request with the threads extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Send a thread reply to trigger new results + self.helper.send_event( + room_id, + type="m.room.message", + content={ + "msgtype": "m.text", + "body": "Reply in thread", + "m.relates_to": { + "rel_type": RelationTypes.THREAD, + "event_id": thread_root, + }, + }, + tok=user2_tok, + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should see the new thread update + self.assertIn( + thread_root, + channel.json_body["extensions"][EXT_NAME]["updates"][room_id], + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the threads extension doesn't trigger a false-positive for new data. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + EXT_NAME: { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. 
+        self._bump_notifier_wait_for_events(
+            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
+        )
+        # Block for a little bit more to ensure we don't see any new results.
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=4000)
+        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+        # 5000 + 4000 + 1200 > 10000)
+        channel.await_result(timeout_ms=1200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Should be no thread updates
+        self.assertNotIn(
+            EXT_NAME,
+            channel.json_body.get("extensions", {}),
+        )

From f778ac32c112a94061e80a7e64dc6a04c382e0b9 Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Sun, 9 Nov 2025 12:37:04 -0700
Subject: [PATCH 148/149] Update docstring

---
 synapse/storage/databases/main/relations.py | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 5829aed84e..48acc56f8c 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -101,14 +101,9 @@ class ThreadUpdateInfo:
     Information about a thread update for the sliding sync threads extension.
 
     Attributes:
-        thread_id: The event ID of the thread root event (the event that started the thread).
+        event_id: The event ID of the event in the thread.
         room_id: The room ID where this thread exists.
-        thread_root_event: The actual EventBase object for the thread root event,
-            if include_thread_roots was True in the request. Otherwise None.
-        prev_batch: A pagination token (exclusive) for fetching older events in this thread.
-            Only present if update_count > 1. This token can be used with the /relations
-            endpoint with dir=b to paginate backwards through the thread's history without
-            re-receiving the latest event that was already included in the sliding sync response.
+        stream_ordering: The stream ordering of this event.
     """
 
     event_id: str

From 6fa43cb0b440c6d4b139b558f358d14643fa391e Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Sun, 9 Nov 2025 12:43:41 -0700
Subject: [PATCH 149/149] Comment cleanup

---
 synapse/handlers/sliding_sync/extensions.py | 12 ++----------
 synapse/storage/databases/main/relations.py |  2 +-
 2 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py
index cd999b5cec..d62f2d675f 100644
--- a/synapse/handlers/sliding_sync/extensions.py
+++ b/synapse/handlers/sliding_sync/extensions.py
@@ -1029,7 +1029,7 @@ class SlidingSyncExtensionHandler:
             actual_room_response_map: A map of room ID to room results.
 
         Returns:
-            A set of thread IDs (thread root event IDs) that appear in timelines.
+            A set of thread IDs (thread root event IDs) that appear in the timeline.
         """
         threads_in_timeline: set[str] = set()
         for room_result in actual_room_response_map.values():
@@ -1058,7 +1058,6 @@ class SlidingSyncExtensionHandler:
             return current_token
         if current_token is None:
            return new_token
-        # Take the maximum (latest) prev_batch token for backwards pagination
         if new_token.room_key.stream > current_token.room_key.stream:
             return new_token
         return current_token
@@ -1092,6 +1091,7 @@ class SlidingSyncExtensionHandler:
         Args:
             sync_config: Sync configuration.
             threads_request: The threads extension from the request.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
             actual_room_response_map: A map of room ID to room results in the sliding sync response.
Used to determine which threads already have events in the room timeline. @@ -1168,10 +1168,7 @@ class SlidingSyncExtensionHandler: ) remaining_limit -= num_updates - # Merge results self._merge_thread_updates(all_thread_updates, room_thread_updates) - - # Merge prev_batch tokens prev_batch_token = self._merge_prev_batch_token( prev_batch_token, room_prev_batch ) @@ -1190,15 +1187,11 @@ class SlidingSyncExtensionHandler: exclude_thread_ids=threads_to_exclude, ) - # Merge results self._merge_thread_updates(all_thread_updates, other_thread_updates) - - # Merge prev_batch tokens prev_batch_token = self._merge_prev_batch_token( prev_batch_token, other_prev_batch ) - # Early return: no thread updates found if len(all_thread_updates) == 0: return None @@ -1222,7 +1215,6 @@ class SlidingSyncExtensionHandler: thread_id, update = event_to_thread_map[event.event_id] filtered_updates[thread_id].append(update) - # Early return: no visible thread updates after filtering if not filtered_updates: return None diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 48acc56f8c..c367c8a071 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -1208,7 +1208,7 @@ class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore): exclusion_clause = f" AND {exclusion_clause}" # TODO: improve the fact that multiple hits for the same thread means we - # won't get as many updates for the sss response + # won't get as many overall updates for the sss response # Find any thread events between the stream ordering bounds. sql = f"""