Port Clock functions to use Duration class (#19229)
This changes the arguments of the clock functions to take `Duration`, and converts call sites and constants to `Duration`. A few more functions still need converting (e.g. `timeout_deferred`), but we leave those to another PR. We also change `.as_secs()` to return a float, as the rounding to whole seconds broke things subtly; the only reason to keep the method at all (it is equivalent to `timedelta.total_seconds()`) is for symmetry with `as_millis()`. Follows on from https://github.com/element-hq/synapse/pull/19223
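For reference, this is roughly the `Duration` surface that the converted call sites below rely on. It is a minimal sketch inferred from this diff and the description above, not the actual `synapse.util.duration` implementation; the internal representation, the exact return types, and the set of constructor keywords beyond `seconds`/`milliseconds` are assumptions.

```python
class Duration:
    """Illustrative stand-in for `synapse.util.duration.Duration`.

    Only the pieces exercised by this diff are sketched here; the real
    class may store and expose values differently.
    """

    def __init__(self, *, seconds: float = 0, milliseconds: float = 0) -> None:
        # Normalise to milliseconds, matching the codebase's historical
        # `*_MS` constants.
        self._millis = seconds * 1000 + milliseconds

    def as_secs(self) -> float:
        # Deliberately a float: rounding to whole seconds broke things
        # subtly (same idea as `timedelta.total_seconds()`).
        return self._millis / 1000

    def as_millis(self) -> float:
        # Return type assumed; kept for symmetry with `as_secs()`.
        return self._millis


# Typical conversions made throughout this commit:
#   await clock.sleep(1.0)    ->  await clock.sleep(Duration(seconds=1))
#   await clock.sleep(0.2)    ->  await clock.sleep(Duration(milliseconds=200))
#   reactor.advance(SCHEDULE_INTERVAL_MS / 1000)
#                             ->  reactor.advance(SCHEDULE_INTERVAL.as_secs())
```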
@@ -30,6 +30,7 @@ from synapse.http.server import JsonResource
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util.cancellation import cancellable
+from synapse.util.duration import Duration
 from synapse.util.ratelimitutils import FederationRateLimiter

 from tests import unittest

@@ -53,13 +54,13 @@ class CancellableFederationServlet(BaseFederationServlet):
     async def on_GET(
         self, origin: str, content: None, query: dict[bytes, list[bytes]]
     ) -> tuple[int, JsonDict]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, {"result": True}

     async def on_POST(
         self, origin: str, content: JsonDict, query: dict[bytes, list[bytes]]
     ) -> tuple[int, JsonDict]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, {"result": True}

@@ -250,7 +250,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         self.assertEqual(10, len(res))

         # wait for the task scheduler to do a second delete pass
-        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)
+        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs())

         # remaining messages should now be deleted
         res = self.get_success(

@@ -544,7 +544,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         )
         self.assertEqual(rows, [(2, [ROOM_ID, []])])

-        self.reactor.advance(FORGET_TIMEOUT)
+        self.reactor.advance(FORGET_TIMEOUT.as_secs())

         rows, _, _ = self.get_success(
             self.handler.get_all_typing_updates(

@@ -34,6 +34,7 @@ from synapse.rest.client._base import client_patterns
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util.cancellation import cancellable
+from synapse.util.duration import Duration

 from tests import unittest
 from tests.http.server._base import test_disconnect

@@ -108,11 +109,11 @@ class CancellableRestServlet(RestServlet):

     @cancellable
     async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, {"result": True}

     async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, {"result": True}

@@ -37,6 +37,7 @@ from synapse.logging.opentracing import (
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration

 from tests.server import get_clock

@@ -184,7 +185,7 @@ class LogContextScopeManagerTestCase(TestCase):
             scopes.append(scope)

             self.assertEqual(self._tracer.active_span, scope.span)
-            await clock.sleep(4)
+            await clock.sleep(Duration(seconds=4))
             self.assertEqual(self._tracer.active_span, scope.span)
             scope.close()

@@ -194,7 +195,7 @@ class LogContextScopeManagerTestCase(TestCase):
             scopes.append(root_scope)

             d1 = run_in_background(task, 1)
-            await clock.sleep(2)
+            await clock.sleep(Duration(seconds=2))
             d2 = run_in_background(task, 2)

             # because we did run_in_background, the active span should still be the

@@ -351,7 +352,7 @@ class LogContextScopeManagerTestCase(TestCase):

         # Now wait for the background process to finish
         while not callback_finished:
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))

         self.assertTrue(
             callback_finished,

@@ -418,7 +419,7 @@ class LogContextScopeManagerTestCase(TestCase):

         # Now wait for the background process to finish
         while not callback_finished:
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))

         self.assertTrue(
             callback_finished,

@@ -30,6 +30,7 @@ from synapse.replication.http._base import ReplicationEndpoint
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util.cancellation import cancellable
+from synapse.util.duration import Duration

 from tests import unittest
 from tests.http.server._base import test_disconnect

@@ -52,7 +53,7 @@ class CancellableReplicationEndpoint(ReplicationEndpoint):
     async def _handle_request(  # type: ignore[override]
         self, request: Request, content: JsonDict
     ) -> tuple[int, JsonDict]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, {"result": True}

@@ -73,7 +74,7 @@ class UncancellableReplicationEndpoint(ReplicationEndpoint):
     async def _handle_request(  # type: ignore[override]
         self, request: Request, content: JsonDict
     ) -> tuple[int, JsonDict]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, {"result": True}

@@ -31,6 +31,7 @@ from synapse.server import HomeServer
 from synapse.storage.background_updates import BackgroundUpdater
 from synapse.types import JsonDict
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration

 from tests import unittest

@@ -105,7 +106,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
         "Adds a bg update but doesn't start it"

         async def _fake_update(progress: JsonDict, batch_size: int) -> int:
-            await self.clock.sleep(0.2)
+            await self.clock.sleep(Duration(milliseconds=200))
             return batch_size

         self.store.db_pool.updates.register_background_update_handler(

@@ -44,6 +44,7 @@ from synapse.storage.databases.main.purge_events import (
 )
 from synapse.types import UserID
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration
 from synapse.util.task_scheduler import TaskScheduler

 from tests import unittest

@@ -1161,7 +1162,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         # Mock PaginationHandler.purge_room to sleep for 100s, so we have time to do a second call
         # before the purge is over. Note that it doesn't purge anymore, but we don't care.
         async def purge_room(room_id: str, force: bool) -> None:
-            await self.hs.get_clock().sleep(100)
+            await self.hs.get_clock().sleep(Duration(seconds=100))

         self.pagination_handler.purge_room = AsyncMock(side_effect=purge_room)  # type: ignore[method-assign]

@@ -1464,7 +1465,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self._is_purged(room_id)

         # Wait for next scheduler run
-        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS)
+        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs())

         self._is_purged(room_id)

@@ -1501,7 +1502,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self._is_purged(room_id)

         # Wait for next scheduler run
-        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS)
+        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs())

         # Test that all users has been kicked (room is shutdown)
         self._has_no_members(room_id)

@@ -29,6 +29,7 @@ from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_co
 from synapse.rest.client.transactions import CLEANUP_PERIOD, HttpTransactionCache
 from synapse.types import ISynapseReactor, JsonDict
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration

 from tests import unittest
 from tests.server import get_clock

@@ -93,7 +94,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
         # Ignore `multiple-internal-clocks` linter error here since we are creating a `Clock`
         # for testing purposes.
         yield defer.ensureDeferred(
-            Clock(reactor, server_name="test_server").sleep(0)  # type: ignore[multiple-internal-clocks]
+            Clock(reactor, server_name="test_server").sleep(Duration(seconds=0))  # type: ignore[multiple-internal-clocks]
         )
         return 1, {}

@@ -20,6 +20,7 @@ from synapse.rest.client import login, room, sync
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration

 from tests import unittest
 from tests.unittest import override_config

@@ -131,7 +132,7 @@ class ServerNoticesTests(unittest.HomeserverTestCase):
                 break

             # Sleep and try again.
-            self.get_success(self.clock.sleep(0.1))
+            self.get_success(self.clock.sleep(Duration(milliseconds=100)))
         else:
             self.fail(
                 f"Failed to join the server notices room. No 'join' field in sync_body['rooms']: {sync_body['rooms']}"

@@ -42,6 +42,7 @@ from synapse.state.v2 import (
 )
 from synapse.storage.databases.main.event_federation import StateDifference
 from synapse.types import EventID, StateMap
+from synapse.util.duration import Duration

 from tests import unittest

@@ -61,7 +62,7 @@ ORIGIN_SERVER_TS = 0


 class FakeClock:
-    async def sleep(self, msec: float) -> None:
+    async def sleep(self, duration: Duration) -> None:
         return None

@@ -39,6 +39,7 @@ from synapse.state.v2 import (
 )
 from synapse.types import StateMap
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration

 from tests import unittest
 from tests.state.test_v2 import TestStateResolutionStore

@@ -66,7 +67,7 @@ def monotonic_timestamp() -> int:


 class FakeClock:
-    async def sleep(self, duration_ms: float) -> None:
+    async def sleep(self, duration: Duration) -> None:
         defer.succeed(None)

@@ -26,7 +26,7 @@ from twisted.internet.defer import Deferred
 from twisted.internet.testing import MemoryReactor

 from synapse.server import HomeServer
-from synapse.storage.databases.main.lock import _LOCK_TIMEOUT_MS, _RENEWAL_INTERVAL_MS
+from synapse.storage.databases.main.lock import _LOCK_TIMEOUT_MS, _RENEWAL_INTERVAL
 from synapse.util.clock import Clock

 from tests import unittest

@@ -377,7 +377,7 @@ class ReadWriteLockTestCase(unittest.HomeserverTestCase):

         # Wait for ages with the lock, we should not be able to get the lock.
         for _ in range(10):
-            self.reactor.advance((_RENEWAL_INTERVAL_MS / 1000))
+            self.reactor.advance((_RENEWAL_INTERVAL.as_secs()))

         lock2 = self.get_success(
             self.store.try_acquire_read_write_lock("name", "key", write=True)

@@ -38,6 +38,7 @@ from synapse.storage.database import LoggingTransaction
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 from synapse.types import JsonDict
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration

 from tests import unittest
 from tests.unittest import override_config

@@ -59,7 +60,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase):

     async def update(self, progress: JsonDict, count: int) -> int:
         duration_ms = 10
-        await self.clock.sleep((count * duration_ms) / 1000)
+        await self.clock.sleep(Duration(milliseconds=count * duration_ms))
         progress = {"my_key": progress["my_key"] + 1}
         await self.store.db_pool.runInteraction(
             "update_progress",

@@ -309,7 +310,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase):

         # Run the update with the long-running update item
         async def update_long(progress: JsonDict, count: int) -> int:
-            await self.clock.sleep((count * duration_ms) / 1000)
+            await self.clock.sleep(Duration(milliseconds=count * duration_ms))
             progress = {"my_key": progress["my_key"] + 1}
             await self.store.db_pool.runInteraction(
                 "update_progress",

@@ -38,6 +38,7 @@ from synapse.logging.context import make_deferred_yieldable
 from synapse.types import JsonDict
 from synapse.util.cancellation import cancellable
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration

 from tests import unittest
 from tests.http.server._base import test_disconnect

@@ -406,11 +407,11 @@ class CancellableDirectServeJsonResource(DirectServeJsonResource):

     @cancellable
     async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, {"result": True}

     async def _async_render_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, {"result": True}

@@ -423,11 +424,11 @@ class CancellableDirectServeHtmlResource(DirectServeHtmlResource):

     @cancellable
     async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, bytes]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, b"ok"

     async def _async_render_POST(self, request: SynapseRequest) -> tuple[int, bytes]:
-        await self.clock.sleep(1.0)
+        await self.clock.sleep(Duration(seconds=1))
         return HTTPStatus.OK, b"ok"

@@ -26,6 +26,7 @@ from parameterized import parameterized
 from twisted.internet import defer

 from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
+from synapse.util.duration import Duration

 from tests.server import get_clock
 from tests.unittest import TestCase

@@ -55,7 +56,7 @@ class ResponseCacheTestCase(TestCase):
         return o

     async def delayed_return(self, o: str) -> str:
-        await self.clock.sleep(1)
+        await self.clock.sleep(Duration(seconds=1))
         return o

     def test_cache_hit(self) -> None:

@@ -182,7 +183,7 @@ class ResponseCacheTestCase(TestCase):
         async def non_caching(o: str, cache_context: ResponseCacheContext[int]) -> str:
             nonlocal call_count
             call_count += 1
-            await self.clock.sleep(1)
+            await self.clock.sleep(Duration(seconds=1))
             cache_context.should_cache = should_cache
             return o

@@ -37,6 +37,7 @@ from synapse.logging.context import (
 )
 from synapse.types import ISynapseReactor
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration

 from tests import unittest
 from tests.unittest import logcontext_clean

@@ -82,7 +83,7 @@ class LoggingContextTestCase(unittest.TestCase):
             self._check_test_key("sentinel")

             with LoggingContext(name="competing", server_name="test_server"):
-                await clock.sleep(0)
+                await clock.sleep(Duration(seconds=0))
                 self._check_test_key("competing")

             self._check_test_key("sentinel")

@@ -94,9 +95,9 @@ class LoggingContextTestCase(unittest.TestCase):
         reactor.callLater(0, lambda: defer.ensureDeferred(competing_callback()))  # type: ignore[call-later-not-tracked]

         with LoggingContext(name="foo", server_name="test_server"):
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))
             self._check_test_key("foo")
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))
             self._check_test_key("foo")

         self.assertTrue(

@@ -128,7 +129,7 @@ class LoggingContextTestCase(unittest.TestCase):
             self._check_test_key("looping_call")

             with LoggingContext(name="competing", server_name="test_server"):
-                await clock.sleep(0)
+                await clock.sleep(Duration(seconds=0))
                 self._check_test_key("competing")

             self._check_test_key("looping_call")

@@ -139,12 +140,12 @@ class LoggingContextTestCase(unittest.TestCase):

         with LoggingContext(name="foo", server_name="test_server"):
             lc = clock.looping_call(
-                lambda: defer.ensureDeferred(competing_callback()), 0
+                lambda: defer.ensureDeferred(competing_callback()), Duration(seconds=0)
             )
             self._check_test_key("foo")
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))
             self._check_test_key("foo")
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))
             self._check_test_key("foo")

         self.assertTrue(

@@ -179,7 +180,7 @@ class LoggingContextTestCase(unittest.TestCase):
             self._check_test_key("looping_call")

             with LoggingContext(name="competing", server_name="test_server"):
-                await clock.sleep(0)
+                await clock.sleep(Duration(seconds=0))
                 self._check_test_key("competing")

             self._check_test_key("looping_call")

@@ -190,10 +191,10 @@ class LoggingContextTestCase(unittest.TestCase):

         with LoggingContext(name="foo", server_name="test_server"):
             lc = clock.looping_call_now(
-                lambda: defer.ensureDeferred(competing_callback()), 0
+                lambda: defer.ensureDeferred(competing_callback()), Duration(seconds=0)
             )
             self._check_test_key("foo")
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))
             self._check_test_key("foo")

         self.assertTrue(

@@ -228,7 +229,7 @@ class LoggingContextTestCase(unittest.TestCase):
             self._check_test_key("call_later")

             with LoggingContext(name="competing", server_name="test_server"):
-                await clock.sleep(0)
+                await clock.sleep(Duration(seconds=0))
                 self._check_test_key("competing")

             self._check_test_key("call_later")

@@ -238,11 +239,13 @@ class LoggingContextTestCase(unittest.TestCase):
             callback_finished = True

         with LoggingContext(name="foo", server_name="test_server"):
-            clock.call_later(0, lambda: defer.ensureDeferred(competing_callback()))
+            clock.call_later(
+                Duration(seconds=0), lambda: defer.ensureDeferred(competing_callback())
+            )
             self._check_test_key("foo")
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))
             self._check_test_key("foo")
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))
             self._check_test_key("foo")

         self.assertTrue(

@@ -280,7 +283,7 @@ class LoggingContextTestCase(unittest.TestCase):
             self._check_test_key("foo")

             with LoggingContext(name="competing", server_name="test_server"):
-                await clock.sleep(0)
+                await clock.sleep(Duration(seconds=0))
                 self._check_test_key("competing")

             self._check_test_key("foo")

@@ -303,7 +306,7 @@ class LoggingContextTestCase(unittest.TestCase):
             await d
             self._check_test_key("foo")

-        await clock.sleep(0)
+        await clock.sleep(Duration(seconds=0))

         self.assertTrue(
             callback_finished,

@@ -338,7 +341,7 @@ class LoggingContextTestCase(unittest.TestCase):
             self._check_test_key("sentinel")

             with LoggingContext(name="competing", server_name="test_server"):
-                await clock.sleep(0)
+                await clock.sleep(Duration(seconds=0))
                 self._check_test_key("competing")

             self._check_test_key("sentinel")

@@ -364,7 +367,7 @@ class LoggingContextTestCase(unittest.TestCase):
             d.callback(None)
             self._check_test_key("foo")

-        await clock.sleep(0)
+        await clock.sleep(Duration(seconds=0))

         self.assertTrue(
             callback_finished,

@@ -400,7 +403,7 @@ class LoggingContextTestCase(unittest.TestCase):
             self._check_test_key("foo")

             with LoggingContext(name="competing", server_name="test_server"):
-                await clock.sleep(0)
+                await clock.sleep(Duration(seconds=0))
                 self._check_test_key("competing")

             self._check_test_key("foo")

@@ -446,7 +449,7 @@ class LoggingContextTestCase(unittest.TestCase):
             run_in_background(lambda: (d.callback(None), d)[1])  # type: ignore[call-overload, func-returns-value]
             self._check_test_key("foo")

-        await clock.sleep(0)
+        await clock.sleep(Duration(seconds=0))

         self.assertTrue(
             callback_finished,

@@ -486,7 +489,7 @@ class LoggingContextTestCase(unittest.TestCase):
         # Now wait for the function under test to have run, and check that
         # the logcontext is left in a sane state.
         while not callback_finished:
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))
         self._check_test_key("foo")

         self.assertTrue(

@@ -501,7 +504,7 @@ class LoggingContextTestCase(unittest.TestCase):
     async def test_run_in_background_with_blocking_fn(self) -> None:
         async def blocking_function() -> None:
             # Ignore linter error since we are creating a `Clock` for testing purposes.
-            await Clock(reactor, server_name="test_server").sleep(0)  # type: ignore[multiple-internal-clocks]
+            await Clock(reactor, server_name="test_server").sleep(Duration(seconds=0))  # type: ignore[multiple-internal-clocks]

         await self._test_run_in_background(blocking_function)

@@ -535,7 +538,9 @@ class LoggingContextTestCase(unittest.TestCase):
         async def testfunc() -> None:
             self._check_test_key("foo")
             # Ignore linter error since we are creating a `Clock` for testing purposes.
-            d = defer.ensureDeferred(Clock(reactor, server_name="test_server").sleep(0))  # type: ignore[multiple-internal-clocks]
+            d = defer.ensureDeferred(
+                Clock(reactor, server_name="test_server").sleep(Duration(seconds=0))  # type: ignore[multiple-internal-clocks]
+            )
             self.assertIs(current_context(), SENTINEL_CONTEXT)
             await d
             self._check_test_key("foo")

@@ -579,7 +584,7 @@ class LoggingContextTestCase(unittest.TestCase):
             self._check_test_key("foo")

             with LoggingContext(name="competing", server_name="test_server"):
-                await clock.sleep(0)
+                await clock.sleep(Duration(seconds=0))
                 self._check_test_key("competing")

             self._check_test_key("foo")

@@ -591,7 +596,7 @@ class LoggingContextTestCase(unittest.TestCase):
         with LoggingContext(name="foo", server_name="test_server"):
             run_coroutine_in_background(competing_callback())
             self._check_test_key("foo")
-            await clock.sleep(0)
+            await clock.sleep(Duration(seconds=0))
             self._check_test_key("foo")

         self.assertTrue(

@@ -26,6 +26,7 @@ from synapse.logging.context import make_deferred_yieldable
 from synapse.server import HomeServer
 from synapse.types import JsonMapping, ScheduledTask, TaskStatus
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration
 from synapse.util.task_scheduler import TaskScheduler

 from tests.replication._base import BaseMultiWorkerStreamTestCase

@@ -68,7 +69,7 @@ class TestTaskScheduler(HomeserverTestCase):

         # The timestamp being 30s after now the task should been executed
         # after the first scheduling loop is run
-        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)
+        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs())

         task = self.get_success(self.task_scheduler.get_task(task_id))
         assert task is not None

@@ -87,7 +88,7 @@ class TestTaskScheduler(HomeserverTestCase):
         self, task: ScheduledTask
     ) -> tuple[TaskStatus, JsonMapping | None, str | None]:
         # Sleep for a second
-        await self.hs.get_clock().sleep(1)
+        await self.hs.get_clock().sleep(Duration(seconds=1))
         return TaskStatus.COMPLETE, None, None

     def test_schedule_lot_of_tasks(self) -> None:

@@ -187,7 +188,7 @@ class TestTaskScheduler(HomeserverTestCase):

         # Simulate a synapse restart by emptying the list of running tasks
         self.task_scheduler._running_tasks = set()
-        self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000))
+        self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL.as_secs()))

         task = self.get_success(self.task_scheduler.get_task(task_id))
         assert task is not None