Move towards a dedicated Duration class (#19223)

We have various constants to try to avoid mistyping of durations, e.g.
`ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND`; however, this can get a
little verbose and doesn't help with typing.

Instead, let's move towards a dedicated `Duration` class (basically a
[`timedelta`](https://docs.python.org/3/library/datetime.html#timedelta-objects)
with helper methods).
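
For illustration, here is a minimal sketch of what such a class could look like. The `as_secs()`/`as_millis()` names match the call sites in the diff below, but the constructors and internals shown here are assumptions, not the actual implementation:

```python
# Illustrative sketch only -- not the implementation added by this PR.
from datetime import timedelta


class Duration:
    """A length of time, with explicit conversions to each unit."""

    def __init__(self, delta: timedelta) -> None:
        self._delta = delta

    @classmethod
    def from_seconds(cls, seconds: float) -> "Duration":
        return cls(timedelta(seconds=seconds))

    @classmethod
    def from_millis(cls, millis: float) -> "Duration":
        return cls(timedelta(milliseconds=millis))

    def as_secs(self) -> float:
        return self._delta.total_seconds()

    def as_millis(self) -> int:
        return int(self._delta.total_seconds() * 1000)
```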

This PR introduces the new types and converts all usages of the existing
constants over to them. Future PRs may move the clock methods (e.g.
`call_later` and `looping_call`) over to using them as well.
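
To show the shape of the change at call sites, here is a small demo reusing the `Duration` sketch above together with Twisted's test clock; the constant and its value here are hypothetical, only the pattern matters:

```python
# Demo of the call-site pattern, reusing the Duration sketch above.
from twisted.internet.task import Clock as TwistedClock

CLEANUP_PERIOD = Duration.from_millis(30 * 60 * 1000)  # hypothetical value

reactor = TwistedClock()

# Old style: raw millisecond constant, converted by hand at each call site:
#     reactor.advance((CLEANUP_PERIOD_MS - 1) / MILLISECONDS_PER_SECOND)
# New style: the unit conversion lives on the Duration itself.
reactor.advance(CLEANUP_PERIOD.as_secs() - 1)
```

Once the clock methods also accept a `Duration`, call sites such as `call_later` could take the constant directly rather than a bare number of seconds.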

Reviewable commit-by-commit.
Erik Johnston
2025-11-26 10:56:59 +00:00
committed by GitHub
parent 2741ead569
commit b74c29f694
15 changed files with 95 additions and 93 deletions

View File

@@ -17,7 +17,7 @@ from unittest.mock import AsyncMock
 from twisted.internet.testing import MemoryReactor
 from synapse.app.phone_stats_home import (
-    PHONE_HOME_INTERVAL_SECONDS,
+    PHONE_HOME_INTERVAL,
     start_phone_stats_home,
 )
 from synapse.rest import admin, login, register, room
@@ -78,7 +78,7 @@ class PhoneHomeStatsTestCase(unittest.HomeserverTestCase):
     def _get_latest_phone_home_stats(self) -> JsonDict:
         # Wait for `phone_stats_home` to be called again + a healthy margin (50s).
-        self.reactor.advance(2 * PHONE_HOME_INTERVAL_SECONDS + 50)
+        self.reactor.advance(2 * PHONE_HOME_INTERVAL.as_secs() + 50)
         # Extract the reported stats from our http client mock
         mock_calls = self.put_json_mock.call_args_list

View File

@@ -24,7 +24,7 @@ from synapse.api.errors import Codes
 from synapse.handlers.sliding_sync import room_lists
 from synapse.rest.client import login, room, sync
 from synapse.server import HomeServer
-from synapse.storage.databases.main.sliding_sync import CONNECTION_EXPIRY_MS
+from synapse.storage.databases.main.sliding_sync import CONNECTION_EXPIRY
 from synapse.util.clock import Clock
 from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
@@ -407,7 +407,7 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
         we expire the connection and ask the client to do a full resync.
         Connections are only expired if they have not been used for a minimum
-        amount of time (MINIMUM_NOT_USED_AGE_EXPIRY_MS) to avoid expiring
+        amount of time (MINIMUM_NOT_USED_AGE_EXPIRY) to avoid expiring
         connections that are actively being used.
         """
@@ -455,7 +455,7 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
         self.helper.send(room_id, "msg", tok=user2_tok)
         # Advance the clock to ensure that the last_used_ts is old enough
-        self.reactor.advance(2 * room_lists.MINIMUM_NOT_USED_AGE_EXPIRY_MS / 1000)
+        self.reactor.advance(2 * room_lists.MINIMUM_NOT_USED_AGE_EXPIRY.as_secs())
         # This sync should now raise SlidingSyncUnknownPosition
         channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok)
@@ -490,14 +490,14 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
         _, from_token = self.do_sync(sync_body, tok=user1_tok)
         # We can keep syncing so long as the interval between requests is less
-        # than CONNECTION_EXPIRY_MS
+        # than CONNECTION_EXPIRY
         for _ in range(5):
-            self.reactor.advance(0.5 * CONNECTION_EXPIRY_MS / 1000)
+            self.reactor.advance(0.5 * CONNECTION_EXPIRY.as_secs())
             _, from_token = self.do_sync(sync_body, tok=user1_tok)
         # ... but if we wait too long, the connection expires
-        self.reactor.advance(1 + CONNECTION_EXPIRY_MS / 1000)
+        self.reactor.advance(1 + CONNECTION_EXPIRY.as_secs())
         # This sync should now raise SlidingSyncUnknownPosition
         channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok)

View File

@@ -26,12 +26,9 @@ from unittest.mock import AsyncMock, Mock, call
 from twisted.internet import defer, reactor as _reactor
 from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context
-from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache
+from synapse.rest.client.transactions import CLEANUP_PERIOD, HttpTransactionCache
 from synapse.types import ISynapseReactor, JsonDict
 from synapse.util.clock import Clock
-from synapse.util.constants import (
-    MILLISECONDS_PER_SECOND,
-)
 from tests import unittest
 from tests.server import get_clock
@@ -187,7 +184,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
         )
         # Advance time just under the cleanup period.
         # Should NOT have cleaned up yet
-        self.reactor.advance((CLEANUP_PERIOD_MS - 1) / MILLISECONDS_PER_SECOND)
+        self.reactor.advance(CLEANUP_PERIOD.as_secs() - 1)
         yield self.cache.fetch_or_execute_request(
             self.mock_request, self.mock_requester, cb, "an arg"
@@ -196,7 +193,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
         cb.assert_called_once_with("an arg")
         # Advance time just after the cleanup period.
-        self.reactor.advance(2 / MILLISECONDS_PER_SECOND)
+        self.reactor.advance(2)
         yield self.cache.fetch_or_execute_request(
             self.mock_request, self.mock_requester, cb, "an arg"

View File

@@ -28,7 +28,7 @@ from synapse.rest import admin
 from synapse.rest.client import devices
 from synapse.server import HomeServer
 from synapse.storage.databases.main.deviceinbox import (
-    DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS,
+    DEVICE_FEDERATION_INBOX_CLEANUP_DELAY,
 )
 from synapse.util.clock import Clock
@@ -191,7 +191,7 @@ class DeviceInboxFederationInboxCleanupTestCase(HomeserverTestCase):
         self.db_pool = self.store.db_pool
         # Advance time to ensure we are past the cleanup delay
-        self.reactor.advance(DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS * 2 / 1000)
+        self.reactor.advance(DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs() * 2)
     def test_delete_old_federation_inbox_rows_skips_if_no_index(self) -> None:
         """Test that we don't delete rows if the index hasn't been created yet."""
@@ -245,7 +245,7 @@ class DeviceInboxFederationInboxCleanupTestCase(HomeserverTestCase):
             )
         )
-        self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS / 1000)
+        self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs())
         # Insert new messages
         for i in range(5):
@@ -293,7 +293,7 @@ class DeviceInboxFederationInboxCleanupTestCase(HomeserverTestCase):
         )
         # Advance time to ensure we are past the cleanup delay
-        self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS / 1000)
+        self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs())
         # Run the cleanup - it should delete in batches and sleep between them
         deferred = defer.ensureDeferred(