Fix sliding sync performance slowdown for long-lived connections. (#19206)
Fixes https://github.com/element-hq/synapse/issues/19175

This PR moves tracking of which lazy-loaded memberships we've sent down for each room out of the required state table. This stops that table from continuously growing, which massively helps performance, as we pull out all matching rows for the connection when we receive a request. The new table is only read when we have data in a room to send, so we end up reading far fewer rows from the DB, though we now read from that table for every room we have events to return in, rather than once at the start of the request.

For an explanation of how the new table works, see the [comment](https://github.com/element-hq/synapse/blob/erikj/sss_better_membership_storage2/synapse/storage/schema/main/delta/93/02_sliding_sync_members.sql#L15-L38) on the table schema. The table is designed so that we can later prune old entries if we wish, but that is not implemented in this PR.

Reviewable commit-by-commit.

---------

Co-authored-by: Eric Eastwood <erice@element.io>
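To make the performance claim concrete, here is a rough sketch of the two read patterns (illustrative SQL only, with assumed parameter values; the real queries live in `synapse/storage/databases/main/sliding_sync.py`, and the `sliding_sync_connection_required_state` column names are taken from the pre-existing schema):

```sql
-- Before: every request pulled all required-state rows for the connection up
-- front, and this set grew with every lazy-loaded member ever sent down.
SELECT required_state_id, required_state
FROM sliding_sync_connection_required_state
WHERE connection_key = ?;

-- After: lazy-loaded members live in their own table, which is read per room
-- and narrowed to just the user IDs relevant to the events being returned.
SELECT user_id, last_seen_ts
FROM sliding_sync_connection_lazy_members
WHERE connection_key = ? AND room_id = ? AND user_id IN (?, ?, ?);
```

(The actual lookup in this PR joins via `sliding_sync_connection_positions`, because the caller only has a connection position rather than the connection key; see the `FIXME` in `get_sliding_sync_connection_lazy_members` below.)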
changelog.d/19206.bugfix (new file, 1 line)
@@ -0,0 +1 @@
Fix sliding sync performance slowdown for long-lived connections.
@@ -14,7 +14,6 @@ import sqlglot.expressions

SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")


# The base branch we want to check against. We use the main development branch
# on the assumption that is what we are developing against.
DEVELOP_BRANCH = "develop"
@@ -17,6 +17,7 @@ import logging
from itertools import chain
from typing import TYPE_CHECKING, AbstractSet, Mapping

import attr
from prometheus_client import Histogram
from typing_extensions import assert_never
@@ -62,6 +63,7 @@ from synapse.types.handlers.sliding_sync import (
    HaveSentRoomFlag,
    MutablePerConnectionState,
    PerConnectionState,
    RoomLazyMembershipChanges,
    RoomSyncConfig,
    SlidingSyncConfig,
    SlidingSyncResult,

@@ -106,7 +108,7 @@ class SlidingSyncHandler:
        self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
        self.is_mine_id = hs.is_mine_id

        self.connection_store = SlidingSyncConnectionStore(self.store)
        self.connection_store = SlidingSyncConnectionStore(self.clock, self.store)
        self.extensions = SlidingSyncExtensionHandler(hs)
        self.room_lists = SlidingSyncRoomLists(hs)
@@ -981,14 +983,15 @@ class SlidingSyncHandler:
        #
        # Calculate the `StateFilter` based on the `required_state` for the room
        required_state_filter = StateFilter.none()
        # The requested `required_state_map` with the lazy membership expanded and
        # `$ME` replaced with the user's ID. This allows us to see, on the next
        # request, what membership we've already sent down to the client.
        #
        # Make a copy so we can modify it. Still need to be careful to make a copy of
        # the state key sets if we want to add/remove from them. We could make a deep
        # copy but this saves us some work.
        expanded_required_state_map = dict(room_sync_config.required_state_map)

        # Keep track of which users' state we may need to fetch. We split this
        # into explicit users and lazy loaded users.
        explicit_user_state = set()
        lazy_load_user_ids = set()

        # Whether lazy-loading of room members is enabled.
        lazy_load_room_members = False

        if room_membership_for_user_at_to_token.membership not in (
            Membership.INVITE,
            Membership.KNOCK,
@@ -1036,7 +1039,6 @@ class SlidingSyncHandler:
            else:
                required_state_types: list[tuple[str, str | None]] = []
                num_wild_state_keys = 0
                lazy_load_room_members = False
                num_others = 0
                for (
                    state_type,
@@ -1068,43 +1070,60 @@ class SlidingSyncHandler:
                                            timeline_event.state_key
                                        )

                            # The client needs to know the membership of everyone in
                            # the timeline we're returning.
                            lazy_load_user_ids.update(timeline_membership)

                            # Update the required state filter so we pick up the new
                            # membership
                            if limited or initial:
                                # If the timeline is limited, we only need to
                                # return the membership changes for people in
                                # the timeline.
                                for user_id in timeline_membership:
                                    required_state_types.append(
                                        (EventTypes.Member, user_id)
                                    )
                            else:
                                # For non-limited timelines we always return all
                                # membership changes. This is so that clients
                                # who have fetched the full membership list
                                # already can continue to maintain it for
                                # non-limited syncs.
                                #
                                # This assumes that for non-limited syncs there
                                # won't be many membership changes that wouldn't
                                # have been included already (this can only
                                # happen if membership state was rolled back due
                                # to state resolution anyway).
                                #
                                # `None` is a wildcard in the `StateFilter`
                                required_state_types.append((EventTypes.Member, None))

                            # Add an explicit entry for each user in the timeline
                            #
                            # Make a new set or copy of the state key set so we can
                            # modify it without affecting the original
                            # `required_state_map`
                            expanded_required_state_map[EventTypes.Member] = (
                                expanded_required_state_map.get(
                                    EventTypes.Member, set()
                                )
                                | timeline_membership
                            )
                        elif state_key == StateValues.ME:
                            num_others += 1
                            required_state_types.append((state_type, user.to_string()))
                            # Replace `$ME` with the user's ID so we can deduplicate
                            # when someone requests the same state with `$ME` or with
                            # their user ID.
                            #
                            # Make a new set or copy of the state key set so we can
                            # modify it without affecting the original
                            # `required_state_map`
                            expanded_required_state_map[EventTypes.Member] = (
                                expanded_required_state_map.get(
                                    EventTypes.Member, set()
                                )
                                | {user.to_string()}
                            # Record the extra members we're returning.
                            lazy_load_user_ids.update(
                                state_key
                                for event_type, state_key in room_state_delta_id_map
                                if event_type == EventTypes.Member
                            )
                        else:
                            num_others += 1
                            required_state_types.append((state_type, state_key))

                            # Replace `$ME` with the user's ID so we can deduplicate
                            # when someone requests the same state with `$ME` or with
                            # their user ID.
                            normalized_state_key = state_key
                            if state_key == StateValues.ME:
                                normalized_state_key = user.to_string()

                            if state_type == EventTypes.Member:
                                # Also track explicitly requested member state for
                                # lazy membership tracking.
                                explicit_user_state.add(normalized_state_key)

                            required_state_types.append(
                                (state_type, normalized_state_key)
                            )

        set_tag(
            SynapseTags.FUNC_ARG_PREFIX
@@ -1122,6 +1141,10 @@ class SlidingSyncHandler:

                required_state_filter = StateFilter.from_types(required_state_types)

        # Remove any explicitly requested user state from the lazy-loaded set,
        # as we track them separately.
        lazy_load_user_ids -= explicit_user_state

        # We need this base set of info for the response so let's just fetch it along
        # with the `required_state` for the room
        hero_room_state = [
@@ -1149,6 +1172,22 @@ class SlidingSyncHandler:
        # We can return all of the state that was requested if this was the first
        # time we've sent the room down this connection.
        room_state: StateMap[EventBase] = {}

        # Includes the state for the heroes if we need them (may contain other
        # state as well).
        hero_membership_state: StateMap[EventBase] = {}

        # By default, we mark all `lazy_load_user_ids` as being sent down
        # for the first time in this sync. We later check if we sent any of them
        # down previously and update `returned_user_id_to_last_seen_ts_map` if
        # we have.
        returned_user_id_to_last_seen_ts_map = {}
        if lazy_load_room_members:
            returned_user_id_to_last_seen_ts_map = dict.fromkeys(lazy_load_user_ids)
            new_connection_state.room_lazy_membership[room_id] = RoomLazyMembershipChanges(
                returned_user_id_to_last_seen_ts_map=returned_user_id_to_last_seen_ts_map
            )

        if initial:
            room_state = await self.get_current_state_at(
                room_id=room_id,
@@ -1156,28 +1195,97 @@ class SlidingSyncHandler:
                state_filter=state_filter,
                to_token=to_token,
            )

            # The `room_state` includes the hero membership state if needed.
            # We'll later filter this down so we don't need to do so here.
            hero_membership_state = room_state
        else:
            assert from_token is not None
            assert from_bound is not None

            if prev_room_sync_config is not None:
                # Check if there are any changes to the required state config
                # that we need to handle.
                changed_required_state_map, added_state_filter = (
                    _required_state_changes(
                        user.to_string(),
                        prev_required_state_map=prev_room_sync_config.required_state_map,
                        request_required_state_map=expanded_required_state_map,
                        state_deltas=room_state_delta_id_map,
                # Define `all_required_user_state` as all user state we want, which
                # is the explicitly requested members, any needed for lazy
                # loading, and users whose membership has changed.
                all_required_user_state = explicit_user_state | lazy_load_user_ids
                for state_type, state_key in room_state_delta_id_map:
                    if state_type == EventTypes.Member:
                        all_required_user_state.add(state_key)

                # We need to know what user state we previously sent down the
                # connection so we can determine what has changed.
                #
                # We need to fetch all users whose memberships we may want
                # to send down this sync. This includes (and matches
                # `all_required_user_state`):
                # 1. Explicitly requested user state
                # 2. Lazy loaded members, i.e. users who appear in the
                #    timeline.
                # 3. The users whose membership has changed in the room, i.e.
                #    in the state deltas.
                #
                # This is to correctly handle the cases where a user was
                # previously sent down as a lazy loaded member:
                # - and is now explicitly requested (so shouldn't be sent down
                #   again); or
                # - their membership has changed (so we need to invalidate
                #   their entry in the lazy loaded table if we don't send the
                #   change down).
                if all_required_user_state:
                    previously_returned_user_to_last_seen = (
                        await self.store.get_sliding_sync_connection_lazy_members(
                            connection_position=from_token.connection_position,
                            room_id=room_id,
                            user_ids=all_required_user_state,
                        )
                    )

                if added_state_filter:
                    # Update the room lazy membership changes to track which
                    # lazy loaded members were needed for this sync. This is so
                    # that we can correctly track the last time we sent down
                    # users' membership (and so can evict old membership state
                    # from the DB tables).
                    returned_user_id_to_last_seen_ts_map.update(
                        (user_id, timestamp)
                        for user_id, timestamp in previously_returned_user_to_last_seen.items()
                        if user_id in lazy_load_user_ids
                    )
                else:
                    previously_returned_user_to_last_seen = {}

                # Check if there are any changes to the required state config
                # that we need to handle.
                changes_return = _required_state_changes(
                    user.to_string(),
                    prev_required_state_map=prev_room_sync_config.required_state_map,
                    request_required_state_map=room_sync_config.required_state_map,
                    previously_returned_lazy_user_ids=previously_returned_user_to_last_seen.keys(),
                    request_lazy_load_user_ids=lazy_load_user_ids,
                    state_deltas=room_state_delta_id_map,
                )
                changed_required_state_map = changes_return.changed_required_state_map

                new_connection_state.room_lazy_membership[
                    room_id
                ].invalidated_user_ids = changes_return.lazy_members_invalidated

                # Add any previously returned explicit memberships to the lazy
                # loaded table. This happens when a client requested explicit
                # members and then converted them to lazy loading.
                for user_id in changes_return.extra_users_to_add_to_lazy_cache:
                    # We don't know the right timestamp to use here, as we don't
                    # know the last time we would have sent the membership down.
                    # So we don't overwrite it if we have a timestamp already,
                    # and fall back to `None` (which means now) if we don't.
                    returned_user_id_to_last_seen_ts_map.setdefault(user_id, None)

                if changes_return.added_state_filter:
                    # Some state entries got added, so we pull out the current
                    # state for them. If we don't do this we'd only send down new deltas.
                    state_ids = await self.get_current_state_ids_at(
                        room_id=room_id,
                        room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
                        state_filter=added_state_filter,
                        state_filter=changes_return.added_state_filter,
                        to_token=to_token,
                    )
                    room_state_delta_id_map.update(state_ids)
@@ -1189,6 +1297,7 @@ class SlidingSyncHandler:

        # If the membership changed and we have to get heroes, get the remaining
        # heroes from the state
        hero_membership_state = {}
        if hero_user_ids:
            hero_membership_state = await self.get_current_state_at(
                room_id=room_id,

@@ -1196,7 +1305,6 @@ class SlidingSyncHandler:
                state_filter=StateFilter.from_types(hero_room_state),
                to_token=to_token,
            )
            room_state.update(hero_membership_state)

        required_room_state: StateMap[EventBase] = {}
        if required_state_filter != StateFilter.none():

@@ -1219,7 +1327,7 @@ class SlidingSyncHandler:
        # Assemble heroes: extract the info from the state we just fetched
        heroes: list[SlidingSyncResult.RoomResult.StrippedHero] = []
        for hero_user_id in hero_user_ids:
            member_event = room_state.get((EventTypes.Member, hero_user_id))
            member_event = hero_membership_state.get((EventTypes.Member, hero_user_id))
            if member_event is not None:
                heroes.append(
                    SlidingSyncResult.RoomResult.StrippedHero(

@@ -1281,7 +1389,7 @@ class SlidingSyncHandler:
            bump_stamp = 0

        room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = (
            expanded_required_state_map
            room_sync_config.required_state_map
        )
        if changed_required_state_map:
            room_sync_required_state_map_to_persist = changed_required_state_map
@@ -1471,13 +1579,37 @@
        return None


@attr.s(auto_attribs=True)
class _RequiredStateChangesReturn:
    """Return type for _required_state_changes."""

    changed_required_state_map: Mapping[str, AbstractSet[str]] | None
    """The updated required state map to store in the room config, or None if
    there is no change."""

    added_state_filter: StateFilter
    """The state filter to use to fetch any additional current state that needs
    to be returned to the client."""

    extra_users_to_add_to_lazy_cache: AbstractSet[str] = frozenset()
    """The set of user IDs we should add to the lazy members cache that we had
    previously returned. Handles the case where a user was previously sent down
    explicitly but is now being lazy loaded."""

    lazy_members_invalidated: AbstractSet[str] = frozenset()
    """The set of user IDs whose membership has changed but we didn't send down,
    so we need to invalidate them from the cache."""


def _required_state_changes(
    user_id: str,
    *,
    prev_required_state_map: Mapping[str, AbstractSet[str]],
    request_required_state_map: Mapping[str, AbstractSet[str]],
    previously_returned_lazy_user_ids: AbstractSet[str],
    request_lazy_load_user_ids: AbstractSet[str],
    state_deltas: StateMap[str],
) -> tuple[Mapping[str, AbstractSet[str]] | None, StateFilter]:
) -> _RequiredStateChangesReturn:
    """Calculates the changes between the required state room config from the
    previous requests compared with the current request.
@@ -1491,14 +1623,62 @@ def _required_state_changes(
    added, removed and then added again to the required state. In that case we
    only want to re-send that entry down sync if it has changed.

    Returns:
        A 2-tuple of updated required state config (or None if there is no update)
        and the state filter to use to fetch extra current state that we need to
        return.
    Args:
        user_id: The user ID of the user making the request.
        prev_required_state_map: The required state map from the previous
            request.
        request_required_state_map: The required state map from the current
            request.
        previously_returned_lazy_user_ids: The set of user IDs whose membership
            we have previously returned to the client due to lazy loading. This
            is filtered to only include users who have either sent events in the
            `timeline`, `required_state` or whose membership changed.
        request_lazy_load_user_ids: The set of user IDs whose lazy-loaded
            membership is required for this request.
        state_deltas: The state deltas in the room in the request token range,
            considering user membership. See `get_current_state_deltas_for_room`
            for more details.
    """

    # First we find any lazy members that have been invalidated due to state
    # changes that we are not sending down.
    lazy_members_invalidated = set()
    for event_type, state_key in state_deltas:
        if event_type != EventTypes.Member:
            continue

        if state_key in request_lazy_load_user_ids:
            # Because it's part of the `request_lazy_load_user_ids`, we're going to
            # send this member change down.
            continue

        if state_key not in previously_returned_lazy_user_ids:
            # We've not previously returned this member so nothing to
            # invalidate.
            continue

        lazy_members_invalidated.add(state_key)

    if prev_required_state_map == request_required_state_map:
        # There has been no change. Return immediately.
        return None, StateFilter.none()
        # There has been no change in state, just need to check lazy members.
        newly_returned_lazy_members = (
            request_lazy_load_user_ids - previously_returned_lazy_user_ids
        )
        if newly_returned_lazy_members:
            # There are some new lazy members we need to fetch.
            added_types: list[tuple[str, str | None]] = []
            for new_user_id in newly_returned_lazy_members:
                added_types.append((EventTypes.Member, new_user_id))

            added_state_filter = StateFilter.from_types(added_types)
        else:
            added_state_filter = StateFilter.none()

        return _RequiredStateChangesReturn(
            changed_required_state_map=None,
            added_state_filter=added_state_filter,
            lazy_members_invalidated=lazy_members_invalidated,
        )

    prev_wildcard = prev_required_state_map.get(StateValues.WILDCARD, set())
    request_wildcard = request_required_state_map.get(StateValues.WILDCARD, set())
@@ -1508,17 +1688,29 @@ def _required_state_changes(
    # already fetching everything, we don't have to fetch anything now that they've
    # narrowed.
    if StateValues.WILDCARD in prev_wildcard:
        return request_required_state_map, StateFilter.none()
        return _RequiredStateChangesReturn(
            changed_required_state_map=request_required_state_map,
            added_state_filter=StateFilter.none(),
            lazy_members_invalidated=lazy_members_invalidated,
        )

    # If an event type wildcard has been added or removed we don't try and do
    # anything fancy, and instead always update the effective room required
    # state config to match the request.
    if request_wildcard - prev_wildcard:
        # Some keys were added, so we need to fetch everything
        return request_required_state_map, StateFilter.all()
        return _RequiredStateChangesReturn(
            changed_required_state_map=request_required_state_map,
            added_state_filter=StateFilter.all(),
            lazy_members_invalidated=lazy_members_invalidated,
        )
    if prev_wildcard - request_wildcard:
        # Keys were only removed, so we don't have to fetch everything.
        return request_required_state_map, StateFilter.none()
        return _RequiredStateChangesReturn(
            changed_required_state_map=request_required_state_map,
            added_state_filter=StateFilter.none(),
            lazy_members_invalidated=lazy_members_invalidated,
        )

    # Contains updates to the required state map compared with the previous room
    # config. This has the same format as `RoomSyncConfig.required_state`
@@ -1550,6 +1742,17 @@ def _required_state_changes(
            # Nothing *added*, so we skip. Removals happen below.
            continue

        # Handle the special case of adding `$LAZY` membership, where we want to
        # always record the change to be lazy loading, as we immediately start
        # using the lazy loading tables so there is no point *not* recording the
        # change to lazy load in the effective room config.
        if event_type == EventTypes.Member:
            old_state_key_lazy = StateValues.LAZY in old_state_keys
            request_state_key_lazy = StateValues.LAZY in request_state_keys
            if not old_state_key_lazy and request_state_key_lazy:
                changes[event_type] = request_state_keys
                continue

        # We only remove state keys from the effective state if they've been
        # removed from the request *and* the state has changed. This ensures
        # that if a client removes and then re-adds a state key, we only send
@@ -1620,9 +1823,31 @@ def _required_state_changes(
                # LAZY values should also be ignored for event types that are
                # not membership.
                pass
            elif event_type == EventTypes.Member:
                if state_key not in previously_returned_lazy_user_ids:
                    # Only add *explicit* members we haven't previously sent
                    # down.
                    added.append((event_type, state_key))
            else:
                added.append((event_type, state_key))

    previously_required_state_members = set(
        prev_required_state_map.get(EventTypes.Member, ())
    )
    if StateValues.ME in previously_required_state_members:
        previously_required_state_members.add(user_id)

    # We also need to pull out any lazy members that are now required but
    # haven't previously been returned.
    for required_user_id in (
        request_lazy_load_user_ids
        # Remove previously returned users
        - previously_returned_lazy_user_ids
        # Exclude previously explicitly requested members.
        - previously_required_state_members
    ):
        added.append((EventTypes.Member, required_user_id))

    added_state_filter = StateFilter.from_types(added)

    # Figure out what changes we need to apply to the effective required state
@@ -1663,13 +1888,25 @@ def _required_state_changes(
            changes[event_type] = request_state_keys
            continue

        # When handling $LAZY membership, we want to either a) not update the
        # state or b) update it to match the request. This is to avoid churn of
        # the effective required state for rooms (we deduplicate required state
        # between rooms), and because we can store the previously returned
        # explicit memberships with the lazy loaded memberships.
        if event_type == EventTypes.Member:
            old_state_key_lazy = StateValues.LAZY in old_state_keys
            request_state_key_lazy = StateValues.LAZY in request_state_keys
            has_lazy = old_state_key_lazy or request_state_key_lazy

            # If a "$LAZY" has been added or removed we always update to match
            # the request.
            if old_state_key_lazy != request_state_key_lazy:
                # If a "$LAZY" has been added or removed we always update the effective room
                # required state config to match the request.
                changes[event_type] = request_state_keys
                continue

            # Or if we have lazy membership and there are invalidated
            # explicit memberships.
            if has_lazy and invalidated_state_keys:
                changes[event_type] = request_state_keys
                continue

@@ -1684,6 +1921,28 @@ def _required_state_changes(
        if invalidated_state_keys:
            changes[event_type] = old_state_keys - invalidated_state_keys

    # Check for any explicit membership changes that were removed that we can
    # add to the lazy members previously returned. This is so that we don't
    # return a user due to lazy loading if they were previously returned as an
    # explicit membership.
    users_to_add_to_lazy_cache: set[str] = set()

    membership_changes = changes.get(EventTypes.Member, set())
    if membership_changes and StateValues.LAZY in request_state_keys:
        for state_key in prev_required_state_map.get(EventTypes.Member, set()):
            if state_key == StateValues.WILDCARD or state_key == StateValues.LAZY:
                # Ignore non-user IDs.
                continue

            if state_key == StateValues.ME:
                # Normalize to proper user ID
                state_key = user_id

            # We remember the user if they haven't been invalidated
            if (EventTypes.Member, state_key) not in state_deltas:
                users_to_add_to_lazy_cache.add(state_key)

    new_required_state_map = None
    if changes:
        # Update the required state config based on the changes.
        new_required_state_map = dict(prev_required_state_map)

@@ -1694,6 +1953,9 @@ def _required_state_changes(
            # Remove entries with empty state keys.
            new_required_state_map.pop(event_type, None)

        return new_required_state_map, added_state_filter
    else:
        return None, added_state_filter
    return _RequiredStateChangesReturn(
        changed_required_state_map=new_required_state_map,
        added_state_filter=added_state_filter,
        lazy_members_invalidated=lazy_members_invalidated,
        extra_users_to_add_to_lazy_cache=users_to_add_to_lazy_cache,
    )
@@ -13,7 +13,6 @@
#

import logging
from typing import TYPE_CHECKING

import attr

@@ -25,9 +24,7 @@ from synapse.types.handlers.sliding_sync import (
    PerConnectionState,
    SlidingSyncConfig,
)

if TYPE_CHECKING:
    pass
from synapse.util.clock import Clock

logger = logging.getLogger(__name__)

@@ -61,7 +58,8 @@ class SlidingSyncConnectionStore:
    to mapping of room ID to `HaveSentRoom`.
    """

    store: "DataStore"
    clock: Clock
    store: DataStore

    async def get_and_clear_connection_positions(
        self,

@@ -101,7 +99,7 @@ class SlidingSyncConnectionStore:
        If there are no changes to the state this may return the same token as
        the existing per-connection state.
        """
        if not new_connection_state.has_updates():
        if not new_connection_state.has_updates(self.clock):
            if from_token is not None:
                return from_token.connection_position
            else:
@@ -14,7 +14,7 @@


import logging
from typing import TYPE_CHECKING, Mapping, cast
from typing import TYPE_CHECKING, AbstractSet, Mapping, cast

import attr

@@ -26,13 +26,16 @@ from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
    make_in_list_sql_clause,
)
from synapse.storage.engines import PostgresEngine
from synapse.types import MultiWriterStreamToken, RoomStreamToken
from synapse.types.handlers.sliding_sync import (
    HaveSentRoom,
    HaveSentRoomFlag,
    MutablePerConnectionState,
    PerConnectionState,
    RoomLazyMembershipChanges,
    RoomStatusMap,
    RoomSyncConfig,
)

@@ -373,6 +376,13 @@ class SlidingSyncStore(SQLBaseStore):
            value_values=values,
        )

        self._persist_sliding_sync_connection_lazy_members_txn(
            txn,
            connection_key,
            connection_position,
            per_connection_state.room_lazy_membership,
        )

        return connection_position

    @cached(iterable=True, max_entries=100000)
@@ -446,6 +456,23 @@ class SlidingSyncStore(SQLBaseStore):
        """
        txn.execute(sql, (connection_key, connection_position))

        # Move any lazy membership entries for this connection position to have
        # `NULL` connection position, indicating that it applies to all future
        # positions on this connection. This is safe because we have deleted all
        # other (potentially forked) connection positions, and so all future
        # positions in this connection will be a continuation of the current
        # position. Thus any lazy membership entries we have sent down will still
        # be valid.
        self.db_pool.simple_update_txn(
            txn,
            table="sliding_sync_connection_lazy_members",
            keyvalues={
                "connection_key": connection_key,
                "connection_position": connection_position,
            },
            updatevalues={"connection_position": None},
        )

        # Fetch and create a mapping from required state ID to the actual
        # required state for the connection.
        rows = self.db_pool.simple_select_list_txn(

@@ -525,6 +552,151 @@ class SlidingSyncStore(SQLBaseStore):
            receipts=RoomStatusMap(receipts),
            account_data=RoomStatusMap(account_data),
            room_configs=room_configs,
            room_lazy_membership={},
        )

    async def get_sliding_sync_connection_lazy_members(
        self,
        connection_position: int,
        room_id: str,
        user_ids: AbstractSet[str],
    ) -> Mapping[str, int]:
        """Get which user IDs in the room we have previously sent lazy
        membership for.

        Args:
            connection_position: The sliding sync connection position.
            room_id: The room ID to get lazy members for.
            user_ids: The user IDs to check whether we've previously sent them
                because of lazy membership.

        Returns:
            The mapping of user IDs to the last seen timestamp for those user
            IDs. Only includes user IDs that we have previously sent lazy
            membership for, and so may be a subset of the `user_ids` passed in.
        """

        def get_sliding_sync_connection_lazy_members_txn(
            txn: LoggingTransaction,
        ) -> Mapping[str, int]:
            user_clause, user_args = make_in_list_sql_clause(
                txn.database_engine, "user_id", user_ids
            )

            # Fetch all the lazy membership entries for the given connection,
            # room and user IDs. We don't have the `connection_key` here, so we
            # join against `sliding_sync_connection_positions` to get it.
            #
            # Beware that there are two `connection_position` columns in the
            # query, and they are different: the one in
            # `sliding_sync_connection_positions` is the one we match to get the
            # connection_key, whereas the one in
            # `sliding_sync_connection_lazy_members` is what we filter against
            # (it may be null or the same as the one passed in).
            #
            # FIXME: We should pass in `connection_key` here to avoid the join.
            # We don't do this currently as the caller doesn't have it handy.
            sql = f"""
                SELECT user_id, members.connection_position, last_seen_ts
                FROM sliding_sync_connection_lazy_members AS members
                INNER JOIN sliding_sync_connection_positions AS pos USING (connection_key)
                WHERE pos.connection_position = ? AND room_id = ? AND {user_clause}
            """

            txn.execute(sql, (connection_position, room_id, *user_args))

            # Filter out any cache entries that only apply to forked connection
            # positions. Entries with `NULL` `connection_position` apply to all
            # positions on the connection.
            return {
                user_id: last_seen_ts
                for user_id, db_connection_position, last_seen_ts in txn
                if db_connection_position == connection_position
                or db_connection_position is None
            }

        return await self.db_pool.runInteraction(
            "get_sliding_sync_connection_lazy_members",
            get_sliding_sync_connection_lazy_members_txn,
            db_autocommit=True,  # Avoid transaction for single read
        )

    def _persist_sliding_sync_connection_lazy_members_txn(
        self,
        txn: LoggingTransaction,
        connection_key: int,
        new_connection_position: int,
        all_changes: dict[str, RoomLazyMembershipChanges],
    ) -> None:
        """Persist that we have sent lazy membership for the given user IDs."""

        now = self.clock.time_msec()

        # Figure out which cache entries to add or update.
        #
        # These are either a) new entries we've never sent before (i.e. with a
        # None last_seen_ts), or b) where the `last_seen_ts` is old enough that
        # we want to update it.
        #
        # We don't update the timestamp every time to avoid hammering the DB
        # with writes, and we don't need the timestamp to be precise. It is used
        # to evict old entries that haven't been used in a while.
        to_update: list[tuple[str, str]] = []
        for room_id, room_changes in all_changes.items():
            user_ids_to_update = room_changes.get_returned_user_ids_to_update(
                self.clock
            )
            to_update.extend((room_id, user_id) for user_id in user_ids_to_update)

        if to_update:
            # Upsert the new/updated entries.
            #
            # Ignore conflicts where the existing entry has a different
            # connection position (i.e. from a forked connection position). This
            # may mean that we lose some updates, but that's acceptable as this
            # is a cache and it's fine for it to *not* include rows. (Downstream
            # this will cause us to maybe send a few extra lazy members down
            # sync, but we're allowed to send extra members.)
            sql = """
                INSERT INTO sliding_sync_connection_lazy_members
                    (connection_key, connection_position, room_id, user_id, last_seen_ts)
                VALUES {value_placeholder}
                ON CONFLICT (connection_key, room_id, user_id)
                DO UPDATE SET last_seen_ts = EXCLUDED.last_seen_ts
                WHERE sliding_sync_connection_lazy_members.connection_position IS NULL
                    OR sliding_sync_connection_lazy_members.connection_position = EXCLUDED.connection_position
            """

            args = [
                (connection_key, new_connection_position, room_id, user_id, now)
                for room_id, user_id in to_update
            ]

            if isinstance(self.database_engine, PostgresEngine):
                sql = sql.format(value_placeholder="?")
                txn.execute_values(sql, args, fetch=False)
            else:
                sql = sql.format(value_placeholder="(?, ?, ?, ?, ?)")
                txn.execute_batch(sql, args)

        # Remove any invalidated entries.
        to_remove: list[tuple[str, str]] = []
        for room_id, room_changes in all_changes.items():
            for user_id in room_changes.invalidated_user_ids:
                to_remove.append((room_id, user_id))

        if to_remove:
            # We don't try and match on connection position here: it's fine to
            # remove it from all forks. This is a cache, so it's fine to expire
            # arbitrary entries; the worst that happens is we send a few extra
            # lazy members down sync.
            self.db_pool.simple_delete_many_batch_txn(
                txn,
                table="sliding_sync_connection_lazy_members",
                keys=("connection_key", "room_id", "user_id"),
                values=[
                    (connection_key, room_id, user_id) for room_id, user_id in to_remove
                ],
            )

    @wrap_as_background_process("delete_old_sliding_sync_connections")
@@ -564,6 +736,10 @@ class PerConnectionStateDB:

    room_configs: Mapping[str, "RoomSyncConfig"]

    room_lazy_membership: dict[str, RoomLazyMembershipChanges]
    """Lazy membership changes to persist alongside this state. Only used
    when persisting."""

    @staticmethod
    async def from_state(
        per_connection_state: "MutablePerConnectionState", store: "DataStore"

@@ -618,6 +794,7 @@ class PerConnectionStateDB:
            receipts=RoomStatusMap(receipts),
            account_data=RoomStatusMap(account_data),
            room_configs=per_connection_state.room_configs.maps[0],
            room_lazy_membership=per_connection_state.room_lazy_membership,
        )

    async def to_state(self, store: "DataStore") -> "PerConnectionState":
@@ -0,0 +1,60 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 Element Creations Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.


-- Tracks which member states have been sent to the client for lazy-loaded
-- members in sliding sync. This is a *cache* as it doesn't matter if we send
-- down members we've previously sent down, i.e. it's safe to delete any rows.
--
-- We could have tracked these as part of the
-- `sliding_sync_connection_required_state` table, but that would bloat that
-- table significantly as most rooms will have lazy-loaded members. We want to
-- keep that table small as we always pull out all rows for the connection for
-- every request, so storing lots of data there would be bad for performance. To
-- keep that table small we also deduplicate the requested state across
-- different rooms, which storing lazy members there would prevent.
--
-- We track a *rough* `last_seen_ts` for each user in each room which indicates
-- when we last would've sent their member state to the client. `last_seen_ts`
-- is used so that we can remove members which haven't been seen for a while to
-- save space. This is a *rough* timestamp as we don't want to update the
-- timestamp every time to avoid hammering the DB with writes, and we don't need
-- the timestamp to be precise (as it is used to evict old entries that haven't
-- been used in a while).
--
-- Care must be taken when handling "forked" positions, i.e. we have responded
-- to a request with a position and then get another different request using the
-- previous position as a base. We track this by including a
-- `connection_position` for newly inserted rows. When we advance the position
-- we set this to NULL for all rows which were present at that position, and
-- delete all other rows. When reading rows we can then filter out any rows
-- which have a non-NULL `connection_position` which is not the current
-- position.
--
-- I.e. `connection_position` is NULL for rows which are valid for *all*
-- positions on the connection, and is non-NULL for rows which are only valid
-- for a specific position.
--
-- When invalidating rows, we can just delete them. Technically this could
-- invalidate for a forked position, but this is acceptable as equivalent to a
-- cache eviction.
CREATE TABLE sliding_sync_connection_lazy_members (
    connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE,
    connection_position BIGINT REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE,
    room_id TEXT NOT NULL,
    user_id TEXT NOT NULL,
    last_seen_ts BIGINT NOT NULL
);

CREATE UNIQUE INDEX sliding_sync_connection_lazy_members_idx ON sliding_sync_connection_lazy_members (connection_key, room_id, user_id);
CREATE INDEX sliding_sync_connection_lazy_members_pos_idx ON sliding_sync_connection_lazy_members (connection_key, connection_position) WHERE connection_position IS NOT NULL;
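The lifecycle described in the schema comment above, as a concrete sketch (illustrative SQL against the table just defined; the connection key, position, and timestamps are made-up values):

```sql
-- 1. A response at position 7 sends @alice's membership down for the first
--    time. The row is pinned to that position, since the client may never
--    actually advance to it.
INSERT INTO sliding_sync_connection_lazy_members
    (connection_key, connection_position, room_id, user_id, last_seen_ts)
VALUES (1, 7, '!room:example.org', '@alice:example.org', 1700000000000);

-- 2. The client makes a request from position 7: all other (forked) positions
--    are deleted, and surviving rows are promoted to "valid for all future
--    positions" by nulling out connection_position.
UPDATE sliding_sync_connection_lazy_members
SET connection_position = NULL
WHERE connection_key = 1 AND connection_position = 7;

-- 3. Reads keep rows that are either promoted (NULL) or pinned to the position
--    in use, which filters out rows written on abandoned forks.
SELECT user_id, last_seen_ts
FROM sliding_sync_connection_lazy_members
WHERE connection_key = 1
  AND room_id = '!room:example.org'
  AND (connection_position IS NULL OR connection_position = 7);

-- 4. Because the table is a cache, the pruning left for a later PR could be as
--    simple as deleting rows whose rough last_seen_ts is old enough.
DELETE FROM sliding_sync_connection_lazy_members
WHERE last_seen_ts < 1690000000000;
```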
@@ -49,12 +49,21 @@ from synapse.types import (
    UserID,
)
from synapse.types.rest.client import SlidingSyncBody
from synapse.util.clock import Clock
from synapse.util.duration import Duration

if TYPE_CHECKING:
    from synapse.handlers.relations import BundledAggregations

logger = logging.getLogger(__name__)

# How often to update the last seen timestamp for lazy members.
#
# We don't update the timestamp every time to avoid hammering the DB with
# writes, and we don't need the timestamp to be precise (as it is used to evict
# old entries that haven't been used in a while).
LAZY_MEMBERS_UPDATE_INTERVAL = Duration(hours=1)


class SlidingSyncConfig(SlidingSyncBody):
    """
@@ -891,6 +900,69 @@ class PerConnectionState:
        return len(self.rooms) + len(self.receipts) + len(self.room_configs)


@attr.s(auto_attribs=True)
class RoomLazyMembershipChanges:
    """Changes to lazily-loaded room memberships for a given room."""

    returned_user_id_to_last_seen_ts_map: Mapping[str, int | None] = attr.Factory(dict)
    """Map from user ID to timestamp for users whose membership we have lazily
    loaded in this room for this request. The timestamp indicates the time we
    previously needed the membership, or None if we sent it down for the first
    time in this request.

    We track a *rough* `last_seen_ts` for each user in each room which indicates
    when we last would've sent their member state to the client. This is used so
    that we can remove members which haven't been seen for a while to save
    space.

    Note: this will include users whose membership we would have sent down but
    didn't due to us having previously sent them.
    """

    invalidated_user_ids: AbstractSet[str] = attr.Factory(set)
    """Set of user IDs whose latest membership we have *not* sent down."""

    def get_returned_user_ids_to_update(self, clock: Clock) -> StrCollection:
        """Get the user IDs whose last seen timestamp we need to update in the
        database.

        This is a subset of user IDs in `returned_user_id_to_last_seen_ts_map`,
        whose timestamp is either None (first time we've sent them) or older
        than `LAZY_MEMBERS_UPDATE_INTERVAL`.

        We only update the timestamp in the database every so often to avoid
        hammering the DB with writes. We don't need the timestamp to be precise,
        as the timestamp is used to evict old entries that haven't been used in
        a while.
        """

        now_ms = clock.time_msec()
        return [
            user_id
            for user_id, last_seen_ts in self.returned_user_id_to_last_seen_ts_map.items()
            if last_seen_ts is None
            or now_ms - last_seen_ts >= LAZY_MEMBERS_UPDATE_INTERVAL.as_millis()
        ]

    def has_updates(self, clock: Clock) -> bool:
        """Check if there are any updates to the lazy membership changes.

        Called to check if we need to persist changes to the lazy membership
        state for the room. We want to avoid persisting the state if there are
        no changes, to avoid unnecessary writes (and cache misses due to a new
        connection position).
        """

        # We consider there to be updates if there are any invalidated user
        # IDs...
        if self.invalidated_user_ids:
            return True

        # ...or if any of the returned user IDs need their last seen timestamp
        # updating in the database.
        return bool(self.get_returned_user_ids_to_update(clock))


@attr.s(auto_attribs=True)
class MutablePerConnectionState(PerConnectionState):
    """A mutable version of `PerConnectionState`"""
@@ -903,12 +975,28 @@ class MutablePerConnectionState(PerConnectionState):

    room_configs: typing.ChainMap[str, RoomSyncConfig]

    def has_updates(self) -> bool:
    # A map from room ID to the lazily-loaded memberships needed for the
    # request in that room.
    room_lazy_membership: dict[str, RoomLazyMembershipChanges] = attr.Factory(dict)

    def has_updates(self, clock: Clock) -> bool:
        """Check if there are any updates to the per-connection state that need
        persisting.

        It is important that we don't spuriously do persistence, as that will
        always generate a new connection position which will invalidate some of
        the caches. It doesn't need to be perfect, but we should avoid always
        generating new connection positions when doing lazy loading.
        """
        return (
            bool(self.rooms.get_updates())
            or bool(self.receipts.get_updates())
            or bool(self.account_data.get_updates())
            or bool(self.get_room_config_updates())
            or any(
                change.has_updates(clock)
                for change in self.room_lazy_membership.values()
            )
        )

    def get_room_config_updates(self) -> Mapping[str, RoomSyncConfig]:
@@ -18,7 +18,7 @@
#
#
import logging
from typing import AbstractSet, Mapping
from typing import AbstractSet
from unittest.mock import patch

import attr

@@ -38,13 +38,17 @@ from synapse.handlers.sliding_sync import (
    RoomSyncConfig,
    StateValues,
    _required_state_changes,
    _RequiredStateChangesReturn,
)
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import JsonDict, StateMap, StreamToken, UserID, create_requester
from synapse.types.handlers.sliding_sync import PerConnectionState, SlidingSyncConfig
from synapse.types.handlers.sliding_sync import (
    PerConnectionState,
    SlidingSyncConfig,
)
from synapse.types.state import StateFilter
from synapse.util.clock import Clock
@@ -3827,12 +3831,11 @@ class RequiredStateChangesTestParameters:
    previous_required_state_map: dict[str, set[str]]
    request_required_state_map: dict[str, set[str]]
    state_deltas: StateMap[str]
    expected_with_state_deltas: tuple[
        Mapping[str, AbstractSet[str]] | None, StateFilter
    ]
    expected_without_state_deltas: tuple[
        Mapping[str, AbstractSet[str]] | None, StateFilter
    ]
    expected_with_state_deltas: _RequiredStateChangesReturn
    expected_without_state_deltas: _RequiredStateChangesReturn

    previously_returned_lazy_user_ids: AbstractSet[str] = frozenset()
    request_lazy_load_user_ids: AbstractSet[str] = frozenset()


class RequiredStateChangesTestCase(unittest.TestCase):
@@ -3848,8 +3851,12 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                    request_required_state_map={"type1": {"state_key"}},
                    state_deltas={("type1", "state_key"): "$event_id"},
                    # No changes
                    expected_with_state_deltas=(None, StateFilter.none()),
                    expected_without_state_deltas=(None, StateFilter.none()),
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        None, StateFilter.none()
                    ),
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        None, StateFilter.none()
                    ),
                ),
            ),
            (

@@ -3862,14 +3869,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                        "type2": {"state_key"},
                    },
                    state_deltas={("type2", "state_key"): "$event_id"},
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        # We've added a type so we should persist the changed required state
                        # config.
                        {"type1": {"state_key"}, "type2": {"state_key"}},
                        # We should see the new type added
                        StateFilter.from_types([("type2", "state_key")]),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        {"type1": {"state_key"}, "type2": {"state_key"}},
                        StateFilter.from_types([("type2", "state_key")]),
                    ),

@@ -3885,7 +3892,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                        "type2": {"state_key"},
                    },
                    state_deltas={("type2", "state_key"): "$event_id"},
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        # We've added a type so we should persist the changed required state
                        # config.
                        {"type1": {"state_key"}, "type2": {"state_key"}},

@@ -3894,7 +3901,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                            [("type1", "state_key"), ("type2", "state_key")]
                        ),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        {"type1": {"state_key"}, "type2": {"state_key"}},
                        StateFilter.from_types(
                            [("type1", "state_key"), ("type2", "state_key")]
@@ -3909,14 +3916,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                    previous_required_state_map={"type": {"state_key1"}},
                    request_required_state_map={"type": {"state_key1", "state_key2"}},
                    state_deltas={("type", "state_key2"): "$event_id"},
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        # We've added a key so we should persist the changed required state
                        # config.
                        {"type": {"state_key1", "state_key2"}},
                        # We should see the new state_keys added
                        StateFilter.from_types([("type", "state_key2")]),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        {"type": {"state_key1", "state_key2"}},
                        StateFilter.from_types([("type", "state_key2")]),
                    ),

@@ -3929,7 +3936,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                    previous_required_state_map={"type": {"state_key1"}},
                    request_required_state_map={"type": {"state_key2", "state_key3"}},
                    state_deltas={("type", "state_key2"): "$event_id"},
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        # We've added a key so we should persist the changed required state
                        # config.
                        #

@@ -3940,7 +3947,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                            [("type", "state_key2"), ("type", "state_key3")]
                        ),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        {"type": {"state_key1", "state_key2", "state_key3"}},
                        StateFilter.from_types(
                            [("type", "state_key2"), ("type", "state_key3")]

@@ -3964,7 +3971,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                    },
                    request_required_state_map={"type1": {"state_key"}},
                    state_deltas={("type2", "state_key"): "$event_id"},
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        # Remove `type2` since there's been a change to that state,
                        # (persist the change to required state). That way next time,
                        # they request `type2`, we see that we haven't sent it before

@@ -3975,7 +3982,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                        # less state now
                        StateFilter.none(),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        # `type2` is no longer requested but since that state hasn't
                        # changed, nothing should change (we should still keep track
                        # that we've sent `type2` before).
@@ -3998,7 +4005,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                    },
                    request_required_state_map={},
                    state_deltas={("type2", "state_key"): "$event_id"},
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        # Remove `type2` since there's been a change to that state,
                        # (persist the change to required state). That way next time,
                        # they request `type2`, we see that we haven't sent it before

@@ -4009,7 +4016,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                        # less state now
                        StateFilter.none(),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        # `type2` is no longer requested but since that state hasn't
                        # changed, nothing should change (we should still keep track
                        # that we've sent `type2` before).

@@ -4029,7 +4036,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                    previous_required_state_map={"type": {"state_key1", "state_key2"}},
                    request_required_state_map={"type": {"state_key1"}},
                    state_deltas={("type", "state_key2"): "$event_id"},
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        # Remove `(type, state_key2)` since there's been a change
                        # to that state (persist the change to required state).
                        # That way next time, they request `(type, state_key2)`, we see

@@ -4041,7 +4048,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                        # less state now
                        StateFilter.none(),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        # `(type, state_key2)` is no longer requested but since that
                        # state hasn't changed, nothing should change (we should still
                        # keep track that we've sent `(type, state_key1)` and `(type,

@@ -4073,11 +4080,11 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                        ("other_type", "state_key"): "$event_id",
                    },
                    # We've added a wildcard, so we persist the change and request everything
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}},
                        StateFilter.all(),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}},
                        StateFilter.all(),
                    ),
@@ -4103,13 +4110,13 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                        ("other_type", "state_key"): "$event_id",
                    },
                    # We've removed a type wildcard, so we persist the change but don't request anything
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        {"type1": {"state_key2"}},
                        # We don't need to request anything more if they are requesting
                        # less state now
                        StateFilter.none(),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        {"type1": {"state_key2"}},
                        # We don't need to request anything more if they are requesting
                        # less state now

@@ -4129,11 +4136,11 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                    state_deltas={("type2", "state_key"): "$event_id"},
                    # We've added a wildcard state_key, so we persist the change and
                    # request all of the state for that type
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}},
                        StateFilter.from_types([("type2", None)]),
                    ),
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}},
                        StateFilter.from_types([("type2", None)]),
                    ),

@@ -4151,7 +4158,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                    state_deltas={("type2", "state_key"): "$event_id"},
                    # We've removed a state_key wildcard, so we persist the change and
                    # request nothing
                    expected_with_state_deltas=(
                    expected_with_state_deltas=_RequiredStateChangesReturn(
                        {"type1": {"state_key"}},
                        # We don't need to request anything more if they are requesting
                        # less state now

@@ -4160,7 +4167,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
                    # We've removed a state_key wildcard but there have been no matching
                    # state changes, so no changes needed, just persist the
                    # `request_required_state_map` as-is.
                    expected_without_state_deltas=(
                    expected_without_state_deltas=_RequiredStateChangesReturn(
                        None,
                        # We don't need to request anything more if they are requesting
                        # less state now
@@ -4180,7 +4187,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
},
|
||||
request_required_state_map={"type1": {"state_key1"}},
|
||||
state_deltas={("type1", "state_key3"): "$event_id"},
|
||||
expected_with_state_deltas=(
|
||||
expected_with_state_deltas=_RequiredStateChangesReturn(
|
||||
# We've removed some state keys from the type, but only state_key3 was
|
||||
# changed so only that one should be removed.
|
||||
{"type1": {"state_key1", "state_key2"}},
|
||||
@@ -4188,7 +4195,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
# less state now
|
||||
StateFilter.none(),
|
||||
),
|
||||
expected_without_state_deltas=(
|
||||
expected_without_state_deltas=_RequiredStateChangesReturn(
|
||||
# No changes needed, just persist the
|
||||
# `request_required_state_map` as-is
|
||||
None,
|
||||
@@ -4207,14 +4214,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
previous_required_state_map={},
|
||||
request_required_state_map={"type1": {StateValues.ME}},
|
||||
state_deltas={("type1", "@user:test"): "$event_id"},
|
||||
expected_with_state_deltas=(
|
||||
expected_with_state_deltas=_RequiredStateChangesReturn(
|
||||
# We've added a type so we should persist the changed required state
|
||||
# config.
|
||||
{"type1": {StateValues.ME}},
|
||||
# We should see the new state_keys added
|
||||
StateFilter.from_types([("type1", "@user:test")]),
|
||||
),
|
||||
expected_without_state_deltas=(
|
||||
expected_without_state_deltas=_RequiredStateChangesReturn(
|
||||
{"type1": {StateValues.ME}},
|
||||
StateFilter.from_types([("type1", "@user:test")]),
|
||||
),
|
||||
@@ -4229,7 +4236,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
previous_required_state_map={"type1": {StateValues.ME}},
|
||||
request_required_state_map={},
|
||||
state_deltas={("type1", "@user:test"): "$event_id"},
|
||||
expected_with_state_deltas=(
|
||||
expected_with_state_deltas=_RequiredStateChangesReturn(
|
||||
# Remove `type1` since there's been a change to that state,
|
||||
# (persist the change to required state). That way next time,
|
||||
# they request `type1`, we see that we haven't sent it before
|
||||
@@ -4240,7 +4247,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
# less state now
|
||||
StateFilter.none(),
|
||||
),
|
||||
expected_without_state_deltas=(
|
||||
expected_without_state_deltas=_RequiredStateChangesReturn(
|
||||
# `type1` is no longer requested but since that state hasn't
|
||||
# changed, nothing should change (we should still keep track
|
||||
# that we've sent `type1` before).
|
||||
@@ -4260,14 +4267,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
previous_required_state_map={},
|
||||
request_required_state_map={"type1": {"@user:test"}},
|
||||
state_deltas={("type1", "@user:test"): "$event_id"},
|
||||
expected_with_state_deltas=(
|
||||
expected_with_state_deltas=_RequiredStateChangesReturn(
|
||||
# We've added a type so we should persist the changed required state
|
||||
# config.
|
||||
{"type1": {"@user:test"}},
|
||||
# We should see the new state_keys added
|
||||
StateFilter.from_types([("type1", "@user:test")]),
|
||||
),
|
||||
expected_without_state_deltas=(
|
||||
expected_without_state_deltas=_RequiredStateChangesReturn(
|
||||
{"type1": {"@user:test"}},
|
||||
StateFilter.from_types([("type1", "@user:test")]),
|
||||
),
|
||||
@@ -4282,7 +4289,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
previous_required_state_map={"type1": {"@user:test"}},
|
||||
request_required_state_map={},
|
||||
state_deltas={("type1", "@user:test"): "$event_id"},
|
||||
expected_with_state_deltas=(
|
||||
expected_with_state_deltas=_RequiredStateChangesReturn(
|
||||
# Remove `type1` since there's been a change to that state,
|
||||
# (persist the change to required state). That way next time,
|
||||
# they request `type1`, we see that we haven't sent it before
|
||||
@@ -4293,7 +4300,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
# less state now
|
||||
StateFilter.none(),
|
||||
),
|
||||
expected_without_state_deltas=(
|
||||
expected_without_state_deltas=_RequiredStateChangesReturn(
|
||||
# `type1` is no longer requested but since that state hasn't
|
||||
# changed, nothing should change (we should still keep track
|
||||
# that we've sent `type1` before).
|
||||
@@ -4313,13 +4320,13 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
previous_required_state_map={},
|
||||
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
|
||||
state_deltas={(EventTypes.Member, "@user:test"): "$event_id"},
|
||||
expected_with_state_deltas=(
|
||||
expected_with_state_deltas=_RequiredStateChangesReturn(
|
||||
# If a "$LAZY" has been added or removed we always update the
|
||||
# required state to what was requested for simplicity.
|
||||
{EventTypes.Member: {StateValues.LAZY}},
|
||||
StateFilter.none(),
|
||||
),
|
||||
expected_without_state_deltas=(
|
||||
expected_without_state_deltas=_RequiredStateChangesReturn(
|
||||
{EventTypes.Member: {StateValues.LAZY}},
|
||||
StateFilter.none(),
|
||||
),
|
||||
@@ -4334,7 +4341,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
|
||||
request_required_state_map={},
|
||||
state_deltas={(EventTypes.Member, "@user:test"): "$event_id"},
|
||||
expected_with_state_deltas=(
|
||||
expected_with_state_deltas=_RequiredStateChangesReturn(
|
||||
# If a "$LAZY" has been added or removed we always update the
|
||||
# required state to what was requested for simplicity.
|
||||
{},
|
||||
@@ -4342,7 +4349,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
# less state now
|
||||
StateFilter.none(),
|
||||
),
|
||||
expected_without_state_deltas=(
|
||||
expected_without_state_deltas=_RequiredStateChangesReturn(
|
||||
# `EventTypes.Member` is no longer requested but since that
|
||||
# state hasn't changed, nothing should change (we should still
|
||||
# keep track that we've sent `EventTypes.Member` before).
|
||||
@@ -4361,41 +4368,40 @@ class RequiredStateChangesTestCase(unittest.TestCase):
|
||||
we're sending down another response without any timeline events.
|
||||
""",
|
||||
RequiredStateChangesTestParameters(
|
||||
previous_required_state_map={
|
||||
EventTypes.Member: {
|
||||
StateValues.LAZY,
|
||||
"@user2:test",
|
||||
"@user3:test",
|
||||
}
|
||||
},
|
||||
previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
|
||||
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
|
||||
previously_returned_lazy_user_ids={"@user2:test", "@user3:test"},
|
||||
request_lazy_load_user_ids=set(),
|
||||
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
|
||||
expected_with_state_deltas=(
|
||||
expected_with_state_deltas=_RequiredStateChangesReturn(
|
||||
# The `request_required_state_map` hasn't changed
|
||||
None,
|
||||
# We don't need to request anything more if they are requesting
|
||||
# less state now
|
||||
StateFilter.none(),
|
||||
# Previous request did not include any explicit members,
|
||||
# so there is no extra users to add to the lazy cache.
|
||||
extra_users_to_add_to_lazy_cache=frozenset(),
|
||||
# Remove "@user2:test" since that state has changed and is no
|
||||
# longer being requested anymore. Since something was removed,
|
||||
# we should persist the changed to required state. That way next
|
||||
# time, they request "@user2:test", we see that we haven't sent
|
||||
# it before and send the new state. (we should still keep track
|
||||
# that we've sent specific `EventTypes.Member` before)
|
||||
{
|
||||
EventTypes.Member: {
|
||||
StateValues.LAZY,
|
||||
"@user3:test",
|
||||
}
|
||||
},
|
||||
# We don't need to request anything more if they are requesting
|
||||
# less state now
|
||||
StateFilter.none(),
|
||||
lazy_members_invalidated={"@user2:test"},
|
||||
),
|
||||
expected_without_state_deltas=(
|
||||
# We're not requesting any specific `EventTypes.Member` now but
|
||||
# since that state hasn't changed, nothing should change (we
|
||||
# should still keep track that we've sent specific
|
||||
# `EventTypes.Member` before).
|
||||
expected_without_state_deltas=_RequiredStateChangesReturn(
|
||||
# The `request_required_state_map` hasn't changed
|
||||
None,
|
||||
# We don't need to request anything more if they are requesting
|
||||
# less state now
|
||||
StateFilter.none(),
|
||||
# Previous request did not include any explicit members,
|
||||
# so there is no extra users to add to the lazy cache.
|
||||
extra_users_to_add_to_lazy_cache=frozenset(),
|
||||
# Nothing should change (we should still keep track that
|
||||
# we've sent specific `EventTypes.Member` before).
|
||||
lazy_members_invalidated=frozenset(),
|
||||
),
|
||||
),
|
||||
),
|
||||
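
The expectations above are built from two positional fields plus two keyword fields. A minimal sketch of a container with that shape, inferred from the test call sites rather than taken from the handler code (the `StateFilter` import path is an assumption), is:

import attr
from typing import AbstractSet, Mapping, Optional

from synapse.types.state import StateFilter  # assumed import path

@attr.s(auto_attribs=True, frozen=True, slots=True)
class _RequiredStateChangesReturn:
    # New required state map to persist for the connection, or None if
    # the previous map can be kept as-is.
    changed_required_state_map: Optional[Mapping[str, AbstractSet[str]]]
    # Extra state that must be fetched and sent down for this room.
    added_state_filter: StateFilter
    # Memberships previously sent as explicit required state that should
    # now be remembered in the per-connection lazy-member cache.
    extra_users_to_add_to_lazy_cache: AbstractSet[str] = frozenset()
    # Previously sent lazy memberships that have changed and must be
    # re-sent if requested again.
    lazy_members_invalidated: AbstractSet[str] = frozenset()
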
@@ -4407,50 +4413,37 @@ class RequiredStateChangesTestCase(unittest.TestCase):
we're sending down another response with a new event from user4.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
}
},
request_required_state_map={
EventTypes.Member: {StateValues.LAZY, "@user4:test"}
},
previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
previously_returned_lazy_user_ids={"@user2:test", "@user3:test"},
request_lazy_load_user_ids={"@user4:test"},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=(
# Since "@user4:test" was added, we should persist the changed
# required state config.
#
# Also remove "@user2:test" since that state has changed and is no
# longer being requested anymore. Since something was removed,
# we should also persist the change to required state. That way next
# time, they request "@user2:test", we see that we haven't sent
# it before and send the new state. (we should still keep track
expected_with_state_deltas=_RequiredStateChangesReturn(
# The `request_required_state_map` hasn't changed
None,
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
# Previous request did not include any explicit members,
# so there are no extra users to add to the lazy cache.
extra_users_to_add_to_lazy_cache=frozenset(),
# Remove "@user2:test" since that state has changed and
# is no longer being requested anymore. Since something
# was removed, we should also persist the change to
# required state. That way next time, they request
# "@user2:test", we see that we haven't sent it before
# and send the new state. (we should still keep track
# that we've sent specific `EventTypes.Member` before)
{
EventTypes.Member: {
StateValues.LAZY,
"@user3:test",
"@user4:test",
}
},
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
lazy_members_invalidated={"@user2:test"},
),
expected_without_state_deltas=(
# Since "@user4:test" was added, we should persist the changed
# required state config.
{
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
"@user4:test",
}
},
expected_without_state_deltas=_RequiredStateChangesReturn(
# The `request_required_state_map` hasn't changed
None,
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
# Previous request did not include any explicit members,
# so there are no extra users to add to the lazy cache.
extra_users_to_add_to_lazy_cache=frozenset(),
lazy_members_invalidated=frozenset(),
),
),
),
@@ -4464,40 +4457,81 @@ class RequiredStateChangesTestCase(unittest.TestCase):
EventTypes.Member: {"@user2:test", "@user3:test"}
},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
previously_returned_lazy_user_ids=frozenset(),
request_lazy_load_user_ids=frozenset(),
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=(
expected_with_state_deltas=_RequiredStateChangesReturn(
# Since `StateValues.LAZY` was added, we should persist the
# changed required state config.
#
# Also remove "@user2:test" since that state has changed and is no
# longer being requested anymore. Since something was removed,
# we should also persist the change to required state. That way next
# time, they request "@user2:test", we see that we haven't sent
# it before and send the new state. (we should still keep track
# that we've sent specific `EventTypes.Member` before)
{
EventTypes.Member: {
StateValues.LAZY,
"@user3:test",
}
},
# We don't need to request anything more if they are requesting
# less state now
{EventTypes.Member: {StateValues.LAZY}},
# No users are being lazy loaded, so nothing to request.
StateFilter.none(),
# Remember the fact that we've sent @user3 down before,
# but not @user2 as that has been invalidated.
extra_users_to_add_to_lazy_cache={"@user3:test"},
# Nothing to invalidate as there are no existing lazy members.
lazy_members_invalidated=frozenset(),
),
expected_without_state_deltas=(
expected_without_state_deltas=_RequiredStateChangesReturn(
# Since `StateValues.LAZY` was added, we should persist the
# changed required state config.
{
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
}
},
# We don't need to request anything more if they are requesting
# less state now
{EventTypes.Member: {StateValues.LAZY}},
# No users are being lazy loaded, so nothing to request.
StateFilter.none(),
# Remember the fact that we've sent the users down before.
extra_users_to_add_to_lazy_cache={"@user2:test", "@user3:test"},
# Nothing to invalidate as there are no existing lazy members.
lazy_members_invalidated=frozenset(),
),
),
),
(
"state_key_expand_lazy_keep_previous_memberships_need_previous_sent",
"""
Test expanding the `required_state` to lazy-loading room
members. If a previously explicit membership is requested then
we should not send it again (as it was already sent before).
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
EventTypes.Member: {"@user2:test", "@user3:test"}
},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
previously_returned_lazy_user_ids=frozenset(),
request_lazy_load_user_ids={"@user3:test"},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=_RequiredStateChangesReturn(
# Since `StateValues.LAZY` was added, we should persist the
# changed required state config.
{EventTypes.Member: {StateValues.LAZY}},
# We have already sent @user3 down before.
#
# `@user3:test` is required for lazy loading, but we've
# already sent it down before (due to it being in
# `previous_required_state_map`), so we don't need to
# request it again.
StateFilter.none(),
# Remember the fact that we've sent @user3 down before,
# but not @user2 as that has been invalidated.
extra_users_to_add_to_lazy_cache={"@user3:test"},
# Nothing to invalidate as there are no existing lazy members.
lazy_members_invalidated=frozenset(),
),
expected_without_state_deltas=_RequiredStateChangesReturn(
# Since `StateValues.LAZY` was added, we should persist the
# changed required state config.
{EventTypes.Member: {StateValues.LAZY}},
# We have already sent @user3 down before.
#
# `@user3:test` is required for lazy loading, but we've
# already sent it down before (due to it being in
# `previous_required_state_map`), so we don't need to
# request it again.
StateFilter.none(),
# Remember the fact that we've sent the users down before.
extra_users_to_add_to_lazy_cache={"@user2:test", "@user3:test"},
# Nothing to invalidate as there are no existing lazy members.
lazy_members_invalidated=frozenset(),
),
),
),
@@ -4507,36 +4541,33 @@ class RequiredStateChangesTestCase(unittest.TestCase):
Test retracting the `required_state` to no longer lazy-loading room members.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
}
},
previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
request_required_state_map={},
previously_returned_lazy_user_ids={"@user2:test", "@user3:test"},
request_lazy_load_user_ids=set(),
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=(
expected_with_state_deltas=_RequiredStateChangesReturn(
# Remove `EventTypes.Member` since there's been a change to that
# state, (persist the change to required state). That way next
# time, they request `EventTypes.Member`, we see that we haven't
# sent it before and send the new state. (if we were tracking
# that we sent any other state, we should still keep track
# that).
#
# This acts the same as the `simple_remove_type` test. It's
# possible that we could remember the specific `state_keys` that
# we have sent down before but this currently just acts the same
# as if a whole `type` was removed. Perhaps it's good that we
# "garbage collect" and forget what we've sent before for a
# given `type` when the client stops caring about a certain
# `type`.
# state, (persist the change to required state).
{},
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
# Previous request did not include any explicit members,
# so there are no extra users to add to the lazy cache.
extra_users_to_add_to_lazy_cache=frozenset(),
# Explicitly remove the now invalidated @user2:test
# membership.
#
# We don't invalidate @user3:test as that membership
# hasn't changed. We continue to store the existing lazy
# members since they might be useful for future
# requests. (Alternatively, we could invalidate all
# members in the room when the client stops lazy
# loading, but we opt to keep track of them).
lazy_members_invalidated={"@user2:test"},
),
expected_without_state_deltas=(
expected_without_state_deltas=_RequiredStateChangesReturn(
# `EventTypes.Member` is no longer requested but since that
# state hasn't changed, nothing should change (we should still
# keep track that we've sent `EventTypes.Member` before).
@@ -4544,13 +4575,20 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
# Previous request did not include any explicit members,
# so there are no extra users to add to the lazy cache.
extra_users_to_add_to_lazy_cache=frozenset(),
# Nothing has been invalidated.
lazy_members_invalidated=frozenset(),
),
),
),
(
"state_key_retract_lazy_keep_previous_memberships_with_new_memberships",
"state_key_retract_lazy_keep_previous_explicit_memberships",
"""
Test retracting the `required_state` to no longer lazy-loading room members.
Test removing explicit memberships from the `required_state`
when lazy-loading room members tracks previously sent
memberships.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
@@ -4560,39 +4598,144 @@ class RequiredStateChangesTestCase(unittest.TestCase):
"@user3:test",
}
},
request_required_state_map={EventTypes.Member: {"@user4:test"}},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
previously_returned_lazy_user_ids=frozenset(),
request_lazy_load_user_ids={"@user3:test"},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=(
expected_with_state_deltas=_RequiredStateChangesReturn(
# Since an explicit membership was removed, we record
# the new required state config and move them to lazy
# members.
{EventTypes.Member: {StateValues.LAZY}},
# We have already sent @user3 down before.
#
# `@user3:test` is required for lazy loading, but we've
# already sent it down before (due to it being in
# `previous_required_state_map`), so we don't need to
# request it again.
StateFilter.none(),
# Remember the fact that we've sent @user3 down before,
# but not @user2 as that has been invalidated.
extra_users_to_add_to_lazy_cache={"@user3:test"},
# Nothing to invalidate as there are no existing lazy members.
lazy_members_invalidated=frozenset(),
),
expected_without_state_deltas=_RequiredStateChangesReturn(
# While some explicit memberships were removed, there were no
# state changes, so we don't need to persist the new required
# state config yet.
None,
# We have already sent @user3 down before.
#
# `@user3:test` is required for lazy loading, but we've
# already sent it down before (due to it being in
# `previous_required_state_map`), so we don't need to
# request it again.
StateFilter.none(),
# Remember the fact that we've sent the users down before.
extra_users_to_add_to_lazy_cache=frozenset(),
# Nothing to invalidate as there are no existing lazy members.
lazy_members_invalidated=frozenset(),
),
),
),
(
"state_key_retract_lazy_keep_previous_explicit_me_memberships",
"""
Test removing explicit $ME memberships from the `required_state`
when lazy-loading room members tracks previously sent
memberships.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
EventTypes.Member: {
StateValues.LAZY,
StateValues.ME,
"@user2:test",
}
},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
previously_returned_lazy_user_ids=frozenset(),
request_lazy_load_user_ids={"@user:test"},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=_RequiredStateChangesReturn(
# Since an explicit membership was removed, we record
# the new required state config and move them to lazy
# members.
{EventTypes.Member: {StateValues.LAZY}},
# We have already sent @user down before.
#
# `@user:test` is required for lazy loading, but we've
# already sent it down before (due to `StateValues.ME`
# being in `previous_required_state_map`), so we don't
# need to request it again.
StateFilter.none(),
# Remember the fact that we've sent @user down before,
# but not @user2 as that has been invalidated.
extra_users_to_add_to_lazy_cache={"@user:test"},
# Nothing to invalidate as there are no existing lazy members.
lazy_members_invalidated=frozenset(),
),
expected_without_state_deltas=_RequiredStateChangesReturn(
# While some explicit memberships were removed, there were no
# state changes, so we don't need to persist the new required
# state config yet.
None,
# We have already sent @user down before.
#
# `@user:test` is required for lazy loading, but we've
# already sent it down before (due to `StateValues.ME`
# being in `previous_required_state_map`), so we don't
# need to request it again.
StateFilter.none(),
# No relevant state has changed and we don't persist the
# changed required_state_map, so we don't yet move the
# $ME state to the lazy cache.
extra_users_to_add_to_lazy_cache=frozenset(),
# Nothing to invalidate as there are no existing lazy members.
lazy_members_invalidated=frozenset(),
),
),
),
(
"state_key_retract_lazy_keep_previous_memberships_with_new_memberships",
"""
Test retracting the `required_state` to no longer lazy-loading room members.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
request_required_state_map={EventTypes.Member: {"@user4:test"}},
previously_returned_lazy_user_ids={"@user2:test", "@user3:test"},
request_lazy_load_user_ids=frozenset(),
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=_RequiredStateChangesReturn(
# Since "@user4:test" was added, we should persist the changed
# required state config.
#
{EventTypes.Member: {"@user4:test"}},
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
# Previous request did not include any explicit members,
# so there are no extra users to add to the lazy cache.
extra_users_to_add_to_lazy_cache=frozenset(),
# Also remove "@user2:test" since that state has changed and is no
# longer being requested anymore. Since something was removed,
# we should also persist the change to required state. That way next
# time, they request "@user2:test", we see that we haven't sent
# it before and send the new state. (we should still keep track
# that we've sent specific `EventTypes.Member` before)
{
EventTypes.Member: {
"@user3:test",
"@user4:test",
}
},
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
lazy_members_invalidated={"@user2:test"},
),
expected_without_state_deltas=(
expected_without_state_deltas=_RequiredStateChangesReturn(
# Since "@user4:test" was added, we should persist the changed
# required state config.
{
EventTypes.Member: {
"@user2:test",
"@user3:test",
"@user4:test",
}
},
{EventTypes.Member: {"@user4:test"}},
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
# Previous request did not include any explicit members,
# so there are no extra users to add to the lazy cache.
extra_users_to_add_to_lazy_cache=frozenset(),
# We don't invalidate user2 as they haven't changed
lazy_members_invalidated=frozenset(),
),
),
),
@@ -4613,7 +4756,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# room required state config to match the request. And since we were previously
# already fetching everything, we don't have to fetch anything now that they've
# narrowed.
expected_with_state_deltas=(
expected_with_state_deltas=_RequiredStateChangesReturn(
{
StateValues.WILDCARD: {
"state_key1",
@@ -4623,7 +4766,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
},
StateFilter.none(),
),
expected_without_state_deltas=(
expected_without_state_deltas=_RequiredStateChangesReturn(
{
StateValues.WILDCARD: {
"state_key1",
@@ -4649,11 +4792,11 @@ class RequiredStateChangesTestCase(unittest.TestCase):
},
state_deltas={("type1", "state_key1"): "$event_id"},
# We've added a wildcard, so we persist the change and request everything
expected_with_state_deltas=(
expected_with_state_deltas=_RequiredStateChangesReturn(
{StateValues.WILDCARD: {StateValues.WILDCARD}},
StateFilter.all(),
),
expected_without_state_deltas=(
expected_without_state_deltas=_RequiredStateChangesReturn(
{StateValues.WILDCARD: {StateValues.WILDCARD}},
StateFilter.all(),
),
@@ -4673,7 +4816,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# request. And since we were previously already fetching
# everything, we don't have to fetch anything now that they've
# narrowed.
expected_with_state_deltas=(
expected_with_state_deltas=_RequiredStateChangesReturn(
{
"type1": {
"state_key1",
@@ -4683,7 +4826,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
},
StateFilter.none(),
),
expected_without_state_deltas=(
expected_without_state_deltas=_RequiredStateChangesReturn(
{
"type1": {
"state_key1",
@@ -4708,11 +4851,11 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# update the effective room required state config to match the
# request. And we need to request all of the state for that type
# because we previously only sent down a few keys.
expected_with_state_deltas=(
expected_with_state_deltas=_RequiredStateChangesReturn(
{"type1": {StateValues.WILDCARD, "state_key2", "state_key3"}},
StateFilter.from_types([("type1", None)]),
),
expected_without_state_deltas=(
expected_without_state_deltas=_RequiredStateChangesReturn(
{
"type1": {
StateValues.WILDCARD,
@@ -4734,42 +4877,66 @@ class RequiredStateChangesTestCase(unittest.TestCase):
test_parameters: RequiredStateChangesTestParameters,
) -> None:
# Without `state_deltas`
changed_required_state_map, added_state_filter = _required_state_changes(
state_changes = _required_state_changes(
user_id="@user:test",
prev_required_state_map=test_parameters.previous_required_state_map,
request_required_state_map=test_parameters.request_required_state_map,
previously_returned_lazy_user_ids=test_parameters.previously_returned_lazy_user_ids,
request_lazy_load_user_ids=test_parameters.request_lazy_load_user_ids,
state_deltas={},
)

self.assertEqual(
changed_required_state_map,
test_parameters.expected_without_state_deltas[0],
state_changes.changed_required_state_map,
test_parameters.expected_without_state_deltas.changed_required_state_map,
"changed_required_state_map does not match (without state_deltas)",
)
self.assertEqual(
added_state_filter,
test_parameters.expected_without_state_deltas[1],
state_changes.added_state_filter,
test_parameters.expected_without_state_deltas.added_state_filter,
"added_state_filter does not match (without state_deltas)",
)
self.assertEqual(
state_changes.lazy_members_invalidated,
test_parameters.expected_without_state_deltas.lazy_members_invalidated,
"lazy_members_invalidated does not match (without state_deltas)",
)
self.assertEqual(
state_changes.extra_users_to_add_to_lazy_cache,
test_parameters.expected_without_state_deltas.extra_users_to_add_to_lazy_cache,
"extra_users_to_add_to_lazy_cache does not match (without state_deltas)",
)

# With `state_deltas`
changed_required_state_map, added_state_filter = _required_state_changes(
state_changes = _required_state_changes(
user_id="@user:test",
prev_required_state_map=test_parameters.previous_required_state_map,
request_required_state_map=test_parameters.request_required_state_map,
previously_returned_lazy_user_ids=test_parameters.previously_returned_lazy_user_ids,
request_lazy_load_user_ids=test_parameters.request_lazy_load_user_ids,
state_deltas=test_parameters.state_deltas,
)

self.assertEqual(
changed_required_state_map,
test_parameters.expected_with_state_deltas[0],
state_changes.changed_required_state_map,
test_parameters.expected_with_state_deltas.changed_required_state_map,
"changed_required_state_map does not match (with state_deltas)",
)
self.assertEqual(
added_state_filter,
test_parameters.expected_with_state_deltas[1],
state_changes.added_state_filter,
test_parameters.expected_with_state_deltas.added_state_filter,
"added_state_filter does not match (with state_deltas)",
)
self.assertEqual(
state_changes.lazy_members_invalidated,
test_parameters.expected_with_state_deltas.lazy_members_invalidated,
"lazy_members_invalidated does not match (with state_deltas)",
)
self.assertEqual(
state_changes.extra_users_to_add_to_lazy_cache,
test_parameters.expected_with_state_deltas.extra_users_to_add_to_lazy_cache,
"extra_users_to_add_to_lazy_cache does not match (with state_deltas)",
)

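Returning a named container rather than the previous 2-tuple lets the function grow new fields without breaking positional unpacking at every call site. A caller reads fields off the result, roughly like this (hypothetical values, mirroring the harness above; `EventTypes` and `StateValues` come from the module under test):

changes = _required_state_changes(
    user_id="@user:test",
    prev_required_state_map={},
    request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
    previously_returned_lazy_user_ids=frozenset(),
    request_lazy_load_user_ids={"@alice:test"},  # hypothetical user
    state_deltas={},
)
if changes.changed_required_state_map is not None:
    # Persist the updated per-connection required state map.
    ...
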
@parameterized.expand(
[
@@ -4805,12 +4972,16 @@ class RequiredStateChangesTestCase(unittest.TestCase):
}

# (function under test)
changed_required_state_map, added_state_filter = _required_state_changes(
state_changes = _required_state_changes(
user_id="@user:test",
prev_required_state_map=previous_required_state_map,
request_required_state_map=request_required_state_map,
previously_returned_lazy_user_ids=frozenset(),
request_lazy_load_user_ids=frozenset(),
state_deltas={},
)
changed_required_state_map = state_changes.changed_required_state_map

assert changed_required_state_map is not None

# We should only remember up to the maximum number of state keys
@@ -4874,12 +5045,16 @@ class RequiredStateChangesTestCase(unittest.TestCase):
)

# (function under test)
changed_required_state_map, added_state_filter = _required_state_changes(
state_changes = _required_state_changes(
user_id="@user:test",
prev_required_state_map=previous_required_state_map,
request_required_state_map=request_required_state_map,
previously_returned_lazy_user_ids=frozenset(),
request_lazy_load_user_ids=frozenset(),
state_deltas={},
)
changed_required_state_map = state_changes.changed_required_state_map

assert changed_required_state_map is not None

# Should include all of the requested state

@@ -690,7 +690,7 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase):
user1_tok = self.login(user1_id, "pass")

# Create a remote invite room without any `unsigned.invite_room_state`
_remote_invite_room_id = self._create_remote_invite_room_for_user(
_remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id, None
)

@@ -760,7 +760,7 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase):

# Create a remote invite room with some `unsigned.invite_room_state`
# indicating that the room is encrypted.
remote_invite_room_id = self._create_remote_invite_room_for_user(
remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
@@ -849,7 +849,7 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase):

# Create a remote invite room with some `unsigned.invite_room_state`
# but don't set any room encryption event.
remote_invite_room_id = self._create_remote_invite_room_for_user(
remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
@@ -1484,7 +1484,7 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase):
user1_tok = self.login(user1_id, "pass")

# Create a remote invite room without any `unsigned.invite_room_state`
_remote_invite_room_id = self._create_remote_invite_room_for_user(
_remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id, None
)

@@ -1554,7 +1554,7 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase):

# Create a remote invite room with some `unsigned.invite_room_state` indicating
# that it is a space room
remote_invite_room_id = self._create_remote_invite_room_for_user(
remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
@@ -1637,7 +1637,7 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase):

# Create a remote invite room with some `unsigned.invite_room_state`
# but the create event does not specify a room type (normal room)
remote_invite_room_id = self._create_remote_invite_room_for_user(
remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(

@@ -23,6 +23,7 @@ from synapse.api.constants import EventContentFields, EventTypes, JoinRules, Mem
from synapse.handlers.sliding_sync import StateValues
from synapse.rest.client import knock, login, room, sync
from synapse.server import HomeServer
from synapse.storage.databases.main.events import DeltaState, SlidingSyncTableChanges
from synapse.util.clock import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
@@ -642,11 +643,6 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
# This appears because *some* membership in the room changed and the
# heroes are recalculated and is thrown in because we have it. But this
# is technically optional and not needed because we've already seen user2
# in the last sync (and their membership hasn't changed).
state_map[(EventTypes.Member, user2_id)],
# Appears because there is a message in the timeline from this user
state_map[(EventTypes.Member, user4_id)],
# Appears because there is a membership event in the timeline from this user
@@ -841,6 +837,437 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
exact=True,
)

def test_lazy_loading_room_members_limited_sync(self) -> None:
"""Test that when using lazy loading for room members and a limited sync
misses a membership change, we include the membership change the next
time said user says something.
"""

user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")

room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)

# Send a message from each user to the room so that both memberships are sent down.
self.helper.send(room_id1, "1", tok=user1_tok)
self.helper.send(room_id1, "2", tok=user2_tok)

# Make a first sync with lazy loading for the room members to establish
# a position
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Member, StateValues.LAZY],
],
"timeline_limit": 2,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)

# We should see both membership events in required_state
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
state_map[(EventTypes.Member, user2_id)],
},
exact=True,
)

# User2 changes their display name (causing a membership change)
self.helper.send_state(
room_id1,
event_type=EventTypes.Member,
state_key=user2_id,
body={
EventContentFields.MEMBERSHIP: Membership.JOIN,
EventContentFields.MEMBERSHIP_DISPLAYNAME: "New Name",
},
tok=user2_tok,
)

# Send a couple of messages to the room to push out the membership change
self.helper.send(room_id1, "3", tok=user1_tok)
self.helper.send(room_id1, "4", tok=user1_tok)

# Make an incremental Sliding Sync request
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)

# The membership change should *not* be included yet as user2 doesn't
# have any events in the timeline.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1].get("required_state", []),
set(),
exact=True,
)

# Now user2 sends a message to the room
self.helper.send(room_id1, "5", tok=user2_tok)

# Make another incremental Sliding Sync request
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)

# The membership change should now be included as user2 has an event
# in the timeline.
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1].get("required_state", []),
{
state_map[(EventTypes.Member, user2_id)],
},
exact=True,
)

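The rule the test above exercises is that lazy loading keys off timeline senders: a membership is only (re-)sent when its user appears in the timeline being returned and this connection has not already sent that membership. A minimal sketch of that selection, with hypothetical helper and variable names (the real logic lives in the sliding sync handler):

def members_to_lazy_load(timeline_events, previously_sent_user_ids):
    # Only the senders of the events we are about to return are
    # candidates for lazy-loaded membership state...
    senders = {event.sender for event in timeline_events}
    # ...and anyone this connection has already sent (and whose
    # membership hasn't since been invalidated by a change) is skipped.
    return senders - previously_sent_user_ids
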
def test_lazy_loading_room_members_across_multiple_rooms(self) -> None:
"""Test that lazy loading room members are tracked per-room correctly."""

user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")

# Create two rooms with both users in them and send a message in each
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.send(room_id1, "room1-msg1", tok=user2_tok)

room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id2, user1_id, tok=user1_tok)
self.helper.send(room_id2, "room2-msg1", tok=user2_tok)

# Make a sync with lazy loading for the room members to establish
# a position
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Member, StateValues.LAZY],
],
"timeline_limit": 1,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)

# We expect to see only user2's membership in both rooms
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user2_id)],
},
exact=True,
)

# Send a message in room1 from user1
self.helper.send(room_id1, "room1-msg2", tok=user1_tok)

# Make an incremental Sliding Sync request and check that we get user1's
# membership.
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)

state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
},
exact=True,
)

# Send a message in room2 from user1
self.helper.send(room_id2, "room2-msg2", tok=user1_tok)

# Make an incremental Sliding Sync request and check that we get user1's
# membership.
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id2)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id2]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
},
exact=True,
)

def test_lazy_loading_room_members_across_multiple_connections(self) -> None:
"""Test that lazy loading room members are tracked per-connection
correctly.

This catches bugs where a membership sent down one connection was
incorrectly assumed to have been sent down another connection.
"""

user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")

room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)

self.helper.send(room_id1, "1", tok=user2_tok)

# Make a sync with lazy loading for the room members to establish
# a position
sync_body1 = {
"conn_id": "first-connection",
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Member, StateValues.LAZY],
],
"timeline_limit": 1,
}
},
}
response_body, from_token1 = self.do_sync(sync_body1, tok=user1_tok)

# We expect to see only user2's membership in the room
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user2_id)],
},
exact=True,
)

# Now make a new connection
sync_body2 = {
"conn_id": "second-connection",
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Member, StateValues.LAZY],
],
"timeline_limit": 1,
}
},
}
response_body, from_token2 = self.do_sync(sync_body2, tok=user1_tok)

# We should see user2's membership as this is a new connection
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user2_id)],
},
exact=True,
)

# If we send a message from user1 and sync again on the first connection,
# we should get user1's membership
self.helper.send(room_id1, "2", tok=user1_tok)
response_body, from_token1 = self.do_sync(
sync_body1, since=from_token1, tok=user1_tok
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
},
exact=True,
)

# We sync again on the first connection to "ack" the position. This
# triggers the `sliding_sync_connection_lazy_members` rows to have
# their connection_position set to null.
self.do_sync(sync_body1, since=from_token1, tok=user1_tok)

# If we sync again on the second connection, we should also get user1's
# membership
response_body, _ = self.do_sync(sync_body2, since=from_token2, tok=user1_tok)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
},
exact=True,
)

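The per-connection bookkeeping this test depends on can be pictured as rows keyed by connection, room, and user, each tied to the connection position that sent it until the client acks that position. An illustrative in-memory model with hypothetical names, not the actual table schema:

# Maps (conn_id, room_id, user_id) to the position that sent the
# membership, or None once that position has been acked.
lazy_members: dict = {}

def record_sent(conn_id: str, room_id: str, user_id: str, position: int) -> None:
    # A newly sent membership stays tied to its position until acked.
    lazy_members[(conn_id, room_id, user_id)] = position

def ack_position(conn_id: str, position: int) -> None:
    # Once the client syncs from this position, memberships sent at or
    # before it are confirmed and no longer tied to a position.
    for key, pending in list(lazy_members.items()):
        if key[0] == conn_id and pending is not None and pending <= position:
            lazy_members[key] = None
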
def test_lazy_loading_room_members_forked_position(self) -> None:
"""Test that lazy loading room members are tracked correctly when a
connection position is reused"""

user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")

room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)

self.helper.send(room_id1, "1", tok=user2_tok)

# Make a sync with lazy loading for the room members to establish
# a position
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Member, StateValues.LAZY],
],
"timeline_limit": 1,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)

# We expect to see only user2's membership in the room
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user2_id)],
},
exact=True,
)

# Send a message in room1 from user1
self.helper.send(room_id1, "2", tok=user1_tok)

# Make an incremental Sliding Sync request and check that we get user1's
# membership.
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
},
exact=True,
)

# Now, reuse the original position and check we still get user1's
# membership.
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
},
exact=True,
)

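Continuing the bookkeeping sketch from earlier: when a client replays an old position (the fork exercised above), a membership only counts as already sent if it was confirmed by an ack or was sent at or before the position being resumed from; otherwise it must be sent again:

def was_sent_at(conn_id: str, room_id: str, user_id: str, request_position: int) -> bool:
    # Hypothetical lookup against the lazy_members model sketched earlier.
    _MISSING = object()
    pending = lazy_members.get((conn_id, room_id, user_id), _MISSING)
    if pending is _MISSING:
        return False  # never sent on this connection
    # None means confirmed by an ack; otherwise compare positions.
    return pending is None or pending <= request_position
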
def test_lazy_loading_room_members_explicit_membership_removed(self) -> None:
|
||||
"""Test the case where we requested explicit memberships and then later
|
||||
changed to lazy loading."""
|
||||
|
||||
user1_id = self.register_user("user1", "pass")
|
||||
user1_tok = self.login(user1_id, "pass")
|
||||
user2_id = self.register_user("user2", "pass")
|
||||
user2_tok = self.login(user2_id, "pass")
|
||||
|
||||
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
|
||||
self.helper.join(room_id1, user1_id, tok=user1_tok)
|
||||
|
||||
self.helper.send(room_id1, "1", tok=user2_tok)
|
||||
|
||||
# Make a sync with lazy loading for the room members to establish
|
||||
# a position
|
||||
sync_body = {
|
||||
"lists": {
|
||||
"foo-list": {
|
||||
"ranges": [[0, 1]],
|
||||
"required_state": [
|
||||
[EventTypes.Member, StateValues.ME],
|
||||
],
|
||||
"timeline_limit": 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
|
||||
|
||||
# We expect to see only user1's membership in the room
|
||||
state_map = self.get_success(
|
||||
self.storage_controllers.state.get_current_state(room_id1)
|
||||
)
|
||||
self._assertRequiredStateIncludes(
|
||||
response_body["rooms"][room_id1]["required_state"],
|
||||
{
|
||||
state_map[(EventTypes.Member, user1_id)],
|
||||
},
|
||||
exact=True,
|
||||
)
|
||||
|
||||
# Now change to lazy loading...
|
||||
sync_body["lists"]["foo-list"]["required_state"] = [
|
||||
[EventTypes.Member, StateValues.LAZY],
|
||||
]
|
||||
|
||||
# Send a message in room1 from user2
|
||||
self.helper.send(room_id1, "2", tok=user2_tok)
|
||||
response_body, from_token = self.do_sync(
|
||||
sync_body, since=from_token, tok=user1_tok
|
||||
)
|
||||
|
||||
# We should see user2's membership as it's in the timeline
|
||||
state_map = self.get_success(
|
||||
self.storage_controllers.state.get_current_state(room_id1)
|
||||
)
|
||||
self._assertRequiredStateIncludes(
|
||||
response_body["rooms"][room_id1]["required_state"],
|
||||
{
|
||||
state_map[(EventTypes.Member, user2_id)],
|
||||
},
|
||||
exact=True,
|
||||
)
|
||||
|
||||
# Now send a message in room1 from user1
|
||||
self.helper.send(room_id1, "3", tok=user1_tok)
|
||||
|
||||
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
|
||||
|
||||
# We should not see any memberships as we've already seen user1's
|
||||
# membership.
|
||||
state_map = self.get_success(
|
||||
self.storage_controllers.state.get_current_state(room_id1)
|
||||
)
|
||||
self._assertRequiredStateIncludes(
|
||||
response_body["rooms"][room_id1].get("required_state", []),
|
||||
[],
|
||||
exact=True,
|
||||
)
|
||||
|
||||
def test_rooms_required_state_me(self) -> None:
|
||||
"""
|
||||
Test `rooms.required_state` correctly handles $ME.
|
||||
@@ -1686,3 +2113,135 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
|
||||
# We should not see the room name again, as we have already sent that
|
||||
# down.
|
||||
self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
|
||||
|
||||
    def test_lazy_loading_room_members_state_reset_non_limited_timeline(self) -> None:
        """Test that when using lazy-loaded members, if a membership state is
        reset to a previous state and the sync is not limited, then we send down
        the state reset.

        Regression test: previously we only returned memberships relevant to
        the timeline and so did not tell clients about state resets for
        users who did not send any timeline events.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
        content = self.helper.join(room_id, user1_id, tok=user1_tok)
        first_event_id = content["event_id"]

        # Send a message so that the user1 membership comes down sync (because
        # we're lazy-loading room members)
        self.helper.send(room_id, "msg", tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Member, StateValues.LAZY],
                    ],
                    "timeline_limit": 1,
                }
            }
        }
        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Check that user1 is returned
        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id)
        )
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id]["required_state"],
            {
                state_map[(EventTypes.Member, user1_id)],
            },
            exact=True,
        )

        # user1 changes their display name
        content = self.helper.send_state(
            room_id,
            EventTypes.Member,
            body={"membership": "join", "displayname": "New display name"},
            state_key=user1_id,
            tok=user1_tok,
        )
        second_event_id = content["event_id"]

        response_body, from_token = self.do_sync(
            sync_body, since=from_token, tok=user1_tok
        )

        # We should see the updated membership state
        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id)
        )
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id]["required_state"],
            {
                state_map[(EventTypes.Member, user1_id)],
            },
            exact=True,
        )
        self.assertEqual(
            response_body["rooms"][room_id]["required_state"][0]["event_id"],
            second_event_id,
        )

        # Now fake a reset of the membership state back to the first event
        persist_event_store = self.hs.get_datastores().persist_events
        assert persist_event_store is not None

        self.get_success(
            persist_event_store.update_current_state(
                room_id,
                DeltaState(
                    to_insert={(EventTypes.Member, user1_id): first_event_id},
                    to_delete=[],
                ),
                # We don't need to worry about sliding sync changes for this test
                SlidingSyncTableChanges(
                    room_id=room_id,
                    joined_room_bump_stamp_to_fully_insert=None,
                    joined_room_updates={},
                    membership_snapshot_shared_insert_values={},
                    to_insert_membership_snapshots=[],
                    to_delete_membership_snapshots=[],
                ),
            )
        )

        # Send a message from *user2* so that user1's membership wouldn't
        # normally come down sync.
        self.helper.send(room_id, "msg2", tok=user2_tok)

        response_body, from_token = self.do_sync(
            sync_body, since=from_token, tok=user1_tok
        )

        # This should be a non-limited sync as there is only one timeline event
        # (<= `timeline_limit`). This is important as we're specifically testing
        # the non-`limited` timeline scenario. And for reference, we don't send
        # down state resets on limited timelines when using lazy-loaded
        # memberships.
        self.assertFalse(
            response_body["rooms"][room_id].get("limited", False),
            "Expected a non-limited timeline",
        )

        # We should see the reset membership state of user1
        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id)
        )
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id]["required_state"],
            {
                state_map[(EventTypes.Member, user1_id)],
            },
        )
        self.assertEqual(
            response_body["rooms"][room_id]["required_state"][0]["event_id"],
            first_event_id,
        )

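The rule this regression test encodes can be stated compactly: on a non-limited sync, changed memberships must be sent down even for users who produced no timeline events. A rough sketch of that rule, under assumed names (this is not Synapse's implementation):

```python
# Rough sketch of the decision the regression test above pins down: on a
# *non-limited* sync, re-send any lazy-loaded membership whose current
# event differs from what we last sent, even for users with no timeline
# events.
def lazy_members_to_resend(
    previously_sent: dict[str, str],  # user_id -> event_id we sent before
    current_state: dict[str, str],  # user_id -> current membership event_id
    timeline_limited: bool,
) -> set[str]:
    if timeline_limited:
        # State resets are not sent down for lazy-loaded memberships on
        # limited timelines (per the comment in the test above).
        return set()
    return {
        user_id
        for user_id, sent_event_id in previously_sent.items()
        if current_state.get(user_id) != sent_event_id
    }
```
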
@@ -257,7 +257,7 @@ class SlidingSyncBase(unittest.HomeserverTestCase):
        invitee_user_id: str,
        unsigned_invite_room_state: list[StrippedStateEvent] | None,
        invite_room_id: str | None = None,
    ) -> str:
    ) -> tuple[str, EventBase]:
        """
        Create a fake invite for a remote room and persist it.

@@ -323,11 +323,13 @@ class SlidingSyncBase(unittest.HomeserverTestCase):
        context = EventContext.for_outlier(self.hs.get_storage_controllers())
        persist_controller = self.hs.get_storage_controllers().persistence
        assert persist_controller is not None
        self.get_success(persist_controller.persist_event(invite_event, context))
        persisted_event, _, _ = self.get_success(
            persist_controller.persist_event(invite_event, context)
        )

        self._remote_invite_count += 1

        return invite_room_id
        return invite_room_id, persisted_event

    def _bump_notifier_wait_for_events(
        self,
@@ -763,7 +765,7 @@ class SlidingSyncTestCase(SlidingSyncBase):
        user1_tok = self.login(user1_id, "pass")

        # Create a remote room invite (out-of-band membership)
        room_id = self._create_remote_invite_room_for_user(user1_id, None)
        room_id, _ = self._create_remote_invite_room_for_user(user1_id, None)

        # Make the Sliding Sync request
        sync_body = {

@@ -30,19 +30,23 @@ from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.storage.databases.main.events import DeltaState
from synapse.storage.databases.main.events_bg_updates import (
    _resolve_stale_data_in_sliding_sync_joined_rooms_table,
    _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
)
from synapse.types import create_requester
from synapse.types import SlidingSyncStreamToken, create_requester
from synapse.types.handlers.sliding_sync import (
    LAZY_MEMBERS_UPDATE_INTERVAL,
    StateValues,
)
from synapse.types.storage import _BackgroundUpdates
from synapse.util.clock import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.test_utils.event_injection import create_event
from tests.unittest import HomeserverTestCase

logger = logging.getLogger(__name__)

@@ -86,7 +90,7 @@ class _SlidingSyncMembershipSnapshotResult:
    forgotten: bool = False


class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
class SlidingSyncTablesTestCaseBase(SlidingSyncBase):
    """
    Helpers to deal with testing that the
    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are
@@ -97,6 +101,7 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
        admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -202,78 +207,6 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
            for row in rows
        }

    _remote_invite_count: int = 0

    def _create_remote_invite_room_for_user(
        self,
        invitee_user_id: str,
        unsigned_invite_room_state: list[StrippedStateEvent] | None,
    ) -> tuple[str, EventBase]:
        """
        Create a fake invite for a remote room and persist it.

        We don't have any state for these kinds of rooms and can only rely on the
        stripped state included in the unsigned portion of the invite event to
        identify the room.

        Args:
            invitee_user_id: The person being invited
            unsigned_invite_room_state: List of stripped state events to assist the
                receiver in identifying the room.

        Returns:
            The room ID of the remote invite room and the persisted remote invite
            event.
        """
        invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"

        invite_event_dict = {
            "room_id": invite_room_id,
            "sender": "@inviter:remote_server",
            "state_key": invitee_user_id,
            "depth": 1,
            "origin_server_ts": 1,
            "type": EventTypes.Member,
            "content": {"membership": Membership.INVITE},
            "auth_events": [],
            "prev_events": [],
        }
        if unsigned_invite_room_state is not None:
            serialized_stripped_state_events = []
            for stripped_event in unsigned_invite_room_state:
                serialized_stripped_state_events.append(
                    {
                        "type": stripped_event.type,
                        "state_key": stripped_event.state_key,
                        "sender": stripped_event.sender,
                        "content": stripped_event.content,
                    }
                )

            invite_event_dict["unsigned"] = {
                "invite_room_state": serialized_stripped_state_events
            }

        invite_event = make_event_from_dict(
            invite_event_dict,
            room_version=RoomVersions.V10,
        )
        invite_event.internal_metadata.outlier = True
        invite_event.internal_metadata.out_of_band_membership = True

        self.get_success(
            self.store.maybe_store_room_on_outlier_membership(
                room_id=invite_room_id, room_version=invite_event.room_version
            )
        )
        context = EventContext.for_outlier(self.hs.get_storage_controllers())
        persisted_event, _, _ = self.get_success(
            self.persist_controller.persist_event(invite_event, context)
        )

        self._remote_invite_count += 1

        return invite_room_id, persisted_event

    def _retract_remote_invite_for_user(
        self,
        user_id: str,
@@ -3052,6 +2985,141 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase):
            exact=True,
        )

    def test_lazy_loading_room_members_last_seen_ts(self) -> None:
        """Test that the `last_seen_ts` column in
        `sliding_sync_connection_lazy_members` is correctly kept up to date.

        We expect it to be updated at most once every
        `LAZY_MEMBERS_UPDATE_INTERVAL`, rather than on every sync.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
        self.helper.join(room_id, user1_id, tok=user1_tok)

        # Send a message so that user1 comes down sync.
        self.helper.send(room_id, "msg", tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Member, StateValues.LAZY],
                    ],
                    "timeline_limit": 1,
                }
            }
        }
        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Check that user1 is returned
        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id)
        )
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id]["required_state"],
            {
                state_map[(EventTypes.Member, user1_id)],
            },
            exact=True,
        )

        # Check that we have an entry in `sliding_sync_connection_lazy_members`
        connection_pos1 = self.get_success(
            SlidingSyncStreamToken.from_string(self.store, from_token)
        ).connection_position
        lazy_member_entries = self.get_success(
            self.store.get_sliding_sync_connection_lazy_members(
                connection_pos1, room_id, {user1_id}
            )
        )
        self.assertIn(user1_id, lazy_member_entries)

        prev_timestamp = lazy_member_entries[user1_id]

        # If user1 sends a message then their membership is considered for
        # lazy loading. We have previously sent it down, so we don't send the
        # state again, but it is still eligible for a timestamp update. Since
        # we last updated the timestamp within the last
        # `LAZY_MEMBERS_UPDATE_INTERVAL`, we do not update it.
        self.helper.send(room_id, "msg2", tok=user1_tok)

        response_body, from_token = self.do_sync(
            sync_body, since=from_token, tok=user1_tok
        )

        # We expect the required_state map to be empty as nothing has changed.
        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id)
        )
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id].get("required_state", []),
            {},
            exact=True,
        )

        connection_pos2 = self.get_success(
            SlidingSyncStreamToken.from_string(self.store, from_token)
        ).connection_position

        lazy_member_entries = self.get_success(
            self.store.get_sliding_sync_connection_lazy_members(
                connection_pos2, room_id, {user1_id}
            )
        )

        # The timestamp should be unchanged.
        self.assertEqual(lazy_member_entries[user1_id], prev_timestamp)

        # Now advance the time by `LAZY_MEMBERS_UPDATE_INTERVAL` so that we
        # would update the timestamp.
        self.reactor.advance(LAZY_MEMBERS_UPDATE_INTERVAL.as_secs())

        # Send a message from user2
        self.helper.send(room_id, "msg3", tok=user2_tok)

        response_body, from_token = self.do_sync(
            sync_body, since=from_token, tok=user1_tok
        )

        connection_pos3 = self.get_success(
            SlidingSyncStreamToken.from_string(self.store, from_token)
        ).connection_position

        lazy_member_entries = self.get_success(
            self.store.get_sliding_sync_connection_lazy_members(
                connection_pos3, room_id, {user1_id}
            )
        )

        # The timestamp for user1 should be unchanged, as their membership
        # was not sent down.
        self.assertEqual(lazy_member_entries[user1_id], prev_timestamp)

        # Now if user1 sends a message, the timestamp should be updated, as
        # it's been over `LAZY_MEMBERS_UPDATE_INTERVAL` since we last updated
        # it (even though we don't send the state down again).
        self.helper.send(room_id, "msg4", tok=user1_tok)

        response_body, from_token = self.do_sync(
            sync_body, since=from_token, tok=user1_tok
        )
        connection_pos4 = self.get_success(
            SlidingSyncStreamToken.from_string(self.store, from_token)
        ).connection_position

        lazy_member_entries = self.get_success(
            self.store.get_sliding_sync_connection_lazy_members(
                connection_pos4, room_id, {user1_id}
            )
        )
        # The timestamp for user1 should be updated.
        self.assertGreater(lazy_member_entries[user1_id], prev_timestamp)
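
The throttling this test verifies can be sketched as a simple guard around the timestamp write. The helper below is a hypothetical illustration of that guard, not the actual store method:

```python
import time


# Minimal sketch (assumed names) of throttling `last_seen_ts` writes: only
# rewrite the timestamp for a membership entry when it was last touched
# more than `update_interval` seconds ago, instead of on every sync.
def maybe_bump_last_seen(
    last_seen_ts: dict[str, float],
    user_id: str,
    update_interval: float,
    now: float | None = None,
) -> bool:
    """Returns True if we updated the stored timestamp."""
    if now is None:
        now = time.time()
    previous = last_seen_ts.get(user_id)
    if previous is not None and now - previous < update_interval:
        # Touched recently enough; skip the write to avoid churning the DB.
        return False
    last_seen_ts[user_id] = now
    return True
```

Skipping the write when the entry is fresh is what keeps the table cheap to maintain for busy connections, at the cost of the timestamp being up to one interval stale.
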


class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
    """