Remove debug logging
@@ -196,12 +196,8 @@ class FederationServer(FederationBase):
                 origin, room_id, versions, limit
             )

-            logger.info("on_backfill_request pdus(%d)=%s", len(pdus), pdus)
-
             res = self._transaction_dict_from_pdus(pdus)

-            logger.info("on_backfill_request res=%s", res)
-
         return 200, res

     async def on_incoming_transaction(
@@ -148,14 +148,14 @@ class FederationHandler:
         insertion_events_to_be_backfilled = (
             await self.store.get_insertion_event_backwards_extremities_in_room(room_id)
         )
-        logger.info(
+        logger.debug(
             "_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s",
             oldest_events_with_depth,
             insertion_events_to_be_backfilled,
         )

         if not oldest_events_with_depth and not insertion_events_to_be_backfilled:
-            logger.info("Not backfilling as no extremeties found.")
+            logger.debug("Not backfilling as no extremeties found.")
             return False

         # We only want to paginate if we can actually see the events we'll get,
@@ -203,7 +203,7 @@ class FederationHandler:
             redact=False,
             check_history_visibility_only=True,
         )
-        logger.info(
+        logger.debug(
             "_maybe_backfill_inner: filtered_extremities %s", filtered_extremities
         )

@@ -230,7 +230,7 @@ class FederationHandler:
         # much larger factor will result in triggering a backfill request much
         # earlier than necessary.
         if current_depth - 2 * limit > max_depth:
-            logger.info(
+            logger.debug(
                 "Not backfilling as we don't need to. %d < %d - 2 * %d",
                 max_depth,
                 current_depth,
@@ -249,7 +249,7 @@ class FederationHandler:
             t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
         ]

-        logger.info(
+        logger.debug(
             "room_id: %s, backfill: current_depth: %s, limit: %s, max_depth: %s, extrems (%d): %s filtered_sorted_extremeties_tuple: %s",
             room_id,
             current_depth,
@@ -271,8 +271,6 @@ class FederationHandler:
         # request URI to be too long.
         extremities = dict(sorted_extremeties_tuple[:5])

-        logger.info("backfill extremities=%s", extremities)
-
         # Now we need to decide which hosts to hit first.

         # First we try hosts that are already in the room
@@ -1061,7 +1059,6 @@ class FederationHandler:
         events = await self.store.get_backfill_events(room_id, pdu_list, limit)

         events = await filter_events_for_server(self.storage, origin, events)
-        logger.info("on_backfill_request resultant events(%d)=%s", len(events), events)

         return events

@@ -416,19 +416,6 @@ class FederationEventHandler:
         events = await self._federation_client.backfill(
             dest, room_id, limit=limit, extremities=extremities
         )
-        logger.info(
-            "from remote server: got backfill response events(%d)=%s",
-            len(events),
-            [
-                {
-                    "event_id": ev.event_id,
-                    "type": ev["type"],
-                    "depth": ev["depth"],
-                    "content": ev["content"].get("body", None),
-                }
-                for ev in events
-            ],
-        )

         if not events:
             return
@@ -444,38 +431,10 @@ class FederationEventHandler:

         await self._process_pulled_events(
             dest,
-            # The /backfill response should start from `?v` and include the
-            # events that preceded it (so the list will be newest -> oldest). We
-            # reverse that order so the messages are oldest -> newest and we can
-            # persist the backfilled events without constantly have to go fetch
-            # missing prev_events which are probably included in the same
-            # backfill chunk.
-            # TODO: If we try to reverse this list, the stream_ordering will be backwards
-            # reversed(events),
             events,
             backfilled=True,
         )

-        for ev in events:
-            event_after_persisted = await self._store.get_event(
-                ev.event_id, allow_none=True
-            )
-
-            if event_after_persisted:
-                logger.info(
-                    "from remote server: processed backfilled event_id=%s type=%s depth=%s stream_ordering=%s content=%s",
-                    ev.event_id,
-                    event_after_persisted["type"],
-                    event_after_persisted["depth"],
-                    event_after_persisted.internal_metadata.stream_ordering,
-                    event_after_persisted["content"].get("body", None),
-                )
-            else:
-                logger.info(
-                    "from remote server: processed backfilled event_id=%s failed to lookup",
-                    ev.event_id,
-                )
-
     async def _get_missing_events_for_pdu(
         self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
     ) -> None:
@@ -1277,7 +1236,7 @@ class FederationEventHandler:
         await self.persist_events_and_notify(
             room_id,
             tuple(events_to_persist),
-            # TODO: Maybe this to get fetched missing events during backfill as backfilled also :/
+            # Events we fetch during backfill should be marked as backfilled as well
             backfilled=True,
         )

@@ -13,10 +13,6 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)


-def generate_fake_event_id() -> str:
-    return "$fake_" + random_string(43)
-
-
 class RoomBatchHandler:
     def __init__(self, hs: "HomeServer"):
         self.hs = hs
@@ -184,7 +180,7 @@ class RoomBatchHandler:

         # Make the state events float off on their own so we don't have a
         # bunch of `@mxid joined the room` noise between each batch
-        prev_event_ids_for_state_chain = []  # generate_fake_event_id()
+        prev_event_ids_for_state_chain: List[str] = []

         for state_event in state_events_at_start:
             assert_params_in_dict(
@@ -227,15 +223,6 @@ class RoomBatchHandler:
                     # reference and also update in the event when we append later.
                     auth_event_ids=auth_event_ids.copy(),
                 )
-
-                mem_event = await self.store.get_event(event_id)
-                logger.info(
-                    "room_batch mem_event_id=%s depth=%s stream_ordering=%s prev_event_ids=%s",
-                    mem_event.event_id,
-                    mem_event.depth,
-                    mem_event.internal_metadata.stream_ordering,
-                    mem_event.prev_event_ids(),
-                )
             else:
                 # TODO: Add some complement tests that adds state that is not member joins
                 # and will use this code path. Maybe we only want to support join state events
@@ -362,20 +349,13 @@ class RoomBatchHandler:
         # Events are sorted by (topological_ordering, stream_ordering)
         # where topological_ordering is just depth.
         for (event, context) in reversed(events_to_persist):
-            result_event = await self.event_creation_handler.handle_new_client_event(
+            await self.event_creation_handler.handle_new_client_event(
                 await self.create_requester_for_user_id_from_app_service(
                     event["sender"], app_service_requester.app_service
                 ),
                 event=event,
                 context=context,
             )
-            logger.info(
-                "result_event depth=%s stream_ordering=%s event_id=%s body=%s",
-                result_event.depth,
-                result_event.internal_metadata.stream_ordering,
-                result_event.event_id,
-                result_event.content.get("body", None),
-            )

         return event_ids

@@ -561,7 +561,6 @@ class RoomMessageListRestServlet(RestServlet):
         pagination_config = await PaginationConfig.from_request(
             self.store, request, default_limit=10
         )
-        logger.info("/messages rest start pagination_config=%s", pagination_config)
         # Twisted will have processed the args by now.
         assert request.args is not None
         as_client_event = b"raw" not in request.args
@@ -1057,11 +1057,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         # we process the newest-in-time messages first going backwards in time.
         queue = PriorityQueue()

-        logger.info(
-            "_get_backfill_events: seeding backfill with event_list(%d)=%s",
-            len(event_list),
-            event_list,
-        )
         for event_id in event_list:
             event_lookup_result = self.db_pool.simple_select_one_txn(
                 txn,
@@ -1075,14 +1070,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
                 allow_none=True,
             )

-            logger.info(
-                "_get_backfill_events: seeding backfill with event_id=%s type=%s depth=%s stream_ordering=%s",
-                event_id,
-                event_lookup_result["type"],
-                event_lookup_result["depth"],
-                event_lookup_result["stream_ordering"],
-            )
-
             if event_lookup_result["depth"]:
                 queue.put(
                     (
@@ -1102,39 +1089,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             if event_id in event_results:
                 continue

-            event_lookup_result = self.db_pool.simple_select_one_txn(
-                txn,
-                table="events",
-                keyvalues={"event_id": event_id},
-                retcols=["type", "depth", "stream_ordering", "content"],
-                allow_none=True,
-            )
-
-            event_json_lookup_result = self.db_pool.simple_select_one_onecol_txn(
-                txn,
-                table="event_json",
-                keyvalues={"event_id": event_id},
-                retcol="json",
-                allow_none=True,
-            )
-
-            ev = db_to_json(event_json_lookup_result)
-
-            if event_lookup_result:
-                logger.info(
-                    "_get_backfill_events: event_results add event_id=%s type=%s depth=%s stream_ordering=%s content=%s",
-                    event_id,
-                    ev["type"],
-                    ev["depth"],
-                    event_lookup_result["stream_ordering"],
-                    ev["content"].get("body", ev["content"]),
-                )
-            else:
-                logger.info(
-                    "_get_backfill_events: event_results event_id=%s failed to lookup",
-                    event_id,
-                )
-
             event_results[event_id] = event_id

             # Try and find any potential historical batches of message history.
@@ -1147,7 +1101,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
                 connected_insertion_event_query, (event_id, limit - len(event_results))
             )
             connected_insertion_event_id_results = txn.fetchall()
-            logger.info(
+            logger.debug(
                 "_get_backfill_events: connected_insertion_event_query %s",
                 connected_insertion_event_id_results,
             )
@@ -1177,7 +1131,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
                 (event_id, limit - len(event_results)),
             )
             batch_start_event_id_results = txn.fetchall()
-            logger.info(
+            logger.debug(
                 "_get_backfill_events: batch_start_event_id_results %s",
                 batch_start_event_id_results,
             )
@@ -1190,7 +1144,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
                 (event_id, False, limit - len(event_results)),
             )
             prev_event_id_results = txn.fetchall()
-            logger.info(
+            logger.debug(
                 "_get_backfill_events: prev_event_ids %s", prev_event_id_results
             )

@@ -169,14 +169,6 @@ class PersistEventsStore:

         async with stream_ordering_manager as stream_orderings:
             for (event, _), stream in zip(events_and_contexts, stream_orderings):
-                logger.info(
-                    "_persist_events_and_state_updates backfilled=%s event_id=%s depth=%s stream_ordering=%s content=%s",
-                    backfilled,
-                    event.event_id,
-                    event.depth,
-                    stream,
-                    event["content"].get("body", None),
-                )
                 event.internal_metadata.stream_ordering = stream

         await self.db_pool.runInteraction(
@@ -2139,38 +2131,6 @@ class PersistEventsStore:

         Forward extremities are handled when we first start persisting the events.
         """
-        logger.info(
-            "_update_backward_extremeties events=%s",
-            [
-                {
-                    "event_id": ev.event_id,
-                    "prev_events": ev.prev_event_ids(),
-                    "outlier": ev.internal_metadata.is_outlier(),
-                }
-                for ev in events
-            ],
-        )
-
-        for ev in events:
-            for e_id in ev.prev_event_ids():
-                query = """
-                    SELECT 1 FROM event_edges
-                    INNER JOIN events AS e USING (event_id, room_id)
-                    WHERE event_id = ? AND room_id = ? AND e.outlier = TRUE
-                """
-
-                txn.execute(
-                    query,
-                    (e_id, ev.room_id),
-                )
-                result = txn.fetchall()
-                logger.info(
-                    "_update_backward_extremeties test ev=%s prev_event_id=%s result=%s",
-                    ev.event_id,
-                    e_id,
-                    result,
-                )
-
         # From the events passed in, add all of the prev events as backwards extremities.
         # Ignore any events that are already backwards extrems or outliers.
         query = (
@@ -2200,24 +2160,6 @@ class PersistEventsStore:
             ],
         )

-        for ev in events:
-            for e_id in ev.prev_event_ids():
-                query = """
-                    SELECT * FROM event_backward_extremities
-                    WHERE event_id = ? AND room_id = ?
-                """
-
-                txn.execute(
-                    query,
-                    (e_id, ev.room_id),
-                )
-                result = txn.fetchall()
-                logger.info(
-                    "_update_backward_extremeties ended up as prev_event_id=%s result=%s",
-                    e_id,
-                    result,
-                )
-
         # Delete all these events that we've already fetched and now know that their
         # prev events are the new backwards extremeties.
         query = (