Compare commits
5 Commits
release-v0 ... matthew/st
| Author | SHA1 | Date |
|---|---|---|
|  | 03bdbb8c6b |  |
|  | 18752982db |  |
|  | 6dacdd5fbe |  |
|  | c82785f5cb |  |
|  | a34061d332 |  |
```diff
@@ -23,9 +23,6 @@ matrix:
   - python: 3.6
     env: TOX_ENV=py36

   - python: 3.6
     env: TOX_ENV=check_isort

   - python: 3.6
     env: TOX_ENV=check-newsfragment
```
CHANGES.rst (48 changed lines)

```diff
@@ -1,51 +1,3 @@
-Synapse 0.33.1 (2018-08-02)
-===========================
-
-SECURITY FIXES
---------------
-
-- Fix a potential issue where servers could request events for rooms they have not joined. (`#3641 <https://github.com/matrix-org/synapse/issues/3641>`_)
-- Fix a potential issue where users could see events in private rooms before they joined. (`#3642 <https://github.com/matrix-org/synapse/issues/3642>`_)
-
-
-Synapse 0.33.0 (2018-07-19)
-===========================
-
-Bugfixes
---------
-
-- Disable a noisy warning about logcontexts. (`#3561 <https://github.com/matrix-org/synapse/issues/3561>`_)
-
-
-Synapse 0.33.0rc1 (2018-07-18)
-==============================
-
-Features
---------
-
-- Enforce the specified API for report_event. (`#3316 <https://github.com/matrix-org/synapse/issues/3316>`_)
-- Include CPU time from database threads in request/block metrics. (`#3496 <https://github.com/matrix-org/synapse/issues/3496>`_, `#3501 <https://github.com/matrix-org/synapse/issues/3501>`_)
-- Add CPU metrics for _fetch_event_list. (`#3497 <https://github.com/matrix-org/synapse/issues/3497>`_)
-- Optimisation to make handling incoming federation requests more efficient. (`#3541 <https://github.com/matrix-org/synapse/issues/3541>`_)
-
-
-Bugfixes
---------
-
-- Fix a significant performance regression in /sync. (`#3505 <https://github.com/matrix-org/synapse/issues/3505>`_, `#3521 <https://github.com/matrix-org/synapse/issues/3521>`_, `#3530 <https://github.com/matrix-org/synapse/issues/3530>`_, `#3544 <https://github.com/matrix-org/synapse/issues/3544>`_)
-- Use more portable syntax in our use of the attrs package, widening the supported versions. (`#3498 <https://github.com/matrix-org/synapse/issues/3498>`_)
-- Fix queued federation requests being processed in the wrong order. (`#3533 <https://github.com/matrix-org/synapse/issues/3533>`_)
-- Ensure that erasure requests are correctly honoured for publicly accessible rooms when accessed over federation. (`#3546 <https://github.com/matrix-org/synapse/issues/3546>`_)
-
-
-Misc
-----
-
-- Refactoring to improve testability. (`#3351 <https://github.com/matrix-org/synapse/issues/3351>`_, `#3499 <https://github.com/matrix-org/synapse/issues/3499>`_)
-- Use ``isort`` to sort imports. (`#3463 <https://github.com/matrix-org/synapse/issues/3463>`_, `#3464 <https://github.com/matrix-org/synapse/issues/3464>`_, `#3540 <https://github.com/matrix-org/synapse/issues/3540>`_)
-- Use parse and asserts from http.servlet. (`#3534 <https://github.com/matrix-org/synapse/issues/3534>`_, `#3535 <https://github.com/matrix-org/synapse/issues/3535>`_).
-
-
 Synapse 0.32.2 (2018-07-07)
 ===========================

```
changelog.d/3316.feature (new file, 1 line):
    Enforce the specified API for report_event

changelog.d/3463.misc (new, empty file)
changelog.d/3464.misc (new, empty file)

changelog.d/3496.feature (new file, 1 line):
    Include CPU time from database threads in request/block metrics.

changelog.d/3497.feature (new file, 1 line):
    Add CPU metrics for _fetch_event_list

changelog.d/3498.misc (new, empty file)
changelog.d/3499.misc (new, empty file)
changelog.d/3501.misc (new, empty file)

changelog.d/3505.feature (new file, 1 line):
    Reduce database consumption when processing large numbers of receipts

changelog.d/3521.feature (new file, 1 line):
    Cache optimisation for /sync requests

changelog.d/3533.bugfix (new file, 1 line):
    Fix queued federation requests being processed in the wrong order

changelog.d/3534.misc (new file, 1 line):
    refactor: use parse_{string,integer} and assert's from http.servlet for deduplication

changelog.d/3535.misc (new, empty file)
```diff
@@ -17,4 +17,4 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.33.1"
+__version__ = "0.32.2"
```
```diff
@@ -68,6 +68,7 @@ class EventTypes(object):

     RoomHistoryVisibility = "m.room.history_visibility"
     CanonicalAlias = "m.room.canonical_alias"
     Encryption = "m.room.encryption"
     RoomAvatar = "m.room.avatar"
     GuestAccess = "m.room.guest_access"
```
synapse/config/stats.py (new file, 46 lines)

```python
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

import sys


class StatsConfig(Config):
    """Stats Configuration
    Configuration for the behaviour of synapse's stats engine
    """

    def read_config(self, config):
        self.stats_enable = False
        self.stats_bucket_size = 86400
        self.stats_retention = sys.maxint
        stats_config = config.get("stats", None)
        if stats_config:
            self.stats_enable = stats_config.get("enable", self.stats_enable)
            self.stats_bucket_size = stats_config.get(
                "bucket_size", self.stats_bucket_size
            )
            self.stats_retention = stats_config.get("retention", self.stats_retention)

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Stats configuration
        #
        # stats:
        #   enable: false
        #   bucket_size: 86400 # 1 day
        #   retention: 31536000 # 1 year
        """
```
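Stats collection stays off until a `stats` section appears in the homeserver config. A minimal sketch (not part of the PR) of how `read_config` consumes a parsed config dict — the literal below stands in for the YAML loader's output, and constructing `StatsConfig()` bare assumes the `Config` base class needs no constructor arguments here:

```python
# Hypothetical usage sketch for StatsConfig.read_config.
config = {
    "stats": {
        "enable": True,
        "bucket_size": 3600,  # hourly buckets instead of the daily default
    }
}

stats = StatsConfig()          # assumes Config() takes no required args
stats.read_config(config)

assert stats.stats_enable is True
assert stats.stats_bucket_size == 3600
# stats.stats_retention keeps its default, sys.maxint -- note that
# sys.maxint exists only on Python 2.
```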
```diff
@@ -425,7 +425,6 @@ class FederationServer(FederationBase):
         ret = yield self.handler.on_query_auth(
             origin,
             event_id,
-            room_id,
             signed_auth,
             content.get("rejects", []),
             content.get("missing", []),
```
```diff
@@ -19,12 +19,10 @@ import random
 from twisted.internet import defer

 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
 from synapse.events import EventBase
 from synapse.events.utils import serialize_event
 from synapse.types import UserID
 from synapse.util.logutils import log_function
 from synapse.visibility import filter_events_for_client

 from ._base import BaseHandler
@@ -131,13 +129,11 @@ class EventStreamHandler(BaseHandler):
 class EventHandler(BaseHandler):

     @defer.inlineCallbacks
-    def get_event(self, user, room_id, event_id):
+    def get_event(self, user, event_id):
         """Retrieve a single specified event.

         Args:
             user (synapse.types.UserID): The user requesting the event
-            room_id (str|None): The expected room id. We'll return None if the
-                event's room does not match.
             event_id (str): The event ID to obtain.
         Returns:
             dict: An event, or None if there is no event matching this ID.
@@ -146,26 +142,13 @@ class EventHandler(BaseHandler):
             AuthError if the user does not have the rights to inspect this
             event.
         """
-        event = yield self.store.get_event(event_id, check_room_id=room_id)
+        event = yield self.store.get_event(event_id)

         if not event:
             defer.returnValue(None)
             return

-        users = yield self.store.get_users_in_room(event.room_id)
-        is_peeking = user.to_string() not in users
-
-        filtered = yield filter_events_for_client(
-            self.store,
-            user.to_string(),
-            [event],
-            is_peeking=is_peeking
-        )
-
-        if not filtered:
-            raise AuthError(
-                403,
-                "You don't have permission to access that event."
-            )
+        if hasattr(event, "room_id"):
+            yield self.auth.check_joined_room(event.room_id, user.to_string())

         defer.returnValue(event)
```
```diff
@@ -43,6 +43,7 @@ from synapse.crypto.event_signing import (
     add_hashes_and_signatures,
     compute_event_signature,
 )
+from synapse.events.utils import prune_event
 from synapse.events.validator import EventValidator
 from synapse.state import resolve_events_with_factory
 from synapse.types import UserID, get_domain_from_id
@@ -51,8 +52,8 @@ from synapse.util.async import Linearizer
 from synapse.util.distributor import user_joined_room
 from synapse.util.frozenutils import unfreeze
 from synapse.util.logutils import log_function
+from synapse.util.metrics import measure_func
 from synapse.util.retryutils import NotRetryingDestination
-from synapse.visibility import filter_events_for_server

 from ._base import BaseHandler
@@ -500,6 +501,137 @@ class FederationHandler(BaseHandler):
             user = UserID.from_string(event.state_key)
             yield user_joined_room(self.distributor, user, event.room_id)

+    @measure_func("_filter_events_for_server")
+    @defer.inlineCallbacks
+    def _filter_events_for_server(self, server_name, room_id, events):
+        """Filter the given events for the given server, redacting those the
+        server can't see.
+
+        Assumes the server is currently in the room.
+
+        Returns
+            list[FrozenEvent]
+        """
+        # First let's check to see if all the events have a history visibility
+        # of "shared" or "world_readable". If that's the case then we don't
+        # need to check membership (as we know the server is in the room).
+        event_to_state_ids = yield self.store.get_state_ids_for_events(
+            frozenset(e.event_id for e in events),
+            types=(
+                (EventTypes.RoomHistoryVisibility, ""),
+            )
+        )
+
+        visibility_ids = set()
+        for sids in event_to_state_ids.itervalues():
+            hist = sids.get((EventTypes.RoomHistoryVisibility, ""))
+            if hist:
+                visibility_ids.add(hist)
+
+        # If we failed to find any history visibility events then the default
+        # is "shared" visibility.
+        if not visibility_ids:
+            defer.returnValue(events)
+
+        event_map = yield self.store.get_events(visibility_ids)
+        all_open = all(
+            e.content.get("history_visibility") in (None, "shared", "world_readable")
+            for e in event_map.itervalues()
+        )
+
+        if all_open:
+            defer.returnValue(events)
+
+        # Ok, so we're dealing with events that have non-trivial visibility
+        # rules, so we need to also get the memberships of the room.
+
+        event_to_state_ids = yield self.store.get_state_ids_for_events(
+            frozenset(e.event_id for e in events),
+            types=(
+                (EventTypes.RoomHistoryVisibility, ""),
+                (EventTypes.Member, None),
+            )
+        )
+
+        # We only want to pull out member events that correspond to the
+        # server's domain.
+
+        def check_match(id):
+            try:
+                return server_name == get_domain_from_id(id)
+            except Exception:
+                return False
+
+        # Parses mapping `event_id -> (type, state_key) -> state event_id`
+        # to get all state ids that we're interested in.
+        event_map = yield self.store.get_events([
+            e_id
+            for key_to_eid in list(event_to_state_ids.values())
+            for key, e_id in key_to_eid.items()
+            if key[0] != EventTypes.Member or check_match(key[1])
+        ])
+
+        event_to_state = {
+            e_id: {
+                key: event_map[inner_e_id]
+                for key, inner_e_id in key_to_eid.iteritems()
+                if inner_e_id in event_map
+            }
+            for e_id, key_to_eid in event_to_state_ids.iteritems()
+        }
+
+        erased_senders = yield self.store.are_users_erased(
+            e.sender for e in events,
+        )
+
+        def redact_disallowed(event, state):
+            # if the sender has been gdpr17ed, always return a redacted
+            # copy of the event.
+            if erased_senders[event.sender]:
+                logger.info(
+                    "Sender of %s has been erased, redacting",
+                    event.event_id,
+                )
+                return prune_event(event)
+
+            if not state:
+                return event
+
+            history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
+            if history:
+                visibility = history.content.get("history_visibility", "shared")
+                if visibility in ["invited", "joined"]:
+                    # We now loop through all state events looking for
+                    # membership states for the requesting server to determine
+                    # if the server is either in the room or has been invited
+                    # into the room.
+                    for ev in state.itervalues():
+                        if ev.type != EventTypes.Member:
+                            continue
+                        try:
+                            domain = get_domain_from_id(ev.state_key)
+                        except Exception:
+                            continue
+
+                        if domain != server_name:
+                            continue
+
+                        memtype = ev.membership
+                        if memtype == Membership.JOIN:
+                            return event
+                        elif memtype == Membership.INVITE:
+                            if visibility == "invited":
+                                return event
+                    else:
+                        return prune_event(event)
+
+            return event
+
+        defer.returnValue([
+            redact_disallowed(e, event_to_state[e.event_id])
+            for e in events
+        ])
+
     @log_function
     @defer.inlineCallbacks
     def backfill(self, dest, room_id, limit, extremities):
@@ -1349,11 +1481,6 @@ class FederationHandler(BaseHandler):
     def get_state_for_pdu(self, room_id, event_id):
         """Returns the state at the event. i.e. not including said event.
         """
-
-        event = yield self.store.get_event(
-            event_id, allow_none=False, check_room_id=room_id,
-        )
-
         state_groups = yield self.store.get_state_groups(
             room_id, [event_id]
         )
@@ -1364,7 +1491,8 @@ class FederationHandler(BaseHandler):
             (e.type, e.state_key): e for e in state
         }

-        if event.is_state():
+        event = yield self.store.get_event(event_id)
+        if event and event.is_state():
             # Get previous state
             if "replaces_state" in event.unsigned:
                 prev_id = event.unsigned["replaces_state"]
@@ -1395,10 +1523,6 @@ class FederationHandler(BaseHandler):
     def get_state_ids_for_pdu(self, room_id, event_id):
         """Returns the state at the event. i.e. not including said event.
         """
-        event = yield self.store.get_event(
-            event_id, allow_none=False, check_room_id=room_id,
-        )
-
         state_groups = yield self.store.get_state_groups_ids(
             room_id, [event_id]
         )
@@ -1407,7 +1531,8 @@ class FederationHandler(BaseHandler):
         _, state = state_groups.items().pop()
         results = state

-        if event.is_state():
+        event = yield self.store.get_event(event_id)
+        if event and event.is_state():
             # Get previous state
             if "replaces_state" in event.unsigned:
                 prev_id = event.unsigned["replaces_state"]
@@ -1433,7 +1558,7 @@ class FederationHandler(BaseHandler):
             limit
         )

-        events = yield filter_events_for_server(self.store, origin, events)
+        events = yield self._filter_events_for_server(origin, room_id, events)

         defer.returnValue(events)
@@ -1480,8 +1605,8 @@ class FederationHandler(BaseHandler):
         if not in_room:
             raise AuthError(403, "Host not in room.")

-        events = yield filter_events_for_server(
-            self.store, origin, [event],
+        events = yield self._filter_events_for_server(
+            origin, event.room_id, [event]
         )
         event = events[0]
         defer.returnValue(event)
@@ -1713,19 +1838,8 @@ class FederationHandler(BaseHandler):
         defer.returnValue(context)

     @defer.inlineCallbacks
-    def on_query_auth(self, origin, event_id, room_id, remote_auth_chain, rejects,
+    def on_query_auth(self, origin, event_id, remote_auth_chain, rejects,
                       missing):
-        in_room = yield self.auth.check_host_in_room(
-            room_id,
-            origin
-        )
-        if not in_room:
-            raise AuthError(403, "Host not in room.")
-
-        event = yield self.store.get_event(
-            event_id, allow_none=False, check_room_id=room_id
-        )
-
         # Just go through and process each event in `remote_auth_chain`. We
         # don't want to fall into the trap of `missing` being wrong.
         for e in remote_auth_chain:
@@ -1735,6 +1849,7 @@ class FederationHandler(BaseHandler):
             pass

         # Now get the current auth_chain for the event.
+        event = yield self.store.get_event(event_id)
         local_auth_chain = yield self.store.get_auth_chain(
             [auth_id for auth_id, _ in event.auth_events],
             include_given=True
@@ -1781,8 +1896,8 @@ class FederationHandler(BaseHandler):
             min_depth=min_depth,
         )

-        missing_events = yield filter_events_for_server(
-            self.store, origin, missing_events,
+        missing_events = yield self._filter_events_for_server(
+            origin, room_id, missing_events,
         )

         defer.returnValue(missing_events)
```
synapse/handlers/state_deltas.py (new file, 73 lines)

```python
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from .base import BaseHandler

logger = logging.getLogger(__name__)


class StateDeltasHandler(BaseHandler):

    def __init__(self, hs):
        super(StateDeltasHandler, self).__init__(hs)
        self.store = hs.get_datastore()

    @defer.inlineCallbacks
    def _get_key_change(self, prev_event_id, event_id, key_name, public_value):
        """Given two events check if the `key_name` field in content changed
        from not matching `public_value` to doing so.

        For example, check if `history_visibility` (`key_name`) changed from
        `shared` to `world_readable` (`public_value`).

        Returns:
            None if the field in the events either both match `public_value`
            or if neither do, i.e. there has been no change.
            True if it didn't match `public_value` but now does
            False if it did match `public_value` but now doesn't
        """
        prev_event = None
        event = None
        if prev_event_id:
            prev_event = yield self.store.get_event(prev_event_id, allow_none=True)

        if event_id:
            event = yield self.store.get_event(event_id, allow_none=True)

        if not event and not prev_event:
            logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
            defer.returnValue(None)

        prev_value = None
        value = None

        if prev_event:
            prev_value = prev_event.content.get(key_name)

        if event:
            value = event.content.get(key_name)

        logger.debug("prev_value: %r -> value: %r", prev_value, value)

        if value == public_value and prev_value != public_value:
            defer.returnValue(True)
        elif value != public_value and prev_value == public_value:
            defer.returnValue(False)
        else:
            defer.returnValue(None)
```
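The tri-state return value of `_get_key_change` is easy to misread, so here is a self-contained illustration of the contract (plain dicts stand in for stored events; `key_change` is a hypothetical helper, not from the PR):

```python
# Tri-state contract of _get_key_change, distilled to pure Python.
def key_change(prev_content, content, key_name, public_value):
    prev_value = prev_content.get(key_name) if prev_content else None
    value = content.get(key_name) if content else None
    if value == public_value and prev_value != public_value:
        return True    # became public
    if value != public_value and prev_value == public_value:
        return False   # stopped being public
    return None        # no change either way

assert key_change({"history_visibility": "shared"},
                  {"history_visibility": "world_readable"},
                  "history_visibility", "world_readable") is True
assert key_change({"join_rule": "public"}, {"join_rule": "invite"},
                  "join_rule", "public") is False
assert key_change({}, {}, "history_visibility", "world_readable") is None
```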
synapse/handlers/stats.py (new file, 427 lines)

```python
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.util.metrics import Measure

from .state_deltas import StateDeltasHandler

logger = logging.getLogger(__name__)


class StatsHandler(StateDeltasHandler):
    """Handles keeping the *_stats tables updated with a simple time-series of
    information about the users, rooms and media on the server, such that admins
    have some idea of who is consuming their resources.

    Heavily derived from UserDirectoryHandler
    """

    INITIAL_ROOM_SLEEP_MS = 50
    INITIAL_USER_SLEEP_MS = 10

    def __init__(self, hs):
        super(StatsHandler, self).__init__(hs)

        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self.server_name = hs.hostname
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id
        self.stats_enable = hs.config.stats_enable
        self.stats_bucket_size = hs.config.stats_bucket_size

        # The current position in the current_state_delta stream
        self.pos = None

        # Guard to ensure we only process deltas one at a time
        self._is_processing = False

        if self.stats_enable:
            self.notifier.add_replication_callback(self.notify_new_event)

            # We kick this off so that we don't have to wait for a change before
            # we start populating stats
            self.clock.call_later(0, self.notify_new_event)

    @defer.inlineCallbacks
    def notify_new_event(self):
        """Called when there may be more deltas to process
        """
        if not self.stats_enable:
            return

        if self._is_processing:
            return

        self._is_processing = True
        try:
            yield self._unsafe_process()
        finally:
            self._is_processing = False

    @defer.inlineCallbacks
    def _unsafe_process(self):
        # If self.pos is None then it means we haven't fetched it from DB
        if self.pos is None:
            self.pos = yield self.store.get_stats_stream_pos()

        # If still None then we need to do the initial fill of stats
        if self.pos is None:
            yield self._do_initial_spam()
            self.pos = yield self.store.get_stats_stream_pos()

        # Loop round handling deltas until we're up to date
        while True:
            with Measure(self.clock, "stats_delta"):
                deltas = yield self.store.get_current_state_deltas(self.pos)
                if not deltas:
                    return

                logger.info("Handling %d state deltas", len(deltas))
                yield self._handle_deltas(deltas)

                self.pos = deltas[-1]["stream_id"]
                yield self.store.update_stats_stream_pos(self.pos)

    @defer.inlineCallbacks
    def _do_initial_spam(self):
        """Populates the stats tables from the current state of the DB, used
        when synapse first starts with stats support
        """
        new_pos = yield self.store.get_max_stream_id_in_current_state_deltas()

        # We process by going through each existing room at a time.
        room_ids = yield self.store.get_all_rooms()

        logger.info("Doing initial update of room_stats. %d rooms", len(room_ids))
        num_processed_rooms = 0

        for room_id in room_ids:
            logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
            yield self._handle_initial_room(room_id)
            num_processed_rooms += 1
            yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)

        logger.info("Processed all rooms.")

        num_processed_users = 0
        user_ids = yield self.store.get_all_local_users()
        logger.info("Doing initial update user_stats. %d users", len(user_ids))
        for user_id in user_ids:
            logger.info("Handling user %d/%d", num_processed_users + 1, len(user_ids))
            yield self._handle_local_user(user_id)
            num_processed_users += 1
            yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.)

        logger.info("Processed all users")

        yield self.store.update_stats_stream_pos(new_pos)

    @defer.inlineCallbacks
    def _handle_initial_room(self, room_id):
        """Called when we initially fill out stats one room at a time
        """
        current_state_ids = yield self.store.get_current_state_ids(room_id)

        join_rules = yield self.store.get_event(
            current_state_ids.get((EventTypes.JoinRules, ""))
        )
        history_visibility = yield self.store.get_event(
            current_state_ids.get((EventTypes.RoomHistoryVisibility, ""))
        )
        encryption = yield self.store.get_event(
            current_state_ids.get((EventTypes.RoomEncryption, ""))
        )
        name = yield self.store.get_event(
            current_state_ids.get((EventTypes.Name, ""))
        )
        topic = yield self.store.get_event(
            current_state_ids.get((EventTypes.Topic, ""))
        )
        avatar = yield self.store.get_event(
            current_state_ids.get((EventTypes.RoomAvatar, ""))
        )
        canonical_alias = yield self.store.get_event(
            current_state_ids.get((EventTypes.CanonicalAlias, ""))
        )

        yield self.store.update_room_state(
            room_id,
            {
                "join_rules": join_rules.content.get("join_rule")
                if join_rules else None,
                "history_visibility": history_visibility.content.get("history_visibility")
                if history_visibility else None,
                "encryption": encryption.content.get("algorithm")
                if encryption else None,
                "name": name.content.get("name")
                if name else None,
                "topic": topic.content.get("topic")
                if topic else None,
                "avatar": avatar.content.get("url")
                if avatar else None,
                "canonical_alias": canonical_alias.content.get("alias")
                if canonical_alias else None,
            }
        )

        now = self.clock.time_msec()

        # quantise time to the nearest bucket
        now = int(now / (self.stats_bucket_size * 1000)) * self.stats_bucket_size * 1000

        current_state_events = len(current_state_ids)
        joined_members = yield self.store.get_user_count_in_room(
            room_id, Membership.JOIN
        )
        invited_members = yield self.store.get_user_count_in_room(
            room_id, Membership.INVITE
        )
        left_members = yield self.store.get_user_count_in_room(
            room_id, Membership.LEAVE
        )
        banned_members = yield self.store.get_user_count_in_room(
            room_id, Membership.BAN
        )
        state_events = yield self.store.get_state_event_counts(room_id)
        (local_events, remote_events) = yield self.store.get_event_counts(
            room_id, self.server_name
        )

        yield self.store.delete_room_stats(room_id, now)

        self.store.update_room_stats(
            room_id,
            now,
            {
                "bucket_size": self.stats_bucket_size,
                "current_state_events": current_state_events,
                "joined_members": joined_members,
                "invited_members": invited_members,
                "left_members": left_members,
                "banned_members": banned_members,
                "state_events": state_events,
                "local_events": local_events,
                "remote_events": remote_events,
            }
        )

    @defer.inlineCallbacks
    def _handle_deltas(self, deltas):
        """Called with the state deltas to process
        """
        # XXX: shouldn't this be the timestamp where the delta was emitted rather
        # than received?
        now = self.clock.time_msec()

        # quantise time to the nearest bucket
        now = int(now / (self.stats_bucket_size * 1000)) * self.stats_bucket_size * 1000

        for delta in deltas:
            typ = delta["type"]
            state_key = delta["state_key"]
            room_id = delta["room_id"]
            event_id = delta["event_id"]
            prev_event_id = delta["prev_event_id"]

            logger.debug("Handling: %r %r, %s", typ, state_key, event_id)

            if event_id is None:
                return

            event = yield self.store.get_event(event_id)
            if event is None:
                return

            if typ == EventTypes.Member:
                # we could use _get_key_change here but it's a bit inefficient
                # given we're not testing for a specific result; might as well
                # just grab the prev_membership and membership strings and
                # compare them.

                prev_event = None
                if prev_event_id is not None:
                    prev_event = yield self.store.get_event(prev_event_id)

                prev_membership = None
                membership = event.content.get("membership")
                if prev_event:
                    prev_membership = prev_event.content.get("membership")

                if prev_membership != membership:
                    if prev_membership == Membership.JOIN:
                        yield self.store.update_stats_delta(
                            now, self.stats_bucket_size,
                            "room", room_id, "joined_members", -1
                        )
                    elif prev_membership == Membership.INVITE:
                        yield self.store.update_stats_delta(
                            now, self.stats_bucket_size,
                            "room", room_id, "invited_members", -1
                        )
                    elif prev_membership == Membership.LEAVE:
                        yield self.store.update_stats_delta(
                            now, self.stats_bucket_size,
                            "room", room_id, "left_members", -1
                        )
                    elif prev_membership == Membership.BAN:
                        yield self.store.update_stats_delta(
                            now, self.stats_bucket_size,
                            "room", room_id, "banned_members", -1
                        )

                    if membership == Membership.JOIN:
                        yield self.store.update_stats_delta(
                            now, self.stats_bucket_size,
                            "room", room_id, "joined_members", +1
                        )
                    elif membership == Membership.INVITE:
                        yield self.store.update_stats_delta(
                            now, self.stats_bucket_size,
                            "room", room_id, "invited_members", +1
                        )
                    elif membership == Membership.LEAVE:
                        yield self.store.update_stats_delta(
                            now, self.stats_bucket_size,
                            "room", room_id, "left_members", +1
                        )
                    elif membership == Membership.BAN:
                        yield self.store.update_stats_delta(
                            now, self.stats_bucket_size,
                            "room", room_id, "banned_members", +1
                        )

                user_id = event.state_key
                if self.is_mine_id(user_id):
                    # update user_stats as it's one of our users
                    public = yield self._is_public_room(room_id)

                    if prev_membership != membership:
                        if prev_membership == Membership.JOIN:
                            yield self.store.update_stats_delta(
                                now, self.stats_bucket_size,
                                "user", user_id,
                                "public_rooms" if public else "private_rooms",
                                -1
                            )
                        elif membership == Membership.JOIN:
                            yield self.store.update_stats_delta(
                                now, self.stats_bucket_size,
                                "user", user_id,
                                "public_rooms" if public else "private_rooms",
                                +1
                            )

            elif typ == EventTypes.JoinRules:
                self.store.update_room_state(room_id, {
                    "join_rules": event.content.get("join_rule")
                })

                is_public = yield self._get_key_change(
                    prev_event_id, event_id,
                    "join_rule", JoinRules.PUBLIC
                )
                if is_public is not None:
                    yield self.update_public_room_stats(
                        now, self.stats_bucket_size,
                        room_id, is_public
                    )

            elif typ == EventTypes.RoomHistoryVisibility:
                yield self.store.update_room_state(room_id, {
                    "history_visibility": event.content.get("history_visibility")
                })

                is_public = yield self._get_key_change(
                    prev_event_id, event_id,
                    "history_visibility", "world_readable"
                )
                if is_public is not None:
                    yield self.update_public_room_stats(
                        now, self.stats_bucket_size,
                        room_id, is_public
                    )

            elif typ == EventTypes.RoomEncryption:
                self.store.update_room_state(room_id, {
                    "encryption": event.content.get("algorithm")
                })
            elif typ == EventTypes.Name:
                self.store.update_room_state(room_id, {
                    "name": event.content.get("name")
                })
            elif typ == EventTypes.Topic:
                self.store.update_room_state(room_id, {
                    "topic": event.content.get("topic")
                })
            elif typ == EventTypes.RoomAvatar:
                self.store.update_room_state(room_id, {
                    "avatar": event.content.get("url")
                })
            elif typ == EventTypes.CanonicalAlias:
                self.store.update_room_state(room_id, {
                    "canonical_alias": event.content.get("alias")
                })

    @defer.inlineCallbacks
    def update_public_room_stats(self, ts, bucket_size, room_id, is_public):
        # For now, blindly iterate over all local users in the room so that
        # we can handle the whole problem of copying buckets over as needed
        user_ids = yield self.store.get_users_in_room(room_id)

        for user_id in user_ids:
            if self.is_mine_id(user_id):
                self.store.update_stats_delta(
                    ts, bucket_size,
                    "user", user_id,
                    "public_rooms", +1 if is_public else -1
                )
                self.store.update_stats_delta(
                    ts, bucket_size,
                    "user", user_id,
                    "private_rooms", -1 if is_public else +1
                )

    @defer.inlineCallbacks
    def _is_public_room(self, room_id):
        events = yield self.store.get_current_state(
            room_id, (
                (EventTypes.JoinRules, ""),
                (EventTypes.RoomHistoryVisibility, "")
            )
        )

        join_rules = events.get((EventTypes.JoinRules, ""))
        history_visibility = events.get((EventTypes.RoomHistoryVisibility, ""))

        if (
            join_rules.content.get("join_rule") == JoinRules.PUBLIC or
            history_visibility.content.get("history_visibility") == "world_readable"
        ):
            defer.returnValue(True)
        else:
            defer.returnValue(False)

    @defer.inlineCallbacks
    def _handle_local_user(self, user_id):
        logger.debug("Adding new local user to stats, %r", user_id)
```
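Both `_handle_initial_room` and `_handle_deltas` quantise the current time down to the start of its bucket, so every update within one window lands on the same `room_stats`/`user_stats` row. A worked example of that arithmetic with made-up values:

```python
# Bucket quantisation as used above (hypothetical inputs).
bucket_size = 86400             # 1 day, in seconds
now = 1533220000123             # an arbitrary timestamp in milliseconds
bucket = int(now / (bucket_size * 1000)) * bucket_size * 1000

assert bucket == 1533168000000  # 2018-08-02 00:00:00 UTC, the bucket start
assert bucket <= now < bucket + bucket_size * 1000
```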
```diff
@@ -24,10 +24,12 @@ from synapse.storage.roommember import ProfileInfo
 from synapse.types import get_localpart_from_id
 from synapse.util.metrics import Measure

+from .state_deltas import StateDeltasHandler
+
 logger = logging.getLogger(__name__)


-class UserDirectoryHandler(object):
+class UserDirectoryHandler(StateDeltasHandler):
     """Handles querying of and keeping updated the user_directory.

     N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY
@@ -49,6 +51,8 @@ class UserDirectoryHandler(object):
     INITIAL_USER_SLEEP_MS = 10

     def __init__(self, hs):
+        super(UserDirectoryHandler, self).__init__(hs)
+
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()
         self.server_name = hs.hostname
@@ -531,7 +535,7 @@ class UserDirectoryHandler(object):

     @defer.inlineCallbacks
     def _handle_remove_user(self, room_id, user_id):
-        """Called when we might need to remove user to directory
+        """Called when we might need to remove user from directory

         Args:
             room_id (str): room_id that user left or stopped being public that
@@ -643,47 +647,3 @@ class UserDirectoryHandler(object):
         yield self.store.update_profile_in_user_dir(
             user_id, new_name, new_avatar, room_id,
         )
-
-    @defer.inlineCallbacks
-    def _get_key_change(self, prev_event_id, event_id, key_name, public_value):
-        """Given two events check if the `key_name` field in content changed
-        from not matching `public_value` to doing so.
-
-        For example, check if `history_visibility` (`key_name`) changed from
-        `shared` to `world_readable` (`public_value`).
-
-        Returns:
-            None if the field in the events either both match `public_value`
-            or if neither do, i.e. there has been no change.
-            True if it didn't match `public_value` but now does
-            False if it did match `public_value` but now doesn't
-        """
-        prev_event = None
-        event = None
-        if prev_event_id:
-            prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
-
-        if event_id:
-            event = yield self.store.get_event(event_id, allow_none=True)
-
-        if not event and not prev_event:
-            logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
-            defer.returnValue(None)
-
-        prev_value = None
-        value = None
-
-        if prev_event:
-            prev_value = prev_event.content.get(key_name)
-
-        if event:
-            value = event.content.get(key_name)
-
-        logger.debug("prev_value: %r -> value: %r", prev_value, value)
-
-        if value == public_value and prev_value != public_value:
-            defer.returnValue(True)
-        elif value != public_value and prev_value == public_value:
-            defer.returnValue(False)
-        else:
-            defer.returnValue(None)
```
```diff
@@ -20,7 +20,7 @@ from twisted.web.server import Request, Site

 from synapse.http import redact_uri
 from synapse.http.request_metrics import RequestMetrics
-from synapse.util.logcontext import ContextResourceUsage, LoggingContext
+from synapse.util.logcontext import LoggingContext, ContextResourceUsage

 logger = logging.getLogger(__name__)
@@ -42,10 +42,9 @@ class SynapseRequest(Request):
     which is handling the request, and returns a context manager.

     """
-    def __init__(self, site, channel, *args, **kw):
-        Request.__init__(self, channel, *args, **kw)
+    def __init__(self, site, *args, **kw):
+        Request.__init__(self, *args, **kw)
         self.site = site
-        self._channel = channel
         self.authenticated_entity = None
         self.start_time = 0
```
```diff
@@ -24,9 +24,9 @@ from synapse.api.constants import Membership
 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
 from synapse.http.servlet import (
     assert_params_in_dict,
-    parse_integer,
     parse_json_object_from_request,
-    parse_string,
+    parse_integer,
+    parse_string
 )
 from synapse.types import UserID, create_requester
@@ -88,7 +88,7 @@ class EventRestServlet(ClientV1RestServlet):
     @defer.inlineCallbacks
     def on_GET(self, request, event_id):
         requester = yield self.auth.get_user_by_req(request)
-        event = yield self.event_handler.get_event(requester.user, None, event_id)
+        event = yield self.event_handler.get_event(requester.user, event_id)

         time_now = self.clock.time_msec()
         if event:
@@ -15,8 +15,8 @@

 from twisted.internet import defer

-from synapse.http.servlet import parse_boolean
 from synapse.streams.config import PaginationConfig
+from synapse.http.servlet import parse_boolean

 from .base import ClientV1RestServlet, client_path_patterns
```
```diff
@@ -508,7 +508,7 @@ class RoomEventServlet(ClientV1RestServlet):
     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
         requester = yield self.auth.get_user_by_req(request)
-        event = yield self.event_handler.get_event(requester.user, room_id, event_id)
+        event = yield self.event_handler.get_event(requester.user, event_id)

         time_now = self.clock.time_msec()
         if event:
@@ -19,9 +19,9 @@ from twisted.internet import defer

 from synapse.api import errors
 from synapse.http.servlet import (
-    RestServlet,
     assert_params_in_dict,
     parse_json_object_from_request,
+    RestServlet
 )

 from ._base import client_v2_patterns, interactive_auth_handler
@@ -643,7 +643,7 @@ class RegisterRestServlet(RestServlet):
     @defer.inlineCallbacks
     def _do_guest_registration(self, params):
         if not self.hs.config.allow_guest_access:
-            raise SynapseError(403, "Guest access is disabled")
+            defer.returnValue((403, "Guest access is disabled"))
         user_id, _ = yield self.registration_handler.register(
             generate_token=False,
             make_guest=True
```
```diff
@@ -14,10 +14,10 @@

 from pydenticon import Generator

-from twisted.web.resource import Resource
-
 from synapse.http.servlet import parse_integer

+from twisted.web.resource import Resource
+
 FOREGROUND = [
     "rgb(45,79,255)",
     "rgb(254,180,44)",
```
```diff
@@ -343,10 +343,9 @@ class SQLBaseStore(object):
         """
         parent_context = LoggingContext.current_context()
         if parent_context == LoggingContext.sentinel:
-            # warning disabled for 0.33.0 release; proper fixes will land imminently.
-            # logger.warn(
-            #     "Running db txn from sentinel context: metrics will be lost",
-            # )
+            logger.warn(
+                "Running db txn from sentinel context: metrics will be lost",
+            )
             parent_context = None

         start_time = time.time()
@@ -503,7 +502,7 @@ class SQLBaseStore(object):

         Args:
             table (str): The table to upsert into
-            keyvalues (dict): The unique key tables and their new values
+            keyvalues (dict): The unique key columns and their new values
             values (dict): The nonunique columns and their new values
             insertion_values (dict): additional key/values to use only when
                 inserting
```
```diff
@@ -343,7 +343,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
             table="events",
             keyvalues={
                 "event_id": event_id,
-                "room_id": room_id,
             },
             retcol="depth",
             allow_none=True,
```
```diff
@@ -1807,6 +1807,43 @@ class EventsStore(EventsWorkerStore):
         )
         return self.runInteraction("get_all_new_events", get_all_new_events_txn)

+    def get_state_event_counts(self, room_id):
+        """Gets the total number of state events in the room
+        """
+
+        def f(txn):
+            sql = (
+                "SELECT COUNT(*)"
+                " FROM state_events"
+                " WHERE room_id=?"
+            )
+            txn.execute(sql, (room_id,))
+            row = txn.fetchone()
+            return row[0] if row else 0
+
+        return self.runInteraction("get_state_event_counts", f)
+
+    def get_event_counts(self, room_id, local_server):
+        """Gets the number of events in the room, split into local versus remote
+        """
+
+        def f(txn):
+            sql = (
+                "SELECT sender LIKE '%%:%s' AS local, COUNT(*)"
+                " FROM events"
+                " WHERE room_id=?"
+                " GROUP BY local"
+            )
+            txn.execute(sql, (local_server, room_id,))
+            rows = txn.fetchall()
+            results = {
+                ("local" if row[0] else "remote"): row[1]
+                for row in rows
+            }
+            return (results.get("local", 0), results.get("remote", 0))
+
+        return self.runInteraction("get_event_counts", f)
+
     def purge_history(
         self, room_id, token, delete_local_events,
     ):
```
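`get_event_counts` classifies an event as local or remote by whether the sender's domain — the part after the colon in a Matrix ID — matches the local server name. A small illustration of the same split in plain Python (made-up events; the real work happens in the SQL `GROUP BY`):

```python
# The local/remote split computed by get_event_counts, in miniature.
events = [
    {"sender": "@alice:example.com"},
    {"sender": "@bob:example.com"},
    {"sender": "@carol:remote.org"},
]
local_server = "example.com"

local = sum(1 for e in events if e["sender"].endswith(":" + local_server))
remote = len(events) - local
assert (local, remote) == (2, 1)
```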
```diff
@@ -19,7 +19,7 @@ from canonicaljson import json

 from twisted.internet import defer

-from synapse.api.errors import NotFoundError
+from synapse.api.errors import SynapseError
 # these are only included to make the type annotations work
 from synapse.events import EventBase  # noqa: F401
 from synapse.events import FrozenEvent
@@ -76,7 +76,7 @@ class EventsWorkerStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_event(self, event_id, check_redacted=True,
                   get_prev_content=False, allow_rejected=False,
-                  allow_none=False, check_room_id=None):
+                  allow_none=False):
         """Get an event from the database by event_id.

         Args:
@@ -87,9 +87,7 @@ class EventsWorkerStore(SQLBaseStore):
                 include the previous states content in the unsigned field.
             allow_rejected (bool): If True return rejected events.
             allow_none (bool): If True, return None if no event found, if
-                False throw a NotFoundError
-            check_room_id (str|None): if not None, check the room of the found event.
-                If there is a mismatch, behave as per allow_none.
+                False throw an exception.

         Returns:
             Deferred : A FrozenEvent.
@@ -101,16 +99,10 @@ class EventsWorkerStore(SQLBaseStore):
             allow_rejected=allow_rejected,
         )

-        event = events[0] if events else None
+        if not events and not allow_none:
+            raise SynapseError(404, "Could not find event %s" % (event_id,))

-        if event is not None and check_room_id is not None:
-            if event.room_id != check_room_id:
-                event = None
-
-        if event is None and not allow_none:
-            raise NotFoundError("Could not find event %s" % (event_id,))
-
-        defer.returnValue(event)
+        defer.returnValue(events[0] if events else None)

     @defer.inlineCallbacks
     def get_events(self, event_ids, check_redacted=True,
```
```diff
@@ -276,7 +276,7 @@ class GroupServerStore(SQLBaseStore):
                     "category_id": category_id,
                     "room_id": room_id,
                 },
-                values=to_update,
+                updatevalues=to_update,
             )
         else:
             if is_public is None:
@@ -562,7 +562,7 @@ class GroupServerStore(SQLBaseStore):
                     "role_id": role_id,
                     "user_id": user_id,
                 },
-                values=to_update,
+                updatevalues=to_update,
             )
         else:
             if is_public is None:
```
```diff
@@ -82,6 +82,24 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             return [to_ascii(r[0]) for r in txn]
         return self.runInteraction("get_users_in_room", f)

+    @cached()
+    def get_user_count_in_room(self, room_id, membership):
+        def f(txn):
+            sql = (
+                "SELECT count(*) FROM room_memberships as m"
+                " INNER JOIN current_state_events as c"
+                " ON m.event_id = c.event_id"
+                " AND m.room_id = c.room_id"
+                " AND m.user_id = c.state_key"
+                " WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?"
+            )
+
+            txn.execute(sql, (room_id, membership,))
+            row = txn.fetchone()
+            return row[0]
+
+        return self.runInteraction("get_users_in_room", f)
+
     @cached()
     def get_invited_rooms_for_user(self, user_id):
         """ Get all the rooms the user is invited to
```
synapse/storage/schema/delta/51/stats.sql (new file, 79 lines)

```sql
/* Copyright 2018 New Vector Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CREATE TABLE stats_stream_pos (
    Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE,  -- Makes sure this table only has one row.
    stream_id BIGINT,
    CHECK (Lock='X')
);

INSERT INTO stats_stream_pos (stream_id) VALUES (null);

CREATE TABLE user_stats (
    user_id TEXT NOT NULL,
    ts BIGINT NOT NULL,
    bucket_size INT NOT NULL,
    sent_events INT NOT NULL,
    local_events INT NOT NULL,
    public_rooms INT NOT NULL,
    private_rooms INT NOT NULL,
    sent_file_count INT NOT NULL,
    sent_file_size INT NOT NULL
);

CREATE UNIQUE INDEX user_stats_user_ts ON user_stats(user_id, ts);

CREATE TABLE room_stats (
    room_id TEXT NOT NULL,
    ts BIGINT NOT NULL,
    bucket_size INT NOT NULL,
    current_state_events INT NOT NULL,
    joined_members INT NOT NULL,
    invited_members INT NOT NULL,
    left_members INT NOT NULL,
    banned_members INT NOT NULL,
    state_events INT NOT NULL,
    local_events INT NOT NULL,
    remote_events INT NOT NULL,
    sent_events INT NOT NULL  -- number sent per timeslice
);

CREATE UNIQUE INDEX room_stats_room_ts ON room_stats(room_id, ts);

-- cache of current room state; useful for the publicRooms list
CREATE TABLE room_state (
    room_id TEXT NOT NULL,
    join_rules TEXT NOT NULL,
    history_visibility TEXT NOT NULL,
    encrypted BOOLEAN,
    name TEXT NOT NULL,
    topic TEXT NOT NULL,
    avatar TEXT NOT NULL,
    canonical_alias TEXT NOT NULL
    -- get aliases straight from the right table
);

CREATE UNIQUE INDEX room_state_room ON room_state(room_id);

CREATE TABLE media_stats (
    ts BIGINT NOT NULL,
    bucket_size INT NOT NULL,
    local_media_count INT NOT NULL,
    local_media_size INT NOT NULL,
    remote_media_count INT NOT NULL,
    remote_media_size INT NOT NULL
);

CREATE UNIQUE INDEX media_stats_ts ON media_stats(ts);
```
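`stats_stream_pos` uses the single-row `Lock CHAR(1) ... CHECK` pattern seen elsewhere in Synapse's schema: exactly one row can ever exist, so the stream position is maintained with a plain `UPDATE`. A hypothetical smoke test of just that table, run against in-memory SQLite:

```python
# Illustrative test of the one-row lock-table pattern (not part of the PR).
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE stats_stream_pos ("
    " Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE,"
    " stream_id BIGINT,"
    " CHECK (Lock='X'))"
)
conn.execute("INSERT INTO stats_stream_pos (stream_id) VALUES (null)")

# Position updates are plain UPDATEs against the single row.
conn.execute("UPDATE stats_stream_pos SET stream_id = ?", (42,))
assert conn.execute("SELECT stream_id FROM stats_stream_pos").fetchone() == (42,)

# A second row is rejected by the UNIQUE constraint on Lock.
try:
    conn.execute("INSERT INTO stats_stream_pos (Lock, stream_id) VALUES ('X', 7)")
except sqlite3.IntegrityError:
    pass
```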
```diff
@@ -89,6 +89,59 @@ class StateGroupWorkerStore(SQLBaseStore):
             _get_current_state_ids_txn,
         )

+    @defer.inlineCallbacks
+    def get_current_state(self, room_id, types):
+        """Get the current state event of a given type for a room based on the
+        current_state_events table. This may not be as up-to-date as the result
+        of doing a fresh state resolution as per state_handler.get_current_state
+
+        Args:
+            room_id (str)
+            types (list): List of (type, state_key) tuples which are used to
+                filter the state fetched. `state_key` may be None, which matches
+                any `state_key`
+
+        Returns:
+            deferred: dict of (type, state_key) -> event
+        """
+        def _get_current_state_txn(txn):
+            results = {}
+            sql = """SELECT type, state_key, event_id FROM current_state_events
+                     WHERE room_id = ? %s"""
+            # Turns out that postgres doesn't like doing a list of OR's and
+            # is about 1000x slower, so we just issue a query for each specific
+            # type separately.
+            if types:
+                clause_to_args = [
+                    (
+                        "AND type = ? AND state_key = ?",
+                        (etype, state_key)
+                    ) if state_key is not None else (
+                        "AND type = ?",
+                        (etype,)
+                    )
+                    for etype, state_key in types
+                ]
+            else:
+                # If types is None we fetch all the state, and so just use an
+                # empty where clause with no extra args.
+                clause_to_args = [("", [])]
+            for where_clause, where_args in clause_to_args:
+                args = [room_id]
+                args.extend(where_args)
+                txn.execute(sql % (where_clause,), args)
+                for row in txn:
+                    typ, state_key, event_id = row
+                    key = (typ, state_key)
+                    results[key] = event_id
+            return results
+
+        results = yield self.runInteraction(
+            "get_current_state",
+            _get_current_state_txn,
+        )
+        for (key, event_id) in iteritems(results):
+            results[key] = yield self.get_event(event_id, allow_none=True)
+
+        defer.returnValue(results)
+
     @cached(max_entries=10000, iterable=True)
     def get_state_group_delta(self, state_group):
         """Given a state group try to return a previous group and a delta between
```
98
synapse/storage/state_deltas.py
Normal file
98
synapse/storage/state_deltas.py
Normal file
@@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from ._base import SQLBaseStore

logger = logging.getLogger(__name__)


class StateDeltasStore(SQLBaseStore):

    @defer.inlineCallbacks
    def get_all_rooms(self):
        """Get all room_ids we've ever known about, in ascending order of "size"
        """
        sql = """
            SELECT room_id FROM current_state_events
            GROUP BY room_id
            ORDER BY count(*) ASC
        """
        rows = yield self._execute("get_all_rooms", None, sql)
        defer.returnValue([room_id for room_id, in rows])

    @defer.inlineCallbacks
    def get_all_local_users(self):
        """Get all local users
        """
        sql = """
            SELECT name FROM users
        """
        rows = yield self._execute("get_all_local_users", None, sql)
        defer.returnValue([name for name, in rows])

    def get_current_state_deltas(self, prev_stream_id):
        prev_stream_id = int(prev_stream_id)
        if not self._curr_state_delta_stream_cache.has_any_entity_changed(prev_stream_id):
            return []

        def get_current_state_deltas_txn(txn):
            # First we calculate the max stream id that will give us less than
            # N results.
            # We arbitrarily limit to 100 stream_id entries to ensure we don't
            # select too many.
            sql = """
                SELECT stream_id, count(*)
                FROM current_state_delta_stream
                WHERE stream_id > ?
                GROUP BY stream_id
                ORDER BY stream_id ASC
                LIMIT 100
            """
            txn.execute(sql, (prev_stream_id,))

            total = 0
            max_stream_id = prev_stream_id
            for max_stream_id, count in txn:
                total += count
                if total > 100:
                    # We arbitrarily limit to 100 entries to ensure we don't
                    # select too many.
                    break

            # Now actually get the deltas
            sql = """
                SELECT stream_id, room_id, type, state_key, event_id, prev_event_id
                FROM current_state_delta_stream
                WHERE ? < stream_id AND stream_id <= ?
                ORDER BY stream_id ASC
            """
            txn.execute(sql, (prev_stream_id, max_stream_id,))
            return self.cursor_to_dict(txn)

        return self.runInteraction(
            "get_current_state_deltas", get_current_state_deltas_txn
        )

    def get_max_stream_id_in_current_state_deltas(self):
        return self._simple_select_one_onecol(
            table="current_state_delta_stream",
            keyvalues={},
            retcol="COALESCE(MAX(stream_id), -1)",
            desc="get_max_stream_id_in_current_state_deltas",
        )
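As a hedged illustration of how a caller is expected to consume this API (this sketch is not part of the diff; the `on_delta` callback is hypothetical), a consumer would poll get_current_state_deltas() in a loop, feeding back the highest stream_id it has seen, until an empty batch signals it has caught up:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def drain_state_deltas(store, start_pos, on_delta):
        # Repeatedly fetch batches of current-state deltas until we reach
        # the head of the stream; each batch is capped at ~100 stream ids.
        pos = start_pos
        while True:
            deltas = yield store.get_current_state_deltas(pos)
            if not deltas:
                # nothing has changed since `pos`: we are caught up
                defer.returnValue(pos)
            for delta in deltas:
                # each row carries stream_id, room_id, type, state_key,
                # event_id and prev_event_id, per the SELECT above
                on_delta(delta)
            # resume from the highest stream id seen so far
            pos = deltas[-1]["stream_id"]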
synapse/storage/stats.py (new file, 152 lines)
@@ -0,0 +1,152 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from .state_deltas import StateDeltasStore

logger = logging.getLogger(__name__)

# these fields track relative numbers (e.g. number of events sent in this timeslice)
RELATIVE_STATS_FIELDS = {
    "room": (
        "sent_events"
    ),
    "user": (
        "sent_events"
    )
}

# these fields track absolutes (e.g. total number of rooms on the server)
ABSOLUTE_STATS_FIELDS = {
    "room": (
        "current_state_events",
        "joined_members",
        "invited_members",
        "left_members",
        "banned_members",
        "state_events",
        "local_events",
        "remote_events",
    ),
    "user": (
        "local_events",
        "public_rooms",
        "private_rooms",
        "sent_file_count",
        "sent_file_size",
    ),
}


class StatsStore(StateDeltasStore):

    def get_stats_stream_pos(self):
        return self._simple_select_one_onecol(
            table="stats_stream_pos",
            keyvalues={},
            retcol="stream_id",
            desc="stats_stream_pos",
        )

    def update_stats_stream_pos(self, stream_id):
        return self._simple_update_one(
            table="stats_stream_pos",
            keyvalues={},
            updatevalues={"stream_id": stream_id},
            desc="update_stats_stream_pos",
        )

    def update_room_state(self, room_id, fields):
        return self._simple_upsert(
            table="room_state",
            keyvalues={
                "room_id": room_id,
            },
            values=fields,
            desc="update_room_state",
        )

    def update_stats(self, stats_type, stats_id, ts, fields):
        return self._simple_upsert(
            table=("%s_stats" % stats_type),
            keyvalues={
                ("%s_id" % stats_type): stats_id,
                "ts": ts,
            },
            values=fields,
            desc="update_stats",
        )

    def update_stats_delta(self, ts, bucket_size, stats_type, stats_id, field, value):
        def _update_stats_delta(txn):
            table = "%s_stats" % stats_type
            id_col = "%s_id" % stats_type

            sql = (
                "SELECT * FROM %s"
                " WHERE %s=? and ts=("
                "  SELECT MAX(ts) FROM %s"
                "  WHERE %s=?"
                ")"
            ) % (table, id_col, table, id_col)
            txn.execute(sql, (stats_id, stats_id))
            rows = self.cursor_to_dict(txn)
            if len(rows) == 0:
                # silently skip as we don't have anything to apply a delta to yet.
                # this tries to minimise any race between the initial sync and
                # subsequent deltas arriving.
                return

            values = {
                key: rows[0][key] for key in ABSOLUTE_STATS_FIELDS[stats_type]
            }
            values[id_col] = stats_id
            values["ts"] = ts
            values["bucket_size"] = bucket_size

            latest_ts = rows[0]["ts"]
            if ts != latest_ts:
                # we have to copy our absolute counters over to the new entry.
                self._simple_insert_txn(
                    txn,
                    table=table,
                    values=values
                )

            # actually update the new value
            if field in ABSOLUTE_STATS_FIELDS[stats_type]:
                self._simple_update_txn(
                    txn,
                    table=table,
                    keyvalues={
                        id_col: stats_id,
                        "ts": ts,
                    },
                    updatevalues={
                        field: value
                    }
                )
            else:
                sql = (
                    "UPDATE %s"
                    " SET %s=%s+?"
                    " WHERE %s=? AND ts=?"
                ) % (table, field, field, id_col)
                txn.execute(sql, (value, stats_id, ts))

        return self.runInteraction(
            "update_stats_delta", _update_stats_delta
        )
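The ts/bucket_size pair implies that callers quantise timestamps to bucket boundaries before calling update_stats_delta(); when a delta lands in a new bucket, the absolute counters are copied forward and the per-bucket relative counters start fresh. A minimal sketch of that quantisation, assuming the caller works in milliseconds (the helper name and the hourly bucket size are illustrative assumptions, not part of the diff):

    import time

    def quantise_to_bucket(now_ms, bucket_size_ms):
        # round the timestamp down to the start of its stats bucket, so every
        # delta within the same window shares one row per (id, ts)
        return (now_ms // bucket_size_ms) * bucket_size_ms

    bucket_size = 60 * 60 * 1000  # hourly buckets (an assumption)
    ts = quantise_to_bucket(int(time.time() * 1000), bucket_size)
    # store.update_stats_delta(ts, bucket_size, "room", room_id, "sent_events", 1)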
@@ -25,12 +25,12 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import get_domain_from_id, get_localpart_from_id
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks

from ._base import SQLBaseStore
from .state_deltas import StateDeltasStore

logger = logging.getLogger(__name__)


class UserDirectoryStore(SQLBaseStore):
class UserDirectoryStore(StateDeltasStore):
    @cachedInlineCallbacks(cache_context=True)
    def is_room_world_readable_or_publicly_joinable(self, room_id, cache_context):
        """Check if the room is either world_readable or publicly joinable
@@ -307,28 +307,6 @@ class UserDirectoryStore(SQLBaseStore):

        defer.returnValue(user_ids)

    @defer.inlineCallbacks
    def get_all_rooms(self):
        """Get all room_ids we've ever known about, in ascending order of "size"
        """
        sql = """
            SELECT room_id FROM current_state_events
            GROUP BY room_id
            ORDER BY count(*) ASC
        """
        rows = yield self._execute("get_all_rooms", None, sql)
        defer.returnValue([room_id for room_id, in rows])

    @defer.inlineCallbacks
    def get_all_local_users(self):
        """Get all local users
        """
        sql = """
            SELECT name FROM users
        """
        rows = yield self._execute("get_all_local_users", None, sql)
        defer.returnValue([name for name, in rows])

    def add_users_who_share_room(self, room_id, share_private, user_id_tuples):
        """Insert entries into the users_who_share_rooms table. The first
        user should be a local user.
@@ -572,57 +550,6 @@ class UserDirectoryStore(SQLBaseStore):
            desc="update_user_directory_stream_pos",
        )

    def get_current_state_deltas(self, prev_stream_id):
        prev_stream_id = int(prev_stream_id)
        if not self._curr_state_delta_stream_cache.has_any_entity_changed(prev_stream_id):
            return []

        def get_current_state_deltas_txn(txn):
            # First we calculate the max stream id that will give us less than
            # N results.
            # We arbitrarily limit to 100 stream_id entries to ensure we don't
            # select too many.
            sql = """
                SELECT stream_id, count(*)
                FROM current_state_delta_stream
                WHERE stream_id > ?
                GROUP BY stream_id
                ORDER BY stream_id ASC
                LIMIT 100
            """
            txn.execute(sql, (prev_stream_id,))

            total = 0
            max_stream_id = prev_stream_id
            for max_stream_id, count in txn:
                total += count
                if total > 100:
                    # We arbitrarily limit to 100 entries to ensure we don't
                    # select too many.
                    break

            # Now actually get the deltas
            sql = """
                SELECT stream_id, room_id, type, state_key, event_id, prev_event_id
                FROM current_state_delta_stream
                WHERE ? < stream_id AND stream_id <= ?
                ORDER BY stream_id ASC
            """
            txn.execute(sql, (prev_stream_id, max_stream_id,))
            return self.cursor_to_dict(txn)

        return self.runInteraction(
            "get_current_state_deltas", get_current_state_deltas_txn
        )

    def get_max_stream_id_in_current_state_deltas(self):
        return self._simple_select_one_onecol(
            table="current_state_delta_stream",
            keyvalues={},
            retcol="COALESCE(MAX(stream_id), -1)",
            desc="get_max_stream_id_in_current_state_deltas",
        )

    @defer.inlineCallbacks
    def search_user_dir(self, user_id, search_term, limit):
        """Searches for users in directory
@@ -80,7 +80,12 @@ class StreamChangeCache(object):
            )
        }

        result = changed_entities.intersection(entities)
        # we need to include entities which we don't know about, as well as
        # those which are known to have changed since the stream pos.
        result = {
            e for e in entities
            if e in changed_entities or e not in self._entity_to_key
        }

        self.metrics.inc_hits()
    else:
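To make the fixed semantics concrete: an entity absent from the cache may have changed before the cache's window began, so it must be treated as "possibly changed" rather than "unchanged". A self-contained sketch of the rule the new comprehension implements (the example entities are made up):

    known = {"@a:hs": 5, "@b:hs": 9}        # entity -> last-known change position
    changed_entities = {"@b:hs"}            # known to have changed since the stream pos
    entities = {"@a:hs", "@b:hs", "@c:hs"}  # @c:hs has never been seen by the cache

    result = {
        e for e in entities
        if e in changed_entities or e not in known
    }
    assert result == {"@b:hs", "@c:hs"}     # unknown entities are now included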
@@ -16,13 +16,10 @@ import itertools
import logging
import operator

import six

from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.events.utils import prune_event
from synapse.types import get_domain_from_id
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn

logger = logging.getLogger(__name__)
@@ -228,154 +225,3 @@ def filter_events_for_client(store, user_id, events, is_peeking=False,

    # we turn it into a list before returning it.
    defer.returnValue(list(filtered_events))


@defer.inlineCallbacks
def filter_events_for_server(store, server_name, events):
    # Whatever else we do, we need to check for senders which have requested
    # erasure of their data.
    erased_senders = yield store.are_users_erased(
        (e.sender for e in events),
    )

    def redact_disallowed(event, state):
        # if the sender has been gdpr17ed, always return a redacted
        # copy of the event.
        if erased_senders[event.sender]:
            logger.info(
                "Sender of %s has been erased, redacting",
                event.event_id,
            )
            return prune_event(event)

        # state will be None if we decided we didn't need to filter by
        # room membership.
        if not state:
            return event

        history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
        if history:
            visibility = history.content.get("history_visibility", "shared")
            if visibility in ["invited", "joined"]:
                # We now loop through all state events looking for
                # membership states for the requesting server to determine
                # if the server is either in the room or has been invited
                # into the room.
                for ev in state.itervalues():
                    if ev.type != EventTypes.Member:
                        continue
                    try:
                        domain = get_domain_from_id(ev.state_key)
                    except Exception:
                        continue

                    if domain != server_name:
                        continue

                    memtype = ev.membership
                    if memtype == Membership.JOIN:
                        return event
                    elif memtype == Membership.INVITE:
                        if visibility == "invited":
                            return event
                else:
                    # server has no users in the room: redact
                    return prune_event(event)

        return event

    # Next let's check to see if all the events have a history visibility
    # of "shared" or "world_readable". If that's the case then we don't
    # need to check membership (as we know the server is in the room).
    event_to_state_ids = yield store.get_state_ids_for_events(
        frozenset(e.event_id for e in events),
        types=(
            (EventTypes.RoomHistoryVisibility, ""),
        )
    )

    visibility_ids = set()
    for sids in event_to_state_ids.itervalues():
        hist = sids.get((EventTypes.RoomHistoryVisibility, ""))
        if hist:
            visibility_ids.add(hist)

    # If we failed to find any history visibility events then the default
    # is "shared" visibility.
    if not visibility_ids:
        all_open = True
    else:
        event_map = yield store.get_events(visibility_ids)
        all_open = all(
            e.content.get("history_visibility") in (None, "shared", "world_readable")
            for e in event_map.itervalues()
        )

    if all_open:
        # all the history_visibility state affecting these events is open, so
        # we don't need to filter by membership state. We *do* need to check
        # for user erasure, though.
        if erased_senders:
            events = [
                redact_disallowed(e, None)
                for e in events
            ]

        defer.returnValue(events)

    # Ok, so we're dealing with events that have non-trivial visibility
    # rules, so we need to also get the memberships of the room.

    # first, for each event we're wanting to return, get the event_ids
    # of the history vis and membership state at those events.
    event_to_state_ids = yield store.get_state_ids_for_events(
        frozenset(e.event_id for e in events),
        types=(
            (EventTypes.RoomHistoryVisibility, ""),
            (EventTypes.Member, None),
        )
    )

    # We only want to pull out member events that correspond to the
    # server's domain.
    #
    # event_to_state_ids contains lots of duplicates, so it turns out to be
    # cheaper to build a complete set of unique
    # ((type, state_key), event_id) tuples, and then filter out the ones we
    # don't want.
    #
    state_key_to_event_id_set = {
        e
        for key_to_eid in six.itervalues(event_to_state_ids)
        for e in key_to_eid.items()
    }

    def include(typ, state_key):
        if typ != EventTypes.Member:
            return True

        # we avoid using get_domain_from_id here for efficiency.
        idx = state_key.find(":")
        if idx == -1:
            return False
        return state_key[idx + 1:] == server_name

    event_map = yield store.get_events([
        e_id
        for key, e_id in state_key_to_event_id_set
        if include(key[0], key[1])
    ])

    event_to_state = {
        e_id: {
            key: event_map[inner_e_id]
            for key, inner_e_id in key_to_eid.iteritems()
            if inner_e_id in event_map
        }
        for e_id, key_to_eid in event_to_state_ids.iteritems()
    }

    defer.returnValue([
        redact_disallowed(e, event_to_state[e.event_id])
        for e in events
    ])
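A hedged sketch of the erasure rule at the heart of this function: events from erased senders are redacted rather than dropped, so the room DAG stays intact while the content disappears. The helper below is illustrative only and stands in for the real prune_event/are_users_erased machinery:

    def redact_if_erased(event, erased_senders, prune_event):
        # erased_senders maps user_id -> bool, as returned by are_users_erased();
        # an erased sender's events are stripped down to their protocol-level keys
        if erased_senders.get(event.sender):
            return prune_event(event)
        return event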
@@ -17,22 +17,26 @@ import json

from mock import Mock

from twisted.test.proto_helpers import MemoryReactorClock
from twisted.internet import defer

from synapse.http.server import JsonResource
from synapse.rest.client.v1.register import register_servlets
from synapse.util import Clock
from synapse.rest.client.v1.register import CreateUserRestServlet

from tests import unittest
from tests.server import make_request, setup_test_homeserver
from tests.utils import mock_getRawHeaders


class CreateUserServletTestCase(unittest.TestCase):
    """
    Tests for CreateUserRestServlet.
    """

    def setUp(self):
        # do the dance to hook up request data to self.request_data
        self.request_data = ""
        self.request = Mock(
            content=Mock(read=Mock(side_effect=lambda: self.request_data)),
            path='/_matrix/client/api/v1/createUser'
        )
        self.request.args = {}
        self.request.requestHeaders.getRawHeaders = mock_getRawHeaders()

        self.registration_handler = Mock()

        self.appservice = Mock(sender="@as:test")
@@ -40,49 +44,39 @@ class CreateUserServletTestCase(unittest.TestCase):
            get_app_service_by_token=Mock(return_value=self.appservice)
        )

        handlers = Mock(registration_handler=self.registration_handler)
        self.clock = MemoryReactorClock()
        self.hs_clock = Clock(self.clock)

        self.hs = setup_test_homeserver(
            http_client=None, clock=self.hs_clock, reactor=self.clock
        # do the dance to hook things up to the hs global
        handlers = Mock(
            registration_handler=self.registration_handler,
        )
        self.hs = Mock()
        self.hs.hostname = "superbig~testing~thing.com"
        self.hs.get_datastore = Mock(return_value=self.datastore)
        self.hs.get_handlers = Mock(return_value=handlers)
        self.servlet = CreateUserRestServlet(self.hs)

    @defer.inlineCallbacks
    def test_POST_createuser_with_valid_user(self):

        res = JsonResource(self.hs)
        register_servlets(self.hs, res)

        request_data = json.dumps(
            {
                "localpart": "someone",
                "displayname": "someone interesting",
                "duration_seconds": 200,
            }
        )

        url = b'/_matrix/client/api/v1/createUser?access_token=i_am_an_app_service'

        user_id = "@someone:interesting"
        token = "my token"
        self.request.args = {
            "access_token": "i_am_an_app_service"
        }
        self.request_data = json.dumps({
            "localpart": "someone",
            "displayname": "someone interesting",
            "duration_seconds": 200
        })

        self.registration_handler.get_or_create_user = Mock(
            return_value=(user_id, token)
        )

        request, channel = make_request(b"POST", url, request_data)
        request.render(res)

        # Advance the clock because it waits
        self.clock.advance(1)

        self.assertEquals(channel.result["code"], b"200")
        (code, result) = yield self.servlet.on_POST(self.request)
        self.assertEquals(code, 200)

        det_data = {
            "user_id": user_id,
            "access_token": token,
            "home_server": self.hs.hostname,
            "home_server": self.hs.hostname
        }
        self.assertDictContainsSubset(det_data, json.loads(channel.result["body"]))
        self.assertDictContainsSubset(det_data, result)
(File diff suppressed because it is too large.)
@@ -16,14 +16,13 @@

import json
import time

import attr

# twisted imports
from twisted.internet import defer

from synapse.api.constants import Membership

# trial imports
from tests import unittest
from tests.server import make_request, wait_until_result


class RestTestCase(unittest.TestCase):
@@ -134,113 +133,3 @@ class RestTestCase(unittest.TestCase):

        for key in required:
            self.assertEquals(required[key], actual[key],
                              msg="%s mismatch. %s" % (key, actual))


@attr.s
class RestHelper(object):
    """Contains extra helper functions to quickly and clearly perform a given
    REST action, which isn't the focus of the test.
    """

    hs = attr.ib()
    resource = attr.ib()
    auth_user_id = attr.ib()

    def create_room_as(self, room_creator, is_public=True, tok=None):
        temp_id = self.auth_user_id
        self.auth_user_id = room_creator
        path = b"/_matrix/client/r0/createRoom"
        content = {}
        if not is_public:
            content["visibility"] = "private"
        if tok:
            path = path + b"?access_token=%s" % tok.encode('ascii')

        request, channel = make_request(b"POST", path, json.dumps(content).encode('utf8'))
        request.render(self.resource)
        wait_until_result(self.hs.get_reactor(), channel)

        assert channel.result["code"] == b"200", channel.result
        self.auth_user_id = temp_id
        return channel.json_body["room_id"]

    def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
        self.change_membership(
            room=room,
            src=src,
            targ=targ,
            tok=tok,
            membership=Membership.INVITE,
            expect_code=expect_code,
        )

    def join(self, room=None, user=None, expect_code=200, tok=None):
        self.change_membership(
            room=room,
            src=user,
            targ=user,
            tok=tok,
            membership=Membership.JOIN,
            expect_code=expect_code,
        )

    def leave(self, room=None, user=None, expect_code=200, tok=None):
        self.change_membership(
            room=room,
            src=user,
            targ=user,
            tok=tok,
            membership=Membership.LEAVE,
            expect_code=expect_code,
        )

    def change_membership(self, room, src, targ, membership, tok=None, expect_code=200):
        temp_id = self.auth_user_id
        self.auth_user_id = src

        path = "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % (room, targ)
        if tok:
            path = path + "?access_token=%s" % tok

        data = {"membership": membership}

        request, channel = make_request(
            b"PUT", path.encode('ascii'), json.dumps(data).encode('utf8')
        )

        request.render(self.resource)
        wait_until_result(self.hs.get_reactor(), channel)

        assert int(channel.result["code"]) == expect_code, (
            "Expected: %d, got: %d, resp: %r"
            % (expect_code, int(channel.result["code"]), channel.result["body"])
        )

        self.auth_user_id = temp_id

    @defer.inlineCallbacks
    def register(self, user_id):
        (code, response) = yield self.mock_resource.trigger(
            "POST",
            "/_matrix/client/r0/register",
            json.dumps(
                {"user": user_id, "password": "test", "type": "m.login.password"}
            ),
        )
        self.assertEquals(200, code)
        defer.returnValue(response)

    @defer.inlineCallbacks
    def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
        if txn_id is None:
            txn_id = "m%s" % (str(time.time()))
        if body is None:
            body = "body_text_here"

        path = "/_matrix/client/r0/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
        content = '{"msgtype":"m.text","body":"%s"}' % body
        if tok:
            path = path + "?access_token=%s" % tok

        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
        self.assertEquals(expect_code, code, msg=str(response))
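As a hedged illustration of how RestHelper is meant to be used from a test (the hs/resource fixtures and the user ids below are assumptions, not part of the diff):

    # inside a test case, after setting up a homeserver and a JsonResource:
    helper = RestHelper(hs=hs, resource=resource, auth_user_id="@alice:test")

    room_id = helper.create_room_as("@alice:test", is_public=False)
    helper.invite(room=room_id, src="@alice:test", targ="@bob:test")
    helper.join(room=room_id, user="@bob:test")
    helper.leave(room=room_id, user="@bob:test", expect_code=200)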
@@ -0,0 +1,61 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mock import Mock

from twisted.internet import defer

from synapse.types import UserID

from tests import unittest

from ....utils import MockHttpResource, setup_test_homeserver

PATH_PREFIX = "/_matrix/client/v2_alpha"


class V2AlphaRestTestCase(unittest.TestCase):
    # Consumer must define
    #   USER_ID = <some string>
    #   TO_REGISTER = [<list of REST servlets to register>]

    @defer.inlineCallbacks
    def setUp(self):
        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)

        hs = yield setup_test_homeserver(
            datastore=self.make_datastore_mock(),
            http_client=None,
            resource_for_client=self.mock_resource,
            resource_for_federation=self.mock_resource,
        )

        def get_user_by_access_token(token=None, allow_guest=False):
            return {
                "user": UserID.from_string(self.USER_ID),
                "token_id": 1,
                "is_guest": False,
            }
        hs.get_auth().get_user_by_access_token = get_user_by_access_token

        for r in self.TO_REGISTER:
            r.register_servlets(hs, self.mock_resource)

    def make_datastore_mock(self):
        store = Mock(spec=[
            "insert_client_ip",
        ])
        store.get_app_service_by_token = Mock(return_value=None)
        return store
@@ -13,33 +13,35 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

import synapse.types
from synapse.api.errors import Codes
from synapse.http.server import JsonResource
from synapse.rest.client.v2_alpha import filter
from synapse.types import UserID
from synapse.util import Clock

from tests import unittest
from tests.server import ThreadedMemoryReactorClock as MemoryReactorClock
from tests.server import make_request, setup_test_homeserver, wait_until_result

from ....utils import MockHttpResource, setup_test_homeserver

PATH_PREFIX = "/_matrix/client/v2_alpha"


class FilterTestCase(unittest.TestCase):

    USER_ID = b"@apple:test"
    USER_ID = "@apple:test"
    EXAMPLE_FILTER = {"room": {"timeline": {"types": ["m.room.message"]}}}
    EXAMPLE_FILTER_JSON = b'{"room": {"timeline": {"types": ["m.room.message"]}}}'
    EXAMPLE_FILTER_JSON = '{"room": {"timeline": {"types": ["m.room.message"]}}}'
    TO_REGISTER = [filter]

    @defer.inlineCallbacks
    def setUp(self):
        self.clock = MemoryReactorClock()
        self.hs_clock = Clock(self.clock)
        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)

        self.hs = setup_test_homeserver(
            http_client=None, clock=self.hs_clock, reactor=self.clock
        self.hs = yield setup_test_homeserver(
            http_client=None,
            resource_for_client=self.mock_resource,
            resource_for_federation=self.mock_resource,
        )

        self.auth = self.hs.get_auth()
@@ -53,103 +55,82 @@ class FilterTestCase(unittest.TestCase):

        def get_user_by_req(request, allow_guest=False, rights="access"):
            return synapse.types.create_requester(
                UserID.from_string(self.USER_ID), 1, False, None
            )
                UserID.from_string(self.USER_ID), 1, False, None)

        self.auth.get_user_by_access_token = get_user_by_access_token
        self.auth.get_user_by_req = get_user_by_req

        self.store = self.hs.get_datastore()
        self.filtering = self.hs.get_filtering()
        self.resource = JsonResource(self.hs)

        for r in self.TO_REGISTER:
            r.register_servlets(self.hs, self.resource)
            r.register_servlets(self.hs, self.mock_resource)

    @defer.inlineCallbacks
    def test_add_filter(self):
        request, channel = make_request(
            b"POST",
            b"/_matrix/client/r0/user/%s/filter" % (self.USER_ID),
            self.EXAMPLE_FILTER_JSON,
        (code, response) = yield self.mock_resource.trigger(
            "POST", "/user/%s/filter" % (self.USER_ID), self.EXAMPLE_FILTER_JSON
        )
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEqual(channel.result["code"], b"200")
        self.assertEqual(channel.json_body, {"filter_id": "0"})
        filter = self.store.get_user_filter(user_localpart="apple", filter_id=0)
        self.clock.advance(0)
        self.assertEquals(filter.result, self.EXAMPLE_FILTER)
        self.assertEquals(200, code)
        self.assertEquals({"filter_id": "0"}, response)
        filter = yield self.store.get_user_filter(
            user_localpart='apple',
            filter_id=0,
        )
        self.assertEquals(filter, self.EXAMPLE_FILTER)

    @defer.inlineCallbacks
    def test_add_filter_for_other_user(self):
        request, channel = make_request(
            b"POST",
            b"/_matrix/client/r0/user/%s/filter" % (b"@watermelon:test"),
            self.EXAMPLE_FILTER_JSON,
        (code, response) = yield self.mock_resource.trigger(
            "POST", "/user/%s/filter" % ('@watermelon:test'), self.EXAMPLE_FILTER_JSON
        )
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEqual(channel.result["code"], b"403")
        self.assertEquals(channel.json_body["errcode"], Codes.FORBIDDEN)
        self.assertEquals(403, code)
        self.assertEquals(response['errcode'], Codes.FORBIDDEN)

    @defer.inlineCallbacks
    def test_add_filter_non_local_user(self):
        _is_mine = self.hs.is_mine
        self.hs.is_mine = lambda target_user: False
        request, channel = make_request(
            b"POST",
            b"/_matrix/client/r0/user/%s/filter" % (self.USER_ID),
            self.EXAMPLE_FILTER_JSON,
        (code, response) = yield self.mock_resource.trigger(
            "POST", "/user/%s/filter" % (self.USER_ID), self.EXAMPLE_FILTER_JSON
        )
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.hs.is_mine = _is_mine
        self.assertEqual(channel.result["code"], b"403")
        self.assertEquals(channel.json_body["errcode"], Codes.FORBIDDEN)
        self.assertEquals(403, code)
        self.assertEquals(response['errcode'], Codes.FORBIDDEN)

    @defer.inlineCallbacks
    def test_get_filter(self):
        filter_id = self.filtering.add_user_filter(
            user_localpart="apple", user_filter=self.EXAMPLE_FILTER
        filter_id = yield self.filtering.add_user_filter(
            user_localpart='apple',
            user_filter=self.EXAMPLE_FILTER
        )
        self.clock.advance(1)
        filter_id = filter_id.result
        request, channel = make_request(
            b"GET", b"/_matrix/client/r0/user/%s/filter/%s" % (self.USER_ID, filter_id)
        (code, response) = yield self.mock_resource.trigger_get(
            "/user/%s/filter/%s" % (self.USER_ID, filter_id)
        )
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEqual(channel.result["code"], b"200")
        self.assertEquals(channel.json_body, self.EXAMPLE_FILTER)
        self.assertEquals(200, code)
        self.assertEquals(self.EXAMPLE_FILTER, response)

    @defer.inlineCallbacks
    def test_get_filter_non_existant(self):
        request, channel = make_request(
            b"GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.USER_ID)
        (code, response) = yield self.mock_resource.trigger_get(
            "/user/%s/filter/12382148321" % (self.USER_ID)
        )
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEqual(channel.result["code"], b"400")
        self.assertEquals(channel.json_body["errcode"], Codes.NOT_FOUND)
        self.assertEquals(400, code)
        self.assertEquals(response['errcode'], Codes.NOT_FOUND)

    # Currently invalid params do not have an appropriate errcode
    # in errors.py
    @defer.inlineCallbacks
    def test_get_filter_invalid_id(self):
        request, channel = make_request(
            b"GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.USER_ID)
        (code, response) = yield self.mock_resource.trigger_get(
            "/user/%s/filter/foobar" % (self.USER_ID)
        )
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEqual(channel.result["code"], b"400")
        self.assertEquals(400, code)

    # No ID also returns an invalid_id error
    @defer.inlineCallbacks
    def test_get_filter_no_id(self):
        request, channel = make_request(
            b"GET", "/_matrix/client/r0/user/%s/filter/" % (self.USER_ID)
        (code, response) = yield self.mock_resource.trigger_get(
            "/user/%s/filter/" % (self.USER_ID)
        )
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEqual(channel.result["code"], b"400")
        self.assertEquals(400, code)
@@ -2,192 +2,165 @@ import json

from mock import Mock

from twisted.internet import defer
from twisted.python import failure
from twisted.test.proto_helpers import MemoryReactorClock

from synapse.api.errors import InteractiveAuthIncompleteError
from synapse.http.server import JsonResource
from synapse.rest.client.v2_alpha.register import register_servlets
from synapse.util import Clock
from synapse.api.errors import InteractiveAuthIncompleteError, SynapseError
from synapse.rest.client.v2_alpha.register import RegisterRestServlet

from tests import unittest
from tests.server import make_request, setup_test_homeserver, wait_until_result
from tests.utils import mock_getRawHeaders


class RegisterRestServletTestCase(unittest.TestCase):
    def setUp(self):

        self.clock = MemoryReactorClock()
        self.hs_clock = Clock(self.clock)
        self.url = b"/_matrix/client/r0/register"
    def setUp(self):
        # do the dance to hook up request data to self.request_data
        self.request_data = ""
        self.request = Mock(
            content=Mock(read=Mock(side_effect=lambda: self.request_data)),
            path='/_matrix/api/v2_alpha/register'
        )
        self.request.args = {}
        self.request.requestHeaders.getRawHeaders = mock_getRawHeaders()

        self.appservice = None
        self.auth = Mock(
            get_appservice_by_req=Mock(side_effect=lambda x: self.appservice)
        self.auth = Mock(get_appservice_by_req=Mock(
            side_effect=lambda x: self.appservice)
        )

        self.auth_result = failure.Failure(InteractiveAuthIncompleteError(None))
        self.auth_handler = Mock(
            check_auth=Mock(side_effect=lambda x, y, z: self.auth_result),
            get_session_data=Mock(return_value=None),
            get_session_data=Mock(return_value=None)
        )
        self.registration_handler = Mock()
        self.identity_handler = Mock()
        self.login_handler = Mock()
        self.device_handler = Mock()
        self.device_handler.check_device_registered = Mock(return_value="FAKE")

        self.datastore = Mock(return_value=Mock())
        self.datastore.get_current_state_deltas = Mock(return_value=[])

        # do the dance to hook it up to the hs global
        self.handlers = Mock(
            registration_handler=self.registration_handler,
            identity_handler=self.identity_handler,
            login_handler=self.login_handler,
        )
        self.hs = setup_test_homeserver(
            http_client=None, clock=self.hs_clock, reactor=self.clock
            login_handler=self.login_handler
        )
        self.hs = Mock()
        self.hs.hostname = "superbig~testing~thing.com"
        self.hs.get_auth = Mock(return_value=self.auth)
        self.hs.get_handlers = Mock(return_value=self.handlers)
        self.hs.get_auth_handler = Mock(return_value=self.auth_handler)
        self.hs.get_device_handler = Mock(return_value=self.device_handler)
        self.hs.get_datastore = Mock(return_value=self.datastore)
        self.hs.config.enable_registration = True
        self.hs.config.registrations_require_3pid = []
        self.hs.config.auto_join_rooms = []

        self.resource = JsonResource(self.hs)
        register_servlets(self.hs, self.resource)
        # init the thing we're testing
        self.servlet = RegisterRestServlet(self.hs)

    @defer.inlineCallbacks
    def test_POST_appservice_registration_valid(self):
        user_id = "@kermit:muppet"
        token = "kermits_access_token"
        self.appservice = {"id": "1234"}
        self.registration_handler.appservice_register = Mock(return_value=user_id)
        self.auth_handler.get_access_token_for_user_id = Mock(return_value=token)
        request_data = json.dumps({"username": "kermit"})

        request, channel = make_request(
            b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
        self.request.args = {
            "access_token": "i_am_an_app_service"
        }
        self.request_data = json.dumps({
            "username": "kermit"
        })
        self.appservice = {
            "id": "1234"
        }
        self.registration_handler.appservice_register = Mock(
            return_value=user_id
        )
        self.auth_handler.get_access_token_for_user_id = Mock(
            return_value=token
        )
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEquals(channel.result["code"], b"200", channel.result)
        (code, result) = yield self.servlet.on_POST(self.request)
        self.assertEquals(code, 200)
        det_data = {
            "user_id": user_id,
            "access_token": token,
            "home_server": self.hs.hostname,
            "home_server": self.hs.hostname
        }
        self.assertDictContainsSubset(det_data, json.loads(channel.result["body"]))
        self.assertDictContainsSubset(det_data, result)

    @defer.inlineCallbacks
    def test_POST_appservice_registration_invalid(self):
        self.appservice = None  # no application service exists
        request_data = json.dumps({"username": "kermit"})
        request, channel = make_request(
            b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
        )
        request.render(self.resource)
        wait_until_result(self.clock, channel)
        self.request.args = {
            "access_token": "i_am_an_app_service"
        }

        self.assertEquals(channel.result["code"], b"401", channel.result)
        self.request_data = json.dumps({
            "username": "kermit"
        })
        self.appservice = None  # no application service exists
        result = yield self.servlet.on_POST(self.request)
        self.assertEquals(result, (401, None))

    def test_POST_bad_password(self):
        request_data = json.dumps({"username": "kermit", "password": 666})
        request, channel = make_request(b"POST", self.url, request_data)
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEquals(channel.result["code"], b"400", channel.result)
        self.assertEquals(
            json.loads(channel.result["body"])["error"], "Invalid password"
        )
        self.request_data = json.dumps({
            "username": "kermit",
            "password": 666
        })
        d = self.servlet.on_POST(self.request)
        return self.assertFailure(d, SynapseError)

    def test_POST_bad_username(self):
        request_data = json.dumps({"username": 777, "password": "monkey"})
        request, channel = make_request(b"POST", self.url, request_data)
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEquals(channel.result["code"], b"400", channel.result)
        self.assertEquals(
            json.loads(channel.result["body"])["error"], "Invalid username"
        )
        self.request_data = json.dumps({
            "username": 777,
            "password": "monkey"
        })
        d = self.servlet.on_POST(self.request)
        return self.assertFailure(d, SynapseError)

    @defer.inlineCallbacks
    def test_POST_user_valid(self):
        user_id = "@kermit:muppet"
        token = "kermits_access_token"
        device_id = "frogfone"
        request_data = json.dumps(
            {"username": "kermit", "password": "monkey", "device_id": device_id}
        )
        self.request_data = json.dumps({
            "username": "kermit",
            "password": "monkey",
            "device_id": device_id,
        })
        self.registration_handler.check_username = Mock(return_value=True)
        self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None)
        self.auth_result = (None, {
            "username": "kermit",
            "password": "monkey"
        }, None)
        self.registration_handler.register = Mock(return_value=(user_id, None))
        self.auth_handler.get_access_token_for_user_id = Mock(return_value=token)
        self.device_handler.check_device_registered = Mock(return_value=device_id)

        request, channel = make_request(b"POST", self.url, request_data)
        request.render(self.resource)
        wait_until_result(self.clock, channel)
        self.auth_handler.get_access_token_for_user_id = Mock(
            return_value=token
        )
        self.device_handler.check_device_registered = \
            Mock(return_value=device_id)

        (code, result) = yield self.servlet.on_POST(self.request)
        self.assertEquals(code, 200)
        det_data = {
            "user_id": user_id,
            "access_token": token,
            "home_server": self.hs.hostname,
            "device_id": device_id,
        }
        self.assertEquals(channel.result["code"], b"200", channel.result)
        self.assertDictContainsSubset(det_data, json.loads(channel.result["body"]))
        self.assertDictContainsSubset(det_data, result)
        self.auth_handler.get_login_tuple_for_user_id(
            user_id, device_id=device_id, initial_device_display_name=None
        )
            user_id, device_id=device_id, initial_device_display_name=None)

    def test_POST_disabled_registration(self):
        self.hs.config.enable_registration = False
        request_data = json.dumps({"username": "kermit", "password": "monkey"})
        self.request_data = json.dumps({
            "username": "kermit",
            "password": "monkey"
        })
        self.registration_handler.check_username = Mock(return_value=True)
        self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None)
        self.auth_result = (None, {
            "username": "kermit",
            "password": "monkey"
        }, None)
        self.registration_handler.register = Mock(return_value=("@user:id", "t"))

        request, channel = make_request(b"POST", self.url, request_data)
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEquals(channel.result["code"], b"403", channel.result)
        self.assertEquals(
            json.loads(channel.result["body"])["error"],
            "Registration has been disabled",
        )

    def test_POST_guest_registration(self):
        user_id = "a@b"
        self.hs.config.macaroon_secret_key = "test"
        self.hs.config.allow_guest_access = True
        self.registration_handler.register = Mock(return_value=(user_id, None))

        request, channel = make_request(b"POST", self.url + b"?kind=guest", b"{}")
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        det_data = {
            "user_id": user_id,
            "home_server": self.hs.hostname,
            "device_id": "guest_device",
        }
        self.assertEquals(channel.result["code"], b"200", channel.result)
        self.assertDictContainsSubset(det_data, json.loads(channel.result["body"]))

    def test_POST_disabled_guest_registration(self):
        self.hs.config.allow_guest_access = False

        request, channel = make_request(b"POST", self.url + b"?kind=guest", b"{}")
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEquals(channel.result["code"], b"403", channel.result)
        self.assertEquals(
            json.loads(channel.result["body"])["error"], "Guest access is disabled"
        )
        d = self.servlet.on_POST(self.request)
        return self.assertFailure(d, SynapseError)
@@ -1,83 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse.types
from synapse.http.server import JsonResource
from synapse.rest.client.v2_alpha import sync
from synapse.types import UserID
from synapse.util import Clock

from tests import unittest
from tests.server import ThreadedMemoryReactorClock as MemoryReactorClock
from tests.server import make_request, setup_test_homeserver, wait_until_result

PATH_PREFIX = "/_matrix/client/v2_alpha"


class FilterTestCase(unittest.TestCase):

    USER_ID = b"@apple:test"
    TO_REGISTER = [sync]

    def setUp(self):
        self.clock = MemoryReactorClock()
        self.hs_clock = Clock(self.clock)

        self.hs = setup_test_homeserver(
            http_client=None, clock=self.hs_clock, reactor=self.clock
        )

        self.auth = self.hs.get_auth()

        def get_user_by_access_token(token=None, allow_guest=False):
            return {
                "user": UserID.from_string(self.USER_ID),
                "token_id": 1,
                "is_guest": False,
            }

        def get_user_by_req(request, allow_guest=False, rights="access"):
            return synapse.types.create_requester(
                UserID.from_string(self.USER_ID), 1, False, None
            )

        self.auth.get_user_by_access_token = get_user_by_access_token
        self.auth.get_user_by_req = get_user_by_req

        self.store = self.hs.get_datastore()
        self.filtering = self.hs.get_filtering()
        self.resource = JsonResource(self.hs)

        for r in self.TO_REGISTER:
            r.register_servlets(self.hs, self.resource)

    def test_sync_argless(self):
        request, channel = make_request(b"GET", b"/_matrix/client/r0/sync")
        request.render(self.resource)
        wait_until_result(self.clock, channel)

        self.assertEqual(channel.result["code"], b"200")
        self.assertTrue(
            set(
                [
                    "next_batch",
                    "rooms",
                    "presence",
                    "account_data",
                    "to_device",
                    "device_lists",
                ]
            ).issubset(set(channel.json_body.keys()))
        )
@@ -80,11 +80,6 @@ def make_request(method, path, content=b""):
    content, and return the Request and the Channel underneath.
    """

    # Decorate it to be the full path
    if not path.startswith(b"/_matrix"):
        path = b"/_matrix/client/r0/" + path
        path = path.replace("//", "/")

    if isinstance(content, text_type):
        content = content.encode('utf8')

@@ -115,11 +110,6 @@ def wait_until_result(clock, channel, timeout=100):
        clock.advance(0.1)


def render(request, resource, clock):
    request.render(resource)
    wait_until_result(clock, request._channel)


class ThreadedMemoryReactorClock(MemoryReactorClock):
    """
    A MemoryReactorClock that supports callFromThread.
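The new render() helper is a small convenience wrapper; a hedged usage sketch (the resource and clock fixtures are assumed to come from the surrounding test):

    request, channel = make_request(b"GET", b"/_matrix/client/r0/sync")
    # equivalent to request.render(resource) followed by
    # wait_until_result(clock, channel)
    render(request, resource, clock)
    assert channel.result["code"] == b"200"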
@@ -33,11 +33,9 @@ class JsonResourceTests(unittest.TestCase):
            return (200, kwargs)

        res = JsonResource(self.homeserver)
        res.register_paths(
            "GET", [re.compile("^/_matrix/foo/(?P<room_id>[^/]*)$")], _callback
        )
        res.register_paths("GET", [re.compile("^/foo/(?P<room_id>[^/]*)$")], _callback)

        request, channel = make_request(b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83")
        request, channel = make_request(b"GET", b"/foo/%E2%98%83?a=%E2%98%83")
        request.render(res)

        self.assertEqual(request.args, {b'a': [u"\N{SNOWMAN}".encode('utf8')]})
@@ -53,9 +51,9 @@ class JsonResourceTests(unittest.TestCase):
            raise Exception("boo")

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
        res.register_paths("GET", [re.compile("^/foo$")], _callback)

        request, channel = make_request(b"GET", b"/_matrix/foo")
        request, channel = make_request(b"GET", b"/foo")
        request.render(res)

        self.assertEqual(channel.result["code"], b'500')
@@ -76,9 +74,9 @@ class JsonResourceTests(unittest.TestCase):
            return d

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
        res.register_paths("GET", [re.compile("^/foo$")], _callback)

        request, channel = make_request(b"GET", b"/_matrix/foo")
        request, channel = make_request(b"GET", b"/foo")
        request.render(res)

        # No error has been raised yet
@@ -98,9 +96,9 @@ class JsonResourceTests(unittest.TestCase):
            raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN)

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
        res.register_paths("GET", [re.compile("^/foo$")], _callback)

        request, channel = make_request(b"GET", b"/_matrix/foo")
        request, channel = make_request(b"GET", b"/foo")
        request.render(res)

        self.assertEqual(channel.result["code"], b'403')
@@ -120,9 +118,9 @@ class JsonResourceTests(unittest.TestCase):
        self.fail("shouldn't ever get here")

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
        res.register_paths("GET", [re.compile("^/foo$")], _callback)

        request, channel = make_request(b"GET", b"/_matrix/foobar")
        request, channel = make_request(b"GET", b"/foobar")
        request.render(res)

        self.assertEqual(channel.result["code"], b'400')
@@ -1,324 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from twisted.internet import defer
from twisted.internet.defer import succeed

from synapse.events import FrozenEvent
from synapse.visibility import filter_events_for_server

import tests.unittest
from tests.utils import setup_test_homeserver

logger = logging.getLogger(__name__)

TEST_ROOM_ID = "!TEST:ROOM"


class FilterEventsForServerTestCase(tests.unittest.TestCase):
    @defer.inlineCallbacks
    def setUp(self):
        self.hs = yield setup_test_homeserver()
        self.event_creation_handler = self.hs.get_event_creation_handler()
        self.event_builder_factory = self.hs.get_event_builder_factory()
        self.store = self.hs.get_datastore()

    @defer.inlineCallbacks
    def test_filtering(self):
        #
        # The events to be filtered consist of 10 membership events (it doesn't
        # really matter if they are joins or leaves, so let's make them joins).
        # One of those membership events is going to be for a user on the
        # server we are filtering for (so we can check the filtering is doing
        # the right thing).
        #

        # before we do that, we persist some other events to act as state.
        self.inject_visibility("@admin:hs", "joined")
        for i in range(0, 10):
            yield self.inject_room_member("@resident%i:hs" % i)

        events_to_filter = []

        for i in range(0, 10):
            user = "@user%i:%s" % (
                i, "test_server" if i == 5 else "other_server"
            )
            evt = yield self.inject_room_member(user, extra_content={"a": "b"})
            events_to_filter.append(evt)

        filtered = yield filter_events_for_server(
            self.store, "test_server", events_to_filter,
        )

        # the result should be 5 redacted events, and 5 unredacted events.
        for i in range(0, 5):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertNotIn("a", filtered[i].content)

        for i in range(5, 10):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertEqual(filtered[i].content["a"], "b")

    @tests.unittest.DEBUG
    @defer.inlineCallbacks
    def test_erased_user(self):
        # 4 message events, from erased and unerased users, with a membership
        # change in the middle of them.
        events_to_filter = []

        evt = yield self.inject_message("@unerased:local_hs")
        events_to_filter.append(evt)

        evt = yield self.inject_message("@erased:local_hs")
        events_to_filter.append(evt)

        evt = yield self.inject_room_member("@joiner:remote_hs")
        events_to_filter.append(evt)

        evt = yield self.inject_message("@unerased:local_hs")
        events_to_filter.append(evt)

        evt = yield self.inject_message("@erased:local_hs")
        events_to_filter.append(evt)

        # the erasey user gets erased
        self.hs.get_datastore().mark_user_erased("@erased:local_hs")

        # ... and the filtering happens.
        filtered = yield filter_events_for_server(
            self.store, "test_server", events_to_filter,
        )

        for i in range(0, len(events_to_filter)):
            self.assertEqual(
                events_to_filter[i].event_id, filtered[i].event_id,
                "Unexpected event at result position %i" % (i, )
            )

        for i in (0, 3):
            self.assertEqual(
                events_to_filter[i].content["body"], filtered[i].content["body"],
                "Unexpected event content at result position %i" % (i,)
            )

        for i in (1, 4):
            self.assertNotIn("body", filtered[i].content)

    @defer.inlineCallbacks
    def inject_visibility(self, user_id, visibility):
        content = {"history_visibility": visibility}
        builder = self.event_builder_factory.new({
            "type": "m.room.history_visibility",
            "sender": user_id,
            "state_key": "",
            "room_id": TEST_ROOM_ID,
            "content": content,
        })

        event, context = yield self.event_creation_handler.create_new_client_event(
            builder
        )
        yield self.hs.get_datastore().persist_event(event, context)
        defer.returnValue(event)

    @defer.inlineCallbacks
    def inject_room_member(self, user_id, membership="join", extra_content={}):
        content = {"membership": membership}
        content.update(extra_content)
        builder = self.event_builder_factory.new({
            "type": "m.room.member",
            "sender": user_id,
            "state_key": user_id,
            "room_id": TEST_ROOM_ID,
            "content": content,
        })

        event, context = yield self.event_creation_handler.create_new_client_event(
            builder
        )

        yield self.hs.get_datastore().persist_event(event, context)
        defer.returnValue(event)

    @defer.inlineCallbacks
    def inject_message(self, user_id, content=None):
        if content is None:
            content = {"body": "testytest"}
        builder = self.event_builder_factory.new({
            "type": "m.room.message",
            "sender": user_id,
            "room_id": TEST_ROOM_ID,
            "content": content,
        })

        event, context = yield self.event_creation_handler.create_new_client_event(
            builder
        )

        yield self.hs.get_datastore().persist_event(event, context)
        defer.returnValue(event)

    @defer.inlineCallbacks
    def test_large_room(self):
        # see what happens when we have a large room with hundreds of thousands
        # of membership events

        # As above, the events to be filtered consist of 10 membership events,
        # where one of them is for a user on the server we are filtering for.

        import cProfile
        import pstats
        import time

        # we stub out the store, because building up all that state the normal
        # way is very slow.
        test_store = _TestStore()

        # our initial state is 100000 membership events and one
        # history_visibility event.
        room_state = []

        history_visibility_evt = FrozenEvent({
            "event_id": "$history_vis",
            "type": "m.room.history_visibility",
            "sender": "@resident_user_0:test.com",
            "state_key": "",
            "room_id": TEST_ROOM_ID,
            "content": {"history_visibility": "joined"},
        })
        room_state.append(history_visibility_evt)
        test_store.add_event(history_visibility_evt)

        for i in range(0, 100000):
            user = "@resident_user_%i:test.com" % (i, )
            evt = FrozenEvent({
                "event_id": "$res_event_%i" % (i, ),
                "type": "m.room.member",
                "state_key": user,
                "sender": user,
                "room_id": TEST_ROOM_ID,
                "content": {
                    "membership": "join",
                    "extra": "zzz",
                },
            })
            room_state.append(evt)
            test_store.add_event(evt)

        events_to_filter = []
        for i in range(0, 10):
            user = "@user%i:%s" % (
                i, "test_server" if i == 5 else "other_server"
            )
            evt = FrozenEvent({
                "event_id": "$evt%i" % (i, ),
                "type": "m.room.member",
                "state_key": user,
                "sender": user,
                "room_id": TEST_ROOM_ID,
                "content": {
                    "membership": "join",
                    "extra": "zzz",
                },
            })
            events_to_filter.append(evt)
            room_state.append(evt)

            test_store.add_event(evt)
            test_store.set_state_ids_for_event(evt, {
                (e.type, e.state_key): e.event_id for e in room_state
            })

        pr = cProfile.Profile()
        pr.enable()

        logger.info("Starting filtering")
        start = time.time()
        filtered = yield filter_events_for_server(
            test_store, "test_server", events_to_filter,
        )
        logger.info("Filtering took %f seconds", time.time() - start)

        pr.disable()
        with open("filter_events_for_server.profile", "w+") as f:
            ps = pstats.Stats(pr, stream=f).sort_stats('cumulative')
            ps.print_stats()

        # the result should be 5 redacted events, and 5 unredacted events.
        for i in range(0, 5):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertNotIn("extra", filtered[i].content)

        for i in range(5, 10):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertEqual(filtered[i].content["extra"], "zzz")

    test_large_room.skip = "Disabled by default because it's slow"


class _TestStore(object):
    """Implements a few methods of the DataStore, so that we can test
    filter_events_for_server

    """
    def __init__(self):
        # data for get_events: a map from event_id to event
        self.events = {}

        # data for get_state_ids_for_events mock: a map from event_id to
        # a map from (type_state_key) -> event_id for the state at that
|
||||
# event
|
||||
self.state_ids_for_events = {}
|
||||
|
||||
def add_event(self, event):
|
||||
self.events[event.event_id] = event
|
||||
|
||||
def set_state_ids_for_event(self, event, state):
|
||||
self.state_ids_for_events[event.event_id] = state
|
||||
|
||||
def get_state_ids_for_events(self, events, types):
|
||||
res = {}
|
||||
include_memberships = False
|
||||
for (type, state_key) in types:
|
||||
if type == "m.room.history_visibility":
|
||||
continue
|
||||
if type != "m.room.member" or state_key is not None:
|
||||
raise RuntimeError(
|
||||
"Unimplemented: get_state_ids with type (%s, %s)" %
|
||||
(type, state_key),
|
||||
)
|
||||
include_memberships = True
|
||||
|
||||
if include_memberships:
|
||||
for event_id in events:
|
||||
res[event_id] = self.state_ids_for_events[event_id]
|
||||
|
||||
else:
|
||||
k = ("m.room.history_visibility", "")
|
||||
for event_id in events:
|
||||
hve = self.state_ids_for_events[event_id][k]
|
||||
res[event_id] = {k: hve}
|
||||
|
||||
return succeed(res)
|
||||
|
||||
def get_events(self, events):
|
||||
return succeed({
|
||||
event_id: self.events[event_id] for event_id in events
|
||||
})
|
||||
|
||||
def are_users_erased(self, users):
|
||||
return succeed({u: False for u in users})
|
||||
@@ -109,17 +109,6 @@ class TestCase(unittest.TestCase):
|
||||
except AssertionError as e:
|
||||
raise (type(e))(e.message + " for '.%s'" % key)
|
||||
|
||||
def assert_dict(self, required, actual):
|
||||
"""Does a partial assert of a dict.
|
||||
|
||||
Args:
|
||||
required (dict): The keys and value which MUST be in 'actual'.
|
||||
actual (dict): The test result. Extra keys will not be checked.
|
||||
"""
|
||||
for key in required:
|
||||
self.assertEquals(required[key], actual[key],
|
||||
msg="%s mismatch. %s" % (key, actual))
|
||||
|
||||
|
||||
def DEBUG(target):
|
||||
"""A decorator to set the .loglevel attribute to logging.DEBUG.
|
||||
|
||||
@@ -141,8 +141,8 @@ class StreamChangeCacheTests(unittest.TestCase):
|
||||
)
|
||||
|
||||
# Query all the entries mid-way through the stream, but include one
|
||||
# that doesn't exist in it. We shouldn't get back the one that doesn't
|
||||
# exist.
|
||||
# that doesn't exist in it. We should get back the one that doesn't
|
||||
# exist, too.
|
||||
self.assertEqual(
|
||||
cache.get_entities_changed(
|
||||
[
|
||||
@@ -153,7 +153,7 @@ class StreamChangeCacheTests(unittest.TestCase):
|
||||
],
|
||||
stream_pos=2,
|
||||
),
|
||||
set(["bar@baz.net", "user@elsewhere.org"]),
|
||||
set(["bar@baz.net", "user@elsewhere.org", "not@here.website"]),
|
||||
)
|
||||
|
||||
# Query all the entries, but before the first known point. We will get
|
||||
@@ -178,22 +178,6 @@ class StreamChangeCacheTests(unittest.TestCase):
|
||||
),
|
||||
)
|
||||
|
||||
# Query a subset of the entries mid-way through the stream. We should
|
||||
# only get back the subset.
|
||||
self.assertEqual(
|
||||
cache.get_entities_changed(
|
||||
[
|
||||
"bar@baz.net",
|
||||
],
|
||||
stream_pos=2,
|
||||
),
|
||||
set(
|
||||
[
|
||||
"bar@baz.net",
|
||||
]
|
||||
),
|
||||
)
|
||||
|
||||
def test_max_pos(self):
|
||||
"""
|
||||
StreamChangeCache.get_max_pos_of_last_change will return the most
|
||||
|
||||
8
tox.ini
8
tox.ini
@@ -1,5 +1,5 @@
|
||||
[tox]
|
||||
envlist = packaging, py27, py36, pep8, check_isort
|
||||
envlist = packaging, py27, py36, pep8
|
||||
|
||||
[testenv]
|
||||
deps =
|
||||
@@ -103,14 +103,10 @@ deps =
|
||||
flake8
|
||||
commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}"
|
||||
|
||||
[testenv:check_isort]
|
||||
skip_install = True
|
||||
deps = isort
|
||||
commands = /bin/sh -c "isort -c -sp setup.cfg -rc synapse tests"
|
||||
|
||||
[testenv:check-newsfragment]
|
||||
skip_install = True
|
||||
deps = towncrier>=18.6.0rc1
|
||||
commands =
|
||||
python -m towncrier.check --compare-with=origin/develop
|
||||
basepython = python3.6
|
||||
basepython = python3.6
|
||||
Reference in New Issue
Block a user