
Compare commits


51 Commits

Author SHA1 Message Date
Erik Johnston
86090eadb0 Don't send outlier events to ASes 2018-04-06 10:21:17 +01:00
Erik Johnston
edbeed06ca Fix MRO for replication stores 2018-04-06 10:20:50 +01:00
Erik Johnston
277d2c506d Add cache for if ASes have users in a room 2018-04-05 17:28:27 +01:00
Jan Christian Grünhage
7d0f712348 Merge pull request #3063 from matrix-org/jcgruenhage/cache_settings_stats
phone home cache size configurations
2018-04-04 17:24:40 +01:00
Jan Christian Grünhage
e4570c53dd phone home cache size configurations 2018-04-04 16:46:58 +01:00
Richard van der Hoff
9cd3f06ab7 Merge pull request #3062 from matrix-org/revert-3053-speedup-mxid-check
Revert "improve mxid check performance"
2018-04-04 12:09:17 +01:00
Richard van der Hoff
f92963f5db Revert "improve mxid check performance" 2018-04-04 12:08:29 +01:00
Richard van der Hoff
725a72ec5a Merge pull request #3000 from NotAFile/change-except-style
Replace old style error catching with 'as' keyword
2018-04-04 10:45:22 +01:00
Richard van der Hoff
a89f9f830c Merge pull request #3044 from matrix-org/michaelk/performance_stats
Add basic performance statistics to phone home
2018-04-04 10:37:25 +01:00
Richard van der Hoff
39ce38b024 Merge pull request #3053 from NotAFile/speedup-mxid-check
improve mxid check performance
2018-04-04 10:19:52 +01:00
Richard van der Hoff
8da39ad98f Merge pull request #3049 from matrix-org/rav/use_staticjson
Use static JSONEncoders
2018-04-03 15:18:32 +01:00
Richard van der Hoff
3ee4ad09eb Fix json encoding bug in replication
json encoders have an encode method, not a dumps method.
2018-04-03 15:09:48 +01:00
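A minimal illustration of the API difference behind this fix (plain simplejson, not Synapse code): JSONEncoder instances expose encode(), while dumps() exists only as a module-level function.

    import simplejson as json

    encoder = json.JSONEncoder()
    encoder.encode({"a": 1})    # '{"a": 1}' -- the method encoder objects actually have
    # encoder.dumps({"a": 1})   # AttributeError: JSONEncoder has no 'dumps'
    json.dumps({"a": 1})        # dumps() lives on the module, not the encoder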
Richard van der Hoff
0ca5c4d2af Merge pull request #3048 from matrix-org/rav/use_simplejson
Use simplejson throughout
2018-04-03 14:39:57 +01:00
Adrian Tschira
11597ddea5 improve mxid check performance ~4x
Signed-off-by: Adrian Tschira <nota@notafile.com>
2018-03-31 02:00:22 +02:00
Richard van der Hoff
05630758f2 Use static JSONEncoders
using json.dumps with custom options requires us to create a new JSONEncoder on
each call. It's more efficient to create one upfront and reuse it.
2018-03-29 23:13:33 +01:00
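A sketch of the pattern this commit adopts, using the same simplejson option that appears in the replication diff below:

    import simplejson as json

    # Built once at import time and reused for every call, instead of having
    # json.dumps() construct a throwaway JSONEncoder per invocation.
    _json_encoder = json.JSONEncoder(namedtuple_as_object=False)

    def serialize(row):
        # Equivalent output to json.dumps(row, namedtuple_as_object=False).
        return _json_encoder.encode(row)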
Richard van der Hoff
fcfe7f6ad3 Use simplejson throughout
Let's use simplejson rather than json, for consistency.
2018-03-29 22:45:52 +01:00
Erik Johnston
88cc9cc69e Merge pull request #3043 from matrix-org/erikj/measure_state_group_creation
Measure time it takes to calculate state group ID
2018-03-28 17:40:14 +01:00
Richard van der Hoff
9a0db062af Merge pull request #3034 from matrix-org/rav/fix_key_claim_errors
Fix error when claiming e2e keys from offline servers
2018-03-28 14:50:38 +01:00
Michael Kaye
33f6195d9a Handle review comments 2018-03-28 14:25:25 +01:00
Erik Johnston
e9e4cb25fc Merge pull request #3042 from matrix-org/fix_locally_failing_tests
fix tests/storage/test_user_directory.py
2018-03-28 13:10:37 +01:00
Michael Kaye
4ceaa7433a As daemonizing will make a new process, defer call to init. 2018-03-28 12:19:01 +01:00
Neil Johnson
545001b9e4 Fix search_user_dir multiple sqlite versions do different things 2018-03-28 11:19:45 +01:00
Erik Johnston
01ccc9e6f2 Measure time it takes to calculate state group ID 2018-03-28 11:03:52 +01:00
Neil Johnson
a9cb1a35c8 fix tests/storage/test_user_directory.py 2018-03-28 10:57:27 +01:00
Erik Johnston
f879127aaa Merge pull request #3029 from matrix-org/erikj/linearize_generate_user_id
Linearize calls to _generate_user_id
2018-03-28 10:00:31 +01:00
Erik Johnston
e6d87c93f3 Merge pull request #3030 from matrix-org/erikj/no_ujson
Remove last usage of ujson
2018-03-28 10:00:06 +01:00
Erik Johnston
004cc8a328 Merge pull request #3033 from matrix-org/erikj/calculate_state_metrics
Add counter metrics for calculating state delta
2018-03-28 09:59:42 +01:00
Michael Kaye
ef520d8d0e Include coarse CPU and Memory use in stats callbacks.
This requires the psutil module, and is still opt-in based on the report_stats
config option.
2018-03-27 17:56:03 +01:00
Richard van der Hoff
a134c572a6 Stringify exceptions for keys/{query,claim}
Make sure we stringify any exceptions we return from keys/query and keys/claim,
to avoid a 'not JSON serializable' error later

Fixes #3010
2018-03-27 17:15:06 +01:00
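The failure mode, sketched with a hypothetical exception whose .message is not a string (as with some twisted exceptions):

    import simplejson as json

    class OddError(Exception):
        # Hypothetical stand-in for exceptions like twisted's ResponseFailed,
        # whose .message is not a plain string.
        def __init__(self):
            self.message = object()

    e = OddError()
    # json.dumps({"status": 503, "message": e.message})     # TypeError: not JSON serializable
    json.dumps({"status": 503, "message": str(e.message)})   # stringified, serializes fine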
Richard van der Hoff
c2a5cf2fe3 factor out exception handling for keys/claim and keys/query
this code is badly copy-and-pasted
2018-03-27 17:11:23 +01:00
Erik Johnston
800cfd5774 Comment 2018-03-27 13:30:39 +01:00
Erik Johnston
152c2ac19e Fix indent 2018-03-27 13:13:46 +01:00
Erik Johnston
e70287cff3 Comment 2018-03-27 13:13:38 +01:00
Erik Johnston
03a26e28d9 Merge pull request #3017 from matrix-org/erikj/add_cache_control_headers
Add Cache-Control headers to all JSON APIs
2018-03-27 13:10:38 +01:00
Erik Johnston
3e0c0660b3 Also do check inside linearizer 2018-03-27 13:01:34 +01:00
Erik Johnston
3f49e131d9 Add counter metrics for calculating state delta
This will allow us to measure how often we calculate state deltas in
event persistence that we would have been able to calculate at the same
time we calculated the state for the event.
2018-03-27 10:57:35 +01:00
Erik Johnston
9b8c0fb162 Merge branch 'master' of github.com:matrix-org/synapse into develop 2018-03-26 21:41:55 +01:00
Erik Johnston
fecb45e0c3 Remove last usage of ujson 2018-03-26 13:32:29 +01:00
Erik Johnston
44cd6e1358 PEP8 2018-03-26 12:06:48 +01:00
Erik Johnston
8d6dc106d1 Don't use _cursor_to_dict in find_next_generated_user_id_localpart 2018-03-26 12:02:44 +01:00
Erik Johnston
a052aa42e7 Linearize calls to _generate_user_id 2018-03-26 12:02:20 +01:00
Matthew Hodgson
8efe773ef1 fix typo 2018-03-23 21:38:42 +00:00
Matthew Hodgson
b7e7b52452 Merge pull request #3022 from matrix-org/matthew/noresource
404 correctly on missing paths via NoResource
2018-03-23 11:10:32 +00:00
Matthew Hodgson
8cbbfaefc1 404 correctly on missing paths via NoResource
fixes https://github.com/matrix-org/synapse/issues/2043 and https://github.com/matrix-org/synapse/issues/2029
2018-03-23 10:32:50 +00:00
Erik Johnston
84b5cc69f5 Merge pull request #3006 from matrix-org/erikj/state_iter
Use .iter* to avoid copies in StateHandler
2018-03-22 11:51:13 +00:00
Erik Johnston
fde8e8f09f Fix s/iteriterms/itervalues 2018-03-22 11:42:16 +00:00
Erik Johnston
eb9fc021e3 Merge branch 'release-v0.27.0' of github.com:matrix-org/synapse into develop 2018-03-22 10:19:53 +00:00
Erik Johnston
1c41b05c8c Add Cache-Control headers to all JSON APIs
It is especially important that sync requests don't get cached, as, if a
sync returns the same token it was given, the client will call sync with
the same parameters again. If the previous response was cached it will
get reused, resulting in the client tight-looping, making the same
request and never making any progress.

In general, clients expect to get up-to-date data when requesting these
APIs, so it's safer to apply a blanket no-cache policy than to whitelist
only the APIs that we know will break if they get cached.
2018-03-21 17:46:26 +00:00
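The mechanics are a single Twisted header call; a minimal resource sketch (illustrative only, the real change lands in respond_with_json_bytes in the diff below):

    from twisted.web.resource import Resource

    class JsonExample(Resource):
        isLeaf = True

        def render_GET(self, request):
            request.setHeader(b"Content-Type", b"application/json")
            # Blanket no-cache policy: a cached /sync response would replay
            # the same token and leave the client tight-looping.
            request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
            return b"{}"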
Erik Johnston
5bdb57cb66 Merge pull request #3015 from matrix-org/erikj/simplejson_replication
Fix replication after switch to simplejson
2018-03-20 15:06:33 +00:00
Erik Johnston
9cf519769b Use .iter* to avoid copies in StateHandler 2018-03-15 17:50:26 +00:00
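For context: on Python 2, which this codebase targets, dict.items() materialises a new list of pairs, while the .iter* variants walk the existing entries lazily. A small illustration:

    d = {"a": 1, "b": 2}

    pairs = d.items()       # Python 2: allocates a fresh list of (key, value) tuples
    lazy = d.iteritems()    # Python 2: lazy iterator, no intermediate copy

    for key, value in lazy:  # same loop either way; only the allocation differs
        pass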
NotAFile
2cc9f76bc3 replace old style error catching with 'as' keyword
This is both easier to read and compatible with python3 (not that that
matters)

Signed-off-by: Adrian Tschira <nota@notafile.com>
2018-03-15 16:11:17 +01:00
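The two spellings side by side (the comma form was removed in Python 3):

    def risky():
        raise OSError("boom")

    # Old style, Python 2 only (a syntax error on Python 3):
    #     except OSError, err:
    try:
        risky()
    except OSError as err:   # new style: Python 2.6+ and all Python 3
        print(err)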
36 changed files with 416 additions and 171 deletions

View File

@@ -59,7 +59,7 @@ Features:
Changes:
* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/metrics-howto.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/workers.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
* Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
* Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
* No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!

View File

@@ -48,6 +48,18 @@ returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
Upgrading to $NEXT_VERSION
==========================
This release expands the anonymous usage stats sent if the opt-in
``report_stats`` configuration is set to ``true``. We now capture RSS memory
and CPU use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.
We would appreciate it if you could assist by ensuring this module is available
and ``report_stats`` is enabled. This will let us see if performance changes to
synapse are having an impact on the general community.
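The psutil calls involved are minimal; a sketch mirroring what the diff below wires into the phone-home loop:

    import psutil  # optional dependency; stats reporting degrades gracefully without it

    process = psutil.Process()           # handle on the current process
    rss = process.memory_info().rss      # resident set size, in bytes
    process.cpu_percent(interval=None)   # first call primes the measurement window
    cpu = process.cpu_percent(interval=None)  # later calls report use since the last call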
Upgrading to v0.15.0
====================

View File

@@ -15,9 +15,10 @@
"""Contains exceptions and error codes."""
import json
import logging
import simplejson as json
logger = logging.getLogger(__name__)

View File

@@ -36,13 +36,13 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.appservice")
class AppserviceSlaveStore(
DirectoryStore, SlavedEventStore, SlavedApplicationServiceStore,
DirectoryStore, SlavedApplicationServiceStore, SlavedEventStore,
SlavedRegistrationStore,
):
pass
@@ -64,7 +64,7 @@ class AppserviceServer(HomeServer):
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -44,17 +44,17 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.client_reader")
class ClientReaderSlavedStore(
SlavedApplicationServiceStore,
SlavedEventStore,
SlavedKeyStore,
RoomStore,
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
TransactionStore,
SlavedClientIpStore,
@@ -88,7 +88,7 @@ class ClientReaderServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -52,7 +52,7 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.event_creator")
@@ -104,7 +104,7 @@ class EventCreatorServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -41,7 +41,7 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.federation_reader")
@@ -77,7 +77,7 @@ class FederationReaderServer(HomeServer):
FEDERATION_PREFIX: TransportLayerServer(self),
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -42,7 +42,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.federation_sender")
@@ -91,7 +91,7 @@ class FederationSenderServer(HomeServer):
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.frontend_proxy")
@@ -142,7 +142,7 @@ class FrontendProxyServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -48,6 +48,7 @@ from synapse.server import HomeServer
from synapse.storage import are_all_users_on_domain
from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
@@ -56,7 +57,7 @@ from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, Resource
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File
@@ -126,7 +127,7 @@ class SynapseHomeServer(HomeServer):
if WEB_CLIENT_PREFIX in resources:
root_resource = RootRedirect(WEB_CLIENT_PREFIX)
else:
root_resource = Resource()
root_resource = NoResource()
root_resource = create_resource_tree(resources, root_resource)
@@ -402,6 +403,10 @@ def run(hs):
stats = {}
# Contains the list of processes we will be monitoring
# currently either 0 or 1
stats_process = []
@defer.inlineCallbacks
def phone_stats_home():
logger.info("Gathering stats for reporting")
@@ -427,6 +432,15 @@ def run(hs):
daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
stats["daily_sent_messages"] = daily_sent_messages
stats["cache_factor"] = CACHE_SIZE_FACTOR
stats["event_cache_size"] = hs.config.event_cache_size
if len(stats_process) > 0:
stats["memory_rss"] = 0
stats["cpu_average"] = 0
for process in stats_process:
stats["memory_rss"] += process.memory_info().rss
stats["cpu_average"] += int(process.cpu_percent(interval=None))
logger.info("Reporting stats to matrix.org: %s" % (stats,))
try:
@@ -437,10 +451,32 @@ def run(hs):
except Exception as e:
logger.warn("Error reporting stats: %s", e)
def performance_stats_init():
try:
import psutil
process = psutil.Process()
# Ensure we can fetch both, and make the initial request for cpu_percent
# so the next request will use this as the initial point.
process.memory_info().rss
process.cpu_percent(interval=None)
logger.info("report_stats can use psutil")
stats_process.append(process)
except (ImportError, AttributeError):
logger.warn(
"report_stats enabled but psutil is not installed or incorrect version."
" Disabling reporting of memory/cpu stats."
" Ensuring psutil is available will help matrix.org track performance"
" changes across releases."
)
if hs.config.report_stats:
logger.info("Scheduling stats reporting for 3 hour intervals")
clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
# We need to defer this init for the cases that we daemonize
# otherwise the process ID we get is that of the non-daemon process
clock.call_later(0, performance_stats_init)
# We wait 5 minutes to send the first set of stats as the server can
# be quite busy the first few minutes
clock.call_later(5 * 60, phone_stats_home)

View File

@@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.media_repository")
@@ -84,7 +84,7 @@ class MediaRepositoryServer(HomeServer):
),
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -37,7 +37,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.pusher")
@@ -94,7 +94,7 @@ class PusherServer(HomeServer):
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -56,7 +56,7 @@ from synapse.util.manhole import manhole
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.synchrotron")
@@ -64,6 +64,7 @@ logger = logging.getLogger("synapse.app.synchrotron")
class SynchrotronSlavedStore(
SlavedReceiptsStore,
SlavedAccountDataStore,
SlavedPushRuleStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedFilteringStore,
@@ -71,7 +72,6 @@ class SynchrotronSlavedStore(
SlavedGroupServerStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedPushRuleStore,
SlavedEventStore,
SlavedClientIpStore,
RoomStore,
@@ -269,7 +269,7 @@ class SynchrotronServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -38,7 +38,7 @@ def pid_running(pid):
try:
os.kill(pid, 0)
return True
except OSError, err:
except OSError as err:
if err.errno == errno.EPERM:
return True
return False
@@ -98,7 +98,7 @@ def stop(pidfile, app):
try:
os.kill(pid, signal.SIGTERM)
write("stopped %s" % (app,), colour=GREEN)
except OSError, err:
except OSError as err:
if err.errno == errno.ESRCH:
write("%s not running" % (app,), colour=YELLOW)
elif err.errno == errno.EPERM:

View File

@@ -43,14 +43,14 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.user_dir")
class UserDirectorySlaveStore(
SlavedEventStore,
SlavedApplicationServiceStore,
SlavedEventStore,
SlavedRegistrationStore,
SlavedClientIpStore,
UserDirectoryStore,
@@ -116,7 +116,7 @@ class UserDirectoryServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,

View File

@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.constants import EventTypes
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.types import GroupID, get_domain_from_id
from twisted.internet import defer
@@ -173,6 +172,7 @@ class ApplicationService(object):
if self.is_interested_in_user(event.sender):
defer.returnValue(True)
# also check m.room.member state key
if (event.type == EventTypes.Member and
self.is_interested_in_user(event.state_key)):
@@ -181,20 +181,18 @@ class ApplicationService(object):
if not store:
defer.returnValue(False)
does_match = yield self._matches_user_in_member_list(event.room_id, store)
does_match = yield self._matches_user_in_member_list(
event, store,
)
defer.returnValue(does_match)
@cachedInlineCallbacks(num_args=1, cache_context=True)
def _matches_user_in_member_list(self, room_id, store, cache_context):
member_list = yield store.get_users_in_room(
room_id, on_invalidate=cache_context.invalidate
@defer.inlineCallbacks
def _matches_user_in_member_list(self, event, store):
ases = yield store.get_appservices_with_user_in_room(
event,
)
# check joined member events
for user_id in member_list:
if self.is_interested_in_user(user_id):
defer.returnValue(True)
defer.returnValue(False)
defer.returnValue(self.id in ases)
def _matches_room_id(self, event):
if hasattr(event, "room_id"):

View File

@@ -155,7 +155,7 @@ class DeviceHandler(BaseHandler):
try:
yield self.store.delete_device(user_id, device_id)
except errors.StoreError, e:
except errors.StoreError as e:
if e.code == 404:
# no match
pass
@@ -204,7 +204,7 @@ class DeviceHandler(BaseHandler):
try:
yield self.store.delete_devices(user_id, device_ids)
except errors.StoreError, e:
except errors.StoreError as e:
if e.code == 404:
# no match
pass
@@ -243,7 +243,7 @@ class DeviceHandler(BaseHandler):
new_display_name=content.get("display_name")
)
yield self.notify_device_update(user_id, [device_id])
except errors.StoreError, e:
except errors.StoreError as e:
if e.code == 404:
raise errors.NotFoundError()
else:

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -134,23 +135,8 @@ class E2eKeysHandler(object):
if user_id in destination_query:
results[user_id] = keys
except CodeMessageException as e:
failures[destination] = {
"status": e.code, "message": e.message
}
except NotRetryingDestination as e:
failures[destination] = {
"status": 503, "message": "Not ready for retry",
}
except FederationDeniedError as e:
failures[destination] = {
"status": 403, "message": "Federation Denied",
}
except Exception as e:
# include ConnectionRefused and other errors
failures[destination] = {
"status": 503, "message": e.message
}
failures[destination] = _exception_to_failure(e)
yield make_deferred_yieldable(defer.gatherResults([
preserve_fn(do_remote_query)(destination)
@@ -252,19 +238,8 @@ class E2eKeysHandler(object):
for user_id, keys in remote_result["one_time_keys"].items():
if user_id in device_keys:
json_result[user_id] = keys
except CodeMessageException as e:
failures[destination] = {
"status": e.code, "message": e.message
}
except NotRetryingDestination as e:
failures[destination] = {
"status": 503, "message": "Not ready for retry",
}
except Exception as e:
# include ConnectionRefused and other errors
failures[destination] = {
"status": 503, "message": e.message
}
failures[destination] = _exception_to_failure(e)
yield make_deferred_yieldable(defer.gatherResults([
preserve_fn(claim_client_keys)(destination)
@@ -362,6 +337,31 @@ class E2eKeysHandler(object):
)
def _exception_to_failure(e):
if isinstance(e, CodeMessageException):
return {
"status": e.code, "message": e.message,
}
if isinstance(e, NotRetryingDestination):
return {
"status": 503, "message": "Not ready for retry",
}
if isinstance(e, FederationDeniedError):
return {
"status": 403, "message": "Federation Denied",
}
# include ConnectionRefused and other errors
#
# Note that some Exceptions (notably twisted's ResponseFailed etc) don't
# give a string for e.message, which simplejson then fails to serialize.
return {
"status": 503, "message": str(e.message),
}
def _one_time_keys_match(old_key_json, new_key):
old_key = json.loads(old_key_json)

View File

@@ -15,6 +15,11 @@
# limitations under the License.
"""Utilities for interacting with Identity Servers"""
import logging
import simplejson as json
from twisted.internet import defer
from synapse.api.errors import (
@@ -24,9 +29,6 @@ from ._base import BaseHandler
from synapse.util.async import run_on_reactor
from synapse.api.errors import SynapseError, Codes
import json
import logging
logger = logging.getLogger(__name__)

View File

@@ -27,7 +27,7 @@ from synapse.types import (
from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter
from synapse.util.logcontext import preserve_fn, run_in_background
from synapse.util.metrics import measure_func
from synapse.util.frozenutils import unfreeze
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.stringutils import random_string
from synapse.visibility import filter_events_for_client
from synapse.replication.http.send_event import send_event_to_master
@@ -678,7 +678,7 @@ class EventCreationHandler(object):
# Ensure that we can round trip before trying to persist in db
try:
dump = simplejson.dumps(unfreeze(event.content))
dump = frozendict_json_encoder.encode(event.content)
simplejson.loads(dump)
except Exception:
logger.exception("Failed to encode content: %r", event.content)

View File

@@ -24,7 +24,7 @@ from synapse.api.errors import (
from synapse.http.client import CaptchaServerHttpClient
from synapse import types
from synapse.types import UserID
from synapse.util.async import run_on_reactor
from synapse.util.async import run_on_reactor, Linearizer
from synapse.util.threepids import check_3pid_allowed
from ._base import BaseHandler
@@ -46,6 +46,10 @@ class RegistrationHandler(BaseHandler):
self.macaroon_gen = hs.get_macaroon_generator()
self._generate_user_id_linearizer = Linearizer(
name="_generate_user_id_linearizer",
)
@defer.inlineCallbacks
def check_username(self, localpart, guest_access_token=None,
assigned_user_id=None):
@@ -345,9 +349,11 @@ class RegistrationHandler(BaseHandler):
@defer.inlineCallbacks
def _generate_user_id(self, reseed=False):
if reseed or self._next_generated_user_id is None:
self._next_generated_user_id = (
yield self.store.find_next_generated_user_id_localpart()
)
with (yield self._generate_user_id_linearizer.queue(())):
if reseed or self._next_generated_user_id is None:
self._next_generated_user_id = (
yield self.store.find_next_generated_user_id_localpart()
)
id = self._next_generated_user_id
self._next_generated_user_id += 1
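The Linearizer queues concurrent callers so the guarded block runs one at a time; a sketch of the usage pattern this hunk introduces, with the Linearizer imported as in the diff above:

    from twisted.internet import defer
    from synapse.util.async import Linearizer

    _linearizer = Linearizer(name="example")

    @defer.inlineCallbacks
    def critical_section():
        with (yield _linearizer.queue(())):
            # Only one caller at a time reaches this block, so a
            # read-then-increment of shared state cannot interleave.
            pass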

View File

@@ -488,6 +488,7 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False,
request.setHeader(b"Content-Type", b"application/json")
request.setHeader(b"Server", version_string)
request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
if send_cors:
set_cors_headers(request)

View File

@@ -34,7 +34,6 @@ REQUIREMENTS = {
"bcrypt": ["bcrypt>=3.1.0"],
"pillow": ["PIL"],
"pydenticon": ["pydenticon"],
"ujson": ["ujson"],
"blist": ["blist"],
"pysaml2>=3.0.0": ["saml2>=3.0.0"],
"pymacaroons-pynacl": ["pymacaroons"],

View File

@@ -17,8 +17,10 @@
from synapse.storage.appservice import (
ApplicationServiceWorkerStore, ApplicationServiceTransactionWorkerStore,
)
from synapse.replication.slave.storage.events import SlavedEventStore
class SlavedApplicationServiceStore(ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore):
ApplicationServiceWorkerStore,
SlavedEventStore):
pass
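Why the base order had to change: Python builds each class's method resolution order with C3 linearization, and a base list that contradicts the bases' own hierarchy fails outright. A toy illustration (names hypothetical):

    class Base(object): pass
    class Worker(Base): pass

    class Ok(Worker, Base):         # fine: the subclass is listed before its base
        pass

    # class Broken(Base, Worker):   # TypeError: Cannot create a consistent
    #     pass                      # method resolution order (MRO)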

View File

@@ -24,6 +24,8 @@ import simplejson
logger = logging.getLogger(__name__)
_json_encoder = simplejson.JSONEncoder(namedtuple_as_object=False)
class Command(object):
"""The base command class.
@@ -107,7 +109,7 @@ class RdataCommand(Command):
return " ".join((
self.stream_name,
str(self.token) if self.token is not None else "batch",
simplejson.dumps(self.row, namedtuple_as_object=False),
_json_encoder.encode(self.row),
))
@@ -302,7 +304,7 @@ class InvalidateCacheCommand(Command):
def to_line(self):
return " ".join((
self.cache_func, simplejson.dumps(self.keys, namedtuple_as_object=False)
self.cache_func, _json_encoder.encode(self.keys),
))
@@ -334,7 +336,7 @@ class UserIpCommand(Command):
)
def to_line(self):
return self.user_id + " " + simplejson.dumps((
return self.user_id + " " + _json_encoder.encode((
self.access_token, self.ip, self.user_agent, self.device_id,
self.last_seen,
))

View File

@@ -132,7 +132,7 @@ class StateHandler(object):
state_map = yield self.store.get_events(state.values(), get_prev_content=False)
state = {
key: state_map[e_id] for key, e_id in state.items() if e_id in state_map
key: state_map[e_id] for key, e_id in state.iteritems() if e_id in state_map
}
defer.returnValue(state)
@@ -378,7 +378,7 @@ class StateHandler(object):
new_state = resolve_events_with_state_map(state_set_ids, state_map)
new_state = {
key: state_map[ev_id] for key, ev_id in new_state.items()
key: state_map[ev_id] for key, ev_id in new_state.iteritems()
}
return new_state
@@ -458,15 +458,15 @@ class StateResolutionHandler(object):
# build a map from state key to the event_ids which set that state.
# dict[(str, str), set[str])
state = {}
for st in state_groups_ids.values():
for key, e_id in st.items():
for st in state_groups_ids.itervalues():
for key, e_id in st.iteritems():
state.setdefault(key, set()).add(e_id)
# build a map from state key to the event_ids which set that state,
# including only those where there are state keys in conflict.
conflicted_state = {
k: list(v)
for k, v in state.items()
for k, v in state.iteritems()
if len(v) > 1
}
@@ -480,36 +480,37 @@ class StateResolutionHandler(object):
)
else:
new_state = {
key: e_ids.pop() for key, e_ids in state.items()
key: e_ids.pop() for key, e_ids in state.iteritems()
}
# if the new state matches any of the input state groups, we can
# use that state group again. Otherwise we will generate a state_id
# which will be used as a cache key for future resolutions, but
# not get persisted.
state_group = None
new_state_event_ids = frozenset(new_state.values())
for sg, events in state_groups_ids.items():
if new_state_event_ids == frozenset(e_id for e_id in events):
state_group = sg
break
with Measure(self.clock, "state.create_group_ids"):
# if the new state matches any of the input state groups, we can
# use that state group again. Otherwise we will generate a state_id
# which will be used as a cache key for future resolutions, but
# not get persisted.
state_group = None
new_state_event_ids = frozenset(new_state.itervalues())
for sg, events in state_groups_ids.iteritems():
if new_state_event_ids == frozenset(e_id for e_id in events):
state_group = sg
break
# TODO: We want to create a state group for this set of events, to
# increase cache hits, but we need to make sure that it doesn't
# end up as a prev_group without being added to the database
# TODO: We want to create a state group for this set of events, to
# increase cache hits, but we need to make sure that it doesn't
# end up as a prev_group without being added to the database
prev_group = None
delta_ids = None
for old_group, old_ids in state_groups_ids.iteritems():
if not set(new_state) - set(old_ids):
n_delta_ids = {
k: v
for k, v in new_state.iteritems()
if old_ids.get(k) != v
}
if not delta_ids or len(n_delta_ids) < len(delta_ids):
prev_group = old_group
delta_ids = n_delta_ids
prev_group = None
delta_ids = None
for old_group, old_ids in state_groups_ids.iteritems():
if not set(new_state) - set(old_ids):
n_delta_ids = {
k: v
for k, v in new_state.iteritems()
if old_ids.get(k) != v
}
if not delta_ids or len(n_delta_ids) < len(delta_ids):
prev_group = old_group
delta_ids = n_delta_ids
cache = _StateCacheEntry(
state=new_state,
@@ -702,7 +703,7 @@ def _resolve_with_state(unconflicted_state_ids, conflicted_state_ds, auth_event_
auth_events = {
key: state_map[ev_id]
for key, ev_id in auth_event_ids.items()
for key, ev_id in auth_event_ids.iteritems()
if ev_id in state_map
}
@@ -740,7 +741,7 @@ def _resolve_state_events(conflicted_state, auth_events):
auth_events.update(resolved_state)
for key, events in conflicted_state.items():
for key, events in conflicted_state.iteritems():
if key[0] == EventTypes.JoinRules:
logger.debug("Resolving conflicted join rules %r", events)
resolved_state[key] = _resolve_auth_events(
@@ -750,7 +751,7 @@ def _resolve_state_events(conflicted_state, auth_events):
auth_events.update(resolved_state)
for key, events in conflicted_state.items():
for key, events in conflicted_state.iteritems():
if key[0] == EventTypes.Member:
logger.debug("Resolving conflicted member lists %r", events)
resolved_state[key] = _resolve_auth_events(
@@ -760,7 +761,7 @@ def _resolve_state_events(conflicted_state, auth_events):
auth_events.update(resolved_state)
for key, events in conflicted_state.items():
for key, events in conflicted_state.iteritems():
if key not in resolved_state:
logger.debug("Resolving conflicted state %r:%r", key, events)
resolved_state[key] = _resolve_normal_events(

View File

@@ -18,9 +18,14 @@ import re
import simplejson as json
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.appservice import AppServiceTransaction
from synapse.config.appservice import load_appservices
from synapse.storage.events import EventsWorkerStore
from synapse.storage.roommember import RoomMemberWorkerStore
from synapse.storage.state import StateGroupWorkerStore
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.util.async import Linearizer
from ._base import SQLBaseStore
@@ -46,7 +51,8 @@ def _make_exclusive_regex(services_cache):
return exclusive_user_regex
class ApplicationServiceWorkerStore(SQLBaseStore):
class ApplicationServiceWorkerStore(RoomMemberWorkerStore, StateGroupWorkerStore,
SQLBaseStore):
def __init__(self, db_conn, hs):
self.services_cache = load_appservices(
hs.hostname,
@@ -111,6 +117,38 @@ class ApplicationServiceWorkerStore(SQLBaseStore):
return service
return None
@defer.inlineCallbacks
def get_appservices_with_user_in_room(self, event):
"""Get the list of appservices in the room at the given event
Args:
event (Event)
Returns:
Deferred[set(str)]: The IDs of all ASes in the room
"""
state_group = yield self._get_state_group_for_event(event.event_id)
if not state_group:
raise Exception("No state group for event %s" % (event.event_id,))
ases_in_room = yield self._get_appservices_with_user_in_room(
event.room_id, state_group,
)
defer.returnValue(ases_in_room)
@cachedInlineCallbacks(num_args=2, max_entries=10000)
def _get_appservices_with_user_in_room(self, room_id, state_group):
cache = self._get_appservices_with_user_in_room_cache(room_id)
ases_in_room = yield cache.get_appservices_in_room_by_user(state_group)
defer.returnValue(ases_in_room)
@cached(max_entries=10000)
def _get_appservices_with_user_in_room_cache(self, room_id):
return _AppserviceUsersCache(self, room_id)
class ApplicationServiceStore(ApplicationServiceWorkerStore):
# This is currently empty due to there not being any AS storage functions
@@ -346,6 +384,7 @@ class ApplicationServiceTransactionWorkerStore(ApplicationServiceWorkerStore,
" (SELECT stream_ordering FROM appservice_stream_position)"
" < e.stream_ordering"
" AND e.stream_ordering <= ?"
" AND NOT e.outlier"
" ORDER BY e.stream_ordering ASC"
" LIMIT ?"
)
@@ -374,3 +413,119 @@ class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStor
# to keep consistency with the other stores, we keep this empty class for
# now.
pass
class _AppserviceUsersCache(object):
"""Attempts to calculate which appservices have users in a given room by
looking at state groups and their delta_ids
"""
def __init__(self, store, room_id):
self.store = store
self.room_id = room_id
self.linearizer = Linearizer("_AppserviceUsersCache")
# The last state group we calculated the ASes in the room for.
self.state_group = object()
# A dict of all appservices in the room at the above state group,
# along with a user_id of an AS user in the room.
# Dict of as_id -> user_id.
self.appservices_in_room = {}
@defer.inlineCallbacks
def get_appservices_in_room_by_user(self, state_group):
"""
Args:
state_group(str)
Returns:
Deferred[set(str)]: The IDs of all ASes in the room
"""
assert state_group is not None
if state_group == self.state_group:
defer.returnValue(frozenset(self.appservices_in_room))
with (yield self.linearizer.queue(())):
# Set of ASes whose membership of the room we need to
# recalculate
unhandled_ases = set()
# If the state groups match then there is nothing to do
if state_group == self.state_group:
defer.returnValue(frozenset(self.appservices_in_room))
prev_group, delta_ids = yield self.store.get_state_group_delta(state_group)
# If the prev_group matches the last state group we can calculate
# the new value by looking at the deltas
if prev_group and prev_group == self.state_group:
for (typ, state_key), event_id in delta_ids.iteritems():
if typ != EventTypes.Member:
continue
user_id = state_key
event = yield self.store.get_event(event_id)
is_join = event.membership == Membership.JOIN
for appservice in self.store.get_app_services():
as_id = appservice.id
# If this is a join and the appservice is already in
# the room then it's a no-op
if is_join:
if as_id in self.appservices_in_room:
continue
# If this is not a join, then we only need to recalculate
# if the AS is in the room and the cached joined AS user
# matches this event.
elif self.appservices_in_room.get(as_id, None) != user_id:
continue
# If the AS is not interested in the user then it's a
# no-op.
if not appservice.is_interested_in_user(user_id):
continue
if is_join:
# If an AS user is joining then the AS is now
# interested in the room
self.appservices_in_room[as_id] = user_id
else:
# If an AS user has left then we need to
# recalculate if they're in the room.
unhandled_ases.add(appservice)
self.appservices_in_room.pop(as_id, None)
else:
unhandled_ases = set(self.store.get_app_services())
if unhandled_ases:
# We need to recalculate which ASes are in the room, so lets
# get the current state and try and find a join event
# that the AS is interested in.
current_state_ids = yield self.store.get_state_ids_for_group(state_group)
for appservice in unhandled_ases:
as_id = appservice.id
self.appservices_in_room.pop(as_id, None)
for (etype, state_key), event_id in current_state_ids.iteritems():
if etype != EventTypes.Member:
continue
if not appservice.is_interested_in_user(state_key):
continue
event = yield self.store.get_event(event_id)
if event.membership == Membership.JOIN:
self.appservices_in_room[as_id] = state_key
break
self.state_group = state_group
defer.returnValue(frozenset(self.appservices_in_room))

View File

@@ -14,15 +14,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.storage.events_worker import EventsWorkerStore
from collections import OrderedDict, deque, namedtuple
from functools import wraps
import logging
import simplejson as json
from twisted.internet import defer
from synapse.events import USE_FROZEN_DICTS
from synapse.storage.events_worker import EventsWorkerStore
from synapse.util.async import ObservableDeferred
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.logcontext import (
PreserveLoggingContext, make_deferred_yieldable
PreserveLoggingContext, make_deferred_yieldable,
)
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
@@ -30,16 +34,8 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.types import get_domain_from_id
from canonicaljson import encode_canonical_json
from collections import deque, namedtuple, OrderedDict
from functools import wraps
import synapse.metrics
import logging
import simplejson as json
# these are only included to make the type annotations work
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
@@ -53,12 +49,25 @@ event_counter = metrics.register_counter(
"persisted_events_sep", labels=["type", "origin_type", "origin_entity"]
)
# The number of times we are recalculating the current state
state_delta_counter = metrics.register_counter(
"state_delta",
)
# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = metrics.register_counter(
"state_delta_single_event",
)
# The number of times we are recalculating state when we could have reasonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = metrics.register_counter(
"state_delta_reuse_delta",
)
def encode_json(json_object):
if USE_FROZEN_DICTS:
return encode_canonical_json(json_object)
else:
return json.dumps(json_object, ensure_ascii=False)
return frozendict_json_encoder.encode(json_object)
class _EventPeristenceQueue(object):
@@ -368,7 +377,8 @@ class EventsStore(EventsWorkerStore):
room_id, ev_ctx_rm, latest_event_ids
)
if new_latest_event_ids == set(latest_event_ids):
latest_event_ids = set(latest_event_ids)
if new_latest_event_ids == latest_event_ids:
# No change in extremities, so no change in state
continue
@@ -389,6 +399,26 @@ class EventsStore(EventsWorkerStore):
if all_single_prev_not_state:
continue
state_delta_counter.inc()
if len(new_latest_event_ids) == 1:
state_delta_single_event_counter.inc()
# This is a fairly handwavey check to see if we could
# have guessed what the delta would have been when
# processing one of these events.
# What we're interested in is if the latest extremities
# were the same when we created the event as they are
# now. When this server creates a new event (as opposed
# to receiving it over federation) it will use the
# forward extremities as the prev_events, so we can
# guess this by looking at the prev_events and checking
# if they match the current forward extremities.
for ev, _ in ev_ctx_rm:
prev_event_ids = set(e for e, _ in ev.prev_events)
if latest_event_ids == prev_event_ids:
state_delta_reuse_delta_counter.inc()
break
logger.info(
"Calculating state delta for room %s", room_id,
)

View File

@@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError
from ._base import SQLBaseStore
import ujson as json
import simplejson as json
# The category ID for the "default" category. We don't store as null in the

View File

@@ -460,14 +460,12 @@ class RegistrationStore(RegistrationWorkerStore,
"""
def _find_next_generated_user_id(txn):
txn.execute("SELECT name FROM users")
rows = self.cursor_to_dict(txn)
regex = re.compile("^@(\d+):")
found = set()
for r in rows:
user_id = r["name"]
for user_id, in txn:
match = regex.search(user_id)
if match:
found.add(int(match.group(1)))

View File

@@ -440,7 +440,6 @@ class RoomMemberWorkerStore(EventsWorkerStore):
)
@cachedInlineCallbacks(num_args=2, max_entries=10000, iterable=True)
# @defer.inlineCallbacks
def _get_joined_hosts(self, room_id, state_group, current_state_ids, state_entry):
# We don't use `state_group`, it's there so that we can cache based
# on it. However, it's important that it's never None, since two current_state's

View File

@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import simplejson as json
logger = logging.getLogger(__name__)

View File

@@ -667,7 +667,7 @@ class UserDirectoryStore(SQLBaseStore):
# The array of numbers are the weights for the various part of the
# search: (domain, _, display name, localpart)
sql = """
SELECT d.user_id, display_name, avatar_url
SELECT d.user_id AS user_id, display_name, avatar_url
FROM user_directory_search
INNER JOIN user_directory AS d USING (user_id)
%s
@@ -702,7 +702,7 @@ class UserDirectoryStore(SQLBaseStore):
search_query = _parse_query_sqlite(search_term)
sql = """
SELECT d.user_id, display_name, avatar_url
SELECT d.user_id AS user_id, display_name, avatar_url
FROM user_directory_search
INNER JOIN user_directory AS d USING (user_id)
%s

View File

@@ -14,6 +14,7 @@
# limitations under the License.
from frozendict import frozendict
import simplejson as json
def freeze(o):
@@ -49,3 +50,21 @@ def unfreeze(o):
pass
return o
def _handle_frozendict(obj):
"""Helper for EventEncoder. Makes frozendicts serializable by returning
the underlying dict
"""
if type(obj) is frozendict:
# fishing the protected dict out of the object is a bit nasty,
# but we don't really want the overhead of copying the dict.
return obj._dict
raise TypeError('Object of type %s is not JSON serializable' %
obj.__class__.__name__)
# A JSONEncoder which is capable of encoding frozendics without barfing
frozendict_json_encoder = json.JSONEncoder(
default=_handle_frozendict,
)
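Usage of the encoder defined above, with the default hook converting the frozendict on the way through (plain json.dumps on the same object would raise TypeError):

    from frozendict import frozendict

    content = frozendict({"msgtype": "m.text", "body": "hello"})
    frozendict_json_encoder.encode(content)  # serializes via _handle_frozendict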

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
import logging
@@ -45,7 +45,7 @@ def create_resource_tree(desired_tree, root_resource):
for path_seg in full_path.split('/')[1:-1]:
if path_seg not in last_resource.listNames():
# resource doesn't exist, so make a "dummy resource"
child_resource = Resource()
child_resource = NoResource()
last_resource.putChild(path_seg, child_resource)
res_id = _resource_id(last_resource, path_seg)
resource_mappings[res_id] = child_resource

View File

@@ -55,7 +55,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
_regex("@irc_.*")
)
self.event.sender = "@irc_foobar:matrix.org"
self.assertTrue((yield self.service.is_interested(self.event)))
self.assertTrue((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_user_id_prefix_no_match(self):
@@ -63,7 +63,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
_regex("@irc_.*")
)
self.event.sender = "@someone_else:matrix.org"
self.assertFalse((yield self.service.is_interested(self.event)))
self.assertFalse((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_room_member_is_checked(self):
@@ -73,7 +73,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.event.sender = "@someone_else:matrix.org"
self.event.type = "m.room.member"
self.event.state_key = "@irc_foobar:matrix.org"
self.assertTrue((yield self.service.is_interested(self.event)))
self.assertTrue((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_room_id_match(self):
@@ -81,7 +81,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
_regex("!some_prefix.*some_suffix:matrix.org")
)
self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org"
self.assertTrue((yield self.service.is_interested(self.event)))
self.assertTrue((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_room_id_no_match(self):
@@ -89,7 +89,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
_regex("!some_prefix.*some_suffix:matrix.org")
)
self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org"
self.assertFalse((yield self.service.is_interested(self.event)))
self.assertFalse((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_alias_match(self):
@@ -160,7 +160,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.store.get_aliases_for_room.return_value = [
"#xmpp_foobar:matrix.org", "#athing:matrix.org"
]
self.store.get_users_in_room.return_value = []
self.store.get_appservices_with_user_in_room.return_value = []
self.assertFalse((yield self.service.is_interested(
self.event, self.store
)))
@@ -193,20 +193,3 @@ class ApplicationServiceTestCase(unittest.TestCase):
}
self.event.state_key = self.service.sender
self.assertTrue((yield self.service.is_interested(self.event)))
@defer.inlineCallbacks
def test_member_list_match(self):
self.service.namespaces[ApplicationService.NS_USERS].append(
_regex("@irc_.*")
)
self.store.get_users_in_room.return_value = [
"@alice:here",
"@irc_fo:here", # AS user
"@bob:here",
]
self.store.get_aliases_for_room.return_value = []
self.event.sender = "@xmpp_foobar:matrix.org"
self.assertTrue((yield self.service.is_interested(
event=self.event, store=self.store
)))