Compare commits

129 Commits (SHA1):

301cb60d0b, 0b01281e77, e8e540630e, a796bdd35e, 09f3cf1a7e, 3d6aa06577,
ea068d6f3c, 14e4d4f4bf, 475253a88e, 7f0399586d, 8c0c51ecb3, 79a8a347a6,
82276a18d1, b1580f50fe, 414fa36f3e, 32eb1dedd2, 71990b3cae, 0b07f02e19,
9fbaed325f, 9db2476991, 69f7f1418f, 48b04c4a4c, 08abe8e13c, 05077e06fa,
01a5a8b9e3, 897d976c1e, 45fc0ead10, cdd24449ee, 14d49c51db, 84b4e76fed,
c780d84d66, 1d67b13674, 92d50e3c2a, e94cdbaecf, 9b1bc593c5, 7f147d623b,
15e8dd2ccc, 05fe8a6c1c, f584d6108f, 28d7b546cb, f2cbbda956, cd77270a66,
242a0483eb, 60cf1c6e16, 7e6e588e60, 1ca2744621, dddb5aa7bb, 4eb8408ed2,
c5842dff1a, 7a0da69eee, a5806aba27, fd2dbf1836, 9643a6f7f2, c7181dcc6c,
74854a9719, 48fec67536, db10f553ba, 764030cf63, 8432e2ebd7, a81f140880,
47b25ba5f3, 3bf8bab8f9, a4cf660a32, 0d568ff403, 365f588d98, eb7be75a10,
dd0ac1614c, bb81e78ec6, a52f276990, 46c832eaac, 2b1a4b2596, cd6937fb26,
c2c153dd3b, 79d3b4689e, 808d8e06aa, 3f6762f0bb, 8bd585b09b, f89f6b7c09,
e2c0aa2c26, b01a755498, 1058d14127, 4d664278af, 8dee601054, e21c368b8b,
e07970165f, c5171bf171, ba1fbf7d5b, ab822a2d1f, 91cdb6de08, d49b77404b,
3ee57bdcbb, 782689bd40, ca87ad1def, b5f638f1f4, 38f708a2bb, 51b17ec566,
3c1080b6e4, eff3ae3b9a, bfb6c58624, a675f9c556, 25d2b5d55f, df1e4f259f,
c055c91655, 8cfad2e686, fc5d937550, eabc5f8271, c24fc9797b, e2c9fe0a6a,
9b75c78b4d, 63417c31e9, cd32c19a60, 34f51babc5, 8226e5597a, c79c4c0a7d,
6c6aba76e1, 01021c812f, 5075e444f4, 3e19beb941, bb99b1f550, ce6db0e547,
152c0aa58e, 119451dcd1, a6c813761a, 54a9bea88c, 484a0ebdfc, f81f421086,
cd9765805e, 495cb100d1, 37be52ac34
@@ -59,9 +59,10 @@ To create a changelog entry, make a new file in the ``changelog.d``
 file named in the format of ``PRnumber.type``. The type can be
 one of ``feature``, ``bugfix``, ``removal`` (also used for
 deprecations), or ``misc`` (for internal-only changes). The content of
-the file is your changelog entry, which can contain RestructuredText
-formatting. A note of contributors is welcomed in changelogs for
-non-misc changes (the content of misc changes is not displayed).
+the file is your changelog entry, which can contain Markdown
+formatting. Adding credits to the changelog is encouraged, we value
+your contributions and would like to have you shouted out in the
+release notes!

 For example, a fix in PR #1234 would have its changelog entry in
 ``changelog.d/1234.bugfix``, and contain content like "The security levels of
changelog.d/3659.feature (new file, 1 line)
@@ -0,0 +1 @@
+Support profile API endpoints on workers

changelog.d/3673.misc (new file, 1 line)
@@ -0,0 +1 @@
+Refactor state module to support multiple room versions

changelog.d/3680.feature (new file, 1 line)
@@ -0,0 +1 @@
+Server notices for resource limit blocking

changelog.d/3722.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues

changelog.d/3724.feature (new file, 1 line)
@@ -0,0 +1 @@
+Allow guests to use /rooms/:roomId/event/:eventId

changelog.d/3725.misc (new file, 1 line)
@@ -0,0 +1 @@
+The synapse.storage module has been ported to Python 3.

changelog.d/3726.misc (new file, 1 line)
@@ -0,0 +1 @@
+Split the state_group_cache into member and non-member state events (and so speed up LL /sync)

changelog.d/3727.misc (new file, 1 line)
@@ -0,0 +1 @@
+Log failure to authenticate remote servers as warnings (without stack traces)

changelog.d/3730.misc (new file, 1 line)
@@ -0,0 +1 @@
+The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content.

changelog.d/3734.misc (new file, 1 line)
@@ -0,0 +1 @@
+Reference the need for an HTTP replication port when using the federation_reader worker

changelog.d/3735.misc (new file, 1 line)
@@ -0,0 +1 @@
+Fix minor spelling error in federation client documentation.

changelog.d/3746.misc (new file, 1 line)
@@ -0,0 +1 @@
+Fix MAU cache invalidation due to missing yield

changelog.d/3747.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Fix bug where we resent "limit exceeded" server notices repeatedly

changelog.d/3749.feature (new file, 1 line)
@@ -0,0 +1 @@
+Add mau_trial_days config param, so that users only get counted as MAU after N days.

changelog.d/3751.feature (new file, 1 line)
@@ -0,0 +1 @@
+Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)).

changelog.d/3753.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices

changelog.d/3754.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic

changelog.d/3755.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Fix tagging of server notice rooms

changelog.d/3756.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Fix tagging of server notice rooms

changelog.d/3758.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Fix 'admin_uri' config variable and error parameter to be 'admin_contact' to match the spec.

changelog.d/3760.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Don't return non-LL-member state in incremental sync state blocks

changelog.d/3764.misc (new file, 1 line)
@@ -0,0 +1 @@
+Make sure that we close db connections opened during init

changelog.d/3768.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Fix bug in sending presence over federation

changelog.d/3777.bugfix (new file, 1 line)
@@ -0,0 +1 @@
+Fix bug where preserved threepid user comes to sign up and server is mau blocked
@@ -74,7 +74,7 @@ replication endpoints that it's talking to on the main synapse process.
 ``worker_replication_port`` should point to the TCP replication listener port and
 ``worker_replication_http_port`` should point to the HTTP replication port.

-Currently, only the ``event_creator`` worker requires specifying
+Currently, the ``event_creator`` and ``federation_reader`` workers require specifying
 ``worker_replication_http_port``.

 For instance::

@@ -265,6 +265,7 @@ Handles some event creation. It can handle REST endpoints matching::
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
     ^/_matrix/client/(api/v1|r0|unstable)/join/
+    ^/_matrix/client/(api/v1|r0|unstable)/profile/

 It will create events locally and then send them on to the main synapse
 instance to be persisted and handled.
@@ -31,5 +31,5 @@ $TOX_BIN/pip install 'setuptools>=18.5'
 $TOX_BIN/pip install 'pip>=10'

 { python synapse/python_dependencies.py
-  echo lxml psycopg2
+  echo lxml
 } | xargs $TOX_BIN/pip install
@@ -26,6 +26,7 @@ import synapse.types
 from synapse import event_auth
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.api.errors import AuthError, Codes, ResourceLimitError
+from synapse.config.server import is_threepid_reserved
 from synapse.types import UserID
 from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
 from synapse.util.caches.lrucache import LruCache
@@ -775,34 +776,56 @@ class Auth(object):
         )

     @defer.inlineCallbacks
-    def check_auth_blocking(self, user_id=None):
+    def check_auth_blocking(self, user_id=None, threepid=None):
         """Checks if the user should be rejected for some external reason,
         such as monthly active user limiting or global disable flag

         Args:
             user_id(str|None): If present, checks for presence against existing
                 MAU cohort
+
+            threepid(dict|None): If present, checks for presence against configured
+                reserved threepid. Used in cases where the user is trying to register
+                with a MAU blocked server, normally they would be rejected but their
+                threepid is on the reserved list. user_id and
+                threepid should never be set at the same time.
         """
+
+        # Never fail an auth check for the server notices users
+        # This can be a problem where event creation is prohibited due to blocking
+        if user_id == self.hs.config.server_notices_mxid:
+            return
+
         if self.hs.config.hs_disabled:
             raise ResourceLimitError(
                 403, self.hs.config.hs_disabled_message,
-                errcode=Codes.RESOURCE_LIMIT_EXCEED,
-                admin_uri=self.hs.config.admin_uri,
+                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
+                admin_contact=self.hs.config.admin_contact,
                 limit_type=self.hs.config.hs_disabled_limit_type
             )
         if self.hs.config.limit_usage_by_mau is True:
-            # If the user is already part of the MAU cohort
+            assert not (user_id and threepid)
+
+            # If the user is already part of the MAU cohort or a trial user
             if user_id:
                 timestamp = yield self.store.user_last_seen_monthly_active(user_id)
                 if timestamp:
                     return
+
+                is_trial = yield self.store.is_trial_user(user_id)
+                if is_trial:
+                    return
+            elif threepid:
+                # If the user does not exist yet, but is signing up with a
+                # reserved threepid then pass auth check
+                if is_threepid_reserved(self.hs.config, threepid):
+                    return
             # Else if there is no room in the MAU bucket, bail
             current_mau = yield self.store.get_monthly_active_count()
             if current_mau >= self.hs.config.max_mau_value:
                 raise ResourceLimitError(
                     403, "Monthly Active User Limit Exceeded",
-                    admin_uri=self.hs.config.admin_uri,
-                    errcode=Codes.RESOURCE_LIMIT_EXCEED,
+                    admin_contact=self.hs.config.admin_contact,
+                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                     limit_type="monthly_active_user"
                 )
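A minimal sketch of how a caller can exercise the new ``threepid`` path of ``check_auth_blocking`` (not part of the diff; the dict shape mirrors ``mau_limit_reserved_threepids`` and the address is hypothetical)::

    from twisted.internet import defer

    @defer.inlineCallbacks
    def check_reserved_signup(auth):
        # Pass either user_id or threepid, never both: a reserved threepid
        # passes the check even when the MAU cap has been reached.
        threepid = {"medium": "email", "address": "support@example.com"}
        yield auth.check_auth_blocking(threepid=threepid)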
@@ -78,6 +78,7 @@ class EventTypes(object):
     Name = "m.room.name"

     ServerACL = "m.room.server_acl"
+    Pinned = "m.room.pinned_events"


 class RejectedReason(object):
@@ -97,9 +98,17 @@ class ThirdPartyEntityKind(object):
     LOCATION = "location"


+class RoomVersions(object):
+    V1 = "1"
+    VDH_TEST = "vdh-test-version"
+
+
 # the version we will give rooms which are created on this server
-DEFAULT_ROOM_VERSION = "1"
+DEFAULT_ROOM_VERSION = RoomVersions.V1

 # vdh-test-version is a placeholder to get room versioning support working and tested
 # until we have a working v2.
-KNOWN_ROOM_VERSIONS = {"1", "vdh-test-version"}
+KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}
+
+ServerNoticeMsgType = "m.server_notice"
+ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
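A hedged sketch of consuming the new ``RoomVersions`` constants; ``check_room_version`` is a hypothetical helper, not part of the diff::

    from synapse.api.constants import KNOWN_ROOM_VERSIONS, RoomVersions

    def check_room_version(version):
        # Reject room versions this server has no resolution algorithm for.
        if version not in KNOWN_ROOM_VERSIONS:
            raise ValueError("unsupported room version %r" % (version,))

    check_room_version(RoomVersions.V1)        # "1": known, passes
    check_room_version(RoomVersions.VDH_TEST)  # placeholder version, passes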
@@ -56,7 +56,7 @@ class Codes(object):
     SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
     CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
     CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
-    RESOURCE_LIMIT_EXCEED = "M_RESOURCE_LIMIT_EXCEED"
+    RESOURCE_LIMIT_EXCEEDED = "M_RESOURCE_LIMIT_EXCEEDED"
     UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
     INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
@@ -238,11 +238,11 @@ class ResourceLimitError(SynapseError):
     """
     def __init__(
         self, code, msg,
-        errcode=Codes.RESOURCE_LIMIT_EXCEED,
-        admin_uri=None,
+        errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
+        admin_contact=None,
         limit_type=None,
     ):
-        self.admin_uri = admin_uri
+        self.admin_contact = admin_contact
        self.limit_type = limit_type
        super(ResourceLimitError, self).__init__(code, msg, errcode=errcode)

@@ -250,7 +250,7 @@ class ResourceLimitError(SynapseError):
         return cs_error(
             self.msg,
             self.errcode,
-            admin_uri=self.admin_uri,
+            admin_contact=self.admin_contact,
             limit_type=self.limit_type
         )
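A hedged sketch of the renamed error surface (the contact address is a hypothetical value)::

    from synapse.api.errors import Codes, ResourceLimitError

    try:
        raise ResourceLimitError(
            403, "Monthly Active User Limit Exceeded",
            admin_contact="mailto:admin@example.com",  # hypothetical value
            limit_type="monthly_active_user",
        )
    except ResourceLimitError as e:
        # The serialized error now carries admin_contact, not admin_uri.
        assert e.errcode == Codes.RESOURCE_LIMIT_EXCEEDED
        assert e.error_dict()["admin_contact"] == "mailto:admin@example.com"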
@@ -51,10 +51,7 @@ class AppserviceSlaveStore(


 class AppserviceServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = AppserviceSlaveStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]

@@ -74,10 +74,7 @@ class ClientReaderSlavedStore(


 class ClientReaderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = ClientReaderSlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -45,6 +45,11 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1.profile import (
+    ProfileAvatarURLRestServlet,
+    ProfileDisplaynameRestServlet,
+    ProfileRestServlet,
+)
 from synapse.rest.client.v1.room import (
     JoinRoomAliasServlet,
     RoomMembershipRestServlet,
@@ -53,6 +58,7 @@ from synapse.rest.client.v1.room import (
 )
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
+from synapse.storage.user_directory import UserDirectoryStore
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
@@ -62,6 +68,9 @@ logger = logging.getLogger("synapse.app.event_creator")


 class EventCreatorSlavedStore(
+    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
+    # rather than going via the correct worker.
+    UserDirectoryStore,
     DirectoryStore,
     SlavedTransactionStore,
     SlavedProfileStore,
@@ -81,10 +90,7 @@ class EventCreatorSlavedStore(


 class EventCreatorServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = EventCreatorSlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -101,6 +107,9 @@ class EventCreatorServer(HomeServer):
             RoomMembershipRestServlet(self).register(resource)
             RoomStateEventRestServlet(self).register(resource)
             JoinRoomAliasServlet(self).register(resource)
+            ProfileAvatarURLRestServlet(self).register(resource)
+            ProfileDisplaynameRestServlet(self).register(resource)
+            ProfileRestServlet(self).register(resource)
             resources.update({
                 "/_matrix/client/r0": resource,
                 "/_matrix/client/unstable": resource,
@@ -72,10 +72,7 @@ class FederationReaderSlavedStore(


 class FederationReaderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FederationReaderSlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]

@@ -78,10 +78,7 @@ class FederationSenderSlaveStore(


 class FederationSenderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FederationSenderSlaveStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]

@@ -148,10 +148,7 @@ class FrontendProxySlavedStore(


 class FrontendProxyServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FrontendProxySlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -62,7 +62,7 @@ from synapse.rest.key.v1.server_key_resource import LocalKey
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer
-from synapse.storage import are_all_users_on_domain
+from synapse.storage import DataStore, are_all_users_on_domain
 from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
 from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
 from synapse.util.caches import CACHE_SIZE_FACTOR

@@ -111,6 +111,8 @@ def build_resource_for_web_client(hs):


 class SynapseHomeServer(HomeServer):
+    DATASTORE_CLASS = DataStore
+
     def _listener_http(self, config, listener_config):
         port = listener_config["port"]
         bind_addresses = listener_config["bind_addresses"]

@@ -356,13 +358,13 @@ def setup(config_options):
     logger.info("Preparing database: %s...", config.database_config['name'])

     try:
-        db_conn = hs.get_db_conn(run_new_connection=False)
-        prepare_database(db_conn, database_engine, config=config)
-        database_engine.on_new_connection(db_conn)
+        with hs.get_db_conn(run_new_connection=False) as db_conn:
+            prepare_database(db_conn, database_engine, config=config)
+            database_engine.on_new_connection(db_conn)

-        hs.run_startup_checks(db_conn, database_engine)
+            hs.run_startup_checks(db_conn, database_engine)

-        db_conn.commit()
+            db_conn.commit()
     except UpgradeDatabaseException:
         sys.stderr.write(
             "\nFailed to upgrade database.\n"
@@ -60,10 +60,7 @@ class MediaRepositorySlavedStore(


 class MediaRepositoryServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = MediaRepositorySlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]

@@ -78,10 +78,7 @@ class PusherSlaveStore(


 class PusherServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = PusherSlaveStore

     def remove_pusher(self, app_id, push_key, user_id):
         self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

@@ -249,10 +249,7 @@ class SynchrotronApplicationService(object):


 class SynchrotronServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = SynchrotronSlavedStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]

@@ -94,10 +94,7 @@ class UserDirectorySlaveStore(


 class UserDirectoryServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = UserDirectorySlaveStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -77,10 +77,15 @@ class ServerConfig(Config):
         self.max_mau_value = config.get(
             "max_mau_value", 0,
         )
+
         self.mau_limits_reserved_threepids = config.get(
             "mau_limit_reserved_threepids", []
         )

+        self.mau_trial_days = config.get(
+            "mau_trial_days", 0,
+        )
+
         # Options to disable HS
         self.hs_disabled = config.get("hs_disabled", False)
         self.hs_disabled_message = config.get("hs_disabled_message", "")
@@ -88,7 +93,7 @@ class ServerConfig(Config):

         # Admin uri to direct users at should their instance become blocked
         # due to resource constraints
-        self.admin_uri = config.get("admin_uri", None)
+        self.admin_contact = config.get("admin_contact", None)

         # FIXME: federation_domain_whitelist needs sytests
         self.federation_domain_whitelist = None
@@ -352,7 +357,7 @@ class ServerConfig(Config):
        # Homeserver blocking
        #
        # How to reach the server admin, used in ResourceLimitError
-        # admin_uri: 'mailto:admin@server.com'
+        # admin_contact: 'mailto:admin@server.com'
        #
        # Global block config
        #
@@ -365,6 +370,7 @@ class ServerConfig(Config):
        # Enables monthly active user checking
        # limit_usage_by_mau: False
        # max_mau_value: 50
+        # mau_trial_days: 2
        #
        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
@@ -398,6 +404,23 @@ class ServerConfig(Config):
                          " service on the given port.")


+def is_threepid_reserved(config, threepid):
+    """Check the threepid against the reserved threepid config
+
+    Args:
+        config(ServerConfig) - to access server config attributes
+        threepid(dict) - The threepid to test for
+
+    Returns:
+        bool: Is the threepid under test a reserved user
+    """
+
+    for tp in config.mau_limits_reserved_threepids:
+        if (threepid['medium'] == tp['medium']
+                and threepid['address'] == tp['address']):
+            return True
+    return False
+
+
 def read_gc_thresholds(thresholds):
     """Reads the three integer thresholds for garbage collection. Ensures that
     the thresholds are integers if thresholds are supplied.
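A minimal sketch exercising ``is_threepid_reserved`` above (``FakeConfig`` and the addresses are hypothetical)::

    class FakeConfig(object):
        # Mirrors the mau_limit_reserved_threepids config option.
        mau_limits_reserved_threepids = [
            {"medium": "email", "address": "admin@example.com"},
        ]

    assert is_threepid_reserved(
        FakeConfig(), {"medium": "email", "address": "admin@example.com"}
    )
    assert not is_threepid_reserved(
        FakeConfig(), {"medium": "email", "address": "other@example.com"}
    )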
@@ -18,7 +18,9 @@ import logging
 from canonicaljson import json

 from twisted.internet import defer, reactor
+from twisted.internet.error import ConnectError
 from twisted.internet.protocol import Factory
+from twisted.names.error import DomainError
 from twisted.web.http import HTTPClient

 from synapse.http.endpoint import matrix_federation_endpoint
@@ -47,12 +49,14 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
             server_response, server_certificate = yield protocol.remote_key
             defer.returnValue((server_response, server_certificate))
         except SynapseKeyClientError as e:
-            logger.exception("Error getting key for %r" % (server_name,))
+            logger.warn("Error getting key for %r: %s", server_name, e)
             if e.status.startswith("4"):
                 # Don't retry for 4xx responses.
                 raise IOError("Cannot get key for %r" % server_name)
+        except (ConnectError, DomainError) as e:
+            logger.warn("Error getting key for %r: %s", server_name, e)
         except Exception as e:
-            logger.exception(e)
+            logger.exception("Error getting key for %r", server_name)
         raise IOError("Cannot get key for %r" % server_name)

@@ -32,7 +32,7 @@ Events are replicated via a separate events stream.
 import logging
 from collections import namedtuple

-from six import iteritems, itervalues
+from six import iteritems

 from sortedcontainers import SortedDict

@@ -117,7 +117,7 @@ class FederationRemoteSendQueue(object):

         user_ids = set(
             user_id
-            for uids in itervalues(self.presence_changed)
+            for uids in self.presence_changed.values()
             for user_id in uids
         )
@@ -106,7 +106,7 @@ class TransportLayerClient(object):
             dest (str)
             room_id (str)
             event_tuples (list)
-            limt (int)
+            limit (int)

         Returns:
             Deferred: Results in a dict received from the remote homeserver.
@@ -261,10 +261,10 @@ class BaseFederationServlet(object):
         except NoAuthenticationError:
             origin = None
             if self.REQUIRE_AUTH:
-                logger.exception("authenticate_request failed")
+                logger.warn("authenticate_request failed: missing authentication")
                 raise
-        except Exception:
-            logger.exception("authenticate_request failed")
+        except Exception as e:
+            logger.warn("authenticate_request failed: %s", e)
             raise

         if origin:
@@ -291,8 +291,9 @@ class FederationHandler(BaseHandler):
             ev_ids, get_prev_content=False, check_redacted=False
         )

+        room_version = yield self.store.get_room_version(pdu.room_id)
         state_map = yield resolve_events_with_factory(
-            state_groups, {pdu.event_id: pdu}, fetch
+            room_version, state_groups, {pdu.event_id: pdu}, fetch
         )

         state = (yield self.store.get_events(state_map.values())).values()
@@ -1828,7 +1829,10 @@ class FederationHandler(BaseHandler):
             (d.type, d.state_key): d for d in different_events if d
         })

+        room_version = yield self.store.get_room_version(event.room_id)
+
         new_state = self.state_handler.resolve_events(
+            room_version,
             [list(local_view.values()), list(remote_view.values())],
             event
         )
@@ -32,12 +32,16 @@ from ._base import BaseHandler
 logger = logging.getLogger(__name__)


-class ProfileHandler(BaseHandler):
-    PROFILE_UPDATE_MS = 60 * 1000
-    PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
+class BaseProfileHandler(BaseHandler):
+    """Handles fetching and updating user profile information.
+
+    BaseProfileHandler can be instantiated directly on workers and will
+    delegate to master when necessary. The master process should use the
+    subclass MasterProfileHandler
+    """

     def __init__(self, hs):
-        super(ProfileHandler, self).__init__(hs)
+        super(BaseProfileHandler, self).__init__(hs)

         self.federation = hs.get_federation_client()
         hs.get_federation_registry().register_query_handler(
@@ -46,11 +50,6 @@ class ProfileHandler(BaseHandler):

         self.user_directory_handler = hs.get_user_directory_handler()

-        if hs.config.worker_app is None:
-            self.clock.looping_call(
-                self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
-            )
-
     @defer.inlineCallbacks
     def get_profile(self, user_id):
         target_user = UserID.from_string(user_id)
@@ -282,6 +281,20 @@ class ProfileHandler(BaseHandler):
                 room_id, str(e.message)
             )

+
+class MasterProfileHandler(BaseProfileHandler):
+    PROFILE_UPDATE_MS = 60 * 1000
+    PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
+
+    def __init__(self, hs):
+        super(MasterProfileHandler, self).__init__(hs)
+
+        assert hs.config.worker_app is None
+
+        self.clock.looping_call(
+            self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
+        )
+
     def _start_update_remote_profile_cache(self):
         return run_as_background_process(
             "Update remote profile", self._update_remote_profile_cache,
@@ -125,6 +125,7 @@ class RegistrationHandler(BaseHandler):
         guest_access_token=None,
         make_guest=False,
         admin=False,
+        threepid=None,
     ):
         """Registers a new client on the server.

@@ -145,7 +146,7 @@ class RegistrationHandler(BaseHandler):
             RegistrationError if there was a problem registering.
         """

-        yield self.auth.check_auth_blocking()
+        yield self.auth.check_auth_blocking(threepid=threepid)
         password_hash = None
         if password:
             password_hash = yield self.auth_handler().hash(password)
@@ -344,6 +344,7 @@ class RoomMemberHandler(object):
         latest_event_ids = (
             event_id for (event_id, _, _) in prev_events_and_hashes
         )
+
         current_state_ids = yield self.state_handler.get_current_state_ids(
             room_id, latest_event_ids=latest_event_ids,
         )
@@ -745,9 +745,16 @@ class SyncHandler(object):
         state_ids = {}
         if lazy_load_members:
             if types:
+                # We're returning an incremental sync, with no "gap" since
+                # the previous sync, so normally there would be no state to return
+                # But we're lazy-loading, so the client might need some more
+                # member events to understand the events in this timeline.
+                # So we fish out all the member events corresponding to the
+                # timeline here, and then dedupe any redundant ones below.
+
                 state_ids = yield self.store.get_state_ids_for_event(
                     batch.events[0].event_id, types=types,
-                    filtered_types=filtered_types,
+                    filtered_types=None,  # we only want members!
                 )

         if lazy_load_members and not include_redundant_members:
@@ -119,6 +119,8 @@ class UserDirectoryHandler(object):
         """Called to update index of our local user profiles when they change
         irrespective of any rooms the user may be in.
         """
+        # FIXME(#3714): We should probably do this in the same worker as all
+        # the other changes.
         yield self.store.update_profile_in_user_dir(
             user_id, profile.display_name, profile.avatar_url, None,
         )
@@ -127,6 +129,8 @@ class UserDirectoryHandler(object):
     def handle_user_deactivated(self, user_id):
         """Called when a user ID is deactivated
         """
+        # FIXME(#3714): We should probably do this in the same worker as all
+        # the other changes.
         yield self.store.remove_from_user_dir(user_id)
         yield self.store.remove_from_user_in_public_room(user_id)
@@ -133,7 +133,7 @@ class MatrixFederationHttpClient(object):
         failures, connection failures, SSL failures.)
         """
         if (
-            self.hs.config.federation_domain_whitelist and
+            self.hs.config.federation_domain_whitelist is not None and
             destination not in self.hs.config.federation_domain_whitelist
         ):
             raise FederationDeniedError(destination)
@@ -15,6 +15,7 @@
 # limitations under the License.

 import logging
+import threading

 from prometheus_client.core import Counter, Histogram

@@ -111,6 +112,9 @@ in_flight_requests_db_sched_duration = Counter(
 # The set of all in flight requests, set[RequestMetrics]
 _in_flight_requests = set()

+# Protects the _in_flight_requests set from concurrent access
+_in_flight_requests_lock = threading.Lock()
+

 def _get_in_flight_counts():
     """Returns a count of all in flight requests by (method, server_name)
@@ -120,7 +124,8 @@ def _get_in_flight_counts():
     """
     # Cast to a list to prevent it changing while the Prometheus
     # thread is collecting metrics
-    reqs = list(_in_flight_requests)
+    with _in_flight_requests_lock:
+        reqs = list(_in_flight_requests)

     for rm in reqs:
         rm.update_metrics()
@@ -154,10 +159,12 @@ class RequestMetrics(object):
         # to the "in flight" metrics.
         self._request_stats = self.start_context.get_resource_usage()

-        _in_flight_requests.add(self)
+        with _in_flight_requests_lock:
+            _in_flight_requests.add(self)

     def stop(self, time_sec, request):
-        _in_flight_requests.discard(self)
+        with _in_flight_requests_lock:
+            _in_flight_requests.discard(self)

         context = LoggingContext.current_context()
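The pattern the hunk above applies, in isolation (a sketch, not synapse code): every reader and writer of the shared set takes the same lock, so the Prometheus collector thread never iterates a set that a request thread is mutating::

    import threading

    _in_flight = set()
    _in_flight_lock = threading.Lock()

    def track(item):
        with _in_flight_lock:
            _in_flight.add(item)

    def snapshot():
        # Copy under the lock; iterate the copy outside it.
        with _in_flight_lock:
            return list(_in_flight)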
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import threading
+
 import six

 from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily

@@ -78,6 +80,9 @@ _background_process_counts = dict()  # type: dict[str, int]
 # of process descriptions that no longer have any active processes.
 _background_processes = dict()  # type: dict[str, set[_BackgroundProcess]]

+# A lock that covers the above dicts
+_bg_metrics_lock = threading.Lock()
+

 class _Collector(object):
     """A custom metrics collector for the background process metrics.

@@ -92,7 +97,11 @@ class _Collector(object):
             labels=["name"],
         )

-        for desc, processes in six.iteritems(_background_processes):
+        # We copy the dict so that it doesn't change from underneath us
+        with _bg_metrics_lock:
+            _background_processes_copy = dict(_background_processes)
+
+        for desc, processes in six.iteritems(_background_processes_copy):
             background_process_in_flight_count.add_metric(
                 (desc,), len(processes),
             )

@@ -167,19 +176,26 @@ def run_as_background_process(desc, func, *args, **kwargs):
     """
     @defer.inlineCallbacks
     def run():
-        count = _background_process_counts.get(desc, 0)
-        _background_process_counts[desc] = count + 1
+        with _bg_metrics_lock:
+            count = _background_process_counts.get(desc, 0)
+            _background_process_counts[desc] = count + 1

         _background_process_start_count.labels(desc).inc()

         with LoggingContext(desc) as context:
             context.request = "%s-%i" % (desc, count)
             proc = _BackgroundProcess(desc, context)
-            _background_processes.setdefault(desc, set()).add(proc)
+
+            with _bg_metrics_lock:
+                _background_processes.setdefault(desc, set()).add(proc)
+
             try:
                 yield func(*args, **kwargs)
             finally:
                 proc.update_metrics()
-                _background_processes[desc].remove(proc)
+
+                with _bg_metrics_lock:
+                    _background_processes[desc].remove(proc)

     with PreserveLoggingContext():
         return run()
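A hedged usage sketch of ``run_as_background_process`` after this change (the job body is a stand-in)::

    from twisted.internet import defer

    from synapse.metrics.background_process_metrics import (
        run_as_background_process,
    )

    @defer.inlineCallbacks
    def _noop_job():
        # Real callers yield storage or federation work here.
        yield defer.succeed(None)

    # The wrapper allocates a LoggingContext named "noop_job-N" and updates
    # the per-description counters under _bg_metrics_lock, as shown above.
    run_as_background_process("noop_job", _noop_job)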
@@ -39,7 +39,7 @@ REQUIREMENTS = {
     "signedjson>=1.0.0": ["signedjson>=1.0.0"],
     "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
     "service_identity>=1.0.0": ["service_identity>=1.0.0"],
-    "Twisted>=16.0.0": ["twisted>=16.0.0"],
+    "Twisted>=17.1.0": ["twisted>=17.1.0"],

     # We use crypto.get_elliptic_curve which is only supported in >=0.15
     "pyopenssl>=0.15": ["OpenSSL>=0.15"],
@@ -78,6 +78,9 @@ CONDITIONAL_REQUIREMENTS = {
     "affinity": {
         "affinity": ["affinity"],
     },
+    "postgres": {
+        "psycopg2>=2.6": ["psycopg2"]
+    }
 }
@@ -531,7 +531,7 @@ class RoomEventServlet(ClientV1RestServlet):

     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
-        requester = yield self.auth.get_user_by_req(request)
+        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
         event = yield self.event_handler.get_event(requester.user, room_id, event_id)

         time_now = self.clock.time_msec()
@@ -23,6 +23,7 @@ from twisted.internet import defer
 import synapse.util.stringutils as stringutils
 from synapse.api.constants import LoginType
 from synapse.api.errors import Codes, SynapseError
+from synapse.config.server import is_threepid_reserved
 from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request
 from synapse.rest.client.v1.base import ClientV1RestServlet
 from synapse.types import create_requester
@@ -281,12 +282,20 @@ class RegisterRestServlet(ClientV1RestServlet):
             register_json["user"].encode("utf-8")
             if "user" in register_json else None
         )
+        threepid = None
+        if session.get(LoginType.EMAIL_IDENTITY):
+            threepid = session["threepidCreds"]

         handler = self.handlers.registration_handler
         (user_id, token) = yield handler.register(
             localpart=desired_user_id,
-            password=password
+            password=password,
+            threepid=threepid,
         )
+        # Necessary due to auth checks prior to the threepid being
+        # written to the db
+        if is_threepid_reserved(self.hs.config, threepid):
+            yield self.store.upsert_monthly_active_user(user_id)

         if session[LoginType.EMAIL_IDENTITY]:
             logger.debug("Binding emails %s to %s" % (
@@ -26,6 +26,7 @@ import synapse
 import synapse.types
 from synapse.api.constants import LoginType
 from synapse.api.errors import Codes, SynapseError, UnrecognizedRequestError
+from synapse.config.server import is_threepid_reserved
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
@@ -395,12 +396,21 @@ class RegisterRestServlet(RestServlet):
         if desired_username is not None:
             desired_username = desired_username.lower()

+        threepid = None
+        if auth_result:
+            threepid = auth_result.get(LoginType.EMAIL_IDENTITY)
+
         (registered_user_id, _) = yield self.registration_handler.register(
             localpart=desired_username,
             password=new_password,
             guest_access_token=guest_access_token,
             generate_token=False,
+            threepid=threepid,
         )
+        # Necessary due to auth checks prior to the threepid being
+        # written to the db
+        if is_threepid_reserved(self.hs.config, threepid):
+            yield self.store.upsert_monthly_active_user(registered_user_id)

         # remember that we've now registered that user account, and with
         # what user ID (since the user may not have specified)
@@ -19,6 +19,7 @@
 # partial one for unit test mocking.

 # Imports required for the default HomeServer() implementation
+import abc
 import logging

 from twisted.enterprise import adbapi
@@ -56,7 +57,7 @@ from synapse.handlers.initial_sync import InitialSyncHandler
 from synapse.handlers.message import EventCreationHandler, MessageHandler
 from synapse.handlers.pagination import PaginationHandler
 from synapse.handlers.presence import PresenceHandler
-from synapse.handlers.profile import ProfileHandler
+from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler
 from synapse.handlers.read_marker import ReadMarkerHandler
 from synapse.handlers.receipts import ReceiptsHandler
 from synapse.handlers.room import RoomContextHandler, RoomCreationHandler
@@ -81,7 +82,6 @@ from synapse.server_notices.server_notices_manager import ServerNoticesManager
 from synapse.server_notices.server_notices_sender import ServerNoticesSender
 from synapse.server_notices.worker_server_notices_sender import WorkerServerNoticesSender
 from synapse.state import StateHandler, StateResolutionHandler
-from synapse.storage import DataStore
 from synapse.streams.events import EventSources
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
@@ -111,6 +111,8 @@ class HomeServer(object):
         config (synapse.config.homeserver.HomeserverConfig):
     """

+    __metaclass__ = abc.ABCMeta
+
     DEPENDENCIES = [
         'http_client',
         'db_pool',
@@ -172,6 +174,11 @@ class HomeServer(object):
         'room_context_handler',
     ]

+    # This is overridden in derived application classes
+    # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be
+    # instantiated during setup() for future return by get_datastore()
+    DATASTORE_CLASS = abc.abstractproperty()
+
     def __init__(self, hostname, reactor=None, **kwargs):
         """
         Args:
@@ -188,13 +195,16 @@ class HomeServer(object):
         self.distributor = Distributor()
         self.ratelimiter = Ratelimiter()

+        self.datastore = None
+
         # Other kwargs are explicit dependencies
         for depname in kwargs:
             setattr(self, depname, kwargs[depname])

     def setup(self):
         logger.info("Setting up.")
-        self.datastore = DataStore(self.get_db_conn(), self)
+        with self.get_db_conn() as conn:
+            self.datastore = self.DATASTORE_CLASS(conn, self)
         logger.info("Finished setting up.")

     def get_reactor(self):
@@ -308,7 +318,10 @@ class HomeServer(object):
         return InitialSyncHandler(self)

     def build_profile_handler(self):
-        return ProfileHandler(self)
+        if self.config.worker_app:
+            return BaseProfileHandler(self)
+        else:
+            return MasterProfileHandler(self)

     def build_event_creation_handler(self):
         return EventCreationHandler(self)
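Stripped of synapse specifics, the new datastore wiring reduces to this sketch (class and method names are illustrative)::

    import abc

    class Server(object):
        # Subclasses must override this, mirroring abc.abstractproperty()
        # in the diff; using it unoverridden is an error.
        DATASTORE_CLASS = abc.abstractproperty()

        def setup(self):
            # The connection is used as a context manager so it is
            # committed and released once the datastore has been built.
            with self.get_db_conn() as conn:
                self.datastore = self.DATASTORE_CLASS(conn, self)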
synapse/server_notices/resource_limits_server_notices.py (new file, 203 lines)
@@ -0,0 +1,203 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from six import iteritems
+
+from twisted.internet import defer
+
+from synapse.api.constants import (
+    EventTypes,
+    ServerNoticeLimitReached,
+    ServerNoticeMsgType,
+)
+from synapse.api.errors import AuthError, ResourceLimitError, SynapseError
+from synapse.server_notices.server_notices_manager import SERVER_NOTICE_ROOM_TAG
+
+logger = logging.getLogger(__name__)
+
+
+class ResourceLimitsServerNotices(object):
+    """ Keeps track of whether the server has reached its resource limit and
+    ensures that the client is kept up to date.
+    """
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer):
+        """
+        self._server_notices_manager = hs.get_server_notices_manager()
+        self._store = hs.get_datastore()
+        self._auth = hs.get_auth()
+        self._config = hs.config
+        self._resouce_limited = False
+        self._message_handler = hs.get_message_handler()
+        self._state = hs.get_state_handler()
+
+        self._notifier = hs.get_notifier()
+
+    @defer.inlineCallbacks
+    def maybe_send_server_notice_to_user(self, user_id):
+        """Check if we need to send a notice to this user, this will be true in
+        two cases.
+        1. The server has reached its limit but the room state does not reflect this
+        2. The room state indicates that the server has reached its limit when
+        actually the server is fine
+
+        Args:
+            user_id (str): user to check
+
+        Returns:
+            Deferred
+        """
+        if self._config.hs_disabled is True:
+            return
+
+        if self._config.limit_usage_by_mau is False:
+            return
+
+        if not self._server_notices_manager.is_enabled():
+            # Don't try and send server notices unless they've been enabled
+            return
+
+        timestamp = yield self._store.user_last_seen_monthly_active(user_id)
+        if timestamp is None:
+            # This user will be blocked from receiving the notice anyway.
+            # In practice, not sure we can ever get here
+            return
+
+        # Determine current state of room
+
+        room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id)
+
+        if not room_id:
+            logger.warn("Failed to get server notices room")
+            return
+
+        yield self._check_and_set_tags(user_id, room_id)
+        currently_blocked, ref_events = yield self._is_room_currently_blocked(room_id)
+
+        try:
+            # Normally should always pass in user_id if you have it, but in
+            # this case we are checking what would happen to other users if
+            # they were to arrive.
+            try:
+                yield self._auth.check_auth_blocking()
+                is_auth_blocking = False
+            except ResourceLimitError as e:
+                is_auth_blocking = True
+                event_content = e.msg
+                event_limit_type = e.limit_type
+
+            if currently_blocked and not is_auth_blocking:
+                # Room is notifying of a block, when it ought not to be.
+                # Remove block notification
+                content = {
+                    "pinned": ref_events
+                }
+                yield self._server_notices_manager.send_notice(
+                    user_id, content, EventTypes.Pinned, '',
+                )
+
+            elif not currently_blocked and is_auth_blocking:
+                # Room is not notifying of a block, when it ought to be.
+                # Add block notification
+                content = {
+                    'body': event_content,
+                    'msgtype': ServerNoticeMsgType,
+                    'server_notice_type': ServerNoticeLimitReached,
+                    'admin_contact': self._config.admin_contact,
+                    'limit_type': event_limit_type
+                }
+                event = yield self._server_notices_manager.send_notice(
+                    user_id, content, EventTypes.Message,
+                )
+
+                content = {
+                    "pinned": [
+                        event.event_id,
+                    ]
+                }
+                yield self._server_notices_manager.send_notice(
+                    user_id, content, EventTypes.Pinned, '',
+                )
+
+        except SynapseError as e:
+            logger.error("Error sending resource limits server notice: %s", e)
+
+    @defer.inlineCallbacks
+    def _check_and_set_tags(self, user_id, room_id):
+        """
+        Since server notices rooms were originally created without tags,
+        it is important to check that tags have been set correctly
+        Args:
+            user_id(str): the user in question
+            room_id(str): the server notices room for that user
+        """
+        tags = yield self._store.get_tags_for_room(user_id, room_id)
+        need_to_set_tag = True
+        if tags:
+            if SERVER_NOTICE_ROOM_TAG in tags:
+                # tag already present, nothing to do here
+                need_to_set_tag = False
+        if need_to_set_tag:
+            max_id = yield self._store.add_tag_to_room(
+                user_id, room_id, SERVER_NOTICE_ROOM_TAG, {}
+            )
+            self._notifier.on_new_event(
+                "account_data_key", max_id, users=[user_id]
+            )
+
+    @defer.inlineCallbacks
+    def _is_room_currently_blocked(self, room_id):
+        """
+        Determines if the room is currently blocked
+
+        Args:
+            room_id(str): The room id of the server notices room
+
+        Returns:
+            bool: Is the room currently blocked
+            list: The list of pinned events that are unrelated to limit blocking
+            This list can be used as a convenience in the case where the block
+            is to be lifted and the remaining pinned event references need to be
+            preserved
+        """
+        currently_blocked = False
+        pinned_state_event = None
+        try:
+            pinned_state_event = yield self._state.get_current_state(
+                room_id, event_type=EventTypes.Pinned
+            )
+        except AuthError:
+            # The user has yet to join the server notices room
+            pass
+
+        referenced_events = []
+        if pinned_state_event is not None:
+            referenced_events = list(pinned_state_event.content.get('pinned', []))
+
+        events = yield self._store.get_events(referenced_events)
+        for event_id, event in iteritems(events):
+            if event.type != EventTypes.Message:
+                continue
+            if event.content.get("msgtype") == ServerNoticeMsgType:
+                currently_blocked = True
+                # remove event in case we need to disable blocking later on.
+                if event_id in referenced_events:
+                    referenced_events.remove(event.event_id)
+
+        defer.returnValue((currently_blocked, referenced_events))
@@ -22,6 +22,8 @@ from synapse.util.caches.descriptors import cachedInlineCallbacks

 logger = logging.getLogger(__name__)

+SERVER_NOTICE_ROOM_TAG = "m.server_notice"
+

 class ServerNoticesManager(object):
     def __init__(self, hs):
@@ -37,6 +39,8 @@ class ServerNoticesManager(object):
         self._event_creation_handler = hs.get_event_creation_handler()
         self._is_mine_id = hs.is_mine_id

+        self._notifier = hs.get_notifier()
+
     def is_enabled(self):
         """Checks if server notices are enabled on this server.

@@ -46,7 +50,10 @@ class ServerNoticesManager(object):
         return self._config.server_notices_mxid is not None

     @defer.inlineCallbacks
-    def send_notice(self, user_id, event_content):
+    def send_notice(
+        self, user_id, event_content,
+        type=EventTypes.Message, state_key=None
+    ):
         """Send a notice to the given user

         Creates the server notices room, if none exists.
@@ -54,9 +61,11 @@ class ServerNoticesManager(object):
         Args:
             user_id (str): mxid of user to send event to.
             event_content (dict): content of event to send
+            type(EventTypes): type of event
+            state_key(str|None): state key of the event, if it is a state event

         Returns:
-            Deferred[None]
+            Deferred[FrozenEvent]
         """
         room_id = yield self.get_notice_room_for_user(user_id)

@@ -65,15 +74,20 @@ class ServerNoticesManager(object):

         logger.info("Sending server notice to %s", user_id)

-        yield self._event_creation_handler.create_and_send_nonmember_event(
-            requester, {
-                "type": EventTypes.Message,
-                "room_id": room_id,
-                "sender": system_mxid,
-                "content": event_content,
-            },
-            ratelimit=False,
+        event_dict = {
+            "type": type,
+            "room_id": room_id,
+            "sender": system_mxid,
+            "content": event_content,
+        }
+
+        if state_key is not None:
+            event_dict['state_key'] = state_key
+
+        res = yield self._event_creation_handler.create_and_send_nonmember_event(
+            requester, event_dict, ratelimit=False,
         )
+        defer.returnValue(res)

     @cachedInlineCallbacks()
     def get_notice_room_for_user(self, user_id):
@@ -142,5 +156,12 @@ class ServerNoticesManager(object):
         )
         room_id = info['room_id']

+        max_id = yield self._store.add_tag_to_room(
+            user_id, room_id, SERVER_NOTICE_ROOM_TAG, {},
+        )
+        self._notifier.on_new_event(
+            "account_data_key", max_id, users=[user_id]
+        )
+
         logger.info("Created server notices room %s for %s", room_id, user_id)
         defer.returnValue(room_id)
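A hedged sketch of driving the extended ``send_notice`` signature, as the resource-limit notices above do when pinning::

    from twisted.internet import defer

    from synapse.api.constants import EventTypes

    @defer.inlineCallbacks
    def pin_notice(manager, user_id, notice_event_id):
        # Pinning is a state event, so pass EventTypes.Pinned and an
        # (empty) state_key; send_notice now returns the created event.
        yield manager.send_notice(
            user_id, {"pinned": [notice_event_id]}, EventTypes.Pinned, '',
        )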
@@ -12,7 +12,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from twisted.internet import defer
+
 from synapse.server_notices.consent_server_notices import ConsentServerNotices
+from synapse.server_notices.resource_limits_server_notices import (
+    ResourceLimitsServerNotices,
+)


 class ServerNoticesSender(object):
@@ -25,34 +30,34 @@ class ServerNoticesSender(object):
         Args:
             hs (synapse.server.HomeServer):
         """
-        # todo: it would be nice to make this more dynamic
-        self._consent_server_notices = ConsentServerNotices(hs)
+        self._server_notices = (
+            ConsentServerNotices(hs),
+            ResourceLimitsServerNotices(hs)
+        )

+    @defer.inlineCallbacks
     def on_user_syncing(self, user_id):
         """Called when the user performs a sync operation.

         Args:
             user_id (str): mxid of user who synced

         Returns:
             Deferred
         """
-        return self._consent_server_notices.maybe_send_server_notice_to_user(
-            user_id,
-        )
+        for sn in self._server_notices:
+            yield sn.maybe_send_server_notice_to_user(
+                user_id,
+            )

+    @defer.inlineCallbacks
     def on_user_ip(self, user_id):
         """Called on the master when a worker process saw a client request.

         Args:
             user_id (str): mxid

         Returns:
             Deferred
         """
         # The synchrotrons use a stubbed version of ServerNoticesSender, so
         # we check for notices to send to the user in on_user_ip as well as
         # in on_user_syncing
-        return self._consent_server_notices.maybe_send_server_notice_to_user(
-            user_id,
-        )
+        for sn in self._server_notices:
+            yield sn.maybe_send_server_notice_to_user(
+                user_id,
+            )
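The fan-out the diff introduces, reduced to a sketch (``StubNotices`` is hypothetical)::

    from twisted.internet import defer

    class StubNotices(object):
        @defer.inlineCallbacks
        def maybe_send_server_notice_to_user(self, user_id):
            yield defer.succeed(None)  # stand-in for a real notice check

    @defer.inlineCallbacks
    def on_user_syncing(notices, user_id):
        # Each registered notice type gets a chance to message the user;
        # @defer.inlineCallbacks keeps the loop returning a Deferred.
        for sn in notices:
            yield sn.maybe_send_server_notice_to_user(user_id)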
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,21 +14,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.


import hashlib
import logging
from collections import namedtuple

from six import iteritems, iterkeys, itervalues
from six import iteritems, itervalues

from frozendict import frozendict

from twisted.internet import defer

from synapse import event_auth
from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError
from synapse.api.constants import EventTypes, RoomVersions
from synapse.events.snapshot import EventContext
from synapse.state import v1
from synapse.util.async_helpers import Linearizer
from synapse.util.caches import get_cache_factor_for
from synapse.util.caches.expiringcache import ExpiringCache
@@ -264,6 +262,7 @@ class StateHandler(object):
            defer.returnValue(context)

        logger.debug("calling resolve_state_groups from compute_event_context")

        entry = yield self.resolve_state_groups_for_events(
            event.room_id, [e for e, _ in event.prev_events],
        )

@@ -338,8 +337,11 @@ class StateHandler(object):
        event, resolves conflicts between them and returns them.

        Args:
            room_id (str):
            event_ids (list[str]):
            room_id (str)
            event_ids (list[str])
            explicit_room_version (str|None): If set, uses the given room
                version to choose the resolution algorithm. If None, then
                checks the database for room version.

        Returns:
            Deferred[_StateCacheEntry]: resolved state
@@ -353,7 +355,12 @@ class StateHandler(object):
            room_id, event_ids
        )

        if len(state_groups_ids) == 1:
        if len(state_groups_ids) == 0:
            defer.returnValue(_StateCacheEntry(
                state={},
                state_group=None,
            ))
        elif len(state_groups_ids) == 1:
            name, state_list = list(state_groups_ids.items()).pop()

            prev_group, delta_ids = yield self.store.get_state_group_delta(name)
@@ -365,8 +372,11 @@ class StateHandler(object):
                delta_ids=delta_ids,
            ))

        room_version = yield self.store.get_room_version(room_id)

        result = yield self._state_resolution_handler.resolve_state_groups(
            room_id, state_groups_ids, None, self._state_map_factory,
            room_id, room_version, state_groups_ids, None,
            self._state_map_factory,
        )
        defer.returnValue(result)

@@ -375,7 +385,7 @@ class StateHandler(object):
            ev_ids, get_prev_content=False, check_redacted=False,
        )

    def resolve_events(self, state_sets, event):
    def resolve_events(self, room_version, state_sets, event):
        logger.info(
            "Resolving state for %s with %d groups", event.room_id, len(state_sets)
        )
@@ -391,7 +401,9 @@ class StateHandler(object):
        }

        with Measure(self.clock, "state._resolve_events"):
            new_state = resolve_events_with_state_map(state_set_ids, state_map)
            new_state = resolve_events_with_state_map(
                room_version, state_set_ids, state_map,
            )

        new_state = {
            key: state_map[ev_id] for key, ev_id in iteritems(new_state)
@@ -430,7 +442,7 @@ class StateResolutionHandler(object):
    @defer.inlineCallbacks
    @log_function
    def resolve_state_groups(
        self, room_id, state_groups_ids, event_map, state_map_factory,
        self, room_id, room_version, state_groups_ids, event_map, state_map_factory,
    ):
        """Resolves conflicts between a set of state groups

@@ -439,6 +451,7 @@ class StateResolutionHandler(object):

        Args:
            room_id (str): room we are resolving for (used for logging)
            room_version (str): version of the room
            state_groups_ids (dict[int, dict[(str, str), str]]):
                map from state group id to the state in that state group
                (where 'state' is a map from state key to event id)
@@ -492,6 +505,7 @@ class StateResolutionHandler(object):
        logger.info("Resolving conflicted state for %r", room_id)
        with Measure(self.clock, "state._resolve_events"):
            new_state = yield resolve_events_with_factory(
                room_version,
                list(itervalues(state_groups_ids)),
                event_map=event_map,
                state_map_factory=state_map_factory,
@@ -575,16 +589,10 @@ def _make_state_cache_entry(
    )

def _ordered_events(events):
    def key_func(e):
        return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest()

    return sorted(events, key=key_func)


def resolve_events_with_state_map(state_sets, state_map):
def resolve_events_with_state_map(room_version, state_sets, state_map):
    """
    Args:
        room_version(str): Version of the room
        state_sets(list): List of dicts of (type, state_key) -> event_id,
            which are the different state groups to resolve.
        state_map(dict): a dict from event_id to event, for all events in
@@ -594,75 +602,23 @@ def resolve_events_with_state_map(state_sets, state_map):
        dict[(str, str), str]:
            a map from (type, state_key) to event_id.
    """
    if len(state_sets) == 1:
        return state_sets[0]

    unconflicted_state, conflicted_state = _seperate(
        state_sets,
    )

    auth_events = _create_auth_events_from_maps(
        unconflicted_state, conflicted_state, state_map
    )

    return _resolve_with_state(
        unconflicted_state, conflicted_state, auth_events, state_map
    )
    if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,):
        return v1.resolve_events_with_state_map(
            state_sets, state_map,
        )
    else:
        # This should only happen if we added a version but forgot to add it to
        # the list above.
        raise Exception(
            "No state resolution algorithm defined for version %r" % (room_version,)
        )

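The refactor turns resolve_events_with_state_map into a thin dispatcher on room_version, with the v1 algorithm moved into its own module. A registry-style sketch of the same idea (a hypothetical table; Synapse itself uses the explicit `if` above):

    # Hypothetical registry keyed on the RoomVersions constants used above.
    STATE_RESOLVERS = {
        RoomVersions.V1: v1.resolve_events_with_state_map,
        RoomVersions.VDH_TEST: v1.resolve_events_with_state_map,
    }


    def resolve_for_version(room_version, state_sets, state_map):
        resolver = STATE_RESOLVERS.get(room_version)
        if resolver is None:
            # Same failure mode as the explicit `else` branch above.
            raise Exception(
                "No state resolution algorithm defined for version %r" % (room_version,)
            )
        return resolver(state_sets, state_map)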
def _seperate(state_sets):
    """Takes the state_sets and figures out which keys are conflicted and
    which aren't. i.e., which have multiple different event_ids associated
    with them in different state sets.

    Args:
        state_sets(iterable[dict[(str, str), str]]):
            List of dicts of (type, state_key) -> event_id, which are the
            different state groups to resolve.

    Returns:
        (dict[(str, str), str], dict[(str, str), set[str]]):
            A tuple of (unconflicted_state, conflicted_state), where:

            unconflicted_state is a dict mapping (type, state_key)->event_id
            for unconflicted state keys.

            conflicted_state is a dict mapping (type, state_key) to a set of
            event ids for conflicted state keys.
    """
    state_set_iterator = iter(state_sets)
    unconflicted_state = dict(next(state_set_iterator))
    conflicted_state = {}

    for state_set in state_set_iterator:
        for key, value in iteritems(state_set):
            # Check if there is an unconflicted entry for the state key.
            unconflicted_value = unconflicted_state.get(key)
            if unconflicted_value is None:
                # There isn't an unconflicted entry so check if there is a
                # conflicted entry.
                ls = conflicted_state.get(key)
                if ls is None:
                    # There wasn't a conflicted entry so haven't seen this key before.
                    # Therefore it isn't conflicted yet.
                    unconflicted_state[key] = value
                else:
                    # This key is already conflicted, add our value to the conflict set.
                    ls.add(value)
            elif unconflicted_value != value:
                # If the unconflicted value is not the same as our value then we
                # have a new conflict. So move the key from the unconflicted_state
                # to the conflicted state.
                conflicted_state[key] = {value, unconflicted_value}
                unconflicted_state.pop(key, None)

    return unconflicted_state, conflicted_state

@defer.inlineCallbacks
def resolve_events_with_factory(state_sets, event_map, state_map_factory):
def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory):
    """
    Args:
        room_version(str): Version of the room

        state_sets(list): List of dicts of (type, state_key) -> event_id,
            which are the different state groups to resolve.

@@ -682,185 +638,13 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory):
        Deferred[dict[(str, str), str]]:
            a map from (type, state_key) to event_id.
    """
    if len(state_sets) == 1:
        defer.returnValue(state_sets[0])

    unconflicted_state, conflicted_state = _seperate(
        state_sets,
    )

    needed_events = set(
        event_id
        for event_ids in itervalues(conflicted_state)
        for event_id in event_ids
    )
    if event_map is not None:
        needed_events -= set(iterkeys(event_map))

    logger.info("Asking for %d conflicted events", len(needed_events))

    # dict[str, FrozenEvent]: a map from state event id to event. Only includes
    # the state events which are in conflict (and those in event_map)
    state_map = yield state_map_factory(needed_events)
    if event_map is not None:
        state_map.update(event_map)

    # get the ids of the auth events which allow us to authenticate the
    # conflicted state, picking only from the unconflicting state.
    #
    # dict[(str, str), str]: a map from state key to event id
    auth_events = _create_auth_events_from_maps(
        unconflicted_state, conflicted_state, state_map
    )

    new_needed_events = set(itervalues(auth_events))
    new_needed_events -= needed_events
    if event_map is not None:
        new_needed_events -= set(iterkeys(event_map))

    logger.info("Asking for %d auth events", len(new_needed_events))

    state_map_new = yield state_map_factory(new_needed_events)
    state_map.update(state_map_new)

    defer.returnValue(_resolve_with_state(
        unconflicted_state, conflicted_state, auth_events, state_map
    ))

def _create_auth_events_from_maps(unconflicted_state, conflicted_state, state_map):
    auth_events = {}
    for event_ids in itervalues(conflicted_state):
        for event_id in event_ids:
            if event_id in state_map:
                keys = event_auth.auth_types_for_event(state_map[event_id])
                for key in keys:
                    if key not in auth_events:
                        event_id = unconflicted_state.get(key, None)
                        if event_id:
                            auth_events[key] = event_id
    return auth_events


def _resolve_with_state(unconflicted_state_ids, conflicted_state_ids, auth_event_ids,
                        state_map):
    conflicted_state = {}
    for key, event_ids in iteritems(conflicted_state_ids):
        events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map]
        if len(events) > 1:
            conflicted_state[key] = events
        elif len(events) == 1:
            unconflicted_state_ids[key] = events[0].event_id

    auth_events = {
        key: state_map[ev_id]
        for key, ev_id in iteritems(auth_event_ids)
        if ev_id in state_map
    }

    try:
        resolved_state = _resolve_state_events(
            conflicted_state, auth_events
    if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,):
        return v1.resolve_events_with_factory(
            state_sets, event_map, state_map_factory,
        )
    else:
        # This should only happen if we added a version but forgot to add it to
        # the list above.
        raise Exception(
            "No state resolution algorithm defined for version %r" % (room_version,)
        )
    except Exception:
        logger.exception("Failed to resolve state")
        raise

    new_state = unconflicted_state_ids
    for key, event in iteritems(resolved_state):
        new_state[key] = event.event_id

    return new_state

def _resolve_state_events(conflicted_state, auth_events):
    """ This is where we actually decide which of the conflicted state to
    use.

    We resolve conflicts in the following order:
        1. power levels
        2. join rules
        3. memberships
        4. other events.
    """
    resolved_state = {}
    if POWER_KEY in conflicted_state:
        events = conflicted_state[POWER_KEY]
        logger.debug("Resolving conflicted power levels %r", events)
        resolved_state[POWER_KEY] = _resolve_auth_events(
            events, auth_events)

    auth_events.update(resolved_state)

    for key, events in iteritems(conflicted_state):
        if key[0] == EventTypes.JoinRules:
            logger.debug("Resolving conflicted join rules %r", events)
            resolved_state[key] = _resolve_auth_events(
                events,
                auth_events
            )

    auth_events.update(resolved_state)

    for key, events in iteritems(conflicted_state):
        if key[0] == EventTypes.Member:
            logger.debug("Resolving conflicted member lists %r", events)
            resolved_state[key] = _resolve_auth_events(
                events,
                auth_events
            )

    auth_events.update(resolved_state)

    for key, events in iteritems(conflicted_state):
        if key not in resolved_state:
            logger.debug("Resolving conflicted state %r:%r", key, events)
            resolved_state[key] = _resolve_normal_events(
                events, auth_events
            )

    return resolved_state


def _resolve_auth_events(events, auth_events):
    reverse = [i for i in reversed(_ordered_events(events))]

    auth_keys = set(
        key
        for event in events
        for key in event_auth.auth_types_for_event(event)
    )

    new_auth_events = {}
    for key in auth_keys:
        auth_event = auth_events.get(key, None)
        if auth_event:
            new_auth_events[key] = auth_event

    auth_events = new_auth_events

    prev_event = reverse[0]
    for event in reverse[1:]:
        auth_events[(prev_event.type, prev_event.state_key)] = prev_event
        try:
            # The signatures have already been checked at this point
            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
            prev_event = event
        except AuthError:
            return prev_event

    return event


def _resolve_normal_events(events, auth_events):
    for event in _ordered_events(events):
        try:
            # The signatures have already been checked at this point
            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
            return event
        except AuthError:
            pass

    # Use the last event (the one with the least depth) if they all fail
    # the auth check.
    return event
synapse/state/v1.py (new file, 321 lines)
@@ -0,0 +1,321 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import hashlib
import logging

from six import iteritems, iterkeys, itervalues

from twisted.internet import defer

from synapse import event_auth
from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError

logger = logging.getLogger(__name__)


POWER_KEY = (EventTypes.PowerLevels, "")

def resolve_events_with_state_map(state_sets, state_map):
    """
    Args:
        state_sets(list): List of dicts of (type, state_key) -> event_id,
            which are the different state groups to resolve.
        state_map(dict): a dict from event_id to event, for all events in
            state_sets.

    Returns
        dict[(str, str), str]:
            a map from (type, state_key) to event_id.
    """
    if len(state_sets) == 1:
        return state_sets[0]

    unconflicted_state, conflicted_state = _seperate(
        state_sets,
    )

    auth_events = _create_auth_events_from_maps(
        unconflicted_state, conflicted_state, state_map
    )

    return _resolve_with_state(
        unconflicted_state, conflicted_state, auth_events, state_map
    )

@defer.inlineCallbacks
def resolve_events_with_factory(state_sets, event_map, state_map_factory):
    """
    Args:
        state_sets(list): List of dicts of (type, state_key) -> event_id,
            which are the different state groups to resolve.

        event_map(dict[str,FrozenEvent]|None):
            a dict from event_id to event, for any events that we happen to
            have in flight (eg, those currently being persisted). This will be
            used as a starting point for finding the state we need; any missing
            events will be requested via state_map_factory.

            If None, all events will be fetched via state_map_factory.

        state_map_factory(func): will be called
            with a list of event_ids that are needed, and should return with
            a Deferred of dict of event_id to event.

    Returns
        Deferred[dict[(str, str), str]]:
            a map from (type, state_key) to event_id.
    """
    if len(state_sets) == 1:
        defer.returnValue(state_sets[0])

    unconflicted_state, conflicted_state = _seperate(
        state_sets,
    )

    needed_events = set(
        event_id
        for event_ids in itervalues(conflicted_state)
        for event_id in event_ids
    )
    if event_map is not None:
        needed_events -= set(iterkeys(event_map))

    logger.info("Asking for %d conflicted events", len(needed_events))

    # dict[str, FrozenEvent]: a map from state event id to event. Only includes
    # the state events which are in conflict (and those in event_map)
    state_map = yield state_map_factory(needed_events)
    if event_map is not None:
        state_map.update(event_map)

    # get the ids of the auth events which allow us to authenticate the
    # conflicted state, picking only from the unconflicting state.
    #
    # dict[(str, str), str]: a map from state key to event id
    auth_events = _create_auth_events_from_maps(
        unconflicted_state, conflicted_state, state_map
    )

    new_needed_events = set(itervalues(auth_events))
    new_needed_events -= needed_events
    if event_map is not None:
        new_needed_events -= set(iterkeys(event_map))

    logger.info("Asking for %d auth events", len(new_needed_events))

    state_map_new = yield state_map_factory(new_needed_events)
    state_map.update(state_map_new)

    defer.returnValue(_resolve_with_state(
        unconflicted_state, conflicted_state, auth_events, state_map
    ))

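The state_map_factory contract documented above is simply "give me events for these ids, as a Deferred". A minimal sketch of a caller (the in-memory store here is hypothetical; the real factory queries the Synapse datastore):

    from twisted.internet import defer

    EVENT_STORE = {}  # hypothetical: event_id -> event object


    def state_map_factory(event_ids):
        # Must return a Deferred of {event_id: event}, as documented above.
        return defer.succeed({
            e_id: EVENT_STORE[e_id] for e_id in event_ids if e_id in EVENT_STORE
        })


    # resolved = yield resolve_events_with_factory(
    #     state_sets, event_map=None, state_map_factory=state_map_factory,
    # )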
def _seperate(state_sets):
    """Takes the state_sets and figures out which keys are conflicted and
    which aren't. i.e., which have multiple different event_ids associated
    with them in different state sets.

    Args:
        state_sets(iterable[dict[(str, str), str]]):
            List of dicts of (type, state_key) -> event_id, which are the
            different state groups to resolve.

    Returns:
        (dict[(str, str), str], dict[(str, str), set[str]]):
            A tuple of (unconflicted_state, conflicted_state), where:

            unconflicted_state is a dict mapping (type, state_key)->event_id
            for unconflicted state keys.

            conflicted_state is a dict mapping (type, state_key) to a set of
            event ids for conflicted state keys.
    """
    state_set_iterator = iter(state_sets)
    unconflicted_state = dict(next(state_set_iterator))
    conflicted_state = {}

    for state_set in state_set_iterator:
        for key, value in iteritems(state_set):
            # Check if there is an unconflicted entry for the state key.
            unconflicted_value = unconflicted_state.get(key)
            if unconflicted_value is None:
                # There isn't an unconflicted entry so check if there is a
                # conflicted entry.
                ls = conflicted_state.get(key)
                if ls is None:
                    # There wasn't a conflicted entry so haven't seen this key before.
                    # Therefore it isn't conflicted yet.
                    unconflicted_state[key] = value
                else:
                    # This key is already conflicted, add our value to the conflict set.
                    ls.add(value)
            elif unconflicted_value != value:
                # If the unconflicted value is not the same as our value then we
                # have a new conflict. So move the key from the unconflicted_state
                # to the conflicted state.
                conflicted_state[key] = {value, unconflicted_value}
                unconflicted_state.pop(key, None)

    return unconflicted_state, conflicted_state

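A tiny worked example of the separation step (the event ids are illustrative):

    state_sets = [
        {("m.room.name", ""): "$A", ("m.room.topic", ""): "$T"},
        {("m.room.name", ""): "$B", ("m.room.topic", ""): "$T"},
    ]

    unconflicted, conflicted = _seperate(state_sets)
    # unconflicted == {("m.room.topic", ""): "$T"}       same in every set
    # conflicted   == {("m.room.name", ""): {"$A", "$B"}}  differs, needs resolving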
def _create_auth_events_from_maps(unconflicted_state, conflicted_state, state_map):
    auth_events = {}
    for event_ids in itervalues(conflicted_state):
        for event_id in event_ids:
            if event_id in state_map:
                keys = event_auth.auth_types_for_event(state_map[event_id])
                for key in keys:
                    if key not in auth_events:
                        event_id = unconflicted_state.get(key, None)
                        if event_id:
                            auth_events[key] = event_id
    return auth_events


def _resolve_with_state(unconflicted_state_ids, conflicted_state_ids, auth_event_ids,
                        state_map):
    conflicted_state = {}
    for key, event_ids in iteritems(conflicted_state_ids):
        events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map]
        if len(events) > 1:
            conflicted_state[key] = events
        elif len(events) == 1:
            unconflicted_state_ids[key] = events[0].event_id

    auth_events = {
        key: state_map[ev_id]
        for key, ev_id in iteritems(auth_event_ids)
        if ev_id in state_map
    }

    try:
        resolved_state = _resolve_state_events(
            conflicted_state, auth_events
        )
    except Exception:
        logger.exception("Failed to resolve state")
        raise

    new_state = unconflicted_state_ids
    for key, event in iteritems(resolved_state):
        new_state[key] = event.event_id

    return new_state

def _resolve_state_events(conflicted_state, auth_events):
    """ This is where we actually decide which of the conflicted state to
    use.

    We resolve conflicts in the following order:
        1. power levels
        2. join rules
        3. memberships
        4. other events.
    """
    resolved_state = {}
    if POWER_KEY in conflicted_state:
        events = conflicted_state[POWER_KEY]
        logger.debug("Resolving conflicted power levels %r", events)
        resolved_state[POWER_KEY] = _resolve_auth_events(
            events, auth_events)

    auth_events.update(resolved_state)

    for key, events in iteritems(conflicted_state):
        if key[0] == EventTypes.JoinRules:
            logger.debug("Resolving conflicted join rules %r", events)
            resolved_state[key] = _resolve_auth_events(
                events,
                auth_events
            )

    auth_events.update(resolved_state)

    for key, events in iteritems(conflicted_state):
        if key[0] == EventTypes.Member:
            logger.debug("Resolving conflicted member lists %r", events)
            resolved_state[key] = _resolve_auth_events(
                events,
                auth_events
            )

    auth_events.update(resolved_state)

    for key, events in iteritems(conflicted_state):
        if key not in resolved_state:
            logger.debug("Resolving conflicted state %r:%r", key, events)
            resolved_state[key] = _resolve_normal_events(
                events, auth_events
            )

    return resolved_state

def _resolve_auth_events(events, auth_events):
    reverse = [i for i in reversed(_ordered_events(events))]

    auth_keys = set(
        key
        for event in events
        for key in event_auth.auth_types_for_event(event)
    )

    new_auth_events = {}
    for key in auth_keys:
        auth_event = auth_events.get(key, None)
        if auth_event:
            new_auth_events[key] = auth_event

    auth_events = new_auth_events

    prev_event = reverse[0]
    for event in reverse[1:]:
        auth_events[(prev_event.type, prev_event.state_key)] = prev_event
        try:
            # The signatures have already been checked at this point
            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
            prev_event = event
        except AuthError:
            return prev_event

    return event


def _resolve_normal_events(events, auth_events):
    for event in _ordered_events(events):
        try:
            # The signatures have already been checked at this point
            event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
            return event
        except AuthError:
            pass

    # Use the last event (the one with the least depth) if they all fail
    # the auth check.
    return event


def _ordered_events(events):
    def key_func(e):
        return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest()

    return sorted(events, key=key_func)
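The ordering key above sorts deepest events first, breaking depth ties deterministically by the SHA-1 digest of the event id, so every server orders the same events the same way. A quick sketch (the event stub is hypothetical):

    import hashlib
    from collections import namedtuple

    Ev = namedtuple("Ev", ["depth", "event_id"])  # stand-in for a real event


    def key_func(e):
        return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest()


    events = [Ev(1, "$a"), Ev(3, "$b"), Ev(3, "$c")]
    ordered = sorted(events, key=key_func)
    # The depth-3 events come first; "$b" vs "$c" is decided by their sha1
    # digests, so the result is stable regardless of input order.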
@@ -17,9 +17,10 @@ import sys
import threading
import time

from six import iteritems, iterkeys, itervalues
from six import PY2, iteritems, iterkeys, itervalues
from six.moves import intern, range

from canonicaljson import json
from prometheus_client import Histogram

from twisted.internet import defer
@@ -1216,3 +1217,32 @@ class _RollbackButIsFineException(Exception):
    something went wrong.
    """
    pass

def db_to_json(db_content):
    """
    Take some data from a database row and return a JSON-decoded object.

    Args:
        db_content (memoryview|buffer|bytes|bytearray|unicode)
    """
    # psycopg2 on Python 3 returns memoryview objects, which we need to
    # cast to bytes to decode
    if isinstance(db_content, memoryview):
        db_content = db_content.tobytes()

    # psycopg2 on Python 2 returns buffer objects, which we need to cast to
    # bytes to decode
    if PY2 and isinstance(db_content, buffer):
        db_content = bytes(db_content)

    # Decode it to a Unicode string before feeding it to json.loads, so we
    # consistently get a Unicode-containing object out.
    if isinstance(db_content, (bytes, bytearray)):
        db_content = db_content.decode('utf8')

    try:
        return json.loads(db_content)
    except Exception:
        logging.warning("Tried to decode '%r' as JSON and failed", db_content)
        raise

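A quick usage sketch showing why the helper exists: the same JSON column can come back as different Python types depending on the driver and Python version, and db_to_json normalises all of them:

    import json

    payload = json.dumps({"ok": True}).encode("utf8")

    # psycopg2 on Python 3 hands back memoryview for bytea columns:
    assert db_to_json(memoryview(payload)) == {"ok": True}

    # sqlite (and already-decoded text columns) hand back plain strings:
    assert db_to_json(payload.decode("utf8")) == {"ok": True}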
@@ -169,7 +169,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
        local_by_user_then_device = {}
        for user_id, messages_by_device in messages_by_user_then_device.items():
            messages_json_for_user = {}
            devices = messages_by_device.keys()
            devices = list(messages_by_device.keys())
            if len(devices) == 1 and devices[0] == "*":
                # Handle wildcard device_ids.
                sql = (

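This change (and the `zip` fix in events_worker.py further down) is the classic Python 3 porting pattern: dict.keys() and zip() return lazy views/iterators that do not support indexing, so they must be materialised before devices[0]-style access. For instance:

    d = {"*": ["msg"]}

    devices = d.keys()          # py3: a dict_keys view, not indexable
    devices = list(devices)     # now devices[0] is safe on both py2 and py3
    assert devices[0] == "*"

    pairs = [("e1", "s1"), ("e2", "s2")]
    event_ids = list(zip(*pairs))[0]   # zip(...) alone is an iterator on py3
    assert event_ids == ("e1", "e2")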
@@ -24,7 +24,7 @@ from synapse.api.errors import StoreError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList

from ._base import Cache, SQLBaseStore
from ._base import Cache, SQLBaseStore, db_to_json

logger = logging.getLogger(__name__)

@@ -411,7 +411,7 @@ class DeviceStore(SQLBaseStore):
        if device is not None:
            key_json = device.get("key_json", None)
            if key_json:
                result["keys"] = json.loads(key_json)
                result["keys"] = db_to_json(key_json)
            device_display_name = device.get("device_display_name", None)
            if device_display_name:
                result["device_display_name"] = device_display_name
@@ -466,7 +466,7 @@ class DeviceStore(SQLBaseStore):
            retcol="content",
            desc="_get_cached_user_device",
        )
        defer.returnValue(json.loads(content))
        defer.returnValue(db_to_json(content))

    @cachedInlineCallbacks()
    def _get_cached_devices_for_user(self, user_id):
@@ -479,7 +479,7 @@ class DeviceStore(SQLBaseStore):
            desc="_get_cached_devices_for_user",
        )
        defer.returnValue({
            device["device_id"]: json.loads(device["content"])
            device["device_id"]: db_to_json(device["content"])
            for device in devices
        })

@@ -511,7 +511,7 @@ class DeviceStore(SQLBaseStore):

            key_json = device.get("key_json", None)
            if key_json:
                result["keys"] = json.loads(key_json)
                result["keys"] = db_to_json(key_json)
            device_display_name = device.get("device_display_name", None)
            if device_display_name:
                result["device_display_name"] = device_display_name

@@ -14,13 +14,13 @@
# limitations under the License.
from six import iteritems

from canonicaljson import encode_canonical_json, json
from canonicaljson import encode_canonical_json

from twisted.internet import defer

from synapse.util.caches.descriptors import cached

from ._base import SQLBaseStore
from ._base import SQLBaseStore, db_to_json


class EndToEndKeyStore(SQLBaseStore):
@@ -90,7 +90,7 @@ class EndToEndKeyStore(SQLBaseStore):

        for user_id, device_keys in iteritems(results):
            for device_id, device_info in iteritems(device_keys):
                device_info["keys"] = json.loads(device_info.pop("key_json"))
                device_info["keys"] = db_to_json(device_info.pop("key_json"))

        defer.returnValue(results)

@@ -41,13 +41,18 @@ class PostgresEngine(object):
        db_conn.set_isolation_level(
            self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
        )

        # Set the bytea output to escape, vs the default of hex
        cursor = db_conn.cursor()
        cursor.execute("SET bytea_output TO escape")

        # Asynchronous commit, don't wait for the server to call fsync before
        # ending the transaction.
        # https://www.postgresql.org/docs/current/static/wal-async-commit.html
        if not self.synchronous_commit:
            cursor = db_conn.cursor()
            cursor.execute("SET synchronous_commit TO OFF")
            cursor.close()

        cursor.close()

    def is_deadlock(self, error):
        if isinstance(error, self.module.DatabaseError):

@@ -19,7 +19,7 @@ import logging
from collections import OrderedDict, deque, namedtuple
from functools import wraps

from six import iteritems
from six import iteritems, text_type
from six.moves import range

from canonicaljson import json
@@ -705,9 +705,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
                }

                events_map = {ev.event_id: ev for ev, _ in events_context}
                room_version = yield self.get_room_version(room_id)

                logger.debug("calling resolve_state_groups from preserve_events")
                res = yield self._state_resolution_handler.resolve_state_groups(
                    room_id, state_groups, events_map, get_events
                    room_id, room_version, state_groups, events_map, get_events
                )

                defer.returnValue((res.state, None))
@@ -1218,7 +1220,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
                "sender": event.sender,
                "contains_url": (
                    "url" in event.content
                    and isinstance(event.content["url"], basestring)
                    and isinstance(event.content["url"], text_type)
                ),
            }
            for event, _ in events_and_contexts
@@ -1527,7 +1529,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore

                    contains_url = "url" in content
                    if contains_url:
                        contains_url &= isinstance(content["url"], basestring)
                        contains_url &= isinstance(content["url"], text_type)
                except (KeyError, AttributeError):
                    # If the event is missing a necessary field then
                    # skip over it.
@@ -1908,9 +1910,9 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
            (room_id,)
        )
        rows = txn.fetchall()
        max_depth = max(row[0] for row in rows)
        max_depth = max(row[1] for row in rows)

        if max_depth <= token.topological:
        if max_depth < token.topological:
            # We need to ensure we don't delete all the events from the database
            # otherwise we wouldn't be able to send any events (due to not
            # having any backwards extremities)

@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import logging
from collections import namedtuple
@@ -265,7 +266,7 @@ class EventsWorkerStore(SQLBaseStore):
        """
        with Measure(self._clock, "_fetch_event_list"):
            try:
                event_id_lists = zip(*event_list)[0]
                event_id_lists = list(zip(*event_list))[0]
                event_ids = [
                    item for sublist in event_id_lists for item in sublist
                ]
@@ -299,14 +300,14 @@ class EventsWorkerStore(SQLBaseStore):
                logger.exception("do_fetch")

                # We only want to resolve deferreds from the main thread
                def fire(evs):
                def fire(evs, exc):
                    for _, d in evs:
                        if not d.called:
                            with PreserveLoggingContext():
                                d.errback(e)
                                d.errback(exc)

                with PreserveLoggingContext():
                    self.hs.get_reactor().callFromThread(fire, event_list)
                    self.hs.get_reactor().callFromThread(fire, event_list, e)

    @defer.inlineCallbacks
    def _enqueue_events(self, events, check_redacted=True, allow_rejected=False):

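The fire(evs, exc) change fixes a Python 3 gotcha: the variable bound by `except ... as e` is deleted when the except block exits, so a closure that refers to `e` later blows up with NameError. A minimal reproduction of both the bug and the fix:

    def broken():
        try:
            raise ValueError("boom")
        except Exception as e:
            def fire():
                return e       # py3: NameError, `e` is unbound after the block
            return fire


    # Passing the exception explicitly, as the diff does, avoids the problem:
    def fixed():
        try:
            raise ValueError("boom")
        except Exception as e:
            def fire(exc):
                return exc     # fine: the exception travels as an argument
            return fire, e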
@@ -13,14 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from canonicaljson import encode_canonical_json, json
from canonicaljson import encode_canonical_json

from twisted.internet import defer

from synapse.api.errors import Codes, SynapseError
from synapse.util.caches.descriptors import cachedInlineCallbacks

from ._base import SQLBaseStore
from ._base import SQLBaseStore, db_to_json


class FilteringStore(SQLBaseStore):
@@ -44,7 +44,7 @@ class FilteringStore(SQLBaseStore):
            desc="get_user_filter",
        )

        defer.returnValue(json.loads(bytes(def_json).decode("utf-8")))
        defer.returnValue(db_to_json(def_json))

    def add_user_filter(self, user_localpart, user_filter):
        def_json = encode_canonical_json(user_filter)

@@ -36,7 +36,6 @@ class MonthlyActiveUsersStore(SQLBaseStore):

    @defer.inlineCallbacks
    def initialise_reserved_users(self, threepids):
        # TODO Why can't I do this in init?
        store = self.hs.get_datastore()
        reserved_user_list = []

@@ -147,6 +146,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
            return count
        return self.runInteraction("count_users", _count_users)

    @defer.inlineCallbacks
    def upsert_monthly_active_user(self, user_id):
        """
        Updates or inserts monthly active user member
@@ -155,7 +155,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
            Deferred[bool]: True if a new entry was created, False if an
                existing one was updated.
        """
        is_insert = self._simple_upsert(
        is_insert = yield self._simple_upsert(
            desc="upsert_monthly_active_user",
            table="monthly_active_users",
            keyvalues={
@@ -200,6 +200,11 @@ class MonthlyActiveUsersStore(SQLBaseStore):
            user_id(str): the user_id to query
        """
        if self.hs.config.limit_usage_by_mau:
            is_trial = yield self.is_trial_user(user_id)
            if is_trial:
                # we don't track trial users in the MAU table.
                return

            last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id)
            now = self.hs.get_clock().time_msec()

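The `is_insert = yield self._simple_upsert(...)` change is a missing-yield fix: without the yield, is_insert is bound to a Deferred, which is always truthy, so the boolean it is supposed to carry is never actually inspected. Schematically:

    from twisted.internet import defer


    @defer.inlineCallbacks
    def upsert():
        d = defer.succeed(False)   # stands in for self._simple_upsert(...)

        is_insert = d              # bug: a Deferred is truthy even when its
        assert bool(is_insert)     # result is False

        is_insert = yield d        # fix: now we have the actual boolean
        assert is_insert is False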
@@ -71,8 +71,6 @@ class ProfileWorkerStore(SQLBaseStore):
            desc="get_from_remote_profile_cache",
        )


class ProfileStore(ProfileWorkerStore):
    def create_profile(self, user_localpart):
        return self._simple_insert(
            table="profiles",
@@ -96,6 +94,8 @@ class ProfileStore(ProfileWorkerStore):
            desc="set_profile_avatar_url",
        )


class ProfileStore(ProfileWorkerStore):
    def add_remote_profile_cache(self, user_id, displayname, avatar_url):
        """Ensure we are caching the remote user's profiles.

@@ -15,7 +15,8 @@
# limitations under the License.

import logging
import types

import six

from canonicaljson import encode_canonical_json, json

@@ -27,6 +28,11 @@ from ._base import SQLBaseStore

logger = logging.getLogger(__name__)

if six.PY2:
    db_binary_type = buffer
else:
    db_binary_type = memoryview


class PusherWorkerStore(SQLBaseStore):
    def _decode_pushers_rows(self, rows):
@@ -34,18 +40,18 @@ class PusherWorkerStore(SQLBaseStore):
            dataJson = r['data']
            r['data'] = None
            try:
                if isinstance(dataJson, types.BufferType):
                if isinstance(dataJson, db_binary_type):
                    dataJson = str(dataJson).decode("UTF8")

                r['data'] = json.loads(dataJson)
            except Exception as e:
                logger.warn(
                    "Invalid JSON in data for pusher %d: %s, %s",
                    r['id'], dataJson, e.message,
                    r['id'], dataJson, e.args[0],
                )
                pass

            if isinstance(r['pushkey'], types.BufferType):
            if isinstance(r['pushkey'], db_binary_type):
                r['pushkey'] = str(r['pushkey']).decode("UTF8")

        return rows

@@ -26,6 +26,11 @@ from synapse.util.caches.descriptors import cached, cachedInlineCallbacks


class RegistrationWorkerStore(SQLBaseStore):
    def __init__(self, db_conn, hs):
        super(RegistrationWorkerStore, self).__init__(db_conn, hs)

        self.config = hs.config

    @cached()
    def get_user_by_id(self, user_id):
        return self._simple_select_one(
@@ -36,12 +41,33 @@ class RegistrationWorkerStore(SQLBaseStore):
            retcols=[
                "name", "password_hash", "is_guest",
                "consent_version", "consent_server_notice_sent",
                "appservice_id",
                "appservice_id", "creation_ts",
            ],
            allow_none=True,
            desc="get_user_by_id",
        )

    @defer.inlineCallbacks
    def is_trial_user(self, user_id):
        """Checks if user is in the "trial" period, i.e. within the first
        N days of registration defined by `mau_trial_days` config

        Args:
            user_id (str)

        Returns:
            Deferred[bool]
        """

        info = yield self.get_user_by_id(user_id)
        if not info:
            defer.returnValue(False)

        now = self.clock.time_msec()
        trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000
        is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms
        defer.returnValue(is_trial)

    @cached()
    def get_user_by_access_token(self, token):
        """Get a user from the given access token.

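Note that creation_ts is stored in seconds while the clock is in milliseconds, hence the `* 1000` in is_trial_user. A quick arithmetic check of the trial window (values illustrative):

    mau_trial_days = 3
    trial_duration_ms = mau_trial_days * 24 * 60 * 60 * 1000  # 259200000 ms

    creation_ts = 1500000000                  # seconds, as stored in the table
    now = (creation_ts + 2 * 86400) * 1000    # two days later, in milliseconds

    is_trial = (now - creation_ts * 1000) < trial_duration_ms
    assert is_trial  # still inside the 3-day trial period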
@@ -186,6 +186,35 @@ class RoomWorkerStore(SQLBaseStore):
            desc="is_room_blocked",
        )

    @cachedInlineCallbacks(max_entries=10000)
    def get_ratelimit_for_user(self, user_id):
        """Check if there are any overrides for ratelimiting for the given
        user

        Args:
            user_id (str)

        Returns:
            RatelimitOverride if there is an override, else None. If the contents
            of RatelimitOverride are None or 0 then ratelimiting has been
            disabled for that user entirely.
        """
        row = yield self._simple_select_one(
            table="ratelimit_override",
            keyvalues={"user_id": user_id},
            retcols=("messages_per_second", "burst_count"),
            allow_none=True,
            desc="get_ratelimit_for_user",
        )

        if row:
            defer.returnValue(RatelimitOverride(
                messages_per_second=row["messages_per_second"],
                burst_count=row["burst_count"],
            ))
        else:
            defer.returnValue(None)


class RoomStore(RoomWorkerStore, SearchStore):

@@ -469,35 +498,6 @@ class RoomStore(RoomWorkerStore, SearchStore):
            "get_all_new_public_rooms", get_all_new_public_rooms
        )

    @cachedInlineCallbacks(max_entries=10000)
    def get_ratelimit_for_user(self, user_id):
        """Check if there are any overrides for ratelimiting for the given
        user

        Args:
            user_id (str)

        Returns:
            RatelimitOverride if there is an override, else None. If the contents
            of RatelimitOverride are None or 0 then ratelimiting has been
            disabled for that user entirely.
        """
        row = yield self._simple_select_one(
            table="ratelimit_override",
            keyvalues={"user_id": user_id},
            retcols=("messages_per_second", "burst_count"),
            allow_none=True,
            desc="get_ratelimit_for_user",
        )

        if row:
            defer.returnValue(RatelimitOverride(
                messages_per_second=row["messages_per_second"],
                burst_count=row["burst_count"],
            ))
        else:
            defer.returnValue(None)

    @defer.inlineCallbacks
    def block_room(self, room_id, user_id):
        yield self._simple_insert(

@@ -60,8 +60,43 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
    def __init__(self, db_conn, hs):
        super(StateGroupWorkerStore, self).__init__(db_conn, hs)

        # Originally the state store used a single DictionaryCache to cache the
        # event IDs for the state types in a given state group to avoid hammering
        # on the state_group* tables.
        #
        # The point of using a DictionaryCache is that it can cache a subset
        # of the state events for a given state group (i.e. a subset of the keys for a
        # given dict which is an entry in the cache for a given state group ID).
        #
        # However, this poses problems when performing complicated queries
        # on the store - for instance: "give me all the state for this group, but
        # limit members to this subset of users", as DictionaryCache's API isn't
        # rich enough to say "please cache any of these fields, apart from this subset".
        # This is problematic when lazy loading members, which requires this behaviour,
        # as without it the cache has no choice but to speculatively load all
        # state events for the group, which negates the efficiency being sought.
        #
        # Rather than overcomplicating DictionaryCache's API, we instead split the
        # state_group_cache into two halves - one for tracking non-member events,
        # and the other for tracking member_events. This means that lazy loading
        # queries can be made in a cache-friendly manner by querying both caches
        # separately and then merging the result. So for the example above, you
        # would query the members cache for a specific subset of state keys
        # (which DictionaryCache will handle efficiently and fine) and the non-members
        # cache for all state (which DictionaryCache will similarly handle fine)
        # and then just merge the results together.
        #
        # We size the non-members cache to be smaller than the members cache as the
        # vast majority of state in Matrix (today) is member events.

        self._state_group_cache = DictionaryCache(
            "*stateGroupCache*", 500000 * get_cache_factor_for("stateGroupCache")
            "*stateGroupCache*",
            # TODO: this hasn't been tuned yet
            50000 * get_cache_factor_for("stateGroupCache")
        )
        self._state_group_members_cache = DictionaryCache(
            "*stateGroupMembersCache*",
            500000 * get_cache_factor_for("stateGroupMembersCache")
        )

    @defer.inlineCallbacks
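The split-cache idea in the comment above boils down to: query each cache with the kind of filter it is good at, then merge the two answers. A toy sketch, with plain dicts standing in for DictionaryCache:

    non_members_cache = {42: {("m.room.name", ""): "$name"}}
    members_cache = {42: {("m.room.member", "@alice:hs"): "$alice"}}


    def get_state(group, wanted_members):
        # all non-member state, unfiltered
        state = dict(non_members_cache[group])
        # only the requested subset of member state (the lazy-loading case)
        state.update({
            k: v for k, v in members_cache[group].items()
            if k[1] in wanted_members
        })
        return state


    assert get_state(42, {"@alice:hs"}) == {
        ("m.room.name", ""): "$name",
        ("m.room.member", "@alice:hs"): "$alice",
    }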
@@ -275,7 +310,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        })

    @defer.inlineCallbacks
    def _get_state_groups_from_groups(self, groups, types):
    def _get_state_groups_from_groups(self, groups, types, members=None):
        """Returns the state groups for a given set of groups, filtering on
        types of state events.

@@ -284,6 +319,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
            types (Iterable[str, str|None]|None): list of 2-tuples of the form
                (`type`, `state_key`), where a `state_key` of `None` matches all
                state_keys for the `type`. If None, all types are returned.
            members (bool|None): If not None, then, in addition to any filtering
                implied by types, the results are also filtered to only include
                member events (if True), or to exclude member events (if False)

        Returns:
            dictionary state_group -> (dict of (type, state_key) -> event id)
@@ -294,14 +332,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        for chunk in chunks:
            res = yield self.runInteraction(
                "_get_state_groups_from_groups",
                self._get_state_groups_from_groups_txn, chunk, types,
                self._get_state_groups_from_groups_txn, chunk, types, members,
            )
            results.update(res)

        defer.returnValue(results)

    def _get_state_groups_from_groups_txn(
        self, txn, groups, types=None,
        self, txn, groups, types=None, members=None,
    ):
        results = {group: {} for group in groups}

@@ -339,6 +377,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
                %s
            """)

            if members is True:
                sql += " AND type = '%s'" % (EventTypes.Member,)
            elif members is False:
                sql += " AND type <> '%s'" % (EventTypes.Member,)

            # Turns out that postgres doesn't like doing a list of OR's and
            # is about 1000x slower, so we just issue a query for each specific
            # type separately.
@@ -386,6 +429,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
            else:
                where_clause = ""

            if members is True:
                where_clause += " AND type = '%s'" % EventTypes.Member
            elif members is False:
                where_clause += " AND type <> '%s'" % EventTypes.Member

            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
            for group in groups:
@@ -580,10 +628,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):

        defer.returnValue({row["event_id"]: row["state_group"] for row in rows})

    def _get_some_state_from_cache(self, group, types, filtered_types=None):
    def _get_some_state_from_cache(self, cache, group, types, filtered_types=None):
        """Checks if group is in cache. See `_get_state_for_groups`

        Args:
            cache(DictionaryCache): the state group cache to use
            group(int): The state group to lookup
            types(list[str, str|None]): List of 2-tuples of the form
                (`type`, `state_key`), where a `state_key` of `None` matches all
@@ -597,11 +646,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
            requests state from the cache, if False we need to query the DB for the
            missing state.
        """
        is_all, known_absent, state_dict_ids = self._state_group_cache.get(group)
        is_all, known_absent, state_dict_ids = cache.get(group)

        type_to_key = {}

        # tracks whether any of ourrequested types are missing from the cache
        # tracks whether any of our requested types are missing from the cache
        missing_types = False

        for typ, state_key in types:
@@ -648,7 +697,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
            if include(k[0], k[1])
        }, got_all

    def _get_all_state_from_cache(self, group):
    def _get_all_state_from_cache(self, cache, group):
        """Checks if group is in cache. See `_get_state_for_groups`

        Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool
@@ -656,9 +705,10 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        cache, if False we need to query the DB for the missing state.

        Args:
            cache(DictionaryCache): the state group cache to use
            group: The state group to lookup
        """
        is_all, _, state_dict_ids = self._state_group_cache.get(group)
        is_all, _, state_dict_ids = cache.get(group)

        return state_dict_ids, is_all

@@ -681,6 +731,62 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
                list of event types. Other types of events are returned unfiltered.
                If None, `types` filtering is applied to all events.

        Returns:
            Deferred[dict[int, dict[(type, state_key), EventBase]]]
                a dictionary mapping from state group to state dictionary.
        """
        if types is not None:
            non_member_types = [t for t in types if t[0] != EventTypes.Member]

            if filtered_types is not None and EventTypes.Member not in filtered_types:
                # we want all of the membership events
                member_types = None
            else:
                member_types = [t for t in types if t[0] == EventTypes.Member]

        else:
            non_member_types = None
            member_types = None

        non_member_state = yield self._get_state_for_groups_using_cache(
            groups, self._state_group_cache, non_member_types, filtered_types,
        )
        # XXX: we could skip this entirely if member_types is []
        member_state = yield self._get_state_for_groups_using_cache(
            # we set filtered_types=None as member_state only ever contain members.
            groups, self._state_group_members_cache, member_types, None,
        )

        state = non_member_state
        for group in groups:
            state[group].update(member_state[group])

        defer.returnValue(state)

    @defer.inlineCallbacks
    def _get_state_for_groups_using_cache(
        self, groups, cache, types=None, filtered_types=None
    ):
        """Gets the state at each of a list of state groups, optionally
        filtering by type/state_key, querying from a specific cache.

        Args:
            groups (iterable[int]): list of state groups for which we want
                to get the state.
            cache (DictionaryCache): the cache of group ids to state dicts which
                we will pass through - either the normal state cache or the specific
                members state cache.
            types (None|iterable[(str, None|str)]):
                indicates the state type/keys required. If None, the whole
                state is fetched and returned.

                Otherwise, each entry should be a `(type, state_key)` tuple to
                include in the response. A `state_key` of None is a wildcard
                meaning that we require all state with that type.
            filtered_types(list[str]|None): Only apply filtering via `types` to this
                list of event types. Other types of events are returned unfiltered.
                If None, `types` filtering is applied to all events.

        Returns:
            Deferred[dict[int, dict[(type, state_key), EventBase]]]
                a dictionary mapping from state group to state dictionary.
@@ -692,7 +798,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        if types is not None:
            for group in set(groups):
                state_dict_ids, got_all = self._get_some_state_from_cache(
                    group, types, filtered_types
                    cache, group, types, filtered_types
                )
                results[group] = state_dict_ids

@@ -701,7 +807,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        else:
            for group in set(groups):
                state_dict_ids, got_all = self._get_all_state_from_cache(
                    group
                    cache, group
                )

                results[group] = state_dict_ids
@@ -710,8 +816,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
                missing_groups.append(group)

        if missing_groups:
            # Okay, so we have some missing_types, lets fetch them.
            cache_seq_num = self._state_group_cache.sequence
            # Okay, so we have some missing_types, let's fetch them.
            cache_seq_num = cache.sequence

            # the DictionaryCache knows if it has *all* the state, but
            # does not know if it has all of the keys of a particular type,
@@ -725,7 +831,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
                types_to_fetch = types

            group_to_state_dict = yield self._get_state_groups_from_groups(
                missing_groups, types_to_fetch
                missing_groups, types_to_fetch, cache == self._state_group_members_cache,
            )

            for group, group_state_dict in iteritems(group_to_state_dict):
@@ -745,7 +851,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):

                # update the cache with all the things we fetched from the
                # database.
                self._state_group_cache.update(
                cache.update(
                    cache_seq_num,
                    key=group,
                    value=group_state_dict,
@@ -847,15 +953,33 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
            ],
        )

        # Prefill the state group cache with this group.
        # Prefill the state group caches with this group.
        # It's fine to use the sequence like this as the state group map
        # is immutable. (If the map wasn't immutable then this prefill could
        # race with another update)

        current_member_state_ids = {
            s: ev
            for (s, ev) in iteritems(current_state_ids)
            if s[0] == EventTypes.Member
        }
        txn.call_after(
            self._state_group_members_cache.update,
            self._state_group_members_cache.sequence,
            key=state_group,
            value=dict(current_member_state_ids),
        )

        current_non_member_state_ids = {
            s: ev
            for (s, ev) in iteritems(current_state_ids)
            if s[0] != EventTypes.Member
        }
        txn.call_after(
            self._state_group_cache.update,
            self._state_group_cache.sequence,
            key=state_group,
            value=dict(current_state_ids),
            value=dict(current_non_member_state_ids),
        )

        return state_group

@@ -18,14 +18,14 @@ from collections import namedtuple

import six

-from canonicaljson import encode_canonical_json, json
+from canonicaljson import encode_canonical_json

from twisted.internet import defer

from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.caches.descriptors import cached

-from ._base import SQLBaseStore
+from ._base import SQLBaseStore, db_to_json

# py2 sqlite has buffer hardcoded as only binary type, so we must use it,
# despite being deprecated and removed in favor of memoryview

@@ -95,7 +95,8 @@ class TransactionStore(SQLBaseStore):
        )

        if result and result["response_code"]:
-            return result["response_code"], json.loads(str(result["response_json"]))
+            return result["response_code"], db_to_json(result["response_json"])

        else:
            return None
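
The body of db_to_json is not shown in this diff; given the buffer/memoryview comment above, a plausible reading is that it normalises the driver's binary return type before JSON-decoding. A hedged sketch only (the real helper lives in synapse/storage/_base.py and may differ):

import json

def db_to_json_sketch(db_content):
    # Assumed behaviour: py2 sqlite hands back buffer objects and other
    # drivers may hand back memoryview/bytes; normalise to text, then decode.
    if isinstance(db_content, memoryview):
        db_content = db_content.tobytes()
    if isinstance(db_content, bytes):
        db_content = db_content.decode("utf8")
    return json.loads(db_content)

assert db_to_json_sketch(b'{"response_code": 200}') == {"response_code": 200}
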
@@ -457,8 +457,8 @@ class AuthTestCase(unittest.TestCase):

        with self.assertRaises(ResourceLimitError) as e:
            yield self.auth.check_auth_blocking()
-        self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri)
-        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)
+        self.assertEquals(e.exception.admin_contact, self.hs.config.admin_contact)
+        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
        self.assertEquals(e.exception.code, 403)

        # Ensure does not throw an error

@@ -467,12 +467,37 @@ class AuthTestCase(unittest.TestCase):
        )
        yield self.auth.check_auth_blocking()

+    @defer.inlineCallbacks
+    def test_reserved_threepid(self):
+        self.hs.config.limit_usage_by_mau = True
+        self.hs.config.max_mau_value = 1
+        threepid = {'medium': 'email', 'address': 'reserved@server.com'}
+        unknown_threepid = {'medium': 'email', 'address': 'unreserved@server.com'}
+        self.hs.config.mau_limits_reserved_threepids = [threepid]
+
+        yield self.store.register(user_id='user1', token="123", password_hash=None)
+        with self.assertRaises(ResourceLimitError):
+            yield self.auth.check_auth_blocking()
+
+        with self.assertRaises(ResourceLimitError):
+            yield self.auth.check_auth_blocking(threepid=unknown_threepid)
+
+        yield self.auth.check_auth_blocking(threepid=threepid)
+
    @defer.inlineCallbacks
    def test_hs_disabled(self):
        self.hs.config.hs_disabled = True
        self.hs.config.hs_disabled_message = "Reason for being disabled"
        with self.assertRaises(ResourceLimitError) as e:
            yield self.auth.check_auth_blocking()
-        self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri)
-        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)
+        self.assertEquals(e.exception.admin_contact, self.hs.config.admin_contact)
+        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
        self.assertEquals(e.exception.code, 403)

    @defer.inlineCallbacks
    def test_server_notices_mxid_special_cased(self):
        self.hs.config.hs_disabled = True
        user = "@user:server"
        self.hs.config.server_notices_mxid = user
        self.hs.config.hs_disabled_message = "Reason for being disabled"
        yield self.auth.check_auth_blocking(user)
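
test_reserved_threepid pins down the intended semantics: once the MAU cap is hit, check_auth_blocking rejects everyone except users presenting a threepid on the reserved list. Reduced to plain Python (the function and exception names here are invented for the sketch):

class ResourceLimitExceeded(Exception):
    pass

def check_auth_blocking_sketch(current_mau, max_mau, reserved, threepid=None):
    # Reserved threepids are exempt from the monthly-active-user cap.
    if threepid is not None and threepid in reserved:
        return
    if current_mau >= max_mau:
        raise ResourceLimitExceeded("Monthly Active User Limit Exceeded")

reserved = [{"medium": "email", "address": "reserved@server.com"}]
check_auth_blocking_sketch(1, 1, reserved, threepid=reserved[0])  # allowed
try:
    check_auth_blocking_sketch(1, 1, reserved)
except ResourceLimitExceeded:
    pass  # blocked, as the test expects
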
@@ -20,7 +20,7 @@ from twisted.internet import defer

import synapse.types
from synapse.api.errors import AuthError
-from synapse.handlers.profile import ProfileHandler
+from synapse.handlers.profile import MasterProfileHandler
from synapse.types import UserID

from tests import unittest

@@ -29,7 +29,7 @@ from tests.utils import setup_test_homeserver

class ProfileHandlers(object):
    def __init__(self, hs):
-        self.profile_handler = ProfileHandler(hs)
+        self.profile_handler = MasterProfileHandler(hs)


class ProfileTestCase(unittest.TestCase):
@@ -51,7 +51,7 @@ class SyncTestCase(tests.unittest.TestCase):
        self.hs.config.hs_disabled = True
        with self.assertRaises(ResourceLimitError) as e:
            yield self.sync_handler.wait_for_sync_for_user(sync_config)
-        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)
+        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)

        self.hs.config.hs_disabled = False

@@ -59,7 +59,7 @@ class SyncTestCase(tests.unittest.TestCase):

        with self.assertRaises(ResourceLimitError) as e:
            yield self.sync_handler.wait_for_sync_for_user(sync_config)
-        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)
+        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)

    def _generate_sync_config(self, user_id):
        return SyncConfig(
@@ -112,6 +112,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):

    @defer.inlineCallbacks
    def test_invites(self):
        yield self.persist(type="m.room.create", key="", creator=USER_ID)
+        yield self.check("get_invited_rooms_for_user", [USER_ID_2], [])
        event = yield self.persist(
            type="m.room.member", key=USER_ID_2, membership="invite"

@@ -133,7 +134,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):

    @defer.inlineCallbacks
    def test_push_actions_for_user(self):
-        yield self.persist(type="m.room.create", creator=USER_ID)
+        yield self.persist(type="m.room.create", key="", creator=USER_ID)
        yield self.persist(type="m.room.join", key=USER_ID, membership="join")
        yield self.persist(
            type="m.room.join", sender=USER_ID, key=USER_ID_2, membership="join"
@@ -240,7 +240,6 @@ class RestHelper(object):
        self.assertEquals(200, code)
        defer.returnValue(response)

-    @defer.inlineCallbacks
    def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
        if txn_id is None:
            txn_id = "m%s" % (str(time.time()))

@@ -248,9 +247,16 @@ class RestHelper(object):
            body = "body_text_here"

        path = "/_matrix/client/r0/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
-        content = '{"msgtype":"m.text","body":"%s"}' % body
+        content = {"msgtype": "m.text", "body": body}
        if tok:
            path = path + "?access_token=%s" % tok

-        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
-        self.assertEquals(expect_code, code, msg=str(response))
+        request, channel = make_request("PUT", path, json.dumps(content).encode('utf8'))
+        render(request, self.resource, self.hs.get_reactor())
+
+        assert int(channel.result["code"]) == expect_code, (
+            "Expected: %d, got: %d, resp: %r"
+            % (expect_code, int(channel.result["code"]), channel.result["body"])
+        )
+
+        return channel.json_body
@@ -5,7 +5,7 @@ from six import text_type

import attr

-from twisted.internet import threads
+from twisted.internet import address, threads
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactorClock

@@ -63,7 +63,9 @@ class FakeChannel(object):
        self.result["done"] = True

    def getPeer(self):
-        return None
+        # We give an address so that getClientIP returns a non null entry,
+        # causing us to record the MAU
+        return address.IPv4Address(b"TCP", "127.0.0.1", 3423)

    def getHost(self):
        return None

@@ -91,7 +93,7 @@ class FakeSite:
        return FakeLogger()


-def make_request(method, path, content=b""):
+def make_request(method, path, content=b"", access_token=None):
    """
    Make a web request using the given method and path, feed it the
    content, and return the Request and the Channel underneath.

@@ -116,6 +118,11 @@ def make_request(method, path, content=b""):
    req = SynapseRequest(site, channel)
    req.process = lambda: b""
    req.content = BytesIO(content)

+    if access_token:
+        req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token)
+
    req.requestHeaders.addRawHeader(b"X-Forwarded-For", b"127.0.0.1")
    req.requestReceived(method, path, b"1.1")

    return req, channel
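
The new access_token parameter just attaches a Bearer Authorization header before the request is fed through the site; isolated, the logic is this small (the helper name is invented):

def with_auth(headers, access_token=None):
    # Sketch of the make_request change: optionally attach the token as a
    # Bearer Authorization header, matching the Matrix client-server API.
    headers = dict(headers)
    if access_token:
        headers[b"Authorization"] = b"Bearer " + access_token
    return headers

assert with_auth({}, b"tok")[b"Authorization"] == b"Bearer tok"
assert b"Authorization" not in with_auth({})
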
tests/server_notices/__init__.py (new file, 0 lines)
tests/server_notices/test_resource_limits_server_notices.py (new file, 213 lines)
@@ -0,0 +1,213 @@
from mock import Mock

from twisted.internet import defer

from synapse.api.constants import EventTypes, ServerNoticeMsgType
from synapse.api.errors import ResourceLimitError
from synapse.handlers.auth import AuthHandler
from synapse.server_notices.resource_limits_server_notices import (
    ResourceLimitsServerNotices,
)

from tests import unittest
from tests.utils import setup_test_homeserver


class AuthHandlers(object):
    def __init__(self, hs):
        self.auth_handler = AuthHandler(hs)


class TestResourceLimitsServerNotices(unittest.TestCase):
    @defer.inlineCallbacks
    def setUp(self):
        self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None)
        self.hs.handlers = AuthHandlers(self.hs)
        self.auth_handler = self.hs.handlers.auth_handler
        self.server_notices_sender = self.hs.get_server_notices_sender()

        # relying on [1] is far from ideal, but the only case where
        # ResourceLimitsServerNotices class needs to be isolated is this test,
        # general code should never have a reason to do so ...
        self._rlsn = self.server_notices_sender._server_notices[1]
        if not isinstance(self._rlsn, ResourceLimitsServerNotices):
            raise Exception("Failed to find reference to ResourceLimitsServerNotices")

        self._rlsn._store.user_last_seen_monthly_active = Mock(
            return_value=defer.succeed(1000)
        )
        self._send_notice = self._rlsn._server_notices_manager.send_notice
        self._rlsn._server_notices_manager.send_notice = Mock()
        self._rlsn._state.get_current_state = Mock(return_value=defer.succeed(None))
        self._rlsn._store.get_events = Mock(return_value=defer.succeed({}))

        self._send_notice = self._rlsn._server_notices_manager.send_notice

        self.hs.config.limit_usage_by_mau = True
        self.user_id = "@user_id:test"

        # self.server_notices_mxid = "@server:test"
        # self.server_notices_mxid_display_name = None
        # self.server_notices_mxid_avatar_url = None
        # self.server_notices_room_name = "Server Notices"

        self._rlsn._server_notices_manager.get_notice_room_for_user = Mock(
            returnValue=""
        )
        self._rlsn._store.add_tag_to_room = Mock()
        self._rlsn._store.get_tags_for_room = Mock(return_value={})
        self.hs.config.admin_contact = "mailto:user@test.com"

    @defer.inlineCallbacks
    def test_maybe_send_server_notice_to_user_flag_off(self):
        """Tests cases where the flags indicate nothing to do"""
        # test hs disabled case
        self.hs.config.hs_disabled = True

        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)

        self._send_notice.assert_not_called()
        # Test when mau limiting disabled
        self.hs.config.hs_disabled = False
        self.hs.limit_usage_by_mau = False
        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)

        self._send_notice.assert_not_called()

    @defer.inlineCallbacks
    def test_maybe_send_server_notice_to_user_remove_blocked_notice(self):
        """Test when user has blocked notice, but should have it removed"""

        self._rlsn._auth.check_auth_blocking = Mock()
        mock_event = Mock(
            type=EventTypes.Message,
            content={"msgtype": ServerNoticeMsgType},
        )
        self._rlsn._store.get_events = Mock(return_value=defer.succeed(
            {"123": mock_event}
        ))

        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
        # Would be better to check the content, but once == remove blocking event
        self._send_notice.assert_called_once()

    @defer.inlineCallbacks
    def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self):
        """Test when user has blocked notice, but notice ought to be there (NOOP)"""
        self._rlsn._auth.check_auth_blocking = Mock(
            side_effect=ResourceLimitError(403, 'foo')
        )

        mock_event = Mock(
            type=EventTypes.Message,
            content={"msgtype": ServerNoticeMsgType},
        )
        self._rlsn._store.get_events = Mock(return_value=defer.succeed(
            {"123": mock_event}
        ))
        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)

        self._send_notice.assert_not_called()

    @defer.inlineCallbacks
    def test_maybe_send_server_notice_to_user_add_blocked_notice(self):
        """Test when user does not have blocked notice, but should have one"""

        self._rlsn._auth.check_auth_blocking = Mock(
            side_effect=ResourceLimitError(403, 'foo')
        )
        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)

        # Would be better to check contents, but 2 calls == set blocking event
        self.assertTrue(self._send_notice.call_count == 2)

    @defer.inlineCallbacks
    def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self):
        """Test when user does not have blocked notice, nor should they (NOOP)"""

        self._rlsn._auth.check_auth_blocking = Mock()

        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)

        self._send_notice.assert_not_called()

    @defer.inlineCallbacks
    def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self):

        """Test when user is not part of the MAU cohort - this should not ever
        happen - but ...
        """

        self._rlsn._auth.check_auth_blocking = Mock()
        self._rlsn._store.user_last_seen_monthly_active = Mock(
            return_value=defer.succeed(None)
        )
        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)

        self._send_notice.assert_not_called()


class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase):
    @defer.inlineCallbacks
    def setUp(self):
        self.hs = yield setup_test_homeserver(self.addCleanup)
        self.store = self.hs.get_datastore()
        self.server_notices_sender = self.hs.get_server_notices_sender()
        self.server_notices_manager = self.hs.get_server_notices_manager()
        self.event_source = self.hs.get_event_sources()

        # relying on [1] is far from ideal, but the only case where
        # ResourceLimitsServerNotices class needs to be isolated is this test,
        # general code should never have a reason to do so ...
        self._rlsn = self.server_notices_sender._server_notices[1]
        if not isinstance(self._rlsn, ResourceLimitsServerNotices):
            raise Exception("Failed to find reference to ResourceLimitsServerNotices")

        self.hs.config.limit_usage_by_mau = True
        self.hs.config.hs_disabled = False
        self.hs.config.max_mau_value = 5
        self.hs.config.server_notices_mxid = "@server:test"
        self.hs.config.server_notices_mxid_display_name = None
        self.hs.config.server_notices_mxid_avatar_url = None
        self.hs.config.server_notices_room_name = "Test Server Notice Room"

        self.user_id = "@user_id:test"

        self.hs.config.admin_contact = "mailto:user@test.com"

    @defer.inlineCallbacks
    def test_server_notice_only_sent_once(self):
        self.store.get_monthly_active_count = Mock(
            return_value=1000,
        )

        self.store.user_last_seen_monthly_active = Mock(
            return_value=1000,
        )

        # Call the function multiple times to ensure we only send the notice once
        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)

        # Now lets get the last load of messages in the service notice room and
        # check that there is only one server notice
        room_id = yield self.server_notices_manager.get_notice_room_for_user(
            self.user_id,
        )

        token = yield self.event_source.get_current_token()
        events, _ = yield self.store.get_recent_events_for_room(
            room_id, limit=100, end_token=token.room_key,
        )

        count = 0
        for event in events:
            if event.type != EventTypes.Message:
                continue
            if event.content.get("msgtype") != ServerNoticeMsgType:
                continue

            count += 1

        self.assertEqual(count, 1)
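
test_server_notice_only_sent_once asserts that exactly one server notice lands in the room; the filtering it does over recent events reduces to the loop below (the two constant values are assumptions about EventTypes.Message and ServerNoticeMsgType, inlined so the sketch runs standalone):

MESSAGE = "m.room.message"                 # assumed value of EventTypes.Message
SERVER_NOTICE_MSGTYPE = "m.server_notice"  # assumed value of ServerNoticeMsgType

def count_server_notices(events):
    # Mirrors the test's loop: only message events whose msgtype is the
    # server-notice msgtype count toward the total.
    return sum(
        1
        for ev in events
        if ev["type"] == MESSAGE
        and ev["content"].get("msgtype") == SERVER_NOTICE_MSGTYPE
    )

events = [
    {"type": MESSAGE, "content": {"msgtype": SERVER_NOTICE_MSGTYPE}},
    {"type": MESSAGE, "content": {"msgtype": "m.text"}},
]
assert count_server_notices(events) == 1
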
@@ -20,11 +20,11 @@ from mock import Mock

from twisted.internet import defer

-from synapse.server import HomeServer
from synapse.storage._base import SQLBaseStore
from synapse.storage.engines import create_engine

from tests import unittest
+from tests.utils import TestHomeServer


class SQLBaseStoreTestCase(unittest.TestCase):

@@ -51,7 +51,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
        config = Mock()
        config.event_cache_size = 1
        config.database_config = {"name": "sqlite3"}
-        hs = HomeServer(
+        hs = TestHomeServer(
            "test",
            db_pool=self.db_pool,
            config=config,
tests/storage/test_purge.py (new file, 106 lines)
@@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.rest.client.v1 import room

from tests.unittest import HomeserverTestCase


class PurgeTests(HomeserverTestCase):

    user_id = "@red:server"
    servlets = [room.register_servlets]

    def make_homeserver(self, reactor, clock):
        hs = self.setup_test_homeserver("server", http_client=None)
        return hs

    def prepare(self, reactor, clock, hs):
        self.room_id = self.helper.create_room_as(self.user_id)

    def test_purge(self):
        """
        Purging a room will delete everything before the topological point.
        """
        # Send four messages to the room
        first = self.helper.send(self.room_id, body="test1")
        second = self.helper.send(self.room_id, body="test2")
        third = self.helper.send(self.room_id, body="test3")
        last = self.helper.send(self.room_id, body="test4")

        storage = self.hs.get_datastore()

        # Get the topological token
        event = storage.get_topological_token_for_event(last["event_id"])
        self.pump()
        event = self.successResultOf(event)

        # Purge everything before this topological token
        purge = storage.purge_history(self.room_id, event, True)
        self.pump()
        self.assertEqual(self.successResultOf(purge), None)

        # Try and get the events
        get_first = storage.get_event(first["event_id"])
        get_second = storage.get_event(second["event_id"])
        get_third = storage.get_event(third["event_id"])
        get_last = storage.get_event(last["event_id"])
        self.pump()

        # 1-3 should fail and last will succeed, meaning that 1-3 are deleted
        # and last is not.
        self.failureResultOf(get_first)
        self.failureResultOf(get_second)
        self.failureResultOf(get_third)
        self.successResultOf(get_last)

    def test_purge_wont_delete_extrems(self):
        """
        Purging a room will delete everything before the topological point.
        """
        # Send four messages to the room
        first = self.helper.send(self.room_id, body="test1")
        second = self.helper.send(self.room_id, body="test2")
        third = self.helper.send(self.room_id, body="test3")
        last = self.helper.send(self.room_id, body="test4")

        storage = self.hs.get_datastore()

        # Set the topological token higher than it should be
        event = storage.get_topological_token_for_event(last["event_id"])
        self.pump()
        event = self.successResultOf(event)
        event = "t{}-{}".format(
            *list(map(lambda x: x + 1, map(int, event[1:].split("-"))))
        )

        # Purge everything before this topological token
        purge = storage.purge_history(self.room_id, event, True)
        self.pump()
        f = self.failureResultOf(purge)
        self.assertIn("greater than forward", f.value.args[0])

        # Try and get the events
        get_first = storage.get_event(first["event_id"])
        get_second = storage.get_event(second["event_id"])
        get_third = storage.get_event(third["event_id"])
        get_last = storage.get_event(last["event_id"])
        self.pump()

        # Nothing is deleted.
        self.successResultOf(get_first)
        self.successResultOf(get_second)
        self.successResultOf(get_third)
        self.successResultOf(get_last)
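
The token surgery in test_purge_wont_delete_extrems is easy to miss: a topological stream token of the form "t<topological>-<stream>" has both halves incremented so it points past the room's forward extremities. Extracted on its own:

def bump_topological_token(token):
    # "t3-4" -> "t4-5": strip the leading "t", add one to the topological
    # and stream parts, exactly as the extremity test above does.
    topo, stream = map(int, token[1:].split("-"))
    return "t{}-{}".format(topo + 1, stream + 1)

assert bump_topological_token("t3-4") == "t4-5"
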
@@ -22,7 +22,7 @@ from synapse.api.constants import EventTypes, Membership
from synapse.types import RoomID, UserID

from tests import unittest
-from tests.utils import setup_test_homeserver
+from tests.utils import create_room, setup_test_homeserver


class RedactionTestCase(unittest.TestCase):

@@ -41,6 +41,8 @@ class RedactionTestCase(unittest.TestCase):

        self.room1 = RoomID.from_string("!abc123:test")

+        yield create_room(hs, self.room1.to_string(), self.u_alice.to_string())
+
        self.depth = 1

    @defer.inlineCallbacks

@@ -46,6 +46,7 @@ class RegistrationStoreTestCase(unittest.TestCase):
                "consent_version": None,
                "consent_server_notice_sent": None,
                "appservice_id": None,
+                "creation_ts": 1000,
            },
            (yield self.store.get_user_by_id(self.user_id)),
        )

@@ -22,7 +22,7 @@ from synapse.api.constants import EventTypes, Membership
from synapse.types import RoomID, UserID

from tests import unittest
-from tests.utils import setup_test_homeserver
+from tests.utils import create_room, setup_test_homeserver


class RoomMemberStoreTestCase(unittest.TestCase):

@@ -45,6 +45,8 @@ class RoomMemberStoreTestCase(unittest.TestCase):

        self.room = RoomID.from_string("!abc123:test")

+        yield create_room(hs, self.room.to_string(), self.u_alice.to_string())
+
    @defer.inlineCallbacks
    def inject_room_member(self, room, user, membership, replaces_state=None):
        builder = self.event_builder_factory.new(
@@ -185,6 +185,7 @@ class StateStoreTestCase(tests.unittest.TestCase):

        # test _get_some_state_from_cache correctly filters out members with types=[]
        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
            group, [], filtered_types=[EventTypes.Member]
        )

@@ -197,8 +198,20 @@ class StateStoreTestCase(tests.unittest.TestCase):
            state_dict,
        )

+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {},
+            state_dict,
+        )
+
        # test _get_some_state_from_cache correctly filters in members with wildcard types
        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
            group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
        )

@@ -207,6 +220,18 @@ class StateStoreTestCase(tests.unittest.TestCase):
            {
                (e1.type, e1.state_key): e1.event_id,
                (e2.type, e2.state_key): e2.event_id,
            },
            state_dict,
        )

+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e3.type, e3.state_key): e3.event_id,
+                # e4 is overwritten by e5
+                (e5.type, e5.state_key): e5.event_id,

@@ -216,6 +241,7 @@ class StateStoreTestCase(tests.unittest.TestCase):

        # test _get_some_state_from_cache correctly filters in members with specific types
        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
            group,
            [(EventTypes.Member, e5.state_key)],
            filtered_types=[EventTypes.Member],

@@ -226,6 +252,20 @@ class StateStoreTestCase(tests.unittest.TestCase):
            {
                (e1.type, e1.state_key): e1.event_id,
                (e2.type, e2.state_key): e2.event_id,
            },
            state_dict,
        )

+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group,
+            [(EventTypes.Member, e5.state_key)],
+            filtered_types=[EventTypes.Member],
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e5.type, e5.state_key): e5.event_id,
+            },
+            state_dict,

@@ -234,6 +274,7 @@ class StateStoreTestCase(tests.unittest.TestCase):
        # test _get_some_state_from_cache correctly filters in members with specific types
        # and no filtered_types
        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
            group, [(EventTypes.Member, e5.state_key)], filtered_types=None
        )

@@ -254,9 +295,6 @@ class StateStoreTestCase(tests.unittest.TestCase):
            {
                (e1.type, e1.state_key): e1.event_id,
                (e2.type, e2.state_key): e2.event_id,
-                (e3.type, e3.state_key): e3.event_id,
-                # e4 is overwritten by e5
-                (e5.type, e5.state_key): e5.event_id,
            },
        )

@@ -269,8 +307,6 @@ class StateStoreTestCase(tests.unittest.TestCase):
            # list fetched keys so it knows it's partial
            fetched_keys=(
                (e1.type, e1.state_key),
-                (e3.type, e3.state_key),
-                (e5.type, e5.state_key),
            ),
        )

@@ -284,8 +320,6 @@ class StateStoreTestCase(tests.unittest.TestCase):
            set(
                [
                    (e1.type, e1.state_key),
-                    (e3.type, e3.state_key),
-                    (e5.type, e5.state_key),
                ]
            ),
        )

@@ -293,8 +327,6 @@ class StateStoreTestCase(tests.unittest.TestCase):
            state_dict_ids,
            {
                (e1.type, e1.state_key): e1.event_id,
-                (e3.type, e3.state_key): e3.event_id,
-                (e5.type, e5.state_key): e5.event_id,
            },
        )

@@ -304,14 +336,25 @@ class StateStoreTestCase(tests.unittest.TestCase):
        # test _get_some_state_from_cache correctly filters out members with types=[]
        room_id = self.room.to_string()
        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
            group, [], filtered_types=[EventTypes.Member]
        )

        self.assertEqual(is_all, False)
        self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)

+        room_id = self.room.to_string()
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual({}, state_dict)
+
        # test _get_some_state_from_cache correctly filters in members wildcard types
        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
            group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
        )

@@ -319,8 +362,19 @@ class StateStoreTestCase(tests.unittest.TestCase):
        self.assertDictEqual(
            {
                (e1.type, e1.state_key): e1.event_id,
            },
            state_dict,
        )

+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e3.type, e3.state_key): e3.event_id,
+                # e4 is overwritten by e5
+                (e5.type, e5.state_key): e5.event_id,
+            },
+            state_dict,

@@ -328,6 +382,7 @@ class StateStoreTestCase(tests.unittest.TestCase):

        # test _get_some_state_from_cache correctly filters in members with specific types
        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
            group,
            [(EventTypes.Member, e5.state_key)],
            filtered_types=[EventTypes.Member],

@@ -337,6 +392,20 @@ class StateStoreTestCase(tests.unittest.TestCase):
        self.assertDictEqual(
            {
                (e1.type, e1.state_key): e1.event_id,
            },
            state_dict,
        )

+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group,
+            [(EventTypes.Member, e5.state_key)],
+            filtered_types=[EventTypes.Member],
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e5.type, e5.state_key): e5.event_id,
+            },
+            state_dict,

@@ -345,8 +414,22 @@ class StateStoreTestCase(tests.unittest.TestCase):
        # test _get_some_state_from_cache correctly filters in members with specific types
        # and no filtered_types
        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_cache,
            group, [(EventTypes.Member, e5.state_key)], filtered_types=None
        )

-        self.assertEqual(is_all, True)
-        self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
+        self.assertEqual(is_all, False)
+        self.assertDictEqual({}, state_dict)
+
+        (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+            self.store._state_group_members_cache,
+            group, [(EventTypes.Member, e5.state_key)], filtered_types=None
+        )
+
+        self.assertEqual(is_all, True)
+        self.assertDictEqual(
+            {
+                (e5.type, e5.state_key): e5.event_id,
+            },
+            state_dict,
+        )
tests/test_mau.py (new file, 217 lines)
@@ -0,0 +1,217 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests REST events for /rooms paths."""

import json

from mock import Mock, NonCallableMock

from synapse.api.constants import LoginType
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.http.server import JsonResource
from synapse.rest.client.v2_alpha import register, sync
from synapse.util import Clock

from tests import unittest
from tests.server import (
    ThreadedMemoryReactorClock,
    make_request,
    render,
    setup_test_homeserver,
)


class TestMauLimit(unittest.TestCase):
    def setUp(self):
        self.reactor = ThreadedMemoryReactorClock()
        self.clock = Clock(self.reactor)

        self.hs = setup_test_homeserver(
            self.addCleanup,
            "red",
            http_client=None,
            clock=self.clock,
            reactor=self.reactor,
            federation_client=Mock(),
            ratelimiter=NonCallableMock(spec_set=["send_message"]),
        )

        self.store = self.hs.get_datastore()

        self.hs.config.registrations_require_3pid = []
        self.hs.config.enable_registration_captcha = False
        self.hs.config.recaptcha_public_key = []

        self.hs.config.limit_usage_by_mau = True
        self.hs.config.hs_disabled = False
        self.hs.config.max_mau_value = 2
        self.hs.config.mau_trial_days = 0
        self.hs.config.server_notices_mxid = "@server:red"
        self.hs.config.server_notices_mxid_display_name = None
        self.hs.config.server_notices_mxid_avatar_url = None
        self.hs.config.server_notices_room_name = "Test Server Notice Room"

        self.resource = JsonResource(self.hs)
        register.register_servlets(self.hs, self.resource)
        sync.register_servlets(self.hs, self.resource)

    def test_simple_deny_mau(self):
        # Create and sync so that the MAU counts get updated
        token1 = self.create_user("kermit1")
        self.do_sync_for_user(token1)
        token2 = self.create_user("kermit2")
        self.do_sync_for_user(token2)

        # We've created and activated two users, we shouldn't be able to
        # register new users
        with self.assertRaises(SynapseError) as cm:
            self.create_user("kermit3")

        e = cm.exception
        self.assertEqual(e.code, 403)
        self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)

    def test_allowed_after_a_month_mau(self):
        # Create and sync so that the MAU counts get updated
        token1 = self.create_user("kermit1")
        self.do_sync_for_user(token1)
        token2 = self.create_user("kermit2")
        self.do_sync_for_user(token2)

        # Advance time by 31 days
        self.reactor.advance(31 * 24 * 60 * 60)

        self.store.reap_monthly_active_users()

        self.reactor.advance(0)

        # We should be able to register more users
        token3 = self.create_user("kermit3")
        self.do_sync_for_user(token3)

    def test_trial_delay(self):
        self.hs.config.mau_trial_days = 1

        # We should be able to register more than the limit initially
        token1 = self.create_user("kermit1")
        self.do_sync_for_user(token1)
        token2 = self.create_user("kermit2")
        self.do_sync_for_user(token2)
        token3 = self.create_user("kermit3")
        self.do_sync_for_user(token3)

        # Advance time by 2 days
        self.reactor.advance(2 * 24 * 60 * 60)

        # Two users should be able to sync
        self.do_sync_for_user(token1)
        self.do_sync_for_user(token2)

        # But the third should fail
        with self.assertRaises(SynapseError) as cm:
            self.do_sync_for_user(token3)

        e = cm.exception
        self.assertEqual(e.code, 403)
        self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)

        # And new registrations are now denied too
        with self.assertRaises(SynapseError) as cm:
            self.create_user("kermit4")

        e = cm.exception
        self.assertEqual(e.code, 403)
        self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)

    def test_trial_users_cant_come_back(self):
        self.hs.config.mau_trial_days = 1

        # We should be able to register more than the limit initially
        token1 = self.create_user("kermit1")
        self.do_sync_for_user(token1)
        token2 = self.create_user("kermit2")
        self.do_sync_for_user(token2)
        token3 = self.create_user("kermit3")
        self.do_sync_for_user(token3)

        # Advance time by 2 days
        self.reactor.advance(2 * 24 * 60 * 60)

        # Two users should be able to sync
        self.do_sync_for_user(token1)
        self.do_sync_for_user(token2)

        # Advance by 2 months so everyone falls out of MAU
        self.reactor.advance(60 * 24 * 60 * 60)
        self.store.reap_monthly_active_users()
        self.reactor.advance(0)

        # We can create as many new users as we want
        token4 = self.create_user("kermit4")
        self.do_sync_for_user(token4)
        token5 = self.create_user("kermit5")
        self.do_sync_for_user(token5)
        token6 = self.create_user("kermit6")
        self.do_sync_for_user(token6)

        # users 2 and 3 can come back to bring us back up to MAU limit
        self.do_sync_for_user(token2)
        self.do_sync_for_user(token3)

        # New trial users can still sync
        self.do_sync_for_user(token4)
        self.do_sync_for_user(token5)
        self.do_sync_for_user(token6)

        # But the old user can't
        with self.assertRaises(SynapseError) as cm:
            self.do_sync_for_user(token1)

        e = cm.exception
        self.assertEqual(e.code, 403)
        self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)

    def create_user(self, localpart):
        request_data = json.dumps({
            "username": localpart,
            "password": "monkey",
            "auth": {"type": LoginType.DUMMY},
        })

        request, channel = make_request(b"POST", b"/register", request_data)
        render(request, self.resource, self.reactor)

        if channel.result["code"] != b"200":
            raise HttpResponseException(
                int(channel.result["code"]),
                channel.result["reason"],
                channel.result["body"],
            ).to_synapse_error()

        access_token = channel.json_body["access_token"]

        return access_token

    def do_sync_for_user(self, token):
        request, channel = make_request(b"GET", b"/sync", access_token=token)
        render(request, self.resource, self.reactor)

        if channel.result["code"] != b"200":
            raise HttpResponseException(
                int(channel.result["code"]),
                channel.result["reason"],
                channel.result["body"],
            ).to_synapse_error()
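
The trial behaviour test_trial_delay exercises can be read as: a user only counts toward (and is blocked by) the MAU cap once their account is older than mau_trial_days. A self-contained sketch under that reading (the function is invented for illustration, not Synapse's implementation):

DAY_MS = 24 * 60 * 60 * 1000

def is_trial_user(created_ts_ms, now_ms, mau_trial_days):
    # Users inside their trial window neither count toward MAU nor get
    # blocked by the cap -- matching how kermit3 can sync on day 0 but is
    # rejected after the 1-day trial lapses.
    return now_ms - created_ts_ms < mau_trial_days * DAY_MS

assert is_trial_user(0, 0, 1)               # day 0: still on trial
assert not is_trial_user(0, 2 * DAY_MS, 1)  # day 2: trial over, MAU applies
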
@@ -18,7 +18,7 @@ from mock import Mock
from twisted.internet import defer

from synapse.api.auth import Auth
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RoomVersions
from synapse.events import FrozenEvent
from synapse.state import StateHandler, StateResolutionHandler

@@ -117,6 +117,9 @@ class StateGroupStore(object):
    def register_event_id_state_group(self, event_id, state_group):
        self._event_to_state_group[event_id] = state_group

+    def get_room_version(self, room_id):
+        return RoomVersions.V1
+

class DictObj(dict):
    def __init__(self, **kwargs):

@@ -176,7 +179,9 @@ class StateTestCase(unittest.TestCase):
    def test_branch_no_conflict(self):
        graph = Graph(
            nodes={
-                "START": DictObj(type=EventTypes.Create, state_key="", depth=1),
+                "START": DictObj(
+                    type=EventTypes.Create, state_key="", content={}, depth=1,
+                ),
                "A": DictObj(type=EventTypes.Message, depth=2),
                "B": DictObj(type=EventTypes.Message, depth=3),
                "C": DictObj(type=EventTypes.Name, state_key="", depth=3),
@@ -14,12 +14,12 @@
# limitations under the License.

from synapse.api.errors import SynapseError
-from synapse.server import HomeServer
from synapse.types import GroupID, RoomAlias, UserID

from tests import unittest
+from tests.utils import TestHomeServer

-mock_homeserver = HomeServer(hostname="my.domain")
+mock_homeserver = TestHomeServer(hostname="my.domain")


class UserIDTestCase(unittest.TestCase):
@@ -21,7 +21,7 @@ from synapse.events import FrozenEvent
from synapse.visibility import filter_events_for_server

import tests.unittest
-from tests.utils import setup_test_homeserver
+from tests.utils import create_room, setup_test_homeserver

logger = logging.getLogger(__name__)

@@ -36,6 +36,8 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
        self.event_builder_factory = self.hs.get_event_builder_factory()
        self.store = self.hs.get_datastore()

+        yield create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")
+
    @defer.inlineCallbacks
    def test_filtering(self):
        #
@@ -151,6 +151,7 @@ class HomeserverTestCase(TestCase):
        hijack_auth (bool): Whether to hijack auth to return the user specified
        in user_id.
    """

+    servlets = []
    hijack_auth = True

@@ -279,3 +280,13 @@ class HomeserverTestCase(TestCase):
        kwargs = dict(kwargs)
        kwargs.update(self._hs_args)
        return setup_test_homeserver(self.addCleanup, *args, **kwargs)

+    def pump(self):
+        """
+        Pump the reactor enough that Deferreds will fire.
+        """
+        self.reactor.pump([0.0] * 100)
+
+    def get_success(self, d):
+        self.pump()
+        return self.successResultOf(d)
@@ -24,11 +24,13 @@ from six.moves.urllib import parse as urlparse

from twisted.internet import defer, reactor

from synapse.api.constants import EventTypes
from synapse.api.errors import CodeMessageException, cs_error
+from synapse.config.server import ServerConfig
from synapse.federation.transport import server
from synapse.http.server import HttpServer
from synapse.server import HomeServer
-from synapse.storage import PostgresEngine
+from synapse.storage import DataStore, PostgresEngine
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import (
    _get_or_create_schema_state,

@@ -91,10 +93,14 @@ def setupdb():
    atexit.register(_cleanup)


+class TestHomeServer(HomeServer):
+    DATASTORE_CLASS = DataStore
+
+
@defer.inlineCallbacks
def setup_test_homeserver(
    cleanup_func, name="test", datastore=None, config=None, reactor=None,
-    homeserverToUse=HomeServer, **kargs
+    homeserverToUse=TestHomeServer, **kargs
):
    """
    Setup a homeserver suitable for running tests against. Keyword arguments

@@ -141,7 +147,9 @@ def setup_test_homeserver(
    config.hs_disabled_limit_type = ""
    config.max_mau_value = 50
    config.mau_limits_reserved_threepids = []
-    config.admin_uri = None
+    config.admin_contact = None
    config.rc_messages_per_second = 10000
    config.rc_message_burst_count = 10000

+    # we need a sane default_room_version, otherwise attempts to create rooms will
+    # fail.

@@ -151,6 +159,11 @@ def setup_test_homeserver(
    # background, which upsets the test runner.
    config.update_user_directory = False

+    def is_threepid_reserved(threepid):
+        return ServerConfig.is_threepid_reserved(config, threepid)
+
+    config.is_threepid_reserved.side_effect = is_threepid_reserved
+
    config.use_frozen_dicts = True
    config.ldap_enabled = False

@@ -539,3 +552,32 @@ class DeferredMockCallable(object):
            "Expected not to received any calls, got:\n"
            + "\n".join(["call(%s)" % _format_call(c[0], c[1]) for c in calls])
        )


+@defer.inlineCallbacks
+def create_room(hs, room_id, creator_id):
+    """Creates and persist a creation event for the given room
+
+    Args:
+        hs
+        room_id (str)
+        creator_id (str)
+    """
+
+    store = hs.get_datastore()
+    event_builder_factory = hs.get_event_builder_factory()
+    event_creation_handler = hs.get_event_creation_handler()
+
+    builder = event_builder_factory.new({
+        "type": EventTypes.Create,
+        "state_key": "",
+        "sender": creator_id,
+        "room_id": room_id,
+        "content": {},
+    })
+
+    event, context = yield event_creation_handler.create_new_client_event(
+        builder
+    )
+
+    yield store.persist_event(event, context)
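
Tying the new helper back to its call sites earlier in this diff (RedactionTestCase, RoomMemberStoreTestCase, FilterEventsForServerTestCase): a test that needs a persisted room creates it before injecting any other events. A sketch of that setUp pattern (the fixture function name is invented):

from twisted.internet import defer
from synapse.types import RoomID, UserID

from tests.utils import create_room, setup_test_homeserver

@defer.inlineCallbacks
def make_room_fixture(test_case):
    # Persist the m.room.create event first, so later event injection
    # (members, messages, redactions) has a valid room to attach to.
    hs = yield setup_test_homeserver(test_case.addCleanup)
    room = RoomID.from_string("!abc123:test")
    alice = UserID.from_string("@alice:test")
    yield create_room(hs, room.to_string(), alice.to_string())
    defer.returnValue((hs, room, alice))
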