Compare commits
37 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 3a5cb40d45 | |
| | 88cc9cc69e | |
| | 9a0db062af | |
| | e9e4cb25fc | |
| | 545001b9e4 | |
| | 01ccc9e6f2 | |
| | a9cb1a35c8 | |
| | f879127aaa | |
| | e6d87c93f3 | |
| | 004cc8a328 | |
| | a134c572a6 | |
| | c2a5cf2fe3 | |
| | 800cfd5774 | |
| | 152c2ac19e | |
| | e70287cff3 | |
| | 03a26e28d9 | |
| | 3e0c0660b3 | |
| | 3f49e131d9 | |
| | 9b8c0fb162 | |
| | 691f8492fb | |
| | a9d7d98d3f | |
| | bdbb1eec65 | |
| | fecb45e0c3 | |
| | 44cd6e1358 | |
| | 8d6dc106d1 | |
| | a052aa42e7 | |
| | 8efe773ef1 | |
| | b7e7b52452 | |
| | 8cbbfaefc1 | |
| | 84b5cc69f5 | |
| | fde8e8f09f | |
| | eb9fc021e3 | |
| | 1c41b05c8c | |
| | 5bdb57cb66 | |
| | 9aa5a0af51 | |
| | 610accbb7f | |
| | 9cf519769b | |
+9 -1

@@ -1,3 +1,11 @@
+Changes in synapse v0.27.2 (2018-03-26)
+=======================================
+
+Bug fixes:
+
+* Fix bug which broke TCP replication between workers (PR #3015)
+
+
 Changes in synapse v0.27.1 (2018-03-26)
 =======================================
 
@@ -51,7 +59,7 @@ Features:
 
 Changes:
 
-* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/metrics-howto.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
+* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/workers.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
 * Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
 * Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
 * No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
+1 -1

@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.27.1"
+__version__ = "0.27.2"
@@ -36,7 +36,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.appservice")
 
@@ -64,7 +64,7 @@ class AppserviceServer(HomeServer):
                 if name == "metrics":
                     resources[METRICS_PREFIX] = MetricsResource(self)
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
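The hunk above is the first instance of a pattern repeated across all the `synapse.app.*` workers below: the dummy resource at the root of the resource tree changes from twisted's bare `Resource` to `NoResource`. A minimal sketch of the behavioural difference, using only stock twisted.web (illustrative, not synapse code): a bare `Resource` has no render methods, so requests that land on it fail unhelpfully, whereas `NoResource` is a leaf resource that renders a proper 404 page.

```python
# Minimal sketch (stock twisted.web only, not synapse code): NoResource is a
# leaf resource whose render() produces a proper 404 page, which is what you
# want for the unclaimed parts of a resource tree.
from twisted.internet import reactor
from twisted.web.resource import NoResource, Resource
from twisted.web.server import Site


class Hello(Resource):
    isLeaf = True

    def render_GET(self, request):
        return b"hello"


root = NoResource()               # anything not mounted below renders a 404
root.putChild(b"hello", Hello())  # GET /hello -> b"hello"
reactor.listenTCP(8080, Site(root))
reactor.run()
```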
@@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.client_reader")
 
@@ -88,7 +88,7 @@ class ClientReaderServer(HomeServer):
                         "/_matrix/client/api/v1": resource,
                     })
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
@@ -52,7 +52,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.event_creator")
 
@@ -104,7 +104,7 @@ class EventCreatorServer(HomeServer):
                         "/_matrix/client/api/v1": resource,
                     })
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
@@ -41,7 +41,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.federation_reader")
 
@@ -77,7 +77,7 @@ class FederationReaderServer(HomeServer):
                         FEDERATION_PREFIX: TransportLayerServer(self),
                     })
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
@@ -42,7 +42,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import defer, reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.federation_sender")
 
@@ -91,7 +91,7 @@ class FederationSenderServer(HomeServer):
                 if name == "metrics":
                     resources[METRICS_PREFIX] = MetricsResource(self)
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
@@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import defer, reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.frontend_proxy")
 
@@ -142,7 +142,7 @@ class FrontendProxyServer(HomeServer):
                         "/_matrix/client/api/v1": resource,
                     })
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
@@ -56,7 +56,7 @@ from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
 from twisted.application import service
 from twisted.internet import defer, reactor
-from twisted.web.resource import EncodingResourceWrapper, Resource
+from twisted.web.resource import EncodingResourceWrapper, NoResource
 from twisted.web.server import GzipEncoderFactory
 from twisted.web.static import File
 
@@ -126,7 +126,7 @@ class SynapseHomeServer(HomeServer):
         if WEB_CLIENT_PREFIX in resources:
             root_resource = RootRedirect(WEB_CLIENT_PREFIX)
         else:
-            root_resource = Resource()
+            root_resource = NoResource()
 
         root_resource = create_resource_tree(resources, root_resource)
 
@@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.media_repository")
 
@@ -84,7 +84,7 @@ class MediaRepositoryServer(HomeServer):
                         ),
                     })
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
@@ -37,7 +37,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import defer, reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.pusher")
 
@@ -94,7 +94,7 @@ class PusherServer(HomeServer):
                 if name == "metrics":
                     resources[METRICS_PREFIX] = MetricsResource(self)
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
@@ -56,7 +56,7 @@ from synapse.util.manhole import manhole
 from synapse.util.stringutils import random_string
 from synapse.util.versionstring import get_version_string
 from twisted.internet import defer, reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.synchrotron")
 
@@ -269,7 +269,7 @@ class SynchrotronServer(HomeServer):
                         "/_matrix/client/api/v1": resource,
                     })
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
@@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.user_dir")
 
@@ -116,7 +116,7 @@ class UserDirectoryServer(HomeServer):
                         "/_matrix/client/api/v1": resource,
                     })
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
         _base.listen_tcp(
            bind_addresses,
@@ -186,8 +186,13 @@ class ApplicationService(object):
 
     @cachedInlineCallbacks(num_args=1, cache_context=True)
     def _matches_user_in_member_list(self, room_id, store, cache_context):
+
+        def invalidate_only_when_as_users_change(room_id, member):
+            if self.is_interested_in_user(member):
+                cache_context.invalidate(room_id)
+
         member_list = yield store.get_users_in_room(
-            room_id, on_invalidate=cache_context.invalidate
+            room_id, on_invalidate=invalidate_only_when_as_users_change
        )

        # check joined member events
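A hedged sketch of the caching pattern in the hunk above, in plain Python rather than synapse's `cachedInlineCallbacks` machinery: routing invalidation through a filter means the cached answer is only dropped when a user the appservice is actually interested in changes, instead of on every membership change in the room.

```python
# Illustrative only: a toy cache whose invalidation callback filters on
# whether the changed member matters to the appservice, mirroring
# invalidate_only_when_as_users_change above.
class CachedMatch:
    def __init__(self, is_interested_in_user):
        self.is_interested_in_user = is_interested_in_user
        self.cache = {}  # room_id -> cached bool

    def on_member_change(self, room_id, member):
        # only drop the cached answer when a relevant user changed
        if self.is_interested_in_user(member):
            self.cache.pop(room_id, None)


cache = CachedMatch(lambda user: user.startswith("@irc_"))
cache.cache["!room:example.com"] = True
cache.on_member_change("!room:example.com", "@alice:example.com")    # kept
cache.on_member_change("!room:example.com", "@irc_bob:example.com")  # dropped
print(cache.cache)  # {}
```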
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -134,23 +135,8 @@ class E2eKeysHandler(object):
                 if user_id in destination_query:
                     results[user_id] = keys
 
-            except CodeMessageException as e:
-                failures[destination] = {
-                    "status": e.code, "message": e.message
-                }
-            except NotRetryingDestination as e:
-                failures[destination] = {
-                    "status": 503, "message": "Not ready for retry",
-                }
-            except FederationDeniedError as e:
-                failures[destination] = {
-                    "status": 403, "message": "Federation Denied",
-                }
             except Exception as e:
-                # include ConnectionRefused and other errors
-                failures[destination] = {
-                    "status": 503, "message": e.message
-                }
+                failures[destination] = _exception_to_failure(e)
 
         yield make_deferred_yieldable(defer.gatherResults([
             preserve_fn(do_remote_query)(destination)
@@ -252,19 +238,8 @@ class E2eKeysHandler(object):
                 for user_id, keys in remote_result["one_time_keys"].items():
                     if user_id in device_keys:
                         json_result[user_id] = keys
-            except CodeMessageException as e:
-                failures[destination] = {
-                    "status": e.code, "message": e.message
-                }
-            except NotRetryingDestination as e:
-                failures[destination] = {
-                    "status": 503, "message": "Not ready for retry",
-                }
             except Exception as e:
-                # include ConnectionRefused and other errors
-                failures[destination] = {
-                    "status": 503, "message": e.message
-                }
+                failures[destination] = _exception_to_failure(e)
 
         yield make_deferred_yieldable(defer.gatherResults([
             preserve_fn(claim_client_keys)(destination)
@@ -362,6 +337,31 @@ class E2eKeysHandler(object):
         )
 
 
+def _exception_to_failure(e):
+    if isinstance(e, CodeMessageException):
+        return {
+            "status": e.code, "message": e.message,
+        }
+
+    if isinstance(e, NotRetryingDestination):
+        return {
+            "status": 503, "message": "Not ready for retry",
+        }
+
+    if isinstance(e, FederationDeniedError):
+        return {
+            "status": 403, "message": "Federation Denied",
+        }
+
+    # include ConnectionRefused and other errors
+    #
+    # Note that some Exceptions (notably twisted's ResponseFailed etc) don't
+    # give a string for e.message, which simplejson then fails to serialize.
+    return {
+        "status": 503, "message": str(e.message),
+    }
+
+
 def _one_time_keys_match(old_key_json, new_key):
     old_key = json.loads(old_key_json)
 
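The motivation for `str(e.message)` is spelled out in the new helper's comment; an illustrative snippet (assuming only `simplejson`, not synapse code) of the serialization failure it avoids:

```python
import simplejson

# Some exceptions carry a non-string .message (per the comment above,
# twisted's ResponseFailed is one such case); a list of exceptions is not
# JSON-serializable as-is.
msg = [ValueError("boom")]
# simplejson.dumps({"status": 503, "message": msg})      # raises TypeError
print(simplejson.dumps({"status": 503, "message": str(msg)}))  # fine
```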
@@ -24,7 +24,7 @@ from synapse.api.errors import (
 from synapse.http.client import CaptchaServerHttpClient
 from synapse import types
 from synapse.types import UserID
-from synapse.util.async import run_on_reactor
+from synapse.util.async import run_on_reactor, Linearizer
 from synapse.util.threepids import check_3pid_allowed
 from ._base import BaseHandler
 
@@ -46,6 +46,10 @@ class RegistrationHandler(BaseHandler):
 
         self.macaroon_gen = hs.get_macaroon_generator()
 
+        self._generate_user_id_linearizer = Linearizer(
+            name="_generate_user_id_linearizer",
+        )
+
     @defer.inlineCallbacks
     def check_username(self, localpart, guest_access_token=None,
                        assigned_user_id=None):
@@ -345,9 +349,11 @@ class RegistrationHandler(BaseHandler):
     @defer.inlineCallbacks
     def _generate_user_id(self, reseed=False):
-        if reseed or self._next_generated_user_id is None:
-            self._next_generated_user_id = (
-                yield self.store.find_next_generated_user_id_localpart()
-            )
+        with (yield self._generate_user_id_linearizer.queue(())):
+            if reseed or self._next_generated_user_id is None:
+                self._next_generated_user_id = (
+                    yield self.store.find_next_generated_user_id_localpart()
+                )
 
         id = self._next_generated_user_id
         self._next_generated_user_id += 1
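The race the linearizer closes, sketched with a plain lock (illustrative; synapse's `Linearizer` serialises Deferred-returning work the same way): without mutual exclusion, two concurrent registrations can both observe the counter unseeded, both reseed it from the database, and hand out the same user id.

```python
# Standalone sketch, not synapse code: serialise the check-reseed-increment
# sequence so concurrent callers never receive the same id.
import threading

_lock = threading.Lock()
_next_id = None


def generate_user_id(find_next_from_db):
    global _next_id
    with _lock:  # only one caller at a time runs the block below
        if _next_id is None:
            _next_id = find_next_from_db()
        uid, _next_id = _next_id, _next_id + 1
    return uid


print(generate_user_id(lambda: 42))  # 42
print(generate_user_id(lambda: 42))  # 43, not 42 again
```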
@@ -488,6 +488,7 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False,
     request.setHeader(b"Content-Type", b"application/json")
     request.setHeader(b"Server", version_string)
     request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
+    request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
 
     if send_cors:
         set_cors_headers(request)
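A note on the added header: `no-cache, no-store, must-revalidate` tells browsers and intermediaries never to reuse API responses. The same policy on a minimal WSGI app, purely for illustration (not synapse code):

```python
# Minimal WSGI sketch setting the same Cache-Control policy on every response.
def app(environ, start_response):
    start_response("200 OK", [
        ("Content-Type", "application/json"),
        ("Cache-Control", "no-cache, no-store, must-revalidate"),
    ])
    return [b"{}"]
```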
@@ -34,7 +34,6 @@ REQUIREMENTS = {
     "bcrypt": ["bcrypt>=3.1.0"],
     "pillow": ["PIL"],
     "pydenticon": ["pydenticon"],
-    "ujson": ["ujson"],
     "blist": ["blist"],
     "pysaml2>=3.0.0": ["saml2>=3.0.0"],
     "pymacaroons-pynacl": ["pymacaroons"],
@@ -19,7 +19,7 @@ allowed to be sent by which side.
 """
 
 import logging
-import simplejson as json
+import simplejson
 
 
 logger = logging.getLogger(__name__)
@@ -100,14 +100,14 @@ class RdataCommand(Command):
         return cls(
             stream_name,
             None if token == "batch" else int(token),
-            json.loads(row_json)
+            simplejson.loads(row_json)
         )
 
     def to_line(self):
         return " ".join((
             self.stream_name,
             str(self.token) if self.token is not None else "batch",
-            json.dumps(self.row),
+            simplejson.dumps(self.row, namedtuple_as_object=False),
         ))
@@ -298,10 +298,12 @@ class InvalidateCacheCommand(Command):
     def from_line(cls, line):
         cache_func, keys_json = line.split(" ", 1)
 
-        return cls(cache_func, json.loads(keys_json))
+        return cls(cache_func, simplejson.loads(keys_json))
 
     def to_line(self):
-        return " ".join((self.cache_func, json.dumps(self.keys)))
+        return " ".join((
+            self.cache_func, simplejson.dumps(self.keys, namedtuple_as_object=False)
+        ))
 
 
 class UserIpCommand(Command):
@@ -325,14 +327,14 @@ class UserIpCommand(Command):
     def from_line(cls, line):
         user_id, jsn = line.split(" ", 1)
 
-        access_token, ip, user_agent, device_id, last_seen = json.loads(jsn)
+        access_token, ip, user_agent, device_id, last_seen = simplejson.loads(jsn)
 
         return cls(
             user_id, access_token, ip, user_agent, device_id, last_seen
         )
 
     def to_line(self):
-        return self.user_id + " " + json.dumps((
+        return self.user_id + " " + simplejson.dumps((
             self.access_token, self.ip, self.user_agent, self.device_id,
             self.last_seen,
         ))
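The `namedtuple_as_object=False` flag matters because simplejson encodes namedtuples as JSON objects by default, while the replication wire format expects positional arrays. A quick standalone illustration (not synapse code):

```python
import collections
import simplejson

# simplejson turns namedtuples into objects unless told otherwise.
Row = collections.namedtuple("Row", ("token", "row"))
print(simplejson.dumps(Row(1, "x")))                              # {"token": 1, "row": "x"}
print(simplejson.dumps(Row(1, "x"), namedtuple_as_object=False))  # [1, "x"]
```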
+36 -35

@@ -132,7 +132,7 @@ class StateHandler(object):
 
         state_map = yield self.store.get_events(state.values(), get_prev_content=False)
         state = {
-            key: state_map[e_id] for key, e_id in state.items() if e_id in state_map
+            key: state_map[e_id] for key, e_id in state.iteritems() if e_id in state_map
         }
 
         defer.returnValue(state)
@@ -378,7 +378,7 @@ class StateHandler(object):
             new_state = resolve_events_with_state_map(state_set_ids, state_map)
 
         new_state = {
-            key: state_map[ev_id] for key, ev_id in new_state.items()
+            key: state_map[ev_id] for key, ev_id in new_state.iteritems()
         }
 
         return new_state
@@ -458,15 +458,15 @@ class StateResolutionHandler(object):
         # build a map from state key to the event_ids which set that state.
         # dict[(str, str), set[str])
         state = {}
-        for st in state_groups_ids.values():
-            for key, e_id in st.items():
+        for st in state_groups_ids.itervalues():
+            for key, e_id in st.iteritems():
                 state.setdefault(key, set()).add(e_id)
 
         # build a map from state key to the event_ids which set that state,
         # including only those where there are state keys in conflict.
         conflicted_state = {
             k: list(v)
-            for k, v in state.items()
+            for k, v in state.iteritems()
             if len(v) > 1
         }
@@ -480,36 +480,37 @@ class StateResolutionHandler(object):
             )
         else:
             new_state = {
-                key: e_ids.pop() for key, e_ids in state.items()
+                key: e_ids.pop() for key, e_ids in state.iteritems()
             }
 
-        # if the new state matches any of the input state groups, we can
-        # use that state group again. Otherwise we will generate a state_id
-        # which will be used as a cache key for future resolutions, but
-        # not get persisted.
-        state_group = None
-        new_state_event_ids = frozenset(new_state.values())
-        for sg, events in state_groups_ids.items():
-            if new_state_event_ids == frozenset(e_id for e_id in events):
-                state_group = sg
-                break
+        with Measure(self.clock, "state.create_group_ids"):
+            # if the new state matches any of the input state groups, we can
+            # use that state group again. Otherwise we will generate a state_id
+            # which will be used as a cache key for future resolutions, but
+            # not get persisted.
+            state_group = None
+            new_state_event_ids = frozenset(new_state.itervalues())
+            for sg, events in state_groups_ids.iteritems():
+                if new_state_event_ids == frozenset(e_id for e_id in events):
+                    state_group = sg
+                    break
 
-        # TODO: We want to create a state group for this set of events, to
-        # increase cache hits, but we need to make sure that it doesn't
-        # end up as a prev_group without being added to the database
+            # TODO: We want to create a state group for this set of events, to
+            # increase cache hits, but we need to make sure that it doesn't
+            # end up as a prev_group without being added to the database
 
-        prev_group = None
-        delta_ids = None
-        for old_group, old_ids in state_groups_ids.iteritems():
-            if not set(new_state) - set(old_ids):
-                n_delta_ids = {
-                    k: v
-                    for k, v in new_state.iteritems()
-                    if old_ids.get(k) != v
-                }
-                if not delta_ids or len(n_delta_ids) < len(delta_ids):
-                    prev_group = old_group
-                    delta_ids = n_delta_ids
+            prev_group = None
+            delta_ids = None
+            for old_group, old_ids in state_groups_ids.iteritems():
+                if not set(new_state) - set(old_ids):
+                    n_delta_ids = {
+                        k: v
+                        for k, v in new_state.iteritems()
+                        if old_ids.get(k) != v
+                    }
+                    if not delta_ids or len(n_delta_ids) < len(delta_ids):
+                        prev_group = old_group
+                        delta_ids = n_delta_ids
 
         cache = _StateCacheEntry(
             state=new_state,
@@ -702,7 +703,7 @@ def _resolve_with_state(unconflicted_state_ids, conflicted_state_ds, auth_event_
 
     auth_events = {
         key: state_map[ev_id]
-        for key, ev_id in auth_event_ids.items()
+        for key, ev_id in auth_event_ids.iteritems()
         if ev_id in state_map
     }
 
@@ -740,7 +741,7 @@ def _resolve_state_events(conflicted_state, auth_events):
 
     auth_events.update(resolved_state)
 
-    for key, events in conflicted_state.items():
+    for key, events in conflicted_state.iteritems():
         if key[0] == EventTypes.JoinRules:
             logger.debug("Resolving conflicted join rules %r", events)
             resolved_state[key] = _resolve_auth_events(
@@ -750,7 +751,7 @@ def _resolve_state_events(conflicted_state, auth_events):
 
     auth_events.update(resolved_state)
 
-    for key, events in conflicted_state.items():
+    for key, events in conflicted_state.iteritems():
         if key[0] == EventTypes.Member:
             logger.debug("Resolving conflicted member lists %r", events)
             resolved_state[key] = _resolve_auth_events(
@@ -760,7 +761,7 @@ def _resolve_state_events(conflicted_state, auth_events):
 
     auth_events.update(resolved_state)
 
-    for key, events in conflicted_state.items():
+    for key, events in conflicted_state.iteritems():
         if key not in resolved_state:
             logger.debug("Resolving conflicted state %r:%r", key, events)
             resolved_state[key] = _resolve_normal_events(
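The wholesale `items()` to `iteritems()` swap above is a Python 2 memory optimisation (this release predates Python 3 support): `dict.items()` materialises a full list of pairs, while `iteritems()` yields them lazily, which matters for the large state maps these loops walk. Illustrative snippet:

```python
# Python 2 illustration; on Python 3, items() already returns a lazy view.
state = {("m.room.member", "@alice:hs"): "$event_a"}
for key, e_id in state.iteritems():  # no intermediate list is built
    print(key, e_id)
```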
@@ -53,6 +53,22 @@ event_counter = metrics.register_counter(
     "persisted_events_sep", labels=["type", "origin_type", "origin_entity"]
 )
 
+# The number of times we are recalculating the current state
+state_delta_counter = metrics.register_counter(
+    "state_delta",
+)
+# The number of times we are recalculating state when there is only a
+# single forward extremity
+state_delta_single_event_counter = metrics.register_counter(
+    "state_delta_single_event",
+)
+# The number of times we are reculating state when we could have resonably
+# calculated the delta when we calculated the state for an event we were
+# persisting.
+state_delta_reuse_delta_counter = metrics.register_counter(
+    "state_delta_reuse_delta",
+)
+
 
 def encode_json(json_object):
     if USE_FROZEN_DICTS:
@@ -368,7 +384,8 @@ class EventsStore(EventsWorkerStore):
                     room_id, ev_ctx_rm, latest_event_ids
                 )
 
-                if new_latest_event_ids == set(latest_event_ids):
+                latest_event_ids = set(latest_event_ids)
+                if new_latest_event_ids == latest_event_ids:
                     # No change in extremities, so no change in state
                     continue
 
@@ -389,6 +406,26 @@ class EventsStore(EventsWorkerStore):
                 if all_single_prev_not_state:
                     continue
 
+                state_delta_counter.inc()
+                if len(new_latest_event_ids) == 1:
+                    state_delta_single_event_counter.inc()
+
+                    # This is a fairly handwavey check to see if we could
+                    # have guessed what the delta would have been when
+                    # processing one of these events.
+                    # What we're interested in is if the latest extremities
+                    # were the same when we created the event as they are
+                    # now. When this server creates a new event (as opposed
+                    # to receiving it over federation) it will use the
+                    # forward extremities as the prev_events, so we can
+                    # guess this by looking at the prev_events and checking
+                    # if they match the current forward extremities.
+                    for ev, _ in ev_ctx_rm:
+                        prev_event_ids = set(e for e, _ in ev.prev_events)
+                        if latest_event_ids == prev_event_ids:
+                            state_delta_reuse_delta_counter.inc()
+                            break
+
                 logger.info(
                     "Calculating state delta for room %s", room_id,
                 )
@@ -756,6 +793,13 @@ class EventsStore(EventsWorkerStore):
             self._invalidate_cache_and_stream(
                 txn, self.get_rooms_for_user_with_stream_ordering, (member,)
             )
+            # we call this once for each member to allow AS workers to
+            # only invalidate their caches when AS members change.
+            # other folks naively using a cached get_users_in_rooms will
+            # just get prodded multiple times and can ignore the member key.
+            self._invalidate_cache_and_stream(
+                txn, self.get_users_in_room, (room_id, member)
+            )
 
         for host in set(get_domain_from_id(u) for u in members_changed):
             self._invalidate_cache_and_stream(
@@ -765,10 +809,6 @@ class EventsStore(EventsWorkerStore):
                 txn, self.was_host_joined, (room_id, host)
             )
 
-        self._invalidate_cache_and_stream(
-            txn, self.get_users_in_room, (room_id,)
-        )
-
         self._invalidate_cache_and_stream(
             txn, self.get_current_state_ids, (room_id,)
         )
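One subtlety in the extremities hunk above: `get_latest_event_ids_in_room` presumably returns a list, and a set never compares equal to a list or tuple, so normalising `latest_event_ids` to a set once up front is what makes the later `latest_event_ids == prev_event_ids` check in the new metrics block meaningful. A two-line illustration:

```python
# Set equality in Python is type-sensitive but order-insensitive.
print({"$a", "$b"} == ["$a", "$b"])  # False: set == list is always False
print({"$a", "$b"} == {"$b", "$a"})  # True
```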
@@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError
 
 from ._base import SQLBaseStore
 
-import ujson as json
+import simplejson as json
 
 
 # The category ID for the "default" category. We don't store as null in the
@@ -460,14 +460,12 @@ class RegistrationStore(RegistrationWorkerStore,
         """
         def _find_next_generated_user_id(txn):
             txn.execute("SELECT name FROM users")
-            rows = self.cursor_to_dict(txn)
 
             regex = re.compile("^@(\d+):")
 
             found = set()
 
-            for r in rows:
-                user_id = r["name"]
+            for user_id, in txn:
                 match = regex.search(user_id)
                 if match:
                     found.add(int(match.group(1)))
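The registration hunk replaces `cursor_to_dict` with direct cursor iteration: a DB-API cursor yields plain row tuples, so a single-column result can be unpacked right in the `for` target, and rows stream instead of being materialised as a list of dicts first. A standalone sqlite3 sketch of the idiom:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT)")
conn.execute("INSERT INTO users VALUES ('@42:example.com')")

# "for user_id," unpacks each 1-tuple row as it streams from the cursor.
for user_id, in conn.execute("SELECT name FROM users"):
    print(user_id)  # @42:example.com
```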
@@ -667,7 +667,7 @@ class UserDirectoryStore(SQLBaseStore):
         # The array of numbers are the weights for the various part of the
         # search: (domain, _, display name, localpart)
         sql = """
-            SELECT d.user_id, display_name, avatar_url
+            SELECT d.user_id AS user_id, display_name, avatar_url
             FROM user_directory_search
             INNER JOIN user_directory AS d USING (user_id)
             %s
@@ -702,7 +702,7 @@ class UserDirectoryStore(SQLBaseStore):
             search_query = _parse_query_sqlite(search_term)
 
             sql = """
-            SELECT d.user_id, display_name, avatar_url
+            SELECT d.user_id AS user_id, display_name, avatar_url
             FROM user_directory_search
             INNER JOIN user_directory AS d USING (user_id)
             %s
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
 
 import logging
 
@@ -45,7 +45,7 @@ def create_resource_tree(desired_tree, root_resource):
         for path_seg in full_path.split('/')[1:-1]:
             if path_seg not in last_resource.listNames():
                 # resource doesn't exist, so make a "dummy resource"
-                child_resource = Resource()
+                child_resource = NoResource()
                 last_resource.putChild(path_seg, child_resource)
                 res_id = _resource_id(last_resource, path_seg)
                 resource_mappings[res_id] = child_resource