merge conflicts
@@ -1,48 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from tap.parser import Parser
from tap.line import Result, Unknown, Diagnostic

out = ["### TAP Output for " + sys.argv[2]]

p = Parser()

in_error = False

for line in p.parse_file(sys.argv[1]):
    if isinstance(line, Result):
        if in_error:
            out.append("")
            out.append("</pre></code></details>")
            out.append("")
            out.append("----")
            out.append("")
            in_error = False

        if not line.ok and not line.todo:
            in_error = True

            out.append("FAILURE Test #%d: ``%s``" % (line.number, line.description))
            out.append("")
            out.append("<details><summary>Show log</summary><code><pre>")

    elif isinstance(line, Diagnostic) and in_error:
        out.append(line.text)

if out:
    for line in out[:-3]:
        print(line)
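The removed script above is `format_tap.py` (see the changelog entry for 6219 further down): it turned a TAP results file into a Markdown summary for CI. As a hedged illustration of the small `tap` API it relied on — the input file name and the fields printed are assumptions, not part of the commit:

    from tap.parser import Parser
    from tap.line import Diagnostic, Result

    # Parser.parse_file() yields typed line objects (Result, Diagnostic, ...).
    for line in Parser().parse_file("results.tap"):  # hypothetical input file
        if isinstance(line, Result):
            print(line.number, line.ok, line.description)
        elif isinstance(line, Diagnostic):
            print(line.text)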
MANIFEST.in
@@ -8,11 +8,12 @@ include demo/demo.tls.dh
include demo/*.py
include demo/*.sh

recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.sql.postgres
recursive-include synapse/storage/schema *.sql.sqlite
recursive-include synapse/storage/schema *.py
recursive-include synapse/storage/schema *.txt
recursive-include synapse/storage *.sql
recursive-include synapse/storage *.sql.postgres
recursive-include synapse/storage *.sql.sqlite
recursive-include synapse/storage *.py
recursive-include synapse/storage *.txt
recursive-include synapse/storage *.md

recursive-include docs *
recursive-include scripts *

changelog.d/5726.feature (new file)
@@ -0,0 +1 @@
Add ability to upload cross-signing signatures.

changelog.d/5759.misc (new file)
@@ -0,0 +1,4 @@
Allow devices to be marked as hidden, for use by features such as cross-signing.
This adds a new field with a default value to the devices field in the database,
and so the database upgrade may take a long time depending on how many devices
are in the database.

changelog.d/5769.feature (new file)
@@ -0,0 +1 @@
Allow uploading of cross-signing keys.

changelog.d/6102.bugfix (new file)
@@ -0,0 +1 @@
Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database.

changelog.d/6203.misc (new file)
@@ -0,0 +1 @@
Enforce that all boolean configuration values are lowercase in CI.

changelog.d/6214.misc (new file)
@@ -0,0 +1 @@
Remove some unused event-auth code.

changelog.d/6217.misc (new file)
@@ -0,0 +1 @@
Remove Auth.check method.

changelog.d/6219.misc (new file)
@@ -0,0 +1 @@
Remove `format_tap.py` script in favour of a perl reimplementation in Sytest's repo.

changelog.d/6229.bugfix (new file)
@@ -0,0 +1 @@
Prevent the demo Synapse servers from blacklisting `::1`.

changelog.d/6231.misc (new file)
@@ -0,0 +1 @@
Refactor storage layer in preparation to support having multiple databases.

@@ -77,14 +77,13 @@ for port in 8080 8081 8082; do

# Reduce the blacklist
blacklist=$(cat <<-BLACK
# Set the blacklist so that it doesn't include 127.0.0.1
# Set the blacklist so that it doesn't include 127.0.0.1, ::1
federation_ip_range_blacklist:
- '10.0.0.0/8'
- '172.16.0.0/12'
- '192.168.0.0/16'
- '100.64.0.0/10'
- '169.254.0.0/16'
- '::1/128'
- 'fe80::/64'
- 'fc00::/7'
BLACK

@@ -27,17 +27,21 @@ connect to a postgres database.

## Set up database

Assuming your PostgreSQL database user is called `postgres`, create a
user `synapse_user` with:
Assuming your PostgreSQL database user is called `postgres`, first authenticate as the database user with:

    su - postgres
    # Or, if your system uses sudo to get administrative rights
    sudo -u postgres bash

Then, create a user ``synapse_user`` with:

    createuser --pwprompt synapse_user

Before you can authenticate with the `synapse_user`, you must create a
database that it can access. To create a database, first connect to the
database with your database user:

    su - postgres
    su - postgres # Or: sudo -u postgres bash
    psql

and then run:

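(The psql statements that follow in the documentation are outside this hunk and are left untouched here.) As a separate, hedged illustration — not part of the documentation change, with placeholder credentials — the newly created role can be smoke-tested from Python with psycopg2 once its database exists:

    import psycopg2

    # Placeholder values; substitute the role, password and database created above.
    conn = psycopg2.connect(
        dbname="synapse", user="synapse_user", password="changeme", host="localhost"
    )
    with conn.cursor() as cur:
        cur.execute("SELECT version();")
        print(cur.fetchone()[0])
    conn.close()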
@@ -86,7 +86,7 @@ pid_file: DATADIR/homeserver.pid
# Whether room invites to users on this server should be blocked
# (except those sent by local server admins). The default is False.
#
#block_non_admin_invites: True
#block_non_admin_invites: true

# Room searching
#
@@ -239,7 +239,7 @@ listeners:

# Global blocking
#
#hs_disabled: False
#hs_disabled: false
#hs_disabled_message: 'Human readable reason for why the HS is blocked'

# Monthly Active User Blocking
@@ -266,16 +266,16 @@ listeners:
# interest increasing the mau limit further. Defaults to True, which
# means that alerting is enabled
#
#limit_usage_by_mau: False
#limit_usage_by_mau: false
#max_mau_value: 50
#mau_trial_days: 2
#mau_limit_alerting: False
#mau_limit_alerting: false

# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau
# is true, this is implied to be true.
#
#mau_stats_only: False
#mau_stats_only: false

# Sometimes the server admin will want to ensure certain accounts are
# never blocked by mau checking. These accounts are specified here.
@@ -300,7 +300,7 @@ listeners:
#
# Uncomment the below lines to enable:
#limit_remote_rooms:
# enabled: True
# enabled: true
# complexity: 1.0
# complexity_error: "This room is too complex."

@@ -417,7 +417,7 @@ acme:
# ACME support is disabled by default. Set this to `true` and uncomment
# tls_certificate_path and tls_private_key_path above to enable it.
#
enabled: False
enabled: false

# Endpoint to use to request certificates. If you only want to test,
# use Let's Encrypt's staging url:
@@ -792,7 +792,7 @@ uploads_path: "DATADIR/uploads"
# connect to arbitrary endpoints without having first signed up for a
# valid account (e.g. by passing a CAPTCHA).
#
#turn_allow_guests: True
#turn_allow_guests: true


## Registration ##
@@ -835,7 +835,7 @@ uploads_path: "DATADIR/uploads"
# where d is equal to 10% of the validity period.
#
#account_validity:
# enabled: True
# enabled: true
# period: 6w
# renew_at: 1w
# renew_email_subject: "Renew your %(app)s account"
@@ -977,7 +977,7 @@ account_threepid_delegates:

# Enable collection and rendering of performance metrics
#
#enable_metrics: False
#enable_metrics: false

# Enable sentry integration
# NOTE: While attempts are made to ensure that the logs don't contain
@@ -1029,7 +1029,7 @@ metrics_flags:
# Uncomment to enable tracking of application service IP addresses. Implicitly
# enables MAU tracking for application service users.
#
#track_appservice_user_ips: True
#track_appservice_user_ips: true


# a secret which is used to sign access tokens. If none is specified,
@@ -1155,7 +1155,7 @@ saml2_config:
# - url: https://our_idp/metadata.xml
#
# # By default, the user has to go to our login page first. If you'd like
# # to allow IdP-initiated login, set 'allow_unsolicited: True' in a
# # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
# # 'service.sp' section:
# #
# #service:
@@ -1269,13 +1269,13 @@ password_config:
# smtp_port: 25 # SSL: 465, STARTTLS: 587
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: False
# require_transport_security: false
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
#
# # Enable email notifications by default
# #
# notif_for_new_users: True
# notif_for_new_users: true
#
# # Defining a custom URL for Riot is only needed if email notifications
# # should contain links to a self-hosted installation of Riot; when set
@@ -1453,11 +1453,11 @@ password_config:
# body: >-
# To continue using this homeserver you must review and agree to the
# terms and conditions at %(consent_uri)s
# send_server_notice_to_guests: True
# send_server_notice_to_guests: true
# block_events_error: >-
# To continue using this homeserver you must review and agree to the
# terms and conditions at %(consent_uri)s
# require_at_registration: False
# require_at_registration: false
# policy_name: Privacy Policy
#


@@ -1,58 +0,0 @@
from __future__ import print_function

import argparse
import itertools
import json
import sys

from mock import Mock

from synapse.api.auth import Auth
from synapse.events import FrozenEvent


def check_auth(auth, auth_chain, events):
    auth_chain.sort(key=lambda e: e.depth)

    auth_map = {e.event_id: e for e in auth_chain}

    create_events = {}
    for e in auth_chain:
        if e.type == "m.room.create":
            create_events[e.room_id] = e

    for e in itertools.chain(auth_chain, events):
        auth_events_list = [auth_map[i] for i, _ in e.auth_events]

        auth_events = {(e.type, e.state_key): e for e in auth_events_list}

        auth_events[("m.room.create", "")] = create_events[e.room_id]

        try:
            auth.check(e, auth_events=auth_events)
        except Exception as ex:
            print("Failed:", e.event_id, e.type, e.state_key)
            print("Auth_events:", auth_events)
            print(ex)
            print(json.dumps(e.get_dict(), sort_keys=True, indent=4))
            # raise
        print("Success:", e.event_id, e.type, e.state_key)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "json", nargs="?", type=argparse.FileType("r"), default=sys.stdin
    )

    args = parser.parse_args()

    js = json.load(args.json)

    auth = Auth(Mock())
    check_auth(
        auth,
        [FrozenEvent(d) for d in js["auth_chain"]],
        [FrozenEvent(d) for d in js.get("pdus", [])],
    )
scripts-dev/config-lint.sh (new executable file)
@@ -0,0 +1,9 @@
#!/bin/bash
# Find linting errors in Synapse's default config file.
# Exits with 0 if there are no problems, or another code otherwise.

# Fix non-lowercase true/false values
sed -i -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml

# Check if anything changed
git diff --exit-code docs/sample_config.yaml
@@ -10,3 +10,4 @@ set -e
isort -y -rc synapse tests scripts-dev scripts
flake8 synapse tests
python3 -m black synapse tests scripts-dev scripts
./scripts-dev/config-lint.sh

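The new `config-lint.sh` check above is a sed-and-diff pass over the generated sample config, wired into `lint.sh`. For illustration only (this is not part of the commit), the same check sketched in Python, mirroring the sed expression and the exit-code convention:

    import re
    import sys

    path = "docs/sample_config.yaml"
    text = open(path).read()
    # Rewrite any ": True"/": False" (allowing extra spaces) to lowercase YAML booleans.
    fixed = re.sub(r": +True", ": true", text)
    fixed = re.sub(r": +False", ": false", fixed)
    sys.exit(0 if fixed == text else 1)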
@@ -2,6 +2,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -29,9 +30,23 @@ import yaml
|
||||
from twisted.enterprise import adbapi
|
||||
from twisted.internet import defer, reactor
|
||||
|
||||
from synapse.storage._base import LoggingTransaction, SQLBaseStore
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.logging.context import PreserveLoggingContext
|
||||
from synapse.storage._base import LoggingTransaction
|
||||
from synapse.storage.client_ips import ClientIpBackgroundUpdateStore
|
||||
from synapse.storage.deviceinbox import DeviceInboxBackgroundUpdateStore
|
||||
from synapse.storage.devices import DeviceBackgroundUpdateStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.events_bg_updates import EventsBackgroundUpdatesStore
|
||||
from synapse.storage.media_repository import MediaRepositoryBackgroundUpdateStore
|
||||
from synapse.storage.prepare_database import prepare_database
|
||||
from synapse.storage.registration import RegistrationBackgroundUpdateStore
|
||||
from synapse.storage.roommember import RoomMemberBackgroundUpdateStore
|
||||
from synapse.storage.search import SearchBackgroundUpdateStore
|
||||
from synapse.storage.state import StateBackgroundUpdateStore
|
||||
from synapse.storage.stats import StatsStore
|
||||
from synapse.storage.user_directory import UserDirectoryBackgroundUpdateStore
|
||||
from synapse.util import Clock
|
||||
|
||||
logger = logging.getLogger("synapse_port_db")
|
||||
|
||||
@@ -98,33 +113,24 @@ APPEND_ONLY_TABLES = [
|
||||
end_error_exec_info = None
|
||||
|
||||
|
||||
class Store(object):
|
||||
"""This object is used to pull out some of the convenience API from the
|
||||
Storage layer.
|
||||
|
||||
*All* database interactions should go through this object.
|
||||
"""
|
||||
|
||||
def __init__(self, db_pool, engine):
|
||||
self.db_pool = db_pool
|
||||
self.database_engine = engine
|
||||
|
||||
_simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
|
||||
_simple_insert = SQLBaseStore.__dict__["_simple_insert"]
|
||||
|
||||
_simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
|
||||
_simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
|
||||
_simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
|
||||
_simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
|
||||
_simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
|
||||
_simple_select_one_onecol_txn = SQLBaseStore.__dict__[
|
||||
"_simple_select_one_onecol_txn"
|
||||
]
|
||||
|
||||
_simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
|
||||
_simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
|
||||
_simple_update_txn = SQLBaseStore.__dict__["_simple_update_txn"]
|
||||
class Store(
|
||||
ClientIpBackgroundUpdateStore,
|
||||
DeviceInboxBackgroundUpdateStore,
|
||||
DeviceBackgroundUpdateStore,
|
||||
EventsBackgroundUpdatesStore,
|
||||
MediaRepositoryBackgroundUpdateStore,
|
||||
RegistrationBackgroundUpdateStore,
|
||||
RoomMemberBackgroundUpdateStore,
|
||||
SearchBackgroundUpdateStore,
|
||||
StateBackgroundUpdateStore,
|
||||
UserDirectoryBackgroundUpdateStore,
|
||||
StatsStore,
|
||||
):
|
||||
def __init__(self, db_conn, hs):
|
||||
super().__init__(db_conn, hs)
|
||||
self.db_pool = hs.get_db_pool()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def runInteraction(self, desc, func, *args, **kwargs):
|
||||
def r(conn):
|
||||
try:
|
||||
@@ -150,7 +156,8 @@ class Store(object):
|
||||
logger.debug("[TXN FAIL] {%s} %s", desc, e)
|
||||
raise
|
||||
|
||||
return self.db_pool.runWithConnection(r)
|
||||
with PreserveLoggingContext():
|
||||
return (yield self.db_pool.runWithConnection(r))
|
||||
|
||||
def execute(self, f, *args, **kwargs):
|
||||
return self.runInteraction(f.__name__, f, *args, **kwargs)
|
||||
@@ -176,6 +183,25 @@ class Store(object):
|
||||
raise
|
||||
|
||||
|
||||
class MockHomeserver:
|
||||
def __init__(self, config, database_engine, db_conn, db_pool):
|
||||
self.database_engine = database_engine
|
||||
self.db_conn = db_conn
|
||||
self.db_pool = db_pool
|
||||
self.clock = Clock(reactor)
|
||||
self.config = config
|
||||
self.hostname = config.server_name
|
||||
|
||||
def get_db_conn(self):
|
||||
return self.db_conn
|
||||
|
||||
def get_db_pool(self):
|
||||
return self.db_pool
|
||||
|
||||
def get_clock(self):
|
||||
return self.clock
|
||||
|
||||
|
||||
class Porter(object):
|
||||
def __init__(self, **kwargs):
|
||||
self.__dict__.update(kwargs)
|
||||
@@ -447,31 +473,75 @@ class Porter(object):
|
||||
|
||||
db_conn.commit()
|
||||
|
||||
return db_conn
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def build_db_store(self, config):
|
||||
"""Builds and returns a database store using the provided configuration.
|
||||
|
||||
Args:
|
||||
config: The database configuration, i.e. a dict following the structure of
|
||||
the "database" section of Synapse's configuration file.
|
||||
|
||||
Returns:
|
||||
The built Store object.
|
||||
"""
|
||||
engine = create_engine(config)
|
||||
|
||||
self.progress.set_state("Preparing %s" % config["name"])
|
||||
conn = self.setup_db(config, engine)
|
||||
|
||||
db_pool = adbapi.ConnectionPool(
|
||||
config["name"], **config["args"]
|
||||
)
|
||||
|
||||
hs = MockHomeserver(self.hs_config, engine, conn, db_pool)
|
||||
|
||||
store = Store(conn, hs)
|
||||
|
||||
yield store.runInteraction(
|
||||
"%s_engine.check_database" % config["name"],
|
||||
engine.check_database,
|
||||
)
|
||||
|
||||
return store
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def run_background_updates_on_postgres(self):
|
||||
# Manually apply all background updates on the PostgreSQL database.
|
||||
postgres_ready = yield self.postgres_store.has_completed_background_updates()
|
||||
|
||||
if not postgres_ready:
|
||||
# Only say that we're running background updates when there are background
|
||||
# updates to run.
|
||||
self.progress.set_state("Running background updates on PostgreSQL")
|
||||
|
||||
while not postgres_ready:
|
||||
yield self.postgres_store.do_next_background_update(100)
|
||||
postgres_ready = yield (
|
||||
self.postgres_store.has_completed_background_updates()
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def run(self):
|
||||
try:
|
||||
sqlite_db_pool = adbapi.ConnectionPool(
|
||||
self.sqlite_config["name"], **self.sqlite_config["args"]
|
||||
self.sqlite_store = yield self.build_db_store(self.sqlite_config)
|
||||
|
||||
# Check if all background updates are done, abort if not.
|
||||
updates_complete = yield self.sqlite_store.has_completed_background_updates()
|
||||
if not updates_complete:
|
||||
sys.stderr.write(
|
||||
"Pending background updates exist in the SQLite3 database."
|
||||
" Please start Synapse again and wait until every update has finished"
|
||||
" before running this script.\n"
|
||||
)
|
||||
defer.returnValue(None)
|
||||
|
||||
self.postgres_store = yield self.build_db_store(
|
||||
self.hs_config.database_config
|
||||
)
|
||||
|
||||
postgres_db_pool = adbapi.ConnectionPool(
|
||||
self.postgres_config["name"], **self.postgres_config["args"]
|
||||
)
|
||||
|
||||
sqlite_engine = create_engine(sqlite_config)
|
||||
postgres_engine = create_engine(postgres_config)
|
||||
|
||||
self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
|
||||
self.postgres_store = Store(postgres_db_pool, postgres_engine)
|
||||
|
||||
yield self.postgres_store.execute(postgres_engine.check_database)
|
||||
|
||||
# Step 1. Set up databases.
|
||||
self.progress.set_state("Preparing SQLite3")
|
||||
self.setup_db(sqlite_config, sqlite_engine)
|
||||
|
||||
self.progress.set_state("Preparing PostgreSQL")
|
||||
self.setup_db(postgres_config, postgres_engine)
|
||||
yield self.run_background_updates_on_postgres()
|
||||
|
||||
self.progress.set_state("Creating port tables")
|
||||
|
||||
@@ -563,6 +633,8 @@ class Porter(object):
|
||||
def conv(j, col):
|
||||
if j in bool_cols:
|
||||
return bool(col)
|
||||
if isinstance(col, bytes):
|
||||
return bytearray(col)
|
||||
elif isinstance(col, string_types) and "\0" in col:
|
||||
logger.warn(
|
||||
"DROPPING ROW: NUL value in table %s col %s: %r",
|
||||
@@ -926,18 +998,24 @@ if __name__ == "__main__":
|
||||
},
|
||||
}
|
||||
|
||||
postgres_config = yaml.safe_load(args.postgres_config)
|
||||
hs_config = yaml.safe_load(args.postgres_config)
|
||||
|
||||
if "database" in postgres_config:
|
||||
postgres_config = postgres_config["database"]
|
||||
if "database" not in hs_config:
|
||||
sys.stderr.write("The configuration file must have a 'database' section.\n")
|
||||
sys.exit(4)
|
||||
|
||||
postgres_config = hs_config["database"]
|
||||
|
||||
if "name" not in postgres_config:
|
||||
sys.stderr.write("Malformed database config: no 'name'")
|
||||
sys.stderr.write("Malformed database config: no 'name'\n")
|
||||
sys.exit(2)
|
||||
if postgres_config["name"] != "psycopg2":
|
||||
sys.stderr.write("Database must use 'psycopg2' connector.")
|
||||
sys.stderr.write("Database must use the 'psycopg2' connector.\n")
|
||||
sys.exit(3)
|
||||
|
||||
config = HomeServerConfig()
|
||||
config.parse_config_dict(hs_config, "", "")
|
||||
|
||||
def start(stdscr=None):
|
||||
if stdscr:
|
||||
progress = CursesProgress(stdscr)
|
||||
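For orientation: after this change `synapse_port_db` reads a full homeserver config rather than a bare database config, so `yaml.safe_load(args.postgres_config)` is expected to yield a mapping with a `database` section. A sketch of that shape, with invented placeholder values:

    hs_config = {
        "server_name": "example.com",
        "database": {
            "name": "psycopg2",
            "args": {
                "user": "synapse_user",
                "database": "synapse",
                "host": "localhost",
            },
        },
        # ... the rest of homeserver.yaml ...
    }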
@@ -946,9 +1024,9 @@ if __name__ == "__main__":
|
||||
|
||||
porter = Porter(
|
||||
sqlite_config=sqlite_config,
|
||||
postgres_config=postgres_config,
|
||||
progress=progress,
|
||||
batch_size=args.batch_size,
|
||||
hs_config=config,
|
||||
)
|
||||
|
||||
reactor.callWhenRunning(porter.run)
|
||||
|
||||
@@ -90,27 +90,10 @@ class Auth(object):
|
||||
)
|
||||
auth_events = yield self.store.get_events(auth_events_ids)
|
||||
auth_events = {(e.type, e.state_key): e for e in itervalues(auth_events)}
|
||||
self.check(
|
||||
event_auth.check(
|
||||
room_version, event, auth_events=auth_events, do_sig_check=do_sig_check
|
||||
)
|
||||
|
||||
def check(self, room_version, event, auth_events, do_sig_check=True):
|
||||
""" Checks if this event is correctly authed.
|
||||
|
||||
Args:
|
||||
room_version (str): version of the room
|
||||
event: the event being checked.
|
||||
auth_events (dict: event-key -> event): the existing room state.
|
||||
|
||||
|
||||
Returns:
|
||||
True if the auth checks pass.
|
||||
"""
|
||||
with Measure(self.clock, "auth.check"):
|
||||
event_auth.check(
|
||||
room_version, event, auth_events, do_sig_check=do_sig_check
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_joined_room(self, room_id, user_id, current_state=None):
|
||||
"""Check if the user is currently joined in the room
|
||||
|
||||
@@ -97,8 +97,6 @@ class EventTypes(object):
|
||||
|
||||
class RejectedReason(object):
|
||||
AUTH_ERROR = "auth_error"
|
||||
REPLACED = "replaced"
|
||||
NOT_ANCESTOR = "not_ancestor"
|
||||
|
||||
|
||||
class RoomCreationPreset(object):
|
||||
|
||||
@@ -62,6 +62,7 @@ class Codes(object):
|
||||
INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
|
||||
WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
|
||||
EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
|
||||
INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
|
||||
USER_DEACTIVATED = "M_USER_DEACTIVATED"
|
||||
|
||||
|
||||
|
||||
@@ -56,8 +56,8 @@ from synapse.rest.client.v1.room import (
|
||||
RoomStateEventRestServlet,
|
||||
)
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.user_directory import UserDirectoryStore
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
@@ -39,8 +39,8 @@ from synapse.replication.tcp.client import ReplicationClientHandler
|
||||
from synapse.rest.admin import register_servlets_for_media_repo
|
||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.media_repository import MediaRepositoryStore
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
@@ -54,8 +54,8 @@ from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
|
||||
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
|
||||
from synapse.rest.client.v2_alpha import sync
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.data_stores.main.presence import UserPresenceState
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.presence import UserPresenceState
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.stringutils import random_string
|
||||
|
||||
@@ -42,8 +42,8 @@ from synapse.replication.tcp.streams.events import (
|
||||
)
|
||||
from synapse.rest.client.v2_alpha import user_directory
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.user_directory import UserDirectoryStore
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.manhole import manhole
|
||||
|
||||
@@ -48,7 +48,7 @@ class AppServiceConfig(Config):
|
||||
# Uncomment to enable tracking of application service IP addresses. Implicitly
|
||||
# enables MAU tracking for application service users.
|
||||
#
|
||||
#track_appservice_user_ips: True
|
||||
#track_appservice_user_ips: true
|
||||
"""
|
||||
|
||||
|
||||
|
||||
@@ -62,11 +62,11 @@ DEFAULT_CONFIG = """\
|
||||
# body: >-
|
||||
# To continue using this homeserver you must review and agree to the
|
||||
# terms and conditions at %(consent_uri)s
|
||||
# send_server_notice_to_guests: True
|
||||
# send_server_notice_to_guests: true
|
||||
# block_events_error: >-
|
||||
# To continue using this homeserver you must review and agree to the
|
||||
# terms and conditions at %(consent_uri)s
|
||||
# require_at_registration: False
|
||||
# require_at_registration: false
|
||||
# policy_name: Privacy Policy
|
||||
#
|
||||
"""
|
||||
|
||||
@@ -304,13 +304,13 @@ class EmailConfig(Config):
|
||||
# smtp_port: 25 # SSL: 465, STARTTLS: 587
|
||||
# smtp_user: "exampleusername"
|
||||
# smtp_pass: "examplepassword"
|
||||
# require_transport_security: False
|
||||
# require_transport_security: false
|
||||
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
|
||||
# app_name: Matrix
|
||||
#
|
||||
# # Enable email notifications by default
|
||||
# #
|
||||
# notif_for_new_users: True
|
||||
# notif_for_new_users: true
|
||||
#
|
||||
# # Defining a custom URL for Riot is only needed if email notifications
|
||||
# # should contain links to a self-hosted installation of Riot; when set
|
||||
|
||||
@@ -70,7 +70,7 @@ class MetricsConfig(Config):
|
||||
|
||||
# Enable collection and rendering of performance metrics
|
||||
#
|
||||
#enable_metrics: False
|
||||
#enable_metrics: false
|
||||
|
||||
# Enable sentry integration
|
||||
# NOTE: While attempts are made to ensure that the logs don't contain
|
||||
|
||||
@@ -180,7 +180,7 @@ class RegistrationConfig(Config):
|
||||
# where d is equal to 10%% of the validity period.
|
||||
#
|
||||
#account_validity:
|
||||
# enabled: True
|
||||
# enabled: true
|
||||
# period: 6w
|
||||
# renew_at: 1w
|
||||
# renew_email_subject: "Renew your %%(app)s account"
|
||||
|
||||
@@ -176,7 +176,7 @@ class SAML2Config(Config):
|
||||
# - url: https://our_idp/metadata.xml
|
||||
#
|
||||
# # By default, the user has to go to our login page first. If you'd like
|
||||
# # to allow IdP-initiated login, set 'allow_unsolicited: True' in a
|
||||
# # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
|
||||
# # 'service.sp' section:
|
||||
# #
|
||||
# #service:
|
||||
|
||||
@@ -532,7 +532,7 @@ class ServerConfig(Config):
|
||||
# Whether room invites to users on this server should be blocked
|
||||
# (except those sent by local server admins). The default is False.
|
||||
#
|
||||
#block_non_admin_invites: True
|
||||
#block_non_admin_invites: true
|
||||
|
||||
# Room searching
|
||||
#
|
||||
@@ -673,7 +673,7 @@ class ServerConfig(Config):
|
||||
|
||||
# Global blocking
|
||||
#
|
||||
#hs_disabled: False
|
||||
#hs_disabled: false
|
||||
#hs_disabled_message: 'Human readable reason for why the HS is blocked'
|
||||
|
||||
# Monthly Active User Blocking
|
||||
@@ -700,16 +700,16 @@ class ServerConfig(Config):
|
||||
# interest increasing the mau limit further. Defaults to True, which
|
||||
# means that alerting is enabled
|
||||
#
|
||||
#limit_usage_by_mau: False
|
||||
#limit_usage_by_mau: false
|
||||
#max_mau_value: 50
|
||||
#mau_trial_days: 2
|
||||
#mau_limit_alerting: False
|
||||
#mau_limit_alerting: false
|
||||
|
||||
# If enabled, the metrics for the number of monthly active users will
|
||||
# be populated, however no one will be limited. If limit_usage_by_mau
|
||||
# is true, this is implied to be true.
|
||||
#
|
||||
#mau_stats_only: False
|
||||
#mau_stats_only: false
|
||||
|
||||
# Sometimes the server admin will want to ensure certain accounts are
|
||||
# never blocked by mau checking. These accounts are specified here.
|
||||
@@ -734,7 +734,7 @@ class ServerConfig(Config):
|
||||
#
|
||||
# Uncomment the below lines to enable:
|
||||
#limit_remote_rooms:
|
||||
# enabled: True
|
||||
# enabled: true
|
||||
# complexity: 1.0
|
||||
# complexity_error: "This room is too complex."
|
||||
|
||||
|
||||
@@ -289,6 +289,9 @@ class TlsConfig(Config):
|
||||
"http://localhost:8009/.well-known/acme-challenge"
|
||||
)
|
||||
|
||||
# flake8 doesn't recognise that variables are used in the below string
|
||||
_ = tls_enabled, proxypassline, acme_enabled, default_acme_account_file
|
||||
|
||||
return (
|
||||
"""\
|
||||
## TLS ##
|
||||
@@ -451,7 +454,11 @@ class TlsConfig(Config):
|
||||
#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
|
||||
|
||||
"""
|
||||
% locals()
|
||||
# Lowercase the string representation of boolean values
|
||||
% {
|
||||
x[0]: str(x[1]).lower() if isinstance(x[1], bool) else x[1]
|
||||
for x in locals().items()
|
||||
}
|
||||
)
|
||||
|
||||
def read_tls_certificate(self):
|
||||
|
||||
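The dict comprehension introduced in `TlsConfig` above lowercases boolean values before they are interpolated into the sample config template, so the generated YAML uses `true`/`false`. A standalone sketch of the same substitution (the variable names are placeholders):

    values = {"acme_enabled": False, "bind_port": 8448, "proxypassline": ""}
    rendered = {
        k: str(v).lower() if isinstance(v, bool) else v for k, v in values.items()
    }
    assert rendered == {"acme_enabled": "false", "bind_port": 8448, "proxypassline": ""}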
@@ -56,5 +56,5 @@ class VoipConfig(Config):
|
||||
# connect to arbitrary endpoints without having first signed up for a
|
||||
# valid account (e.g. by passing a CAPTCHA).
|
||||
#
|
||||
#turn_allow_guests: True
|
||||
#turn_allow_guests: true
|
||||
"""
|
||||
|
||||
@@ -878,44 +878,6 @@ class FederationClient(FederationBase):
|
||||
third_party_instance_id=third_party_instance_id,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def query_auth(self, destination, room_id, event_id, local_auth):
|
||||
"""
|
||||
Params:
|
||||
destination (str)
|
||||
event_id (str)
|
||||
local_auth (list)
|
||||
"""
|
||||
time_now = self._clock.time_msec()
|
||||
|
||||
send_content = {"auth_chain": [e.get_pdu_json(time_now) for e in local_auth]}
|
||||
|
||||
code, content = yield self.transport_layer.send_query_auth(
|
||||
destination=destination,
|
||||
room_id=room_id,
|
||||
event_id=event_id,
|
||||
content=send_content,
|
||||
)
|
||||
|
||||
room_version = yield self.store.get_room_version(room_id)
|
||||
format_ver = room_version_to_event_format(room_version)
|
||||
|
||||
auth_chain = [event_from_pdu_json(e, format_ver) for e in content["auth_chain"]]
|
||||
|
||||
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, auth_chain, outlier=True, room_version=room_version
|
||||
)
|
||||
|
||||
signed_auth.sort(key=lambda e: e.depth)
|
||||
|
||||
ret = {
|
||||
"auth_chain": signed_auth,
|
||||
"rejects": content.get("rejects", []),
|
||||
"missing": content.get("missing", []),
|
||||
}
|
||||
|
||||
return ret
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_missing_events(
|
||||
self,
|
||||
|
||||
@@ -30,7 +30,7 @@ from synapse.federation.units import Edu
|
||||
from synapse.handlers.presence import format_user_presence_state
|
||||
from synapse.metrics import sent_transactions_counter
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.storage import UserPresenceState
|
||||
from synapse.storage.presence import UserPresenceState
|
||||
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
|
||||
|
||||
# This is defined in the Matrix spec and enforced by the receiver.
|
||||
|
||||
@@ -381,17 +381,6 @@ class TransportLayerClient(object):
|
||||
|
||||
return content
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def send_query_auth(self, destination, room_id, event_id, content):
|
||||
path = _create_v1_path("/query_auth/%s/%s", room_id, event_id)
|
||||
|
||||
content = yield self.client.post_json(
|
||||
destination=destination, path=path, data=content
|
||||
)
|
||||
|
||||
return content
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def query_client_keys(self, destination, query_content, timeout):
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
# Copyright 2019 New Vector Ltd
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -438,6 +440,21 @@ class DeviceHandler(DeviceWorkerHandler):
|
||||
self.federation_sender.send_device_messages(host)
|
||||
log_kv({"message": "sent device update to host", "host": host})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def notify_user_signature_update(self, from_user_id, user_ids):
|
||||
"""Notify a user that they have made new signatures of other users.
|
||||
|
||||
Args:
|
||||
from_user_id (str): the user who made the signature
|
||||
user_ids (list[str]): the user IDs that have new signatures
|
||||
"""
|
||||
|
||||
position = yield self.store.add_user_signature_change_to_streams(
|
||||
from_user_id, user_ids
|
||||
)
|
||||
|
||||
self.notifier.on_new_event("device_list_key", position, users=[from_user_id])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_federation_query_user_devices(self, user_id):
|
||||
stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
# Copyright 2018-2019 New Vector Ltd
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -18,14 +19,22 @@ import logging
|
||||
|
||||
from six import iteritems
|
||||
|
||||
import attr
|
||||
from canonicaljson import encode_canonical_json, json
|
||||
from signedjson.key import decode_verify_key_bytes
|
||||
from signedjson.sign import SignatureVerifyException, verify_signed_json
|
||||
from unpaddedbase64 import decode_base64
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import CodeMessageException, SynapseError
|
||||
from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
|
||||
from synapse.types import UserID, get_domain_from_id
|
||||
from synapse.types import (
|
||||
UserID,
|
||||
get_domain_from_id,
|
||||
get_verify_key_from_cross_signing_key,
|
||||
)
|
||||
from synapse.util import unwrapFirstError
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
|
||||
@@ -49,7 +58,7 @@ class E2eKeysHandler(object):
|
||||
|
||||
@trace
|
||||
@defer.inlineCallbacks
|
||||
def query_devices(self, query_body, timeout):
|
||||
def query_devices(self, query_body, timeout, from_user_id):
|
||||
""" Handle a device key query from a client
|
||||
|
||||
{
|
||||
@@ -67,6 +76,11 @@ class E2eKeysHandler(object):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Args:
|
||||
from_user_id (str): the user making the query. This is used when
|
||||
adding cross-signing signatures to limit what signatures users
|
||||
can see.
|
||||
"""
|
||||
|
||||
device_keys_query = query_body.get("device_keys", {})
|
||||
@@ -125,6 +139,11 @@ class E2eKeysHandler(object):
|
||||
r = remote_queries_not_in_cache.setdefault(domain, {})
|
||||
r[user_id] = remote_queries[user_id]
|
||||
|
||||
# Get cached cross-signing keys
|
||||
cross_signing_keys = yield self.get_cross_signing_keys_from_cache(
|
||||
device_keys_query, from_user_id
|
||||
)
|
||||
|
||||
# Now fetch any devices that we don't have in our cache
|
||||
@trace
|
||||
@defer.inlineCallbacks
|
||||
@@ -188,6 +207,14 @@ class E2eKeysHandler(object):
|
||||
if user_id in destination_query:
|
||||
results[user_id] = keys
|
||||
|
||||
for user_id, key in remote_result["master_keys"].items():
|
||||
if user_id in destination_query:
|
||||
cross_signing_keys["master_keys"][user_id] = key
|
||||
|
||||
for user_id, key in remote_result["self_signing_keys"].items():
|
||||
if user_id in destination_query:
|
||||
cross_signing_keys["self_signing_keys"][user_id] = key
|
||||
|
||||
except Exception as e:
|
||||
failure = _exception_to_failure(e)
|
||||
failures[destination] = failure
|
||||
@@ -204,7 +231,61 @@ class E2eKeysHandler(object):
|
||||
).addErrback(unwrapFirstError)
|
||||
)
|
||||
|
||||
return {"device_keys": results, "failures": failures}
|
||||
ret = {"device_keys": results, "failures": failures}
|
||||
|
||||
ret.update(cross_signing_keys)
|
||||
|
||||
return ret
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_cross_signing_keys_from_cache(self, query, from_user_id):
|
||||
"""Get cross-signing keys for users from the database
|
||||
|
||||
Args:
|
||||
query (Iterable[string]): an iterable of user IDs. A dict whose keys
|
||||
are user IDs satisfies this, so the query format used for
|
||||
query_devices can be used here.
|
||||
from_user_id (str): the user making the query. This is used when
|
||||
adding cross-signing signatures to limit what signatures users
|
||||
can see.
|
||||
|
||||
Returns:
|
||||
defer.Deferred[dict[str, dict[str, dict]]]: map from
|
||||
(master|self_signing|user_signing) -> user_id -> key
|
||||
"""
|
||||
master_keys = {}
|
||||
self_signing_keys = {}
|
||||
user_signing_keys = {}
|
||||
|
||||
for user_id in query:
|
||||
# XXX: consider changing the store functions to allow querying
|
||||
# multiple users simultaneously.
|
||||
key = yield self.store.get_e2e_cross_signing_key(
|
||||
user_id, "master", from_user_id
|
||||
)
|
||||
if key:
|
||||
master_keys[user_id] = key
|
||||
|
||||
key = yield self.store.get_e2e_cross_signing_key(
|
||||
user_id, "self_signing", from_user_id
|
||||
)
|
||||
if key:
|
||||
self_signing_keys[user_id] = key
|
||||
|
||||
# users can see other users' master and self-signing keys, but can
|
||||
# only see their own user-signing keys
|
||||
if from_user_id == user_id:
|
||||
key = yield self.store.get_e2e_cross_signing_key(
|
||||
user_id, "user_signing", from_user_id
|
||||
)
|
||||
if key:
|
||||
user_signing_keys[user_id] = key
|
||||
|
||||
return {
|
||||
"master_keys": master_keys,
|
||||
"self_signing_keys": self_signing_keys,
|
||||
"user_signing_keys": user_signing_keys,
|
||||
}
|
||||
|
||||
@trace
|
||||
@defer.inlineCallbacks
|
||||
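For orientation, `get_cross_signing_keys_from_cache` above returns one mapping per key type. The shape below is an illustrative sketch only; user IDs and key material are invented and abbreviated:

    {
        "master_keys": {
            "@alice:example.org": {
                "user_id": "@alice:example.org",
                "usage": ["master"],
                "keys": {"ed25519:<master key>": "<master key>"},
            }
        },
        "self_signing_keys": {
            "@alice:example.org": {
                "user_id": "@alice:example.org",
                "usage": ["self_signing"],
                "keys": {"ed25519:<self-signing key>": "<self-signing key>"},
                "signatures": {"@alice:example.org": {"ed25519:<master key>": "<sig>"}},
            }
        },
        "user_signing_keys": {},
    }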
@@ -441,8 +522,493 @@ class E2eKeysHandler(object):
|
||||
log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
|
||||
yield self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def upload_signing_keys_for_user(self, user_id, keys):
|
||||
"""Upload signing keys for cross-signing
|
||||
|
||||
Args:
|
||||
user_id (string): the user uploading the keys
|
||||
keys (dict[string, dict]): the signing keys
|
||||
"""
|
||||
|
||||
# if a master key is uploaded, then check it. Otherwise, load the
|
||||
# stored master key, to check signatures on other keys
|
||||
if "master_key" in keys:
|
||||
master_key = keys["master_key"]
|
||||
|
||||
_check_cross_signing_key(master_key, user_id, "master")
|
||||
else:
|
||||
master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master")
|
||||
|
||||
# if there is no master key, then we can't do anything, because all the
|
||||
# other cross-signing keys need to be signed by the master key
|
||||
if not master_key:
|
||||
raise SynapseError(400, "No master key available", Codes.MISSING_PARAM)
|
||||
|
||||
try:
|
||||
master_key_id, master_verify_key = get_verify_key_from_cross_signing_key(
|
||||
master_key
|
||||
)
|
||||
except ValueError:
|
||||
if "master_key" in keys:
|
||||
# the invalid key came from the request
|
||||
raise SynapseError(400, "Invalid master key", Codes.INVALID_PARAM)
|
||||
else:
|
||||
# the invalid key came from the database
|
||||
logger.error("Invalid master key found for user %s", user_id)
|
||||
raise SynapseError(500, "Invalid master key")
|
||||
|
||||
# for the other cross-signing keys, make sure that they have valid
|
||||
# signatures from the master key
|
||||
if "self_signing_key" in keys:
|
||||
self_signing_key = keys["self_signing_key"]
|
||||
|
||||
_check_cross_signing_key(
|
||||
self_signing_key, user_id, "self_signing", master_verify_key
|
||||
)
|
||||
|
||||
if "user_signing_key" in keys:
|
||||
user_signing_key = keys["user_signing_key"]
|
||||
|
||||
_check_cross_signing_key(
|
||||
user_signing_key, user_id, "user_signing", master_verify_key
|
||||
)
|
||||
|
||||
# if everything checks out, then store the keys and send notifications
|
||||
deviceids = []
|
||||
if "master_key" in keys:
|
||||
yield self.store.set_e2e_cross_signing_key(user_id, "master", master_key)
|
||||
deviceids.append(master_verify_key.version)
|
||||
if "self_signing_key" in keys:
|
||||
yield self.store.set_e2e_cross_signing_key(
|
||||
user_id, "self_signing", self_signing_key
|
||||
)
|
||||
try:
|
||||
deviceids.append(
|
||||
get_verify_key_from_cross_signing_key(self_signing_key)[1].version
|
||||
)
|
||||
except ValueError:
|
||||
raise SynapseError(400, "Invalid self-signing key", Codes.INVALID_PARAM)
|
||||
if "user_signing_key" in keys:
|
||||
yield self.store.set_e2e_cross_signing_key(
|
||||
user_id, "user_signing", user_signing_key
|
||||
)
|
||||
# the signature stream matches the semantics that we want for
|
||||
# user-signing key updates: only the user themselves is notified of
|
||||
# their own user-signing key updates
|
||||
yield self.device_handler.notify_user_signature_update(user_id, [user_id])
|
||||
|
||||
# master key and self-signing key updates match the semantics of device
|
||||
# list updates: all users who share an encrypted room are notified
|
||||
if len(deviceids):
|
||||
yield self.device_handler.notify_device_update(user_id, deviceids)
|
||||
|
||||
return {}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def upload_signatures_for_device_keys(self, user_id, signatures):
|
||||
"""Upload device signatures for cross-signing
|
||||
|
||||
Args:
|
||||
user_id (string): the user uploading the signatures
|
||||
signatures (dict[string, dict[string, dict]]): map of users to
|
||||
devices to signed keys. This is the submission from the user; an
|
||||
exception will be raised if it is malformed.
|
||||
Returns:
|
||||
dict: response to be sent back to the client. The response will have
|
||||
a "failures" key, which will be a dict mapping users to devices
|
||||
to errors for the signatures that failed.
|
||||
Raises:
|
||||
SynapseError: if the signatures dict is not valid.
|
||||
"""
|
||||
failures = {}
|
||||
|
||||
# signatures to be stored. Each item will be a SignatureListItem
|
||||
signature_list = []
|
||||
|
||||
# split between checking signatures for own user and signatures for
|
||||
# other users, since we verify them with different keys
|
||||
self_signatures = signatures.get(user_id, {})
|
||||
other_signatures = {k: v for k, v in signatures.items() if k != user_id}
|
||||
|
||||
self_signature_list, self_failures = yield self._process_self_signatures(
|
||||
user_id, self_signatures
|
||||
)
|
||||
signature_list.extend(self_signature_list)
|
||||
failures.update(self_failures)
|
||||
|
||||
other_signature_list, other_failures = yield self._process_other_signatures(
|
||||
user_id, other_signatures
|
||||
)
|
||||
signature_list.extend(other_signature_list)
|
||||
failures.update(other_failures)
|
||||
|
||||
# store the signature, and send the appropriate notifications for sync
|
||||
logger.debug("upload signature failures: %r", failures)
|
||||
yield self.store.store_e2e_cross_signing_signatures(user_id, signature_list)
|
||||
|
||||
self_device_ids = [item.target_device_id for item in self_signature_list]
|
||||
if self_device_ids:
|
||||
yield self.device_handler.notify_device_update(user_id, self_device_ids)
|
||||
signed_users = [item.target_user_id for item in other_signature_list]
|
||||
if signed_users:
|
||||
yield self.device_handler.notify_user_signature_update(
|
||||
user_id, signed_users
|
||||
)
|
||||
|
||||
return {"failures": failures}
|
||||
|
||||
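For orientation, the `signatures` argument handled above is the body of a signature-upload request: user ID -> device or key ID -> signed key object. An illustrative sketch only; every identifier and key below is invented:

    signatures = {
        "@alice:example.org": {
            # one of Alice's devices, signed by her self-signing key
            "HIJKLMN": {
                "user_id": "@alice:example.org",
                "device_id": "HIJKLMN",
                "keys": {"ed25519:HIJKLMN": "<device key>"},
                "signatures": {
                    "@alice:example.org": {"ed25519:<self-signing key>": "<sig>"}
                },
            }
        },
        "@bob:example.org": {
            # Bob's master key, signed by Alice's user-signing key
            "<bob master key>": {
                "user_id": "@bob:example.org",
                "usage": ["master"],
                "keys": {"ed25519:<bob master key>": "<bob master key>"},
                "signatures": {
                    "@alice:example.org": {"ed25519:<user-signing key>": "<sig>"}
                },
            }
        },
    }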
@defer.inlineCallbacks
|
||||
def _process_self_signatures(self, user_id, signatures):
|
||||
"""Process uploaded signatures of the user's own keys.
|
||||
|
||||
Signatures of the user's own keys from this API come in two forms:
|
||||
- signatures of the user's devices by the user's self-signing key,
|
||||
- signatures of the user's master key by the user's devices.
|
||||
|
||||
Args:
|
||||
user_id (string): the user uploading the keys
|
||||
signatures (dict[string, dict]): map of devices to signed keys
|
||||
|
||||
Returns:
|
||||
(list[SignatureListItem], dict[string, dict[string, dict]]):
|
||||
a list of signatures to store, and a map of users to devices to failure
|
||||
reasons
|
||||
|
||||
Raises:
|
||||
SynapseError: if the input is malformed
|
||||
"""
|
||||
signature_list = []
|
||||
failures = {}
|
||||
if not signatures:
|
||||
return signature_list, failures
|
||||
|
||||
if not isinstance(signatures, dict):
|
||||
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
|
||||
|
||||
try:
|
||||
# get our self-signing key to verify the signatures
|
||||
_, self_signing_key_id, self_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
|
||||
user_id, "self_signing"
|
||||
)
|
||||
|
||||
# get our master key, since we may have received a signature of it.
|
||||
# We need to fetch it here so that we know what its key ID is, so
|
||||
# that we can check if a signature that was sent is a signature of
|
||||
# the master key or of a device
|
||||
master_key, _, master_verify_key = yield self._get_e2e_cross_signing_verify_key(
|
||||
user_id, "master"
|
||||
)
|
||||
|
||||
# fetch our stored devices. This is used to 1. verify
|
||||
# signatures on the master key, and 2. to compare with what
|
||||
# was sent if the device was signed
|
||||
devices = yield self.store.get_e2e_device_keys([(user_id, None)])
|
||||
|
||||
if user_id not in devices:
|
||||
raise NotFoundError("No device keys found")
|
||||
|
||||
devices = devices[user_id]
|
||||
except SynapseError as e:
|
||||
failure = _exception_to_failure(e)
|
||||
failures[user_id] = {device: failure for device in signatures.keys()}
|
||||
return signature_list, failures
|
||||
|
||||
for device_id, device in signatures.items():
|
||||
# make sure submitted data is in the right form
|
||||
if not isinstance(device, dict):
|
||||
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
|
||||
|
||||
try:
|
||||
if "signatures" not in device or user_id not in device["signatures"]:
|
||||
# no signature was sent
|
||||
raise SynapseError(
|
||||
400, "Invalid signature", Codes.INVALID_SIGNATURE
|
||||
)
|
||||
|
||||
if device_id == master_verify_key.version:
|
||||
# The signature is of the master key. This needs to be
|
||||
# handled differently from signatures of normal devices.
|
||||
master_key_signature_list = self._check_master_key_signature(
|
||||
user_id, device_id, device, master_key, devices
|
||||
)
|
||||
signature_list.extend(master_key_signature_list)
|
||||
continue
|
||||
|
||||
# at this point, we have a device that should be signed
|
||||
# by the self-signing key
|
||||
if self_signing_key_id not in device["signatures"][user_id]:
|
||||
# no signature was sent
|
||||
raise SynapseError(
|
||||
400, "Invalid signature", Codes.INVALID_SIGNATURE
|
||||
)
|
||||
|
||||
try:
|
||||
stored_device = devices[device_id]
|
||||
except KeyError:
|
||||
raise NotFoundError("Unknown device")
|
||||
if self_signing_key_id in stored_device.get("signatures", {}).get(
|
||||
user_id, {}
|
||||
):
|
||||
# we already have a signature on this device, so we
|
||||
# can skip it, since it should be exactly the same
|
||||
continue
|
||||
|
||||
_check_device_signature(
|
||||
user_id, self_signing_verify_key, device, stored_device
|
||||
)
|
||||
|
||||
signature = device["signatures"][user_id][self_signing_key_id]
|
||||
signature_list.append(
|
||||
SignatureListItem(
|
||||
self_signing_key_id, user_id, device_id, signature
|
||||
)
|
||||
)
|
||||
except SynapseError as e:
|
||||
failures.setdefault(user_id, {})[device_id] = _exception_to_failure(e)
|
||||
|
||||
return signature_list, failures
|
||||
|
||||
def _check_master_key_signature(
|
||||
self, user_id, master_key_id, signed_master_key, stored_master_key, devices
|
||||
):
|
||||
"""Check signatures of a user's master key made by their devices.
|
||||
|
||||
Args:
|
||||
user_id (string): the user whose master key is being checked
|
||||
master_key_id (string): the ID of the user's master key
|
||||
signed_master_key (dict): the user's signed master key that was uploaded
|
||||
stored_master_key (dict): our previously-stored copy of the user's master key
|
||||
devices (iterable(dict)): the user's devices
|
||||
|
||||
Returns:
|
||||
list[SignatureListItem]: a list of signatures to store
|
||||
|
||||
Raises:
|
||||
SynapseError: if a signature is invalid
|
||||
"""
|
||||
# for each device that signed the master key, check the signature.
|
||||
master_key_signature_list = []
|
||||
sigs = signed_master_key["signatures"]
|
||||
for signing_key_id, signature in sigs[user_id].items():
|
||||
_, signing_device_id = signing_key_id.split(":", 1)
|
||||
if (
|
||||
signing_device_id not in devices
|
||||
or signing_key_id not in devices[signing_device_id]["keys"]
|
||||
):
|
||||
# signed by an unknown device, or the
|
||||
# device does not have the key
|
||||
raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)
|
||||
|
||||
# get the key and check the signature
|
||||
pubkey = devices[signing_device_id]["keys"][signing_key_id]
|
||||
verify_key = decode_verify_key_bytes(signing_key_id, decode_base64(pubkey))
|
||||
_check_device_signature(
|
||||
user_id, verify_key, signed_master_key, stored_master_key
|
||||
)
|
||||
|
||||
master_key_signature_list.append(
|
||||
SignatureListItem(signing_key_id, user_id, master_key_id, signature)
|
||||
)
|
||||
|
||||
return master_key_signature_list
|
||||
|
||||
@defer.inlineCallbacks
|
||||
    def _process_other_signatures(self, user_id, signatures):
        """Process uploaded signatures of other users' keys. These will be the
        target user's master keys, signed by the uploading user's user-signing
        key.

        Args:
            user_id (string): the user uploading the keys
            signatures (dict[string, dict]): map of users to devices to signed keys

        Returns:
            (list[SignatureListItem], dict[string, dict[string, dict]]):
                a list of signatures to store, and a map of users to devices to failure
                reasons

        Raises:
            SynapseError: if the input is malformed
        """
        signature_list = []
        failures = {}
        if not signatures:
            return signature_list, failures

        try:
            # get our user-signing key to verify the signatures
            user_signing_key, user_signing_key_id, user_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
                user_id, "user_signing"
            )
        except SynapseError as e:
            failure = _exception_to_failure(e)
            for user, devicemap in signatures.items():
                failures[user] = {device_id: failure for device_id in devicemap.keys()}
            return signature_list, failures

        for target_user, devicemap in signatures.items():
            # make sure submitted data is in the right form
            if not isinstance(devicemap, dict):
                raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
            for device in devicemap.values():
                if not isinstance(device, dict):
                    raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)

            device_id = None
            try:
                # get the target user's master key, to make sure it matches
                # what was sent
                master_key, master_key_id, _ = yield self._get_e2e_cross_signing_verify_key(
                    target_user, "master", user_id
                )

                # make sure that the target user's master key is the one that
                # was signed (and no others)
                device_id = master_key_id.split(":", 1)[1]
                if device_id not in devicemap:
                    logger.debug(
                        "upload signature: could not find signature for device %s",
                        device_id,
                    )
                    # set device to None so that the failure gets
                    # marked on all the signatures
                    device_id = None
                    raise NotFoundError("Unknown device")
                key = devicemap[device_id]
                other_devices = [k for k in devicemap.keys() if k != device_id]
                if other_devices:
                    # other devices were signed -- mark those as failures
                    logger.debug("upload signature: too many devices specified")
                    failure = _exception_to_failure(NotFoundError("Unknown device"))
                    failures[target_user] = {
                        device: failure for device in other_devices
                    }

                if user_signing_key_id in master_key.get("signatures", {}).get(
                    user_id, {}
                ):
                    # we already have the signature, so we can skip it
                    continue

                _check_device_signature(
                    user_id, user_signing_verify_key, key, master_key
                )

                signature = key["signatures"][user_id][user_signing_key_id]
                signature_list.append(
                    SignatureListItem(
                        user_signing_key_id, target_user, device_id, signature
                    )
                )
            except SynapseError as e:
                failure = _exception_to_failure(e)
                if device_id is None:
                    failures[target_user] = {
                        device_id: failure for device_id in devicemap.keys()
                    }
                else:
                    failures.setdefault(target_user, {})[device_id] = failure

        return signature_list, failures

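    # An illustrative sketch of the ``signatures`` argument handled above: the
    # target user's master key, keyed by the device ID embedded in the master
    # key ID, carrying a signature made with the uploader's user-signing key.
    # All identifiers and key material below are placeholders, not values from
    # this change:
    #
    #     {
    #         "@bob:example.com": {
    #             "<master_key_device_id>": {
    #                 "user_id": "@bob:example.com",
    #                 "usage": ["master"],
    #                 "keys": {"ed25519:<master_key_device_id>": "<master_key_base64>"},
    #                 "signatures": {
    #                     "@alice:example.com": {
    #                         "ed25519:<user_signing_key_base64>": "<signature_base64>"
    #                     }
    #                 }
    #             }
    #         }
    #     }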
    @defer.inlineCallbacks
    def _get_e2e_cross_signing_verify_key(self, user_id, key_type, from_user_id=None):
        """Fetch the cross-signing public key from storage and interpret it.

        Args:
            user_id (str): the user whose key should be fetched
            key_type (str): the type of key to fetch
            from_user_id (str): the user that we are fetching the keys for.
                This affects what signatures are fetched.

        Returns:
            dict, str, VerifyKey: the raw key data, the key ID, and the
                signedjson verify key

        Raises:
            NotFoundError: if the key is not found
        """
        key = yield self.store.get_e2e_cross_signing_key(
            user_id, key_type, from_user_id
        )
        if key is None:
            logger.debug("no %s key found for %s", key_type, user_id)
            raise NotFoundError("No %s key found for %s" % (key_type, user_id))
        key_id, verify_key = get_verify_key_from_cross_signing_key(key)
        return key, key_id, verify_key


def _check_cross_signing_key(key, user_id, key_type, signing_key=None):
    """Check a cross-signing key uploaded by a user. Performs some basic sanity
    checking, and ensures that it is signed, if a signature is required.

    Args:
        key (dict): the key data to verify
        user_id (str): the user whose key is being checked
        key_type (str): the type of key that the key should be
        signing_key (VerifyKey): (optional) the signing key that the key should
            be signed with. If omitted, signatures will not be checked.
    """
    if (
        key.get("user_id") != user_id
        or key_type not in key.get("usage", [])
        or len(key.get("keys", {})) != 1
    ):
        raise SynapseError(400, ("Invalid %s key" % (key_type,)), Codes.INVALID_PARAM)

    if signing_key:
        try:
            verify_signed_json(key, user_id, signing_key)
        except SignatureVerifyException:
            raise SynapseError(
                400, ("Invalid signature on %s key" % key_type), Codes.INVALID_SIGNATURE
            )


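# A minimal sketch of a key dict that would pass the sanity checks in
# _check_cross_signing_key (all values are placeholders): exactly one entry in
# "keys", a "user_id" matching the uploader, and the expected usage; the
# signature is only verified when a signing_key is supplied.
#
#     {
#         "user_id": "@alice:example.com",
#         "usage": ["self_signing"],
#         "keys": {"ed25519:<self_signing_key_base64>": "<self_signing_key_base64>"},
#         "signatures": {
#             "@alice:example.com": {"ed25519:<master_key_base64>": "<signature_base64>"}
#         },
#     }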
def _check_device_signature(user_id, verify_key, signed_device, stored_device):
    """Check that a signature on a device or cross-signing key is correct and
    matches the copy of the device/key that we have stored. Throws an
    exception if an error is detected.

    Args:
        user_id (str): the user ID whose signature is being checked
        verify_key (VerifyKey): the key to verify the device with
        signed_device (dict): the uploaded signed device data
        stored_device (dict): our previously stored copy of the device

    Raises:
        SynapseError: if the signature was invalid or the sent device is not the
            same as the stored device

    """

    # make sure that the device submitted matches what we have stored
    stripped_signed_device = {
        k: v for k, v in signed_device.items() if k not in ["signatures", "unsigned"]
    }
    stripped_stored_device = {
        k: v for k, v in stored_device.items() if k not in ["signatures", "unsigned"]
    }
    if stripped_signed_device != stripped_stored_device:
        logger.debug(
            "upload signatures: key does not match %s vs %s",
            signed_device,
            stored_device,
        )
        raise SynapseError(400, "Key does not match")

    try:
        verify_signed_json(signed_device, user_id, verify_key)
    except SignatureVerifyException:
        logger.debug("invalid signature on key")
        raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)


def _exception_to_failure(e):
    if isinstance(e, SynapseError):
        return {"status": e.code, "errcode": e.errcode, "message": str(e)}

    if isinstance(e, CodeMessageException):
        return {"status": e.code, "message": str(e)}

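# As an illustration of the failure format built above: a
# NotFoundError("Unknown device"), being a SynapseError subclass, would be
# reported back to the client roughly as
#
#     {"status": 404, "errcode": "M_NOT_FOUND", "message": "<str(e)>"}
#
# with the message text derived from the exception instance.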
@@ -470,3 +1036,14 @@ def _one_time_keys_match(old_key_json, new_key):
    new_key_copy.pop("signatures", None)

    return old_key == new_key_copy


@attr.s
class SignatureListItem:
    """An item in the signature list as used by upload_signatures_for_device_keys.
    """

    signing_key_id = attr.ib()
    target_user_id = attr.ib()
    target_device_id = attr.ib()
    signature = attr.ib()

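# Example construction mirroring the append in _process_other_signatures above
# (placeholder values):
#
#     SignatureListItem(
#         "ed25519:<user_signing_key_base64>",  # signing_key_id
#         "@bob:example.com",                   # target_user_id
#         "<master_key_device_id>",             # target_device_id
#         "<signature_base64>",                 # signature
#     )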
@@ -30,6 +30,7 @@ from unpaddedbase64 import decode_base64
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse import event_auth
|
||||
from synapse.api.constants import EventTypes, Membership, RejectedReason
|
||||
from synapse.api.errors import (
|
||||
AuthError,
|
||||
@@ -1763,7 +1764,7 @@ class FederationHandler(BaseHandler):
|
||||
auth_for_e[(EventTypes.Create, "")] = create_event
|
||||
|
||||
try:
|
||||
self.auth.check(room_version, e, auth_events=auth_for_e)
|
||||
event_auth.check(room_version, e, auth_events=auth_for_e)
|
||||
except SynapseError as err:
|
||||
# we may get SynapseErrors here as well as AuthErrors. For
|
||||
# instance, there are a couple of (ancient) events in some
|
||||
@@ -1919,7 +1920,7 @@ class FederationHandler(BaseHandler):
|
||||
}
|
||||
|
||||
try:
|
||||
self.auth.check(room_version, event, auth_events=current_auth_events)
|
||||
event_auth.check(room_version, event, auth_events=current_auth_events)
|
||||
except AuthError as e:
|
||||
logger.warn("Soft-failing %r because %s", event, e)
|
||||
event.internal_metadata.soft_failed = True
|
||||
@@ -2018,7 +2019,7 @@ class FederationHandler(BaseHandler):
|
||||
)
|
||||
|
||||
try:
|
||||
self.auth.check(room_version, event, auth_events=auth_events)
|
||||
event_auth.check(room_version, event, auth_events=auth_events)
|
||||
except AuthError as e:
|
||||
logger.warn("Failed auth resolution for %r because %s", event, e)
|
||||
raise e
|
||||
@@ -2181,103 +2182,10 @@ class FederationHandler(BaseHandler):
|
||||
|
||||
auth_events.update(new_state)
|
||||
|
||||
different_auth = event_auth_events.difference(
|
||||
e.event_id for e in auth_events.values()
|
||||
)
|
||||
|
||||
yield self._update_context_for_auth_events(
|
||||
event, context, auth_events, event_key
|
||||
)
|
||||
|
||||
if not different_auth:
|
||||
# we're done
|
||||
return
|
||||
|
||||
logger.info(
|
||||
"auth_events still refers to events which are not in the calculated auth "
|
||||
"chain after state resolution: %s",
|
||||
different_auth,
|
||||
)
|
||||
|
||||
# Only do auth resolution if we have something new to say.
|
||||
# We can't prove an auth failure.
|
||||
do_resolution = False
|
||||
|
||||
for e_id in different_auth:
|
||||
if e_id in have_events:
|
||||
if have_events[e_id] == RejectedReason.NOT_ANCESTOR:
|
||||
do_resolution = True
|
||||
break
|
||||
|
||||
if not do_resolution:
|
||||
logger.info(
|
||||
"Skipping auth resolution due to lack of provable rejection reasons"
|
||||
)
|
||||
return
|
||||
|
||||
logger.info("Doing auth resolution")
|
||||
|
||||
prev_state_ids = yield context.get_prev_state_ids(self.store)
|
||||
|
||||
# 1. Get what we think is the auth chain.
|
||||
auth_ids = yield self.auth.compute_auth_events(event, prev_state_ids)
|
||||
local_auth_chain = yield self.store.get_auth_chain(auth_ids, include_given=True)
|
||||
|
||||
try:
|
||||
# 2. Get remote difference.
|
||||
try:
|
||||
result = yield self.federation_client.query_auth(
|
||||
origin, event.room_id, event.event_id, local_auth_chain
|
||||
)
|
||||
except RequestSendFailed as e:
|
||||
# The other side isn't around or doesn't implement the
|
||||
# endpoint, so lets just bail out.
|
||||
logger.info("Failed to query auth from remote: %s", e)
|
||||
return
|
||||
|
||||
seen_remotes = yield self.store.have_seen_events(
|
||||
[e.event_id for e in result["auth_chain"]]
|
||||
)
|
||||
|
||||
# 3. Process any remote auth chain events we haven't seen.
|
||||
for ev in result["auth_chain"]:
|
||||
if ev.event_id in seen_remotes:
|
||||
continue
|
||||
|
||||
if ev.event_id == event.event_id:
|
||||
continue
|
||||
|
||||
try:
|
||||
auth_ids = ev.auth_event_ids()
|
||||
auth = {
|
||||
(e.type, e.state_key): e
|
||||
for e in result["auth_chain"]
|
||||
if e.event_id in auth_ids or event.type == EventTypes.Create
|
||||
}
|
||||
ev.internal_metadata.outlier = True
|
||||
|
||||
logger.debug(
|
||||
"do_auth %s different_auth: %s", event.event_id, e.event_id
|
||||
)
|
||||
|
||||
yield self._handle_new_event(origin, ev, auth_events=auth)
|
||||
|
||||
if ev.event_id in event_auth_events:
|
||||
auth_events[(ev.type, ev.state_key)] = ev
|
||||
except AuthError:
|
||||
pass
|
||||
|
||||
except Exception:
|
||||
# FIXME:
|
||||
logger.exception("Failed to query auth chain")
|
||||
|
||||
# 4. Look at rejects and their proofs.
|
||||
# TODO.
|
||||
|
||||
yield self._update_context_for_auth_events(
|
||||
event, context, auth_events, event_key
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _update_context_for_auth_events(self, event, context, auth_events, event_key):
|
||||
"""Update the state_ids in an event context after auth event resolution,
|
||||
@@ -2444,15 +2352,6 @@ class FederationHandler(BaseHandler):
|
||||
|
||||
reason_map[e.event_id] = reason
|
||||
|
||||
if reason == RejectedReason.AUTH_ERROR:
|
||||
pass
|
||||
elif reason == RejectedReason.REPLACED:
|
||||
# TODO: Get proof
|
||||
pass
|
||||
elif reason == RejectedReason.NOT_ANCESTOR:
|
||||
# TODO: Get proof.
|
||||
pass
|
||||
|
||||
logger.debug("construct_auth_difference returning")
|
||||
|
||||
return {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
# Copyright 2018, 2019 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -1124,6 +1124,11 @@ class SyncHandler(object):
|
||||
# weren't in the previous sync *or* they left and rejoined.
|
||||
users_that_have_changed.update(newly_joined_or_invited_users)
|
||||
|
||||
user_signatures_changed = yield self.store.get_users_whose_signatures_changed(
|
||||
user_id, since_token.device_list_key
|
||||
)
|
||||
users_that_have_changed.update(user_signatures_changed)
|
||||
|
||||
# Now find users that we no longer track
|
||||
for room_id in newly_left_rooms:
|
||||
left_users = yield self.state.get_current_users_in_room(room_id)
|
||||
|
||||
@@ -16,8 +16,8 @@
|
||||
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
|
||||
from synapse.storage.account_data import AccountDataWorkerStore
|
||||
from synapse.storage.tags import TagsWorkerStore
|
||||
from synapse.storage.data_stores.main.account_data import AccountDataWorkerStore
|
||||
from synapse.storage.data_stores.main.tags import TagsWorkerStore
|
||||
|
||||
|
||||
class SlavedAccountDataStore(TagsWorkerStore, AccountDataWorkerStore, BaseSlavedStore):
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.appservice import (
|
||||
from synapse.storage.data_stores.main.appservice import (
|
||||
ApplicationServiceTransactionWorkerStore,
|
||||
ApplicationServiceWorkerStore,
|
||||
)
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.client_ips import LAST_SEEN_GRANULARITY
|
||||
from synapse.storage.data_stores.main.client_ips import LAST_SEEN_GRANULARITY
|
||||
from synapse.util.caches import CACHE_SIZE_FACTOR
|
||||
from synapse.util.caches.descriptors import Cache
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
|
||||
from synapse.storage.deviceinbox import DeviceInboxWorkerStore
|
||||
from synapse.storage.data_stores.main.deviceinbox import DeviceInboxWorkerStore
|
||||
from synapse.util.caches.expiringcache import ExpiringCache
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
|
||||
|
||||
@@ -15,8 +15,8 @@
|
||||
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
|
||||
from synapse.storage.devices import DeviceWorkerStore
|
||||
from synapse.storage.end_to_end_keys import EndToEndKeyWorkerStore
|
||||
from synapse.storage.data_stores.main.devices import DeviceWorkerStore
|
||||
from synapse.storage.data_stores.main.end_to_end_keys import EndToEndKeyWorkerStore
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
|
||||
|
||||
@@ -33,6 +33,9 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedSto
|
||||
self._device_list_stream_cache = StreamChangeCache(
|
||||
"DeviceListStreamChangeCache", device_list_max
|
||||
)
|
||||
self._user_signature_stream_cache = StreamChangeCache(
|
||||
"UserSignatureStreamChangeCache", device_list_max
|
||||
)
|
||||
self._device_list_federation_stream_cache = StreamChangeCache(
|
||||
"DeviceListFederationStreamChangeCache", device_list_max
|
||||
)
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.directory import DirectoryWorkerStore
|
||||
from synapse.storage.data_stores.main.directory import DirectoryWorkerStore
|
||||
|
||||
from ._base import BaseSlavedStore
|
||||
|
||||
|
||||
@@ -20,15 +20,17 @@ from synapse.replication.tcp.streams.events import (
|
||||
EventsStreamCurrentStateRow,
|
||||
EventsStreamEventRow,
|
||||
)
|
||||
from synapse.storage.event_federation import EventFederationWorkerStore
|
||||
from synapse.storage.event_push_actions import EventPushActionsWorkerStore
|
||||
from synapse.storage.events_worker import EventsWorkerStore
|
||||
from synapse.storage.relations import RelationsWorkerStore
|
||||
from synapse.storage.roommember import RoomMemberWorkerStore
|
||||
from synapse.storage.signatures import SignatureWorkerStore
|
||||
from synapse.storage.state import StateGroupWorkerStore
|
||||
from synapse.storage.stream import StreamWorkerStore
|
||||
from synapse.storage.user_erasure_store import UserErasureWorkerStore
|
||||
from synapse.storage.data_stores.main.event_federation import EventFederationWorkerStore
|
||||
from synapse.storage.data_stores.main.event_push_actions import (
|
||||
EventPushActionsWorkerStore,
|
||||
)
|
||||
from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
|
||||
from synapse.storage.data_stores.main.relations import RelationsWorkerStore
|
||||
from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore
|
||||
from synapse.storage.data_stores.main.signatures import SignatureWorkerStore
|
||||
from synapse.storage.data_stores.main.state import StateGroupWorkerStore
|
||||
from synapse.storage.data_stores.main.stream import StreamWorkerStore
|
||||
from synapse.storage.data_stores.main.user_erasure_store import UserErasureWorkerStore
|
||||
|
||||
from ._base import BaseSlavedStore
|
||||
from ._slaved_id_tracker import SlavedIdTracker
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.filtering import FilteringStore
|
||||
from synapse.storage.data_stores.main.filtering import FilteringStore
|
||||
|
||||
from ._base import BaseSlavedStore
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage import KeyStore
|
||||
from synapse.storage.data_stores.main.keys import KeyStore
|
||||
|
||||
# KeyStore isn't really safe to use from a worker, but for now we do so and hope that
|
||||
# the races it creates aren't too bad.
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage import DataStore
|
||||
from synapse.storage.presence import PresenceStore
|
||||
from synapse.storage.data_stores.main.presence import PresenceStore
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
|
||||
from ._base import BaseSlavedStore, __func__
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.storage.profile import ProfileWorkerStore
|
||||
from synapse.storage.data_stores.main.profile import ProfileWorkerStore
|
||||
|
||||
|
||||
class SlavedProfileStore(ProfileWorkerStore, BaseSlavedStore):
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.push_rule import PushRulesWorkerStore
|
||||
from synapse.storage.data_stores.main.push_rule import PushRulesWorkerStore
|
||||
|
||||
from ._slaved_id_tracker import SlavedIdTracker
|
||||
from .events import SlavedEventStore
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.pusher import PusherWorkerStore
|
||||
from synapse.storage.data_stores.main.pusher import PusherWorkerStore
|
||||
|
||||
from ._base import BaseSlavedStore
|
||||
from ._slaved_id_tracker import SlavedIdTracker
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.receipts import ReceiptsWorkerStore
|
||||
from synapse.storage.data_stores.main.receipts import ReceiptsWorkerStore
|
||||
|
||||
from ._base import BaseSlavedStore
|
||||
from ._slaved_id_tracker import SlavedIdTracker
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.registration import RegistrationWorkerStore
|
||||
from synapse.storage.data_stores.main.registration import RegistrationWorkerStore
|
||||
|
||||
from ._base import BaseSlavedStore
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.room import RoomWorkerStore
|
||||
from synapse.storage.data_stores.main.room import RoomWorkerStore
|
||||
|
||||
from ._base import BaseSlavedStore
|
||||
from ._slaved_id_tracker import SlavedIdTracker
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.storage.transactions import TransactionStore
|
||||
from synapse.storage.data_stores.main.transactions import TransactionStore
|
||||
|
||||
from ._base import BaseSlavedStore
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
# Copyright 2019 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -27,7 +28,7 @@ from synapse.http.servlet import (
|
||||
from synapse.logging.opentracing import log_kv, set_tag, trace
|
||||
from synapse.types import StreamToken
|
||||
|
||||
from ._base import client_patterns
|
||||
from ._base import client_patterns, interactive_auth_handler
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -155,10 +156,11 @@ class KeyQueryServlet(RestServlet):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request):
|
||||
yield self.auth.get_user_by_req(request, allow_guest=True)
|
||||
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
|
||||
user_id = requester.user.to_string()
|
||||
timeout = parse_integer(request, "timeout", 10 * 1000)
|
||||
body = parse_json_object_from_request(request)
|
||||
result = yield self.e2e_keys_handler.query_devices(body, timeout)
|
||||
result = yield self.e2e_keys_handler.query_devices(body, timeout, user_id)
|
||||
return 200, result
|
||||
|
||||
|
||||
@@ -238,8 +240,97 @@ class OneTimeKeyServlet(RestServlet):
|
||||
return 200, result
|
||||
|
||||
|
||||
class SigningKeyUploadServlet(RestServlet):
    """
    POST /keys/device_signing/upload HTTP/1.1
    Content-Type: application/json

    {
    }
    """

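    # The empty JSON object in the docstring above is only a skeleton; an
    # actual upload is expected to carry the cross-signing keys themselves,
    # along the lines of (field names assumed from the cross-signing design,
    # values are placeholders):
    #
    #     {
    #         "master_key": {...},
    #         "self_signing_key": {...},
    #         "user_signing_key": {...}
    #     }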
PATTERNS = client_patterns("/keys/device_signing/upload$", releases=())
|
||||
|
||||
def __init__(self, hs):
|
||||
"""
|
||||
Args:
|
||||
hs (synapse.server.HomeServer): server
|
||||
"""
|
||||
super(SigningKeyUploadServlet, self).__init__()
|
||||
self.hs = hs
|
||||
self.auth = hs.get_auth()
|
||||
self.e2e_keys_handler = hs.get_e2e_keys_handler()
|
||||
self.auth_handler = hs.get_auth_handler()
|
||||
|
||||
@interactive_auth_handler
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request):
|
||||
requester = yield self.auth.get_user_by_req(request)
|
||||
user_id = requester.user.to_string()
|
||||
body = parse_json_object_from_request(request)
|
||||
|
||||
yield self.auth_handler.validate_user_via_ui_auth(
|
||||
requester, body, self.hs.get_ip_from_request(request)
|
||||
)
|
||||
|
||||
result = yield self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body)
|
||||
return 200, result
|
||||
|
||||
|
||||
class SignaturesUploadServlet(RestServlet):
|
||||
"""
|
||||
POST /keys/signatures/upload HTTP/1.1
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"@alice:example.com": {
|
||||
"<device_id>": {
|
||||
"user_id": "<user_id>",
|
||||
"device_id": "<device_id>",
|
||||
"algorithms": [
|
||||
"m.olm.curve25519-aes-sha256",
|
||||
"m.megolm.v1.aes-sha"
|
||||
],
|
||||
"keys": {
|
||||
"<algorithm>:<device_id>": "<key_base64>",
|
||||
},
"signatures": {
"<signing_user_id>": {
"<algorithm>:<signing_key_base64>": "<signature_base64>"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
PATTERNS = client_patterns("/keys/signatures/upload$")
|
||||
|
||||
def __init__(self, hs):
|
||||
"""
|
||||
Args:
|
||||
hs (synapse.server.HomeServer): server
|
||||
"""
|
||||
super(SignaturesUploadServlet, self).__init__()
|
||||
self.auth = hs.get_auth()
|
||||
self.e2e_keys_handler = hs.get_e2e_keys_handler()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request):
|
||||
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
|
||||
user_id = requester.user.to_string()
|
||||
body = parse_json_object_from_request(request)
|
||||
|
||||
result = yield self.e2e_keys_handler.upload_signatures_for_device_keys(
|
||||
user_id, body
|
||||
)
|
||||
return 200, result
|
||||
|
||||
|
||||
def register_servlets(hs, http_server):
|
||||
KeyUploadServlet(hs).register(http_server)
|
||||
KeyQueryServlet(hs).register(http_server)
|
||||
KeyChangesServlet(hs).register(http_server)
|
||||
OneTimeKeyServlet(hs).register(http_server)
|
||||
SigningKeyUploadServlet(hs).register(http_server)
|
||||
SignaturesUploadServlet(hs).register(http_server)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
# Copyright 2018,2019 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,509 +14,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import calendar
import logging
import time
"""
The storage layer is split up into multiple parts to allow Synapse to run
against different configurations of databases (e.g. single or multiple
databases). The `data_stores` are classes that talk directly to a single
database and have associated schemas, background updates, etc. On top of those
there are (or will be) classes that provide high level interfaces that combine
calls to multiple `data_stores`.

from twisted.internet import defer
There are also schemas that get applied to every database, regardless of the
data stores associated with them (e.g. the schema version tables), which are
stored in `synapse.storage.schema`.
"""

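As a concrete illustration of that split, and as the import rewrites elsewhere in this commit show, worker code now pulls the main data store's classes from the new package path, e.g.:

    # before
    from synapse.storage.devices import DeviceWorkerStore
    # after
    from synapse.storage.data_stores.main.devices import DeviceWorkerStore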
from synapse.api.constants import PresenceState
|
||||
from synapse.storage.devices import DeviceStore
|
||||
from synapse.storage.user_erasure_store import UserErasureStore
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
|
||||
from .account_data import AccountDataStore
|
||||
from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore
|
||||
from .client_ips import ClientIpStore
|
||||
from .deviceinbox import DeviceInboxStore
|
||||
from .directory import DirectoryStore
|
||||
from .e2e_room_keys import EndToEndRoomKeyStore
|
||||
from .end_to_end_keys import EndToEndKeyStore
|
||||
from .engines import PostgresEngine
|
||||
from .event_federation import EventFederationStore
|
||||
from .event_push_actions import EventPushActionsStore
|
||||
from .events import EventsStore
|
||||
from .events_bg_updates import EventsBackgroundUpdatesStore
|
||||
from .filtering import FilteringStore
|
||||
from .group_server import GroupServerStore
|
||||
from .keys import KeyStore
|
||||
from .media_repository import MediaRepositoryStore
|
||||
from .monthly_active_users import MonthlyActiveUsersStore
|
||||
from .openid import OpenIdStore
|
||||
from .presence import PresenceStore, UserPresenceState
|
||||
from .profile import ProfileStore
|
||||
from .push_rule import PushRuleStore
|
||||
from .pusher import PusherStore
|
||||
from .receipts import ReceiptsStore
|
||||
from .registration import RegistrationStore
|
||||
from .rejections import RejectionsStore
|
||||
from .relations import RelationsStore
|
||||
from .room import RoomStore
|
||||
from .roommember import RoomMemberStore
|
||||
from .search import SearchStore
|
||||
from .signatures import SignatureStore
|
||||
from .state import StateStore
|
||||
from .stats import StatsStore
|
||||
from .stream import StreamStore
|
||||
from .tags import TagsStore
|
||||
from .transactions import TransactionStore
|
||||
from .user_directory import UserDirectoryStore
|
||||
from .util.id_generators import ChainedIdGenerator, IdGenerator, StreamIdGenerator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DataStore(
|
||||
EventsBackgroundUpdatesStore,
|
||||
RoomMemberStore,
|
||||
RoomStore,
|
||||
RegistrationStore,
|
||||
StreamStore,
|
||||
ProfileStore,
|
||||
PresenceStore,
|
||||
TransactionStore,
|
||||
DirectoryStore,
|
||||
KeyStore,
|
||||
StateStore,
|
||||
SignatureStore,
|
||||
ApplicationServiceStore,
|
||||
EventsStore,
|
||||
EventFederationStore,
|
||||
MediaRepositoryStore,
|
||||
RejectionsStore,
|
||||
FilteringStore,
|
||||
PusherStore,
|
||||
PushRuleStore,
|
||||
ApplicationServiceTransactionStore,
|
||||
ReceiptsStore,
|
||||
EndToEndKeyStore,
|
||||
EndToEndRoomKeyStore,
|
||||
SearchStore,
|
||||
TagsStore,
|
||||
AccountDataStore,
|
||||
EventPushActionsStore,
|
||||
OpenIdStore,
|
||||
ClientIpStore,
|
||||
DeviceStore,
|
||||
DeviceInboxStore,
|
||||
UserDirectoryStore,
|
||||
GroupServerStore,
|
||||
UserErasureStore,
|
||||
MonthlyActiveUsersStore,
|
||||
StatsStore,
|
||||
RelationsStore,
|
||||
):
|
||||
def __init__(self, db_conn, hs):
|
||||
self.hs = hs
|
||||
self._clock = hs.get_clock()
|
||||
self.database_engine = hs.database_engine
|
||||
|
||||
self._stream_id_gen = StreamIdGenerator(
|
||||
db_conn,
|
||||
"events",
|
||||
"stream_ordering",
|
||||
extra_tables=[("local_invites", "stream_id")],
|
||||
)
|
||||
self._backfill_id_gen = StreamIdGenerator(
|
||||
db_conn,
|
||||
"events",
|
||||
"stream_ordering",
|
||||
step=-1,
|
||||
extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
|
||||
)
|
||||
self._presence_id_gen = StreamIdGenerator(
|
||||
db_conn, "presence_stream", "stream_id"
|
||||
)
|
||||
self._device_inbox_id_gen = StreamIdGenerator(
|
||||
db_conn, "device_max_stream_id", "stream_id"
|
||||
)
|
||||
self._public_room_id_gen = StreamIdGenerator(
|
||||
db_conn, "public_room_list_stream", "stream_id"
|
||||
)
|
||||
self._device_list_id_gen = StreamIdGenerator(
|
||||
db_conn, "device_lists_stream", "stream_id"
|
||||
)
|
||||
|
||||
self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
|
||||
self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")
|
||||
self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id")
|
||||
self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id")
|
||||
self._push_rules_stream_id_gen = ChainedIdGenerator(
|
||||
self._stream_id_gen, db_conn, "push_rules_stream", "stream_id"
|
||||
)
|
||||
self._pushers_id_gen = StreamIdGenerator(
|
||||
db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
|
||||
)
|
||||
self._group_updates_id_gen = StreamIdGenerator(
|
||||
db_conn, "local_group_updates", "stream_id"
|
||||
)
|
||||
|
||||
if isinstance(self.database_engine, PostgresEngine):
|
||||
self._cache_id_gen = StreamIdGenerator(
|
||||
db_conn, "cache_invalidation_stream", "stream_id"
|
||||
)
|
||||
else:
|
||||
self._cache_id_gen = None
|
||||
|
||||
self._presence_on_startup = self._get_active_presence(db_conn)
|
||||
|
||||
presence_cache_prefill, min_presence_val = self._get_cache_dict(
|
||||
db_conn,
|
||||
"presence_stream",
|
||||
entity_column="user_id",
|
||||
stream_column="stream_id",
|
||||
max_value=self._presence_id_gen.get_current_token(),
|
||||
)
|
||||
self.presence_stream_cache = StreamChangeCache(
|
||||
"PresenceStreamChangeCache",
|
||||
min_presence_val,
|
||||
prefilled_cache=presence_cache_prefill,
|
||||
)
|
||||
|
||||
max_device_inbox_id = self._device_inbox_id_gen.get_current_token()
|
||||
device_inbox_prefill, min_device_inbox_id = self._get_cache_dict(
|
||||
db_conn,
|
||||
"device_inbox",
|
||||
entity_column="user_id",
|
||||
stream_column="stream_id",
|
||||
max_value=max_device_inbox_id,
|
||||
limit=1000,
|
||||
)
|
||||
self._device_inbox_stream_cache = StreamChangeCache(
|
||||
"DeviceInboxStreamChangeCache",
|
||||
min_device_inbox_id,
|
||||
prefilled_cache=device_inbox_prefill,
|
||||
)
|
||||
# The federation outbox and the local device inbox uses the same
|
||||
# stream_id generator.
|
||||
device_outbox_prefill, min_device_outbox_id = self._get_cache_dict(
|
||||
db_conn,
|
||||
"device_federation_outbox",
|
||||
entity_column="destination",
|
||||
stream_column="stream_id",
|
||||
max_value=max_device_inbox_id,
|
||||
limit=1000,
|
||||
)
|
||||
self._device_federation_outbox_stream_cache = StreamChangeCache(
|
||||
"DeviceFederationOutboxStreamChangeCache",
|
||||
min_device_outbox_id,
|
||||
prefilled_cache=device_outbox_prefill,
|
||||
)
|
||||
|
||||
device_list_max = self._device_list_id_gen.get_current_token()
|
||||
self._device_list_stream_cache = StreamChangeCache(
|
||||
"DeviceListStreamChangeCache", device_list_max
|
||||
)
|
||||
self._device_list_federation_stream_cache = StreamChangeCache(
|
||||
"DeviceListFederationStreamChangeCache", device_list_max
|
||||
)
|
||||
|
||||
events_max = self._stream_id_gen.get_current_token()
|
||||
curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
|
||||
db_conn,
|
||||
"current_state_delta_stream",
|
||||
entity_column="room_id",
|
||||
stream_column="stream_id",
|
||||
max_value=events_max, # As we share the stream id with events token
|
||||
limit=1000,
|
||||
)
|
||||
self._curr_state_delta_stream_cache = StreamChangeCache(
|
||||
"_curr_state_delta_stream_cache",
|
||||
min_curr_state_delta_id,
|
||||
prefilled_cache=curr_state_delta_prefill,
|
||||
)
|
||||
|
||||
_group_updates_prefill, min_group_updates_id = self._get_cache_dict(
|
||||
db_conn,
|
||||
"local_group_updates",
|
||||
entity_column="user_id",
|
||||
stream_column="stream_id",
|
||||
max_value=self._group_updates_id_gen.get_current_token(),
|
||||
limit=1000,
|
||||
)
|
||||
self._group_updates_stream_cache = StreamChangeCache(
|
||||
"_group_updates_stream_cache",
|
||||
min_group_updates_id,
|
||||
prefilled_cache=_group_updates_prefill,
|
||||
)
|
||||
|
||||
self._stream_order_on_start = self.get_room_max_stream_ordering()
|
||||
self._min_stream_order_on_start = self.get_room_min_stream_ordering()
|
||||
|
||||
# Used in _generate_user_daily_visits to keep track of progress
|
||||
self._last_user_visit_update = self._get_start_of_day()
|
||||
|
||||
super(DataStore, self).__init__(db_conn, hs)
|
||||
|
||||
def take_presence_startup_info(self):
|
||||
active_on_startup = self._presence_on_startup
|
||||
self._presence_on_startup = None
|
||||
return active_on_startup
|
||||
|
||||
def _get_active_presence(self, db_conn):
|
||||
"""Fetch non-offline presence from the database so that we can register
|
||||
the appropriate time outs.
|
||||
"""
|
||||
|
||||
sql = (
|
||||
"SELECT user_id, state, last_active_ts, last_federation_update_ts,"
|
||||
" last_user_sync_ts, status_msg, currently_active FROM presence_stream"
|
||||
" WHERE state != ?"
|
||||
)
|
||||
sql = self.database_engine.convert_param_style(sql)
|
||||
|
||||
txn = db_conn.cursor()
|
||||
txn.execute(sql, (PresenceState.OFFLINE,))
|
||||
rows = self.cursor_to_dict(txn)
|
||||
txn.close()
|
||||
|
||||
for row in rows:
|
||||
row["currently_active"] = bool(row["currently_active"])
|
||||
|
||||
return [UserPresenceState(**row) for row in rows]
|
||||
|
||||
def count_daily_users(self):
|
||||
"""
|
||||
Counts the number of users who used this homeserver in the last 24 hours.
|
||||
"""
|
||||
yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
|
||||
return self.runInteraction("count_daily_users", self._count_users, yesterday)
|
||||
|
||||
def count_monthly_users(self):
|
||||
"""
|
||||
Counts the number of users who used this homeserver in the last 30 days.
|
||||
Note this method is intended for phonehome metrics only and is different
|
||||
from the mau figure in synapse.storage.monthly_active_users which,
|
||||
amongst other things, includes a 3 day grace period before a user counts.
|
||||
"""
|
||||
thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
|
||||
return self.runInteraction(
|
||||
"count_monthly_users", self._count_users, thirty_days_ago
|
||||
)
|
||||
|
||||
def _count_users(self, txn, time_from):
|
||||
"""
|
||||
Returns number of users seen in the past time_from period
|
||||
"""
|
||||
sql = """
|
||||
SELECT COALESCE(count(*), 0) FROM (
|
||||
SELECT user_id FROM user_ips
|
||||
WHERE last_seen > ?
|
||||
GROUP BY user_id
|
||||
) u
|
||||
"""
|
||||
txn.execute(sql, (time_from,))
|
||||
count, = txn.fetchone()
|
||||
return count
|
||||
|
||||
def count_r30_users(self):
|
||||
"""
|
||||
Counts the number of 30 day retained users, defined as:-
|
||||
* Users who have created their accounts more than 30 days ago
|
||||
* Where last seen at most 30 days ago
|
||||
* Where account creation and last_seen are > 30 days apart
|
||||
|
||||
Returns counts globally for a given user as well as breaking
|
||||
by platform
|
||||
"""
|
||||
|
||||
def _count_r30_users(txn):
|
||||
thirty_days_in_secs = 86400 * 30
|
||||
now = int(self._clock.time())
|
||||
thirty_days_ago_in_secs = now - thirty_days_in_secs
|
||||
|
||||
sql = """
|
||||
SELECT platform, COALESCE(count(*), 0) FROM (
|
||||
SELECT
|
||||
users.name, platform, users.creation_ts * 1000,
|
||||
MAX(uip.last_seen)
|
||||
FROM users
|
||||
INNER JOIN (
|
||||
SELECT
|
||||
user_id,
|
||||
last_seen,
|
||||
CASE
|
||||
WHEN user_agent LIKE '%%Android%%' THEN 'android'
|
||||
WHEN user_agent LIKE '%%iOS%%' THEN 'ios'
|
||||
WHEN user_agent LIKE '%%Electron%%' THEN 'electron'
|
||||
WHEN user_agent LIKE '%%Mozilla%%' THEN 'web'
|
||||
WHEN user_agent LIKE '%%Gecko%%' THEN 'web'
|
||||
ELSE 'unknown'
|
||||
END
|
||||
AS platform
|
||||
FROM user_ips
|
||||
) uip
|
||||
ON users.name = uip.user_id
|
||||
AND users.appservice_id is NULL
|
||||
AND users.creation_ts < ?
|
||||
AND uip.last_seen/1000 > ?
|
||||
AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
|
||||
GROUP BY users.name, platform, users.creation_ts
|
||||
) u GROUP BY platform
|
||||
"""
|
||||
|
||||
results = {}
|
||||
txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
|
||||
|
||||
for row in txn:
|
||||
if row[0] == "unknown":
|
||||
pass
|
||||
results[row[0]] = row[1]
|
||||
|
||||
sql = """
|
||||
SELECT COALESCE(count(*), 0) FROM (
|
||||
SELECT users.name, users.creation_ts * 1000,
|
||||
MAX(uip.last_seen)
|
||||
FROM users
|
||||
INNER JOIN (
|
||||
SELECT
|
||||
user_id,
|
||||
last_seen
|
||||
FROM user_ips
|
||||
) uip
|
||||
ON users.name = uip.user_id
|
||||
AND appservice_id is NULL
|
||||
AND users.creation_ts < ?
|
||||
AND uip.last_seen/1000 > ?
|
||||
AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
|
||||
GROUP BY users.name, users.creation_ts
|
||||
) u
|
||||
"""
|
||||
|
||||
txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
|
||||
|
||||
count, = txn.fetchone()
|
||||
results["all"] = count
|
||||
|
||||
return results
|
||||
|
||||
return self.runInteraction("count_r30_users", _count_r30_users)
|
||||
|
||||
def _get_start_of_day(self):
|
||||
"""
|
||||
Returns millisecond unixtime for start of UTC day.
|
||||
"""
|
||||
now = time.gmtime()
|
||||
today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))
|
||||
return today_start * 1000
|
||||
|
||||
def generate_user_daily_visits(self):
|
||||
"""
|
||||
Generates daily visit data for use in cohort/retention analysis
|
||||
"""
|
||||
|
||||
def _generate_user_daily_visits(txn):
|
||||
logger.info("Calling _generate_user_daily_visits")
|
||||
today_start = self._get_start_of_day()
|
||||
a_day_in_milliseconds = 24 * 60 * 60 * 1000
|
||||
now = self.clock.time_msec()
|
||||
|
||||
sql = """
|
||||
INSERT INTO user_daily_visits (user_id, device_id, timestamp)
|
||||
SELECT u.user_id, u.device_id, ?
|
||||
FROM user_ips AS u
|
||||
LEFT JOIN (
|
||||
SELECT user_id, device_id, timestamp FROM user_daily_visits
|
||||
WHERE timestamp = ?
|
||||
) udv
|
||||
ON u.user_id = udv.user_id AND u.device_id=udv.device_id
|
||||
INNER JOIN users ON users.name=u.user_id
|
||||
WHERE last_seen > ? AND last_seen <= ?
|
||||
AND udv.timestamp IS NULL AND users.is_guest=0
|
||||
AND users.appservice_id IS NULL
|
||||
GROUP BY u.user_id, u.device_id
|
||||
"""
|
||||
|
||||
# This means that the day has rolled over but there could still
|
||||
# be entries from the previous day. There is an edge case
|
||||
# where if the user logs in at 23:59 and overwrites their
|
||||
# last_seen at 00:01 then they will not be counted in the
|
||||
# previous day's stats - it is important that the query is run
|
||||
# often to minimise this case.
|
||||
if today_start > self._last_user_visit_update:
|
||||
yesterday_start = today_start - a_day_in_milliseconds
|
||||
txn.execute(
|
||||
sql,
|
||||
(
|
||||
yesterday_start,
|
||||
yesterday_start,
|
||||
self._last_user_visit_update,
|
||||
today_start,
|
||||
),
|
||||
)
|
||||
self._last_user_visit_update = today_start
|
||||
|
||||
txn.execute(
|
||||
sql, (today_start, today_start, self._last_user_visit_update, now)
|
||||
)
|
||||
# Update _last_user_visit_update to now. The reason to do this
|
||||
# rather than just clamping to the beginning of the day is to limit
|
||||
# the size of the join - meaning that the query can be run more
|
||||
# frequently
|
||||
self._last_user_visit_update = now
|
||||
|
||||
return self.runInteraction(
|
||||
"generate_user_daily_visits", _generate_user_daily_visits
|
||||
)
|
||||
|
||||
def get_users(self):
"""Function to retrieve a list of users in the users table.
|
||||
|
||||
Args:
|
||||
Returns:
|
||||
defer.Deferred: resolves to list[dict[str, Any]]
|
||||
"""
|
||||
return self._simple_select_list(
|
||||
table="users",
|
||||
keyvalues={},
|
||||
retcols=["name", "password_hash", "is_guest", "admin", "user_type"],
|
||||
desc="get_users",
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_users_paginate(self, order, start, limit):
"""Function to retrieve a paginated list of users from
|
||||
users list. This will return a json object, which contains
|
||||
list of users and the total number of users in users table.
|
||||
|
||||
Args:
|
||||
order (str): column name to order the select by this column
|
||||
start (int): start number to begin the query from
|
||||
limit (int): number of rows to retrieve
|
||||
Returns:
|
||||
defer.Deferred: resolves to json object {list[dict[str, Any]], count}
|
||||
"""
|
||||
users = yield self.runInteraction(
|
||||
"get_users_paginate",
|
||||
self._simple_select_list_paginate_txn,
|
||||
table="users",
|
||||
keyvalues={"is_guest": False},
|
||||
orderby=order,
|
||||
start=start,
|
||||
limit=limit,
|
||||
retcols=["name", "password_hash", "is_guest", "admin", "user_type"],
|
||||
)
|
||||
count = yield self.runInteraction("get_users_paginate", self.get_user_count_txn)
|
||||
retval = {"users": users, "total": count}
|
||||
return retval
|
||||
|
||||
def search_users(self, term):
|
||||
"""Function to search users list for one or more users with
|
||||
the matched term.
|
||||
|
||||
Args:
|
||||
term (str): search term
|
||||
col (str): column to query term should be matched to
|
||||
Returns:
|
||||
defer.Deferred: resolves to list[dict[str, Any]]
|
||||
"""
|
||||
return self._simple_search_list(
|
||||
table="users",
|
||||
term=term,
|
||||
col="name",
|
||||
retcols=["name", "password_hash", "is_guest", "admin", "user_type"],
|
||||
desc="search_users",
|
||||
)
|
||||
from synapse.storage.data_stores.main import DataStore # noqa: F401
|
||||
|
||||
|
||||
def are_all_users_on_domain(txn, database_engine, domain):
|
||||
|
||||
14
synapse/storage/data_stores/__init__.py
Normal file
@@ -0,0 +1,14 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
530
synapse/storage/data_stores/main/__init__.py
Normal file
@@ -0,0 +1,530 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import calendar
|
||||
import logging
|
||||
import time
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.constants import PresenceState
|
||||
from synapse.storage.engines import PostgresEngine
|
||||
from synapse.storage.util.id_generators import (
|
||||
ChainedIdGenerator,
|
||||
IdGenerator,
|
||||
StreamIdGenerator,
|
||||
)
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
|
||||
from .account_data import AccountDataStore
|
||||
from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore
|
||||
from .client_ips import ClientIpStore
|
||||
from .deviceinbox import DeviceInboxStore
|
||||
from .devices import DeviceStore
|
||||
from .directory import DirectoryStore
|
||||
from .e2e_room_keys import EndToEndRoomKeyStore
|
||||
from .end_to_end_keys import EndToEndKeyStore
|
||||
from .event_federation import EventFederationStore
|
||||
from .event_push_actions import EventPushActionsStore
|
||||
from .events import EventsStore
|
||||
from .events_bg_updates import EventsBackgroundUpdatesStore
|
||||
from .filtering import FilteringStore
|
||||
from .group_server import GroupServerStore
|
||||
from .keys import KeyStore
|
||||
from .media_repository import MediaRepositoryStore
|
||||
from .monthly_active_users import MonthlyActiveUsersStore
|
||||
from .openid import OpenIdStore
|
||||
from .presence import PresenceStore, UserPresenceState
|
||||
from .profile import ProfileStore
|
||||
from .push_rule import PushRuleStore
|
||||
from .pusher import PusherStore
|
||||
from .receipts import ReceiptsStore
|
||||
from .registration import RegistrationStore
|
||||
from .rejections import RejectionsStore
|
||||
from .relations import RelationsStore
|
||||
from .room import RoomStore
|
||||
from .roommember import RoomMemberStore
|
||||
from .search import SearchStore
|
||||
from .signatures import SignatureStore
|
||||
from .state import StateStore
|
||||
from .stats import StatsStore
|
||||
from .stream import StreamStore
|
||||
from .tags import TagsStore
|
||||
from .transactions import TransactionStore
|
||||
from .user_directory import UserDirectoryStore
|
||||
from .user_erasure_store import UserErasureStore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DataStore(
|
||||
EventsBackgroundUpdatesStore,
|
||||
RoomMemberStore,
|
||||
RoomStore,
|
||||
RegistrationStore,
|
||||
StreamStore,
|
||||
ProfileStore,
|
||||
PresenceStore,
|
||||
TransactionStore,
|
||||
DirectoryStore,
|
||||
KeyStore,
|
||||
StateStore,
|
||||
SignatureStore,
|
||||
ApplicationServiceStore,
|
||||
EventsStore,
|
||||
EventFederationStore,
|
||||
MediaRepositoryStore,
|
||||
RejectionsStore,
|
||||
FilteringStore,
|
||||
PusherStore,
|
||||
PushRuleStore,
|
||||
ApplicationServiceTransactionStore,
|
||||
ReceiptsStore,
|
||||
EndToEndKeyStore,
|
||||
EndToEndRoomKeyStore,
|
||||
SearchStore,
|
||||
TagsStore,
|
||||
AccountDataStore,
|
||||
EventPushActionsStore,
|
||||
OpenIdStore,
|
||||
ClientIpStore,
|
||||
DeviceStore,
|
||||
DeviceInboxStore,
|
||||
UserDirectoryStore,
|
||||
GroupServerStore,
|
||||
UserErasureStore,
|
||||
MonthlyActiveUsersStore,
|
||||
StatsStore,
|
||||
RelationsStore,
|
||||
):
|
||||
def __init__(self, db_conn, hs):
|
||||
self.hs = hs
|
||||
self._clock = hs.get_clock()
|
||||
self.database_engine = hs.database_engine
|
||||
|
||||
self._stream_id_gen = StreamIdGenerator(
|
||||
db_conn,
|
||||
"events",
|
||||
"stream_ordering",
|
||||
extra_tables=[("local_invites", "stream_id")],
|
||||
)
|
||||
self._backfill_id_gen = StreamIdGenerator(
|
||||
db_conn,
|
||||
"events",
|
||||
"stream_ordering",
|
||||
step=-1,
|
||||
extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
|
||||
)
|
||||
self._presence_id_gen = StreamIdGenerator(
|
||||
db_conn, "presence_stream", "stream_id"
|
||||
)
|
||||
self._device_inbox_id_gen = StreamIdGenerator(
|
||||
db_conn, "device_max_stream_id", "stream_id"
|
||||
)
|
||||
self._public_room_id_gen = StreamIdGenerator(
|
||||
db_conn, "public_room_list_stream", "stream_id"
|
||||
)
|
||||
self._device_list_id_gen = StreamIdGenerator(
|
||||
db_conn, "device_lists_stream", "stream_id"
|
||||
)
|
||||
self._cross_signing_id_gen = StreamIdGenerator(
|
||||
db_conn, "e2e_cross_signing_keys", "stream_id"
|
||||
)
|
||||
|
||||
self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
|
||||
self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")
|
||||
self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id")
|
||||
self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id")
|
||||
self._push_rules_stream_id_gen = ChainedIdGenerator(
|
||||
self._stream_id_gen, db_conn, "push_rules_stream", "stream_id"
|
||||
)
|
||||
self._pushers_id_gen = StreamIdGenerator(
|
||||
db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
|
||||
)
|
||||
self._group_updates_id_gen = StreamIdGenerator(
|
||||
db_conn, "local_group_updates", "stream_id"
|
||||
)
|
||||
|
||||
if isinstance(self.database_engine, PostgresEngine):
|
||||
self._cache_id_gen = StreamIdGenerator(
|
||||
db_conn, "cache_invalidation_stream", "stream_id"
|
||||
)
|
||||
else:
|
||||
self._cache_id_gen = None
|
||||
|
||||
self._presence_on_startup = self._get_active_presence(db_conn)
|
||||
|
||||
presence_cache_prefill, min_presence_val = self._get_cache_dict(
|
||||
db_conn,
|
||||
"presence_stream",
|
||||
entity_column="user_id",
|
||||
stream_column="stream_id",
|
||||
max_value=self._presence_id_gen.get_current_token(),
|
||||
)
|
||||
self.presence_stream_cache = StreamChangeCache(
|
||||
"PresenceStreamChangeCache",
|
||||
min_presence_val,
|
||||
prefilled_cache=presence_cache_prefill,
|
||||
)
|
||||
|
||||
max_device_inbox_id = self._device_inbox_id_gen.get_current_token()
|
||||
device_inbox_prefill, min_device_inbox_id = self._get_cache_dict(
|
||||
db_conn,
|
||||
"device_inbox",
|
||||
entity_column="user_id",
|
||||
stream_column="stream_id",
|
||||
max_value=max_device_inbox_id,
|
||||
limit=1000,
|
||||
)
|
||||
self._device_inbox_stream_cache = StreamChangeCache(
|
||||
"DeviceInboxStreamChangeCache",
|
||||
min_device_inbox_id,
|
||||
prefilled_cache=device_inbox_prefill,
|
||||
)
|
||||
# The federation outbox and the local device inbox uses the same
|
||||
# stream_id generator.
|
||||
device_outbox_prefill, min_device_outbox_id = self._get_cache_dict(
|
||||
db_conn,
|
||||
"device_federation_outbox",
|
||||
entity_column="destination",
|
||||
stream_column="stream_id",
|
||||
max_value=max_device_inbox_id,
|
||||
limit=1000,
|
||||
)
|
||||
self._device_federation_outbox_stream_cache = StreamChangeCache(
|
||||
"DeviceFederationOutboxStreamChangeCache",
|
||||
min_device_outbox_id,
|
||||
prefilled_cache=device_outbox_prefill,
|
||||
)
|
||||
|
||||
device_list_max = self._device_list_id_gen.get_current_token()
|
||||
self._device_list_stream_cache = StreamChangeCache(
|
||||
"DeviceListStreamChangeCache", device_list_max
|
||||
)
|
||||
self._user_signature_stream_cache = StreamChangeCache(
|
||||
"UserSignatureStreamChangeCache", device_list_max
|
||||
)
|
||||
self._device_list_federation_stream_cache = StreamChangeCache(
|
||||
"DeviceListFederationStreamChangeCache", device_list_max
|
||||
)
|
||||
|
||||
events_max = self._stream_id_gen.get_current_token()
|
||||
curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
|
||||
db_conn,
|
||||
"current_state_delta_stream",
|
||||
entity_column="room_id",
|
||||
stream_column="stream_id",
|
||||
max_value=events_max, # As we share the stream id with events token
|
||||
limit=1000,
|
||||
)
|
||||
self._curr_state_delta_stream_cache = StreamChangeCache(
|
||||
"_curr_state_delta_stream_cache",
|
||||
min_curr_state_delta_id,
|
||||
prefilled_cache=curr_state_delta_prefill,
|
||||
)
|
||||
|
||||
_group_updates_prefill, min_group_updates_id = self._get_cache_dict(
|
||||
db_conn,
|
||||
"local_group_updates",
|
||||
entity_column="user_id",
|
||||
stream_column="stream_id",
|
||||
max_value=self._group_updates_id_gen.get_current_token(),
|
||||
limit=1000,
|
||||
)
|
||||
self._group_updates_stream_cache = StreamChangeCache(
|
||||
"_group_updates_stream_cache",
|
||||
min_group_updates_id,
|
||||
prefilled_cache=_group_updates_prefill,
|
||||
)
|
||||
|
||||
self._stream_order_on_start = self.get_room_max_stream_ordering()
|
||||
self._min_stream_order_on_start = self.get_room_min_stream_ordering()
|
||||
|
||||
# Used in _generate_user_daily_visits to keep track of progress
|
||||
self._last_user_visit_update = self._get_start_of_day()
|
||||
|
||||
super(DataStore, self).__init__(db_conn, hs)
|
||||
|
||||
def take_presence_startup_info(self):
|
||||
active_on_startup = self._presence_on_startup
|
||||
self._presence_on_startup = None
|
||||
return active_on_startup
|
||||
|
||||
def _get_active_presence(self, db_conn):
|
||||
"""Fetch non-offline presence from the database so that we can register
|
||||
the appropriate time outs.
|
||||
"""
|
||||
|
||||
sql = (
|
||||
"SELECT user_id, state, last_active_ts, last_federation_update_ts,"
|
||||
" last_user_sync_ts, status_msg, currently_active FROM presence_stream"
|
||||
" WHERE state != ?"
|
||||
)
|
||||
sql = self.database_engine.convert_param_style(sql)
|
||||
|
||||
txn = db_conn.cursor()
|
||||
txn.execute(sql, (PresenceState.OFFLINE,))
|
||||
rows = self.cursor_to_dict(txn)
|
||||
txn.close()
|
||||
|
||||
for row in rows:
|
||||
row["currently_active"] = bool(row["currently_active"])
|
||||
|
||||
return [UserPresenceState(**row) for row in rows]
|
||||
|
||||
def count_daily_users(self):
|
||||
"""
|
||||
Counts the number of users who used this homeserver in the last 24 hours.
|
||||
"""
|
||||
yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
|
||||
return self.runInteraction("count_daily_users", self._count_users, yesterday)
|
||||
|
||||
def count_monthly_users(self):
|
||||
"""
|
||||
Counts the number of users who used this homeserver in the last 30 days.
|
||||
Note this method is intended for phonehome metrics only and is different
|
||||
from the mau figure in synapse.storage.monthly_active_users which,
|
||||
amongst other things, includes a 3 day grace period before a user counts.
|
||||
"""
|
||||
thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
|
||||
return self.runInteraction(
|
||||
"count_monthly_users", self._count_users, thirty_days_ago
|
||||
)
|
||||
|
||||
def _count_users(self, txn, time_from):
|
||||
"""
|
||||
Returns number of users seen in the past time_from period
|
||||
"""
|
||||
sql = """
|
||||
SELECT COALESCE(count(*), 0) FROM (
|
||||
SELECT user_id FROM user_ips
|
||||
WHERE last_seen > ?
|
||||
GROUP BY user_id
|
||||
) u
|
||||
"""
|
||||
txn.execute(sql, (time_from,))
|
||||
count, = txn.fetchone()
|
||||
return count
|
||||
|
||||
def count_r30_users(self):
|
||||
"""
|
||||
Counts the number of 30 day retained users, defined as:-
|
||||
* Users who have created their accounts more than 30 days ago
|
||||
* Where last seen at most 30 days ago
|
||||
* Where account creation and last_seen are > 30 days apart
|
||||
|
||||
Returns counts globally for a given user as well as breaking
|
||||
by platform
|
||||
"""
|
||||
|
||||
def _count_r30_users(txn):
|
||||
thirty_days_in_secs = 86400 * 30
|
||||
now = int(self._clock.time())
|
||||
thirty_days_ago_in_secs = now - thirty_days_in_secs
|
||||
|
||||
sql = """
|
||||
SELECT platform, COALESCE(count(*), 0) FROM (
|
||||
SELECT
|
||||
users.name, platform, users.creation_ts * 1000,
|
||||
MAX(uip.last_seen)
|
||||
FROM users
|
||||
INNER JOIN (
|
||||
SELECT
|
||||
user_id,
|
||||
last_seen,
|
||||
CASE
|
||||
WHEN user_agent LIKE '%%Android%%' THEN 'android'
|
||||
WHEN user_agent LIKE '%%iOS%%' THEN 'ios'
|
||||
WHEN user_agent LIKE '%%Electron%%' THEN 'electron'
|
||||
WHEN user_agent LIKE '%%Mozilla%%' THEN 'web'
|
||||
WHEN user_agent LIKE '%%Gecko%%' THEN 'web'
|
||||
ELSE 'unknown'
|
||||
END
|
||||
AS platform
|
||||
FROM user_ips
|
||||
) uip
|
||||
ON users.name = uip.user_id
|
||||
AND users.appservice_id is NULL
|
||||
AND users.creation_ts < ?
|
||||
AND uip.last_seen/1000 > ?
|
||||
AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
|
||||
GROUP BY users.name, platform, users.creation_ts
|
||||
) u GROUP BY platform
|
||||
"""
|
||||
|
||||
results = {}
|
||||
txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
|
||||
|
||||
for row in txn:
|
||||
if row[0] == "unknown":
|
||||
pass
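# (note: this branch is a no-op - rows whose platform resolved to 'unknown'
# still fall through and are recorded in the per-platform results below)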
|
||||
results[row[0]] = row[1]
|
||||
|
||||
sql = """
|
||||
SELECT COALESCE(count(*), 0) FROM (
|
||||
SELECT users.name, users.creation_ts * 1000,
|
||||
MAX(uip.last_seen)
|
||||
FROM users
|
||||
INNER JOIN (
|
||||
SELECT
|
||||
user_id,
|
||||
last_seen
|
||||
FROM user_ips
|
||||
) uip
|
||||
ON users.name = uip.user_id
|
||||
AND appservice_id is NULL
|
||||
AND users.creation_ts < ?
|
||||
AND uip.last_seen/1000 > ?
|
||||
AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
|
||||
GROUP BY users.name, users.creation_ts
|
||||
) u
|
||||
"""
|
||||
|
||||
txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
|
||||
|
||||
count, = txn.fetchone()
|
||||
results["all"] = count
|
||||
|
||||
return results
|
||||
|
||||
return self.runInteraction("count_r30_users", _count_r30_users)
|
||||
|
||||
def _get_start_of_day(self):
|
||||
"""
|
||||
Returns millisecond unixtime for start of UTC day.
|
||||
"""
|
||||
now = time.gmtime()
|
||||
today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))
|
||||
return today_start * 1000
|
||||
|
||||
def generate_user_daily_visits(self):
|
||||
"""
|
||||
Generates daily visit data for use in cohort/retention analysis
|
||||
"""
|
||||
|
||||
def _generate_user_daily_visits(txn):
|
||||
logger.info("Calling _generate_user_daily_visits")
|
||||
today_start = self._get_start_of_day()
|
||||
a_day_in_milliseconds = 24 * 60 * 60 * 1000
|
||||
now = self.clock.time_msec()
|
||||
|
||||
sql = """
|
||||
INSERT INTO user_daily_visits (user_id, device_id, timestamp)
|
||||
SELECT u.user_id, u.device_id, ?
|
||||
FROM user_ips AS u
|
||||
LEFT JOIN (
|
||||
SELECT user_id, device_id, timestamp FROM user_daily_visits
|
||||
WHERE timestamp = ?
|
||||
) udv
|
||||
ON u.user_id = udv.user_id AND u.device_id=udv.device_id
|
||||
INNER JOIN users ON users.name=u.user_id
|
||||
WHERE last_seen > ? AND last_seen <= ?
|
||||
AND udv.timestamp IS NULL AND users.is_guest=0
|
||||
AND users.appservice_id IS NULL
|
||||
GROUP BY u.user_id, u.device_id
|
||||
"""
|
||||
|
||||
# This means that the day has rolled over but there could still
|
||||
# be entries from the previous day. There is an edge case
|
||||
# where if the user logs in at 23:59 and overwrites their
|
||||
# last_seen at 00:01 then they will not be counted in the
|
||||
# previous day's stats - it is important that the query is run
|
||||
# often to minimise this case.
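# Illustrative timeline (not from the original): the job last ran at 23:50,
# a user is seen at 23:59, and the job runs again at 00:10. today_start has
# then advanced past _last_user_visit_update, so the first execute below
# back-fills a row for yesterday (timestamp = yesterday_start) covering the
# 23:50-00:00 window, and the second execute records today's visits from
# 00:00 up to now.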
|
||||
if today_start > self._last_user_visit_update:
|
||||
yesterday_start = today_start - a_day_in_milliseconds
|
||||
txn.execute(
|
||||
sql,
|
||||
(
|
||||
yesterday_start,
|
||||
yesterday_start,
|
||||
self._last_user_visit_update,
|
||||
today_start,
|
||||
),
|
||||
)
|
||||
self._last_user_visit_update = today_start
|
||||
|
||||
txn.execute(
|
||||
sql, (today_start, today_start, self._last_user_visit_update, now)
|
||||
)
|
||||
# Update _last_user_visit_update to now. The reason to do this
|
||||
# rather just clamping to the beginning of the day is to limit
|
||||
# the size of the join - meaning that the query can be run more
|
||||
# frequently
|
||||
self._last_user_visit_update = now
|
||||
|
||||
return self.runInteraction(
|
||||
"generate_user_daily_visits", _generate_user_daily_visits
|
||||
)
|
||||
|
||||
def get_users(self):
|
||||
Function to retrieve a list of users in the users table.
|
||||
|
||||
Args:
|
||||
Returns:
|
||||
defer.Deferred: resolves to list[dict[str, Any]]
|
||||
"""
|
||||
return self._simple_select_list(
|
||||
table="users",
|
||||
keyvalues={},
|
||||
retcols=["name", "password_hash", "is_guest", "admin", "user_type"],
|
||||
desc="get_users",
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_users_paginate(self, order, start, limit):
|
||||
Function to retrieve a paginated list of users from the
|
||||
users table. This will return a json object, which contains a
|
||||
list of users and the total number of users in the users table.
|
||||
|
||||
Args:
|
||||
order (str): the column name to order the results by
|
||||
start (int): start number to begin the query from
|
||||
limit (int): number of rows to retrieve
|
||||
Returns:
|
||||
defer.Deferred: resolves to json object {list[dict[str, Any]], count}
|
||||
"""
|
||||
users = yield self.runInteraction(
|
||||
"get_users_paginate",
|
||||
self._simple_select_list_paginate_txn,
|
||||
table="users",
|
||||
keyvalues={"is_guest": False},
|
||||
orderby=order,
|
||||
start=start,
|
||||
limit=limit,
|
||||
retcols=["name", "password_hash", "is_guest", "admin", "user_type"],
|
||||
)
|
||||
count = yield self.runInteraction("get_users_paginate", self.get_user_count_txn)
|
||||
retval = {"users": users, "total": count}
|
||||
return retval
|
||||
|
||||
def search_users(self, term):
|
||||
Function to search the users table for one or more users matching
|
||||
the given term.
|
||||
|
||||
Args:
|
||||
term (str): search term
|
||||
col (str): column to query term should be matched to
|
||||
Returns:
|
||||
defer.Deferred: resolves to list[dict[str, Any]]
|
||||
"""
|
||||
return self._simple_search_list(
|
||||
table="users",
|
||||
term=term,
|
||||
col="name",
|
||||
retcols=["name", "password_hash", "is_guest", "admin", "user_type"],
|
||||
desc="search_users",
|
||||
)
|
||||
@@ -22,9 +22,8 @@ from twisted.internet import defer
|
||||
|
||||
from synapse.appservice import AppServiceTransaction
|
||||
from synapse.config.appservice import load_appservices
|
||||
from synapse.storage.events_worker import EventsWorkerStore
|
||||
|
||||
from ._base import SQLBaseStore
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -20,11 +20,10 @@ from six import iteritems
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.metrics.background_process_metrics import wrap_as_background_process
|
||||
from synapse.storage import background_updates
|
||||
from synapse.storage._base import Cache
|
||||
from synapse.util.caches import CACHE_SIZE_FACTOR
|
||||
|
||||
from . import background_updates
|
||||
from ._base import Cache
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Number of msec of granularity to store the user IP 'last seen' time. Smaller
|
||||
@@ -1,5 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
# Copyright 2019 New Vector Ltd
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -20,7 +22,7 @@ from canonicaljson import json
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import StoreError
|
||||
from synapse.api.errors import Codes, StoreError
|
||||
from synapse.logging.opentracing import (
|
||||
get_active_span_text_map,
|
||||
set_tag,
|
||||
@@ -47,7 +49,8 @@ DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = (
|
||||
|
||||
class DeviceWorkerStore(SQLBaseStore):
|
||||
def get_device(self, user_id, device_id):
|
||||
"""Retrieve a device.
|
||||
"""Retrieve a device. Only returns devices that are not marked as
|
||||
hidden.
|
||||
|
||||
Args:
|
||||
user_id (str): The ID of the user which owns the device
|
||||
@@ -59,14 +62,15 @@ class DeviceWorkerStore(SQLBaseStore):
|
||||
"""
|
||||
return self._simple_select_one(
|
||||
table="devices",
|
||||
keyvalues={"user_id": user_id, "device_id": device_id},
|
||||
keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
|
||||
retcols=("user_id", "device_id", "display_name"),
|
||||
desc="get_device",
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_devices_by_user(self, user_id):
|
||||
"""Retrieve all of a user's registered devices.
|
||||
"""Retrieve all of a user's registered devices. Only returns devices
|
||||
that are not marked as hidden.
|
||||
|
||||
Args:
|
||||
user_id (str):
|
||||
@@ -77,7 +81,7 @@ class DeviceWorkerStore(SQLBaseStore):
|
||||
"""
|
||||
devices = yield self._simple_select_list(
|
||||
table="devices",
|
||||
keyvalues={"user_id": user_id},
|
||||
keyvalues={"user_id": user_id, "hidden": False},
|
||||
retcols=("user_id", "device_id", "display_name"),
|
||||
desc="get_devices_by_user",
|
||||
)
|
||||
@@ -324,6 +328,41 @@ class DeviceWorkerStore(SQLBaseStore):
|
||||
"""
|
||||
txn.execute(sql, (destination, stream_id))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def add_user_signature_change_to_streams(self, from_user_id, user_ids):
|
||||
"""Persist that a user has made new signatures
|
||||
|
||||
Args:
|
||||
from_user_id (str): the user who made the signatures
|
||||
user_ids (list[str]): the users who were signed
|
||||
"""
|
||||
|
||||
with self._device_list_id_gen.get_next() as stream_id:
|
||||
yield self.runInteraction(
|
||||
"add_user_sig_change_to_streams",
|
||||
self._add_user_signature_change_txn,
|
||||
from_user_id,
|
||||
user_ids,
|
||||
stream_id,
|
||||
)
|
||||
return stream_id
|
||||
|
||||
def _add_user_signature_change_txn(self, txn, from_user_id, user_ids, stream_id):
|
||||
txn.call_after(
|
||||
self._user_signature_stream_cache.entity_has_changed,
|
||||
from_user_id,
|
||||
stream_id,
|
||||
)
|
||||
self._simple_insert_txn(
|
||||
txn,
|
||||
"user_signature_stream",
|
||||
values={
|
||||
"stream_id": stream_id,
|
||||
"from_user_id": from_user_id,
|
||||
"user_ids": json.dumps(user_ids),
|
||||
},
|
||||
)
|
||||
|
||||
def get_device_stream_token(self):
|
||||
return self._device_list_id_gen.get_current_token()
|
||||
|
||||
@@ -469,6 +508,28 @@ class DeviceWorkerStore(SQLBaseStore):
|
||||
"get_users_whose_devices_changed", _get_users_whose_devices_changed_txn
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_users_whose_signatures_changed(self, user_id, from_key):
|
||||
"""Get the users who have new cross-signing signatures made by `user_id` since
|
||||
`from_key`.
|
||||
|
||||
Args:
|
||||
user_id (str): the user who made the signatures
|
||||
from_key (str): The device lists stream token
|
||||
"""
|
||||
from_key = int(from_key)
|
||||
if self._user_signature_stream_cache.has_entity_changed(user_id, from_key):
|
||||
sql = """
|
||||
SELECT DISTINCT user_ids FROM user_signature_stream
|
||||
WHERE from_user_id = ? AND stream_id > ?
|
||||
"""
|
||||
rows = yield self._execute(
|
||||
"get_users_whose_signatures_changed", None, sql, user_id, from_key
|
||||
)
|
||||
return set(user for row in rows for user in json.loads(row[0]))
|
||||
else:
|
||||
return set()
|
||||
|
||||
def get_all_device_list_changes_for_remotes(self, from_key, to_key):
|
||||
"""Return a list of `(stream_id, user_id, destination)` which is the
|
||||
combined list of changes to devices, and which destinations need to be
|
||||
@@ -592,6 +653,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
Returns:
|
||||
defer.Deferred: boolean whether the device was inserted or an
|
||||
existing device existed with that ID.
|
||||
Raises:
|
||||
StoreError: if the device is already in use
|
||||
"""
|
||||
key = (user_id, device_id)
|
||||
if self.device_id_exists_cache.get(key, None):
|
||||
@@ -604,12 +667,25 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
"user_id": user_id,
|
||||
"device_id": device_id,
|
||||
"display_name": initial_device_display_name,
|
||||
"hidden": False,
|
||||
},
|
||||
desc="store_device",
|
||||
or_ignore=True,
|
||||
)
|
||||
if not inserted:
|
||||
# if the device already exists, check if it's a real device, or
|
||||
# if the device ID is reserved by something else
|
||||
hidden = yield self._simple_select_one_onecol(
|
||||
"devices",
|
||||
keyvalues={"user_id": user_id, "device_id": device_id},
|
||||
retcol="hidden",
|
||||
)
|
||||
if hidden:
|
||||
raise StoreError(400, "The device ID is in use", Codes.FORBIDDEN)
|
||||
self.device_id_exists_cache.prefill(key, True)
|
||||
return inserted
|
||||
except StoreError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"store_device with device_id=%s(%r) user_id=%s(%r)"
|
||||
@@ -636,7 +712,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
"""
|
||||
yield self._simple_delete_one(
|
||||
table="devices",
|
||||
keyvalues={"user_id": user_id, "device_id": device_id},
|
||||
keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
|
||||
desc="delete_device",
|
||||
)
|
||||
|
||||
@@ -656,14 +732,15 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
table="devices",
|
||||
column="device_id",
|
||||
iterable=device_ids,
|
||||
keyvalues={"user_id": user_id},
|
||||
keyvalues={"user_id": user_id, "hidden": False},
|
||||
desc="delete_devices",
|
||||
)
|
||||
for device_id in device_ids:
|
||||
self.device_id_exists_cache.invalidate((user_id, device_id))
|
||||
|
||||
def update_device(self, user_id, device_id, new_display_name=None):
|
||||
"""Update a device.
|
||||
"""Update a device. Only updates the device if it is not marked as
|
||||
hidden.
|
||||
|
||||
Args:
|
||||
user_id (str): The ID of the user which owns the device
|
||||
@@ -682,7 +759,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
return defer.succeed(None)
|
||||
return self._simple_update_one(
|
||||
table="devices",
|
||||
keyvalues={"user_id": user_id, "device_id": device_id},
|
||||
keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
|
||||
updatevalues=updates,
|
||||
desc="update_device",
|
||||
)
|
||||
@@ -18,10 +18,9 @@ from collections import namedtuple
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
from synapse.util.caches.descriptors import cached
|
||||
|
||||
from ._base import SQLBaseStore
|
||||
|
||||
RoomAliasMapping = namedtuple("RoomAliasMapping", ("room_id", "room_alias", "servers"))
|
||||
|
||||
|
||||
@@ -19,8 +19,7 @@ from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import StoreError
|
||||
from synapse.logging.opentracing import log_kv, trace
|
||||
|
||||
from ._base import SQLBaseStore
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
|
||||
|
||||
class EndToEndRoomKeyStore(SQLBaseStore):
|
||||
@@ -1,5 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
# Copyright 2019 New Vector Ltd
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,15 +16,14 @@
|
||||
# limitations under the License.
|
||||
from six import iteritems
|
||||
|
||||
from canonicaljson import encode_canonical_json
|
||||
from canonicaljson import encode_canonical_json, json
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.logging.opentracing import log_kv, set_tag, trace
|
||||
from synapse.storage._base import SQLBaseStore, db_to_json
|
||||
from synapse.util.caches.descriptors import cached
|
||||
|
||||
from ._base import SQLBaseStore, db_to_json
|
||||
|
||||
|
||||
class EndToEndKeyWorkerStore(SQLBaseStore):
|
||||
@trace
|
||||
@@ -66,6 +67,11 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
|
||||
display_name = device_info["device_display_name"]
|
||||
if display_name is not None:
|
||||
r["unsigned"]["device_display_name"] = display_name
|
||||
if "signatures" in device_info:
|
||||
for sig_user_id, sigs in device_info["signatures"].items():
|
||||
r.setdefault("signatures", {}).setdefault(
|
||||
sig_user_id, {}
|
||||
).update(sigs)
|
||||
rv[user_id][device_id] = r
|
||||
|
||||
return rv
|
||||
@@ -79,6 +85,8 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
|
||||
|
||||
query_clauses = []
|
||||
query_params = []
|
||||
signature_query_clauses = []
|
||||
signature_query_params = []
|
||||
|
||||
if include_all_devices is False:
|
||||
include_deleted_devices = False
|
||||
@@ -89,12 +97,20 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
|
||||
for (user_id, device_id) in query_list:
|
||||
query_clause = "user_id = ?"
|
||||
query_params.append(user_id)
|
||||
signature_query_clause = "target_user_id = ?"
|
||||
signature_query_params.append(user_id)
|
||||
|
||||
if device_id is not None:
|
||||
query_clause += " AND device_id = ?"
|
||||
query_params.append(device_id)
|
||||
signature_query_clause += " AND target_device_id = ?"
|
||||
signature_query_params.append(device_id)
|
||||
|
||||
signature_query_clause += " AND user_id = ?"
|
||||
signature_query_params.append(user_id)
|
||||
|
||||
query_clauses.append(query_clause)
|
||||
signature_query_clauses.append(signature_query_clause)
|
||||
|
||||
sql = (
|
||||
"SELECT user_id, device_id, "
|
||||
@@ -102,7 +118,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
|
||||
" k.key_json"
|
||||
" FROM devices d"
|
||||
" %s JOIN e2e_device_keys_json k USING (user_id, device_id)"
|
||||
" WHERE %s"
|
||||
" WHERE %s AND NOT d.hidden"
|
||||
) % (
|
||||
"LEFT" if include_all_devices else "INNER",
|
||||
" OR ".join("(" + q + ")" for q in query_clauses),
|
||||
@@ -121,6 +137,22 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
|
||||
for user_id, device_id in deleted_devices:
|
||||
result.setdefault(user_id, {})[device_id] = None
|
||||
|
||||
# get signatures on the device
|
||||
signature_sql = (
|
||||
"SELECT * " " FROM e2e_cross_signing_signatures " " WHERE %s"
|
||||
) % (" OR ".join("(" + q + ")" for q in signature_query_clauses))
|
||||
|
||||
txn.execute(signature_sql, signature_query_params)
|
||||
rows = self.cursor_to_dict(txn)
|
||||
|
||||
for row in rows:
|
||||
target_user_id = row["target_user_id"]
|
||||
target_device_id = row["target_device_id"]
|
||||
if target_user_id in result and target_device_id in result[target_user_id]:
|
||||
result[target_user_id][target_device_id].setdefault(
|
||||
"signatures", {}
|
||||
).setdefault(row["user_id"], {})[row["key_id"]] = row["signature"]
|
||||
|
||||
log_kv(result)
|
||||
return result
|
||||
|
||||
@@ -321,3 +353,164 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
|
||||
return self.runInteraction(
|
||||
"delete_e2e_keys_by_device", delete_e2e_keys_by_device_txn
|
||||
)
|
||||
|
||||
def _set_e2e_cross_signing_key_txn(self, txn, user_id, key_type, key):
|
||||
"""Set a user's cross-signing key.
|
||||
|
||||
Args:
|
||||
txn (twisted.enterprise.adbapi.Connection): db connection
|
||||
user_id (str): the user to set the signing key for
|
||||
key_type (str): the type of key that is being set: either 'master'
|
||||
for a master key, 'self_signing' for a self-signing key, or
|
||||
'user_signing' for a user-signing key
|
||||
key (dict): the key data
|
||||
"""
|
||||
# the cross-signing keys need to occupy the same namespace as devices,
|
||||
# since signatures are identified by device ID. So add an entry to the
|
||||
# device table to make sure that we don't have a collision with device
|
||||
# IDs
|
||||
|
||||
# the 'key' dict will look something like:
|
||||
# {
|
||||
# "user_id": "@alice:example.com",
|
||||
# "usage": ["self_signing"],
|
||||
# "keys": {
|
||||
# "ed25519:base64+self+signing+public+key": "base64+self+signing+public+key",
|
||||
# },
|
||||
# "signatures": {
|
||||
# "@alice:example.com": {
|
||||
# "ed25519:base64+master+public+key": "base64+signature"
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# The "keys" property must only have one entry, which will be the public
|
||||
# key, so we just grab the first value in there
|
||||
pubkey = next(iter(key["keys"].values()))
|
||||
self._simple_insert_txn(
|
||||
txn,
|
||||
"devices",
|
||||
values={
|
||||
"user_id": user_id,
|
||||
"device_id": pubkey,
|
||||
"display_name": key_type + " signing key",
|
||||
"hidden": True,
|
||||
},
|
||||
)
|
||||
|
||||
# and finally, store the key itself
|
||||
with self._cross_signing_id_gen.get_next() as stream_id:
|
||||
self._simple_insert_txn(
|
||||
txn,
|
||||
"e2e_cross_signing_keys",
|
||||
values={
|
||||
"user_id": user_id,
|
||||
"keytype": key_type,
|
||||
"keydata": json.dumps(key),
|
||||
"stream_id": stream_id,
|
||||
},
|
||||
)
|
||||
|
||||
def set_e2e_cross_signing_key(self, user_id, key_type, key):
|
||||
"""Set a user's cross-signing key.
|
||||
|
||||
Args:
|
||||
user_id (str): the user to set the user-signing key for
|
||||
key_type (str): the type of cross-signing key to set
|
||||
key (dict): the key data
|
||||
"""
|
||||
return self.runInteraction(
|
||||
"add_e2e_cross_signing_key",
|
||||
self._set_e2e_cross_signing_key_txn,
|
||||
user_id,
|
||||
key_type,
|
||||
key,
|
||||
)
|
||||
|
||||
def _get_e2e_cross_signing_key_txn(self, txn, user_id, key_type, from_user_id=None):
|
||||
"""Returns a user's cross-signing key.
|
||||
|
||||
Args:
|
||||
txn (twisted.enterprise.adbapi.Connection): db connection
|
||||
user_id (str): the user whose key is being requested
|
||||
key_type (str): the type of key that is being set: either 'master'
|
||||
for a master key, 'self_signing' for a self-signing key, or
|
||||
'user_signing' for a user-signing key
|
||||
from_user_id (str): if specified, signatures made by this user on
|
||||
the key will be included in the result
|
||||
|
||||
Returns:
|
||||
dict of the key data or None if not found
|
||||
"""
|
||||
sql = (
|
||||
"SELECT keydata "
|
||||
" FROM e2e_cross_signing_keys "
|
||||
" WHERE user_id = ? AND keytype = ? ORDER BY stream_id DESC LIMIT 1"
|
||||
)
|
||||
txn.execute(sql, (user_id, key_type))
|
||||
row = txn.fetchone()
|
||||
if not row:
|
||||
return None
|
||||
key = json.loads(row[0])
|
||||
|
||||
device_id = None
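# the "keys" dict holds exactly one entry - the public key - which also
# doubles as the device ID under which the key was stored in the devices
# table (see _set_e2e_cross_signing_key_txn), so this loop just extracts it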
|
||||
for k in key["keys"].values():
|
||||
device_id = k
|
||||
|
||||
if from_user_id is not None:
|
||||
sql = (
|
||||
"SELECT key_id, signature "
|
||||
" FROM e2e_cross_signing_signatures "
|
||||
" WHERE user_id = ? "
|
||||
" AND target_user_id = ? "
|
||||
" AND target_device_id = ? "
|
||||
)
|
||||
txn.execute(sql, (from_user_id, user_id, device_id))
|
||||
row = txn.fetchone()
|
||||
if row:
|
||||
key.setdefault("signatures", {}).setdefault(from_user_id, {})[
|
||||
row[0]
|
||||
] = row[1]
|
||||
|
||||
return key
|
||||
|
||||
def get_e2e_cross_signing_key(self, user_id, key_type, from_user_id=None):
|
||||
"""Returns a user's cross-signing key.
|
||||
|
||||
Args:
|
||||
user_id (str): the user whose self-signing key is being requested
|
||||
key_type (str): the type of cross-signing key to get
|
||||
from_user_id (str): if specified, signatures made by this user on
|
||||
the self-signing key will be included in the result
|
||||
|
||||
Returns:
|
||||
dict of the key data or None if not found
|
||||
"""
|
||||
return self.runInteraction(
|
||||
"get_e2e_cross_signing_key",
|
||||
self._get_e2e_cross_signing_key_txn,
|
||||
user_id,
|
||||
key_type,
|
||||
from_user_id,
|
||||
)
|
||||
|
||||
def store_e2e_cross_signing_signatures(self, user_id, signatures):
|
||||
"""Stores cross-signing signatures.
|
||||
|
||||
Args:
|
||||
user_id (str): the user who made the signatures
|
||||
signatures (iterable[SignatureListItem]): signatures to add
|
||||
"""
|
||||
return self._simple_insert_many(
|
||||
"e2e_cross_signing_signatures",
|
||||
[
|
||||
{
|
||||
"user_id": user_id,
|
||||
"key_id": item.signing_key_id,
|
||||
"target_user_id": item.target_user_id,
|
||||
"target_device_id": item.target_device_id,
|
||||
"signature": item.signature,
|
||||
}
|
||||
for item in signatures
|
||||
],
|
||||
"add_e2e_signing_key",
|
||||
)
|
||||
@@ -26,8 +26,8 @@ from twisted.internet import defer
|
||||
from synapse.api.errors import StoreError
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
|
||||
from synapse.storage.events_worker import EventsWorkerStore
|
||||
from synapse.storage.signatures import SignatureWorkerStore
|
||||
from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
|
||||
from synapse.storage.data_stores.main.signatures import SignatureWorkerStore
|
||||
from synapse.util.caches.descriptors import cached
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -41,9 +41,9 @@ from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.state import StateResolutionStore
|
||||
from synapse.storage._base import make_in_list_sql_clause
|
||||
from synapse.storage.background_updates import BackgroundUpdateStore
|
||||
from synapse.storage.event_federation import EventFederationStore
|
||||
from synapse.storage.events_worker import EventsWorkerStore
|
||||
from synapse.storage.state import StateGroupWorkerStore
|
||||
from synapse.storage.data_stores.main.event_federation import EventFederationStore
|
||||
from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
|
||||
from synapse.storage.data_stores.main.state import StateGroupWorkerStore
|
||||
from synapse.types import RoomStreamToken, get_domain_from_id
|
||||
from synapse.util import batch_iter
|
||||
from synapse.util.async_helpers import ObservableDeferred
|
||||
@@ -16,10 +16,9 @@
|
||||
from canonicaljson import encode_canonical_json
|
||||
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.storage._base import SQLBaseStore, db_to_json
|
||||
from synapse.util.caches.descriptors import cachedInlineCallbacks
|
||||
|
||||
from ._base import SQLBaseStore, db_to_json
|
||||
|
||||
|
||||
class FilteringStore(SQLBaseStore):
|
||||
@cachedInlineCallbacks(num_args=2)
|
||||
@@ -19,8 +19,7 @@ from canonicaljson import json
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import SynapseError
|
||||
|
||||
from ._base import SQLBaseStore
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
|
||||
# The category ID for the "default" category. We don't store as null in the
|
||||
# database to avoid the fun of null != null
|
||||
214
synapse/storage/data_stores/main/keys.py
Normal file
@@ -0,0 +1,214 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2019 New Vector Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import itertools
|
||||
import logging
|
||||
|
||||
import six
|
||||
|
||||
from signedjson.key import decode_verify_key_bytes
|
||||
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
from synapse.storage.keys import FetchKeyResult
|
||||
from synapse.util import batch_iter
|
||||
from synapse.util.caches.descriptors import cached, cachedList
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# py2 sqlite has buffer hardcoded as its only binary type, so we must use it,
|
||||
# despite being deprecated and removed in favor of memoryview
|
||||
if six.PY2:
|
||||
db_binary_type = six.moves.builtins.buffer
|
||||
else:
|
||||
db_binary_type = memoryview
|
||||
|
||||
|
||||
class KeyStore(SQLBaseStore):
|
||||
"""Persistence for signature verification keys
|
||||
"""
|
||||
|
||||
@cached()
|
||||
def _get_server_verify_key(self, server_name_and_key_id):
|
||||
raise NotImplementedError()
|
||||
|
||||
@cachedList(
|
||||
cached_method_name="_get_server_verify_key", list_name="server_name_and_key_ids"
|
||||
)
|
||||
def get_server_verify_keys(self, server_name_and_key_ids):
|
||||
"""
|
||||
Args:
|
||||
server_name_and_key_ids (iterable[Tuple[str, str]]):
|
||||
iterable of (server_name, key-id) tuples to fetch keys for
|
||||
|
||||
Returns:
|
||||
Deferred: resolves to dict[Tuple[str, str], FetchKeyResult|None]:
|
||||
map from (server_name, key_id) -> FetchKeyResult, or None if the key is
|
||||
unknown
|
||||
"""
|
||||
keys = {}
|
||||
|
||||
def _get_keys(txn, batch):
|
||||
"""Processes a batch of keys to fetch, and adds the result to `keys`."""
|
||||
|
||||
# batch_iter always returns tuples so it's safe to do len(batch)
|
||||
sql = (
|
||||
"SELECT server_name, key_id, verify_key, ts_valid_until_ms "
|
||||
"FROM server_signature_keys WHERE 1=0"
|
||||
) + " OR (server_name=? AND key_id=?)" * len(batch)
|
||||
|
||||
txn.execute(sql, tuple(itertools.chain.from_iterable(batch)))
|
||||
|
||||
for row in txn:
|
||||
server_name, key_id, key_bytes, ts_valid_until_ms = row
|
||||
|
||||
if ts_valid_until_ms is None:
|
||||
# Old keys may be stored with a ts_valid_until_ms of null,
|
||||
# in which case we treat this as if it was set to `0`, i.e.
|
||||
# it won't match key requests that define a minimum
|
||||
# `ts_valid_until_ms`.
|
||||
ts_valid_until_ms = 0
|
||||
|
||||
res = FetchKeyResult(
|
||||
verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)),
|
||||
valid_until_ts=ts_valid_until_ms,
|
||||
)
|
||||
keys[(server_name, key_id)] = res
|
||||
|
||||
def _txn(txn):
|
||||
for batch in batch_iter(server_name_and_key_ids, 50):
|
||||
_get_keys(txn, batch)
|
||||
return keys
|
||||
|
||||
return self.runInteraction("get_server_verify_keys", _txn)
|
||||
|
||||
def store_server_verify_keys(self, from_server, ts_added_ms, verify_keys):
|
||||
"""Stores NACL verification keys for remote servers.
|
||||
Args:
|
||||
from_server (str): Where the verification keys were looked up
|
||||
ts_added_ms (int): The time to record that the key was added
|
||||
verify_keys (iterable[tuple[str, str, FetchKeyResult]]):
|
||||
keys to be stored. Each entry is a triplet of
|
||||
(server_name, key_id, key).
|
||||
"""
|
||||
key_values = []
|
||||
value_values = []
|
||||
invalidations = []
|
||||
for server_name, key_id, fetch_result in verify_keys:
|
||||
key_values.append((server_name, key_id))
|
||||
value_values.append(
|
||||
(
|
||||
from_server,
|
||||
ts_added_ms,
|
||||
fetch_result.valid_until_ts,
|
||||
db_binary_type(fetch_result.verify_key.encode()),
|
||||
)
|
||||
)
|
||||
# invalidate takes a tuple corresponding to the params of
|
||||
# _get_server_verify_key. _get_server_verify_key only takes one
|
||||
# param, which is itself the 2-tuple (server_name, key_id).
|
||||
invalidations.append((server_name, key_id))
|
||||
|
||||
def _invalidate(res):
|
||||
f = self._get_server_verify_key.invalidate
|
||||
for i in invalidations:
|
||||
f((i,))
|
||||
return res
|
||||
|
||||
return self.runInteraction(
|
||||
"store_server_verify_keys",
|
||||
self._simple_upsert_many_txn,
|
||||
table="server_signature_keys",
|
||||
key_names=("server_name", "key_id"),
|
||||
key_values=key_values,
|
||||
value_names=(
|
||||
"from_server",
|
||||
"ts_added_ms",
|
||||
"ts_valid_until_ms",
|
||||
"verify_key",
|
||||
),
|
||||
value_values=value_values,
|
||||
).addCallback(_invalidate)
|
||||
|
||||
def store_server_keys_json(
|
||||
self, server_name, key_id, from_server, ts_now_ms, ts_expires_ms, key_json_bytes
|
||||
):
|
||||
"""Stores the JSON bytes for a set of keys from a server
|
||||
The JSON should be signed by the originating server, the intermediate
|
||||
server, and by this server. Updates the value for the
|
||||
(server_name, key_id, from_server) triplet if one already existed.
|
||||
Args:
|
||||
server_name (str): The name of the server.
|
||||
key_id (str): The identifier of the key this JSON is for.
|
||||
from_server (str): The server this JSON was fetched from.
|
||||
ts_now_ms (int): The time now in milliseconds.
|
||||
ts_expires_ms (int): The time when this json stops being valid.
|
||||
key_json_bytes (bytes): The encoded JSON.
|
||||
"""
|
||||
return self._simple_upsert(
|
||||
table="server_keys_json",
|
||||
keyvalues={
|
||||
"server_name": server_name,
|
||||
"key_id": key_id,
|
||||
"from_server": from_server,
|
||||
},
|
||||
values={
|
||||
"server_name": server_name,
|
||||
"key_id": key_id,
|
||||
"from_server": from_server,
|
||||
"ts_added_ms": ts_now_ms,
|
||||
"ts_valid_until_ms": ts_expires_ms,
|
||||
"key_json": db_binary_type(key_json_bytes),
|
||||
},
|
||||
desc="store_server_keys_json",
|
||||
)
|
||||
|
||||
def get_server_keys_json(self, server_keys):
|
||||
Retrieve the key json for a list of server_keys and key ids.
|
||||
If no keys are found for a given server, key_id and source, then
|
||||
the entry for that server, key_id, and source triplet will be an empty list.
|
||||
The JSON is returned as a byte array so that it can be efficiently
|
||||
used in an HTTP response.
|
||||
Args:
|
||||
server_keys (list): List of (server_name, key_id, source) triplets.
|
||||
Returns:
|
||||
Deferred[dict[Tuple[str, str, str|None], list[dict]]]:
|
||||
Dict mapping (server_name, key_id, source) triplets to lists of dicts
|
||||
"""
|
||||
|
||||
def _get_server_keys_json_txn(txn):
|
||||
results = {}
|
||||
for server_name, key_id, from_server in server_keys:
|
||||
keyvalues = {"server_name": server_name}
|
||||
if key_id is not None:
|
||||
keyvalues["key_id"] = key_id
|
||||
if from_server is not None:
|
||||
keyvalues["from_server"] = from_server
|
||||
rows = self._simple_select_list_txn(
|
||||
txn,
|
||||
"server_keys_json",
|
||||
keyvalues=keyvalues,
|
||||
retcols=(
|
||||
"key_id",
|
||||
"from_server",
|
||||
"ts_added_ms",
|
||||
"ts_valid_until_ms",
|
||||
"key_json",
|
||||
),
|
||||
)
|
||||
results[(server_name, key_id, from_server)] = rows
|
||||
return results
|
||||
|
||||
return self.runInteraction("get_server_keys_json", _get_server_keys_json_txn)
|
||||
@@ -16,10 +16,9 @@ import logging
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
from synapse.util.caches.descriptors import cached
|
||||
|
||||
from ._base import SQLBaseStore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Number of msec of granularity to store the monthly_active_user timestamp
|
||||
@@ -1,4 +1,4 @@
|
||||
from ._base import SQLBaseStore
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
|
||||
|
||||
class OpenIdStore(SQLBaseStore):
|
||||
150
synapse/storage/data_stores/main/presence.py
Normal file
@@ -0,0 +1,150 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
|
||||
from synapse.storage.presence import UserPresenceState
|
||||
from synapse.util import batch_iter
|
||||
from synapse.util.caches.descriptors import cached, cachedList
|
||||
|
||||
|
||||
class PresenceStore(SQLBaseStore):
|
||||
@defer.inlineCallbacks
|
||||
def update_presence(self, presence_states):
|
||||
stream_ordering_manager = self._presence_id_gen.get_next_mult(
|
||||
len(presence_states)
|
||||
)
|
||||
|
||||
with stream_ordering_manager as stream_orderings:
|
||||
yield self.runInteraction(
|
||||
"update_presence",
|
||||
self._update_presence_txn,
|
||||
stream_orderings,
|
||||
presence_states,
|
||||
)
|
||||
|
||||
return stream_orderings[-1], self._presence_id_gen.get_current_token()
|
||||
|
||||
def _update_presence_txn(self, txn, stream_orderings, presence_states):
|
||||
for stream_id, state in zip(stream_orderings, presence_states):
|
||||
txn.call_after(
|
||||
self.presence_stream_cache.entity_has_changed, state.user_id, stream_id
|
||||
)
|
||||
txn.call_after(self._get_presence_for_user.invalidate, (state.user_id,))
|
||||
|
||||
# Actually insert new rows
|
||||
self._simple_insert_many_txn(
|
||||
txn,
|
||||
table="presence_stream",
|
||||
values=[
|
||||
{
|
||||
"stream_id": stream_id,
|
||||
"user_id": state.user_id,
|
||||
"state": state.state,
|
||||
"last_active_ts": state.last_active_ts,
|
||||
"last_federation_update_ts": state.last_federation_update_ts,
|
||||
"last_user_sync_ts": state.last_user_sync_ts,
|
||||
"status_msg": state.status_msg,
|
||||
"currently_active": state.currently_active,
|
||||
}
|
||||
for state in presence_states
|
||||
],
|
||||
)
|
||||
|
||||
# Delete old rows to stop database from getting really big
|
||||
sql = "DELETE FROM presence_stream WHERE stream_id < ? AND "
|
||||
|
||||
for states in batch_iter(presence_states, 50):
|
||||
clause, args = make_in_list_sql_clause(
|
||||
self.database_engine, "user_id", [s.user_id for s in states]
|
||||
)
|
||||
txn.execute(sql + clause, [stream_id] + list(args))
|
||||
|
||||
def get_all_presence_updates(self, last_id, current_id):
|
||||
if last_id == current_id:
|
||||
return defer.succeed([])
|
||||
|
||||
def get_all_presence_updates_txn(txn):
|
||||
sql = (
|
||||
"SELECT stream_id, user_id, state, last_active_ts,"
|
||||
" last_federation_update_ts, last_user_sync_ts, status_msg,"
|
||||
" currently_active"
|
||||
" FROM presence_stream"
|
||||
" WHERE ? < stream_id AND stream_id <= ?"
|
||||
)
|
||||
txn.execute(sql, (last_id, current_id))
|
||||
return txn.fetchall()
|
||||
|
||||
return self.runInteraction(
|
||||
"get_all_presence_updates", get_all_presence_updates_txn
|
||||
)
|
||||
|
||||
@cached()
|
||||
def _get_presence_for_user(self, user_id):
|
||||
raise NotImplementedError()
|
||||
|
||||
@cachedList(
|
||||
cached_method_name="_get_presence_for_user",
|
||||
list_name="user_ids",
|
||||
num_args=1,
|
||||
inlineCallbacks=True,
|
||||
)
|
||||
def get_presence_for_users(self, user_ids):
|
||||
rows = yield self._simple_select_many_batch(
|
||||
table="presence_stream",
|
||||
column="user_id",
|
||||
iterable=user_ids,
|
||||
keyvalues={},
|
||||
retcols=(
|
||||
"user_id",
|
||||
"state",
|
||||
"last_active_ts",
|
||||
"last_federation_update_ts",
|
||||
"last_user_sync_ts",
|
||||
"status_msg",
|
||||
"currently_active",
|
||||
),
|
||||
desc="get_presence_for_users",
|
||||
)
|
||||
|
||||
for row in rows:
|
||||
row["currently_active"] = bool(row["currently_active"])
|
||||
|
||||
return {row["user_id"]: UserPresenceState(**row) for row in rows}
|
||||
|
||||
def get_current_presence_token(self):
|
||||
return self._presence_id_gen.get_current_token()
|
||||
|
||||
def allow_presence_visible(self, observed_localpart, observer_userid):
|
||||
return self._simple_insert(
|
||||
table="presence_allow_inbound",
|
||||
values={
|
||||
"observed_user_id": observed_localpart,
|
||||
"observer_user_id": observer_userid,
|
||||
},
|
||||
desc="allow_presence_visible",
|
||||
or_ignore=True,
|
||||
)
|
||||
|
||||
def disallow_presence_visible(self, observed_localpart, observer_userid):
|
||||
return self._simple_delete_one(
|
||||
table="presence_allow_inbound",
|
||||
keyvalues={
|
||||
"observed_user_id": observed_localpart,
|
||||
"observer_user_id": observer_userid,
|
||||
},
|
||||
desc="disallow_presence_visible",
|
||||
)
|
||||
@@ -16,9 +16,8 @@
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import StoreError
|
||||
from synapse.storage.roommember import ProfileInfo
|
||||
|
||||
from ._base import SQLBaseStore
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
from synapse.storage.data_stores.main.roommember import ProfileInfo
|
||||
|
||||
|
||||
class ProfileWorkerStore(SQLBaseStore):
|
||||
713
synapse/storage/data_stores/main/push_rule.py
Normal file
@@ -0,0 +1,713 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import abc
|
||||
import logging
|
||||
|
||||
from canonicaljson import json
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.push.baserules import list_with_base_rules
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
from synapse.storage.data_stores.main.appservice import ApplicationServiceWorkerStore
|
||||
from synapse.storage.data_stores.main.pusher import PusherWorkerStore
|
||||
from synapse.storage.data_stores.main.receipts import ReceiptsWorkerStore
|
||||
from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore
|
||||
from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
|
||||
from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _load_rules(rawrules, enabled_map):
|
||||
ruleslist = []
|
||||
for rawrule in rawrules:
|
||||
rule = dict(rawrule)
|
||||
rule["conditions"] = json.loads(rawrule["conditions"])
|
||||
rule["actions"] = json.loads(rawrule["actions"])
|
||||
ruleslist.append(rule)
|
||||
|
||||
# We're going to be mutating this a lot, so do a deep copy
|
||||
rules = list(list_with_base_rules(ruleslist))
|
||||
|
||||
for i, rule in enumerate(rules):
|
||||
rule_id = rule["rule_id"]
|
||||
if rule_id in enabled_map:
|
||||
if rule.get("enabled", True) != bool(enabled_map[rule_id]):
|
||||
# Rules are cached across users.
|
||||
rule = dict(rule)
|
||||
rule["enabled"] = bool(enabled_map[rule_id])
|
||||
rules[i] = rule
|
||||
|
||||
return rules
|
||||
|
||||
|
||||
class PushRulesWorkerStore(
|
||||
ApplicationServiceWorkerStore,
|
||||
ReceiptsWorkerStore,
|
||||
PusherWorkerStore,
|
||||
RoomMemberWorkerStore,
|
||||
SQLBaseStore,
|
||||
):
|
||||
"""This is an abstract base class where subclasses must implement
|
||||
`get_max_push_rules_stream_id` which can be called in the initializer.
|
||||
"""
|
||||
|
||||
# This ABCMeta metaclass ensures that we cannot be instantiated without
|
||||
# the abstract methods being implemented.
|
||||
__metaclass__ = abc.ABCMeta
|
||||
|
||||
def __init__(self, db_conn, hs):
|
||||
super(PushRulesWorkerStore, self).__init__(db_conn, hs)
|
||||
|
||||
push_rules_prefill, push_rules_id = self._get_cache_dict(
|
||||
db_conn,
|
||||
"push_rules_stream",
|
||||
entity_column="user_id",
|
||||
stream_column="stream_id",
|
||||
max_value=self.get_max_push_rules_stream_id(),
|
||||
)
|
||||
|
||||
self.push_rules_stream_cache = StreamChangeCache(
|
||||
"PushRulesStreamChangeCache",
|
||||
push_rules_id,
|
||||
prefilled_cache=push_rules_prefill,
|
||||
)
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_max_push_rules_stream_id(self):
|
||||
"""Get the position of the push rules stream.
|
||||
|
||||
Returns:
|
||||
int
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
@cachedInlineCallbacks(max_entries=5000)
|
||||
def get_push_rules_for_user(self, user_id):
|
||||
rows = yield self._simple_select_list(
|
||||
table="push_rules",
|
||||
keyvalues={"user_name": user_id},
|
||||
retcols=(
|
||||
"user_name",
|
||||
"rule_id",
|
||||
"priority_class",
|
||||
"priority",
|
||||
"conditions",
|
||||
"actions",
|
||||
),
|
||||
desc="get_push_rules_enabled_for_user",
|
||||
)
|
||||
|
||||
rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"])))
|
||||
|
||||
enabled_map = yield self.get_push_rules_enabled_for_user(user_id)
|
||||
|
||||
rules = _load_rules(rows, enabled_map)
|
||||
|
||||
return rules
|
||||
|
||||
@cachedInlineCallbacks(max_entries=5000)
|
||||
def get_push_rules_enabled_for_user(self, user_id):
|
||||
results = yield self._simple_select_list(
|
||||
table="push_rules_enable",
|
||||
keyvalues={"user_name": user_id},
|
||||
retcols=("user_name", "rule_id", "enabled"),
|
||||
desc="get_push_rules_enabled_for_user",
|
||||
)
|
||||
return {r["rule_id"]: False if r["enabled"] == 0 else True for r in results}
|
||||
|
||||
def have_push_rules_changed_for_user(self, user_id, last_id):
|
||||
if not self.push_rules_stream_cache.has_entity_changed(user_id, last_id):
|
||||
return defer.succeed(False)
|
||||
else:
|
||||
|
||||
def have_push_rules_changed_txn(txn):
|
||||
sql = (
|
||||
"SELECT COUNT(stream_id) FROM push_rules_stream"
|
||||
" WHERE user_id = ? AND ? < stream_id"
|
||||
)
|
||||
txn.execute(sql, (user_id, last_id))
|
||||
count, = txn.fetchone()
|
||||
return bool(count)
|
||||
|
||||
return self.runInteraction(
|
||||
"have_push_rules_changed", have_push_rules_changed_txn
|
||||
)
|
||||
|
||||
@cachedList(
|
||||
cached_method_name="get_push_rules_for_user",
|
||||
list_name="user_ids",
|
||||
num_args=1,
|
||||
inlineCallbacks=True,
|
||||
)
|
||||
def bulk_get_push_rules(self, user_ids):
|
||||
if not user_ids:
|
||||
return {}
|
||||
|
||||
results = {user_id: [] for user_id in user_ids}
|
||||
|
||||
rows = yield self._simple_select_many_batch(
|
||||
table="push_rules",
|
||||
column="user_name",
|
||||
iterable=user_ids,
|
||||
retcols=("*",),
|
||||
desc="bulk_get_push_rules",
|
||||
)
|
||||
|
||||
rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"])))
|
||||
|
||||
for row in rows:
|
||||
results.setdefault(row["user_name"], []).append(row)
|
||||
|
||||
enabled_map_by_user = yield self.bulk_get_push_rules_enabled(user_ids)
|
||||
|
||||
for user_id, rules in results.items():
|
||||
results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {}))
|
||||
|
||||
return results
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def copy_push_rule_from_room_to_room(self, new_room_id, user_id, rule):
|
||||
"""Copy a single push rule from one room to another for a specific user.
|
||||
|
||||
Args:
|
||||
new_room_id (str): ID of the new room.
|
||||
user_id (str): ID of user the push rule belongs to.
|
||||
rule (Dict): A push rule.
|
||||
"""
|
||||
# Create new rule id
|
||||
rule_id_scope = "/".join(rule["rule_id"].split("/")[:-1])
|
||||
new_rule_id = rule_id_scope + "/" + new_room_id
|
||||
|
||||
# Change room id in each condition
|
||||
for condition in rule.get("conditions", []):
|
||||
if condition.get("key") == "room_id":
|
||||
condition["pattern"] = new_room_id
|
||||
|
||||
# Add the rule for the new room
|
||||
yield self.add_push_rule(
|
||||
user_id=user_id,
|
||||
rule_id=new_rule_id,
|
||||
priority_class=rule["priority_class"],
|
||||
conditions=rule["conditions"],
|
||||
actions=rule["actions"],
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def copy_push_rules_from_room_to_room_for_user(
|
||||
self, old_room_id, new_room_id, user_id
|
||||
):
|
||||
"""Copy all of the push rules from one room to another for a specific
|
||||
user.
|
||||
|
||||
Args:
|
||||
old_room_id (str): ID of the old room.
|
||||
new_room_id (str): ID of the new room.
|
||||
user_id (str): ID of user to copy push rules for.
|
||||
"""
|
||||
# Retrieve push rules for this user
|
||||
user_push_rules = yield self.get_push_rules_for_user(user_id)
|
||||
|
||||
# Get rules relating to the old room and copy them to the new room
|
||||
for rule in user_push_rules:
|
||||
conditions = rule.get("conditions", [])
|
||||
if any(
|
||||
(c.get("key") == "room_id" and c.get("pattern") == old_room_id)
|
||||
for c in conditions
|
||||
):
|
||||
yield self.copy_push_rule_from_room_to_room(new_room_id, user_id, rule)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def bulk_get_push_rules_for_room(self, event, context):
|
||||
state_group = context.state_group
|
||||
if not state_group:
|
||||
# If state_group is None it means it has yet to be assigned a
|
||||
# state group, i.e. we need to make sure that calls with a state_group
|
||||
# of None don't hit previous cached calls with a None state_group.
|
||||
# To do this we set the state_group to a new object as object() != object()
|
||||
state_group = object()
|
||||
|
||||
current_state_ids = yield context.get_current_state_ids(self)
|
||||
result = yield self._bulk_get_push_rules_for_room(
|
||||
event.room_id, state_group, current_state_ids, event=event
|
||||
)
|
||||
return result
|
||||
|
||||
@cachedInlineCallbacks(num_args=2, cache_context=True)
|
||||
def _bulk_get_push_rules_for_room(
|
||||
self, room_id, state_group, current_state_ids, cache_context, event=None
|
||||
):
|
||||
# We don't use `state_group`; it's there so that we can cache based
|
||||
# on it. However, it's important that it's never None, since two current_states
|
||||
# with a state_group of None are likely to be different.
|
||||
# See bulk_get_push_rules_for_room for how we work around this.
|
||||
assert state_group is not None
|
||||
|
||||
# We will also want to generate notifs for other people in the room so
|
||||
# their unread counts are correct in the event stream, but to avoid
|
||||
# generating them for bot / AS users etc, we only do so for people who've
|
||||
# sent a read receipt into the room.
|
||||
|
||||
users_in_room = yield self._get_joined_users_from_context(
|
||||
room_id,
|
||||
state_group,
|
||||
current_state_ids,
|
||||
on_invalidate=cache_context.invalidate,
|
||||
event=event,
|
||||
)
|
||||
|
||||
# We ignore app service users for now. This is so that we don't fill
|
||||
# up the `get_if_users_have_pushers` cache with AS entries that we
|
||||
# know don't have pushers, nor even read receipts.
|
||||
local_users_in_room = set(
|
||||
u
|
||||
for u in users_in_room
|
||||
if self.hs.is_mine_id(u)
|
||||
and not self.get_if_app_services_interested_in_user(u)
|
||||
)
|
||||
|
||||
# users in the room who have pushers need to get push rules run because
|
||||
# that's how their pushers work
|
||||
if_users_with_pushers = yield self.get_if_users_have_pushers(
|
||||
local_users_in_room, on_invalidate=cache_context.invalidate
|
||||
)
|
||||
user_ids = set(
|
||||
uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher
|
||||
)
|
||||
|
||||
users_with_receipts = yield self.get_users_with_read_receipts_in_room(
|
||||
room_id, on_invalidate=cache_context.invalidate
|
||||
)
|
||||
|
||||
# any users with pushers must be ours: they have pushers
|
||||
for uid in users_with_receipts:
|
||||
if uid in local_users_in_room:
|
||||
user_ids.add(uid)
|
||||
|
||||
rules_by_user = yield self.bulk_get_push_rules(
|
||||
user_ids, on_invalidate=cache_context.invalidate
|
||||
)
|
||||
|
||||
rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None}
|
||||
|
||||
return rules_by_user
|
||||
|
||||
@cachedList(
|
||||
cached_method_name="get_push_rules_enabled_for_user",
|
||||
list_name="user_ids",
|
||||
num_args=1,
|
||||
inlineCallbacks=True,
|
||||
)
|
||||
def bulk_get_push_rules_enabled(self, user_ids):
|
||||
if not user_ids:
|
||||
return {}
|
||||
|
||||
results = {user_id: {} for user_id in user_ids}
|
||||
|
||||
rows = yield self._simple_select_many_batch(
|
||||
table="push_rules_enable",
|
||||
column="user_name",
|
||||
iterable=user_ids,
|
||||
retcols=("user_name", "rule_id", "enabled"),
|
||||
desc="bulk_get_push_rules_enabled",
|
||||
)
|
||||
for row in rows:
|
||||
enabled = bool(row["enabled"])
|
||||
results.setdefault(row["user_name"], {})[row["rule_id"]] = enabled
|
||||
return results
|
||||
|
||||
|
||||
class PushRuleStore(PushRulesWorkerStore):
|
||||
@defer.inlineCallbacks
|
||||
def add_push_rule(
|
||||
self,
|
||||
user_id,
|
||||
rule_id,
|
||||
priority_class,
|
||||
conditions,
|
||||
actions,
|
||||
before=None,
|
||||
after=None,
|
||||
):
|
||||
conditions_json = json.dumps(conditions)
|
||||
actions_json = json.dumps(actions)
|
||||
with self._push_rules_stream_id_gen.get_next() as ids:
|
||||
stream_id, event_stream_ordering = ids
|
||||
if before or after:
|
||||
yield self.runInteraction(
|
||||
"_add_push_rule_relative_txn",
|
||||
self._add_push_rule_relative_txn,
|
||||
stream_id,
|
||||
event_stream_ordering,
|
||||
user_id,
|
||||
rule_id,
|
||||
priority_class,
|
||||
conditions_json,
|
||||
actions_json,
|
||||
before,
|
||||
after,
|
||||
)
|
||||
else:
|
||||
yield self.runInteraction(
|
||||
"_add_push_rule_highest_priority_txn",
|
||||
self._add_push_rule_highest_priority_txn,
|
||||
stream_id,
|
||||
event_stream_ordering,
|
||||
user_id,
|
||||
rule_id,
|
||||
priority_class,
|
||||
conditions_json,
|
||||
actions_json,
|
||||
)
|
||||
|
||||
def _add_push_rule_relative_txn(
|
||||
self,
|
||||
txn,
|
||||
stream_id,
|
||||
event_stream_ordering,
|
||||
user_id,
|
||||
rule_id,
|
||||
priority_class,
|
||||
conditions_json,
|
||||
actions_json,
|
||||
before,
|
||||
after,
|
||||
):
|
||||
# Lock the table since otherwise we'll have annoying races between the
|
||||
# SELECT here and the UPSERT below.
|
||||
self.database_engine.lock_table(txn, "push_rules")
|
||||
|
||||
relative_to_rule = before or after
|
||||
|
||||
res = self._simple_select_one_txn(
|
||||
txn,
|
||||
table="push_rules",
|
||||
keyvalues={"user_name": user_id, "rule_id": relative_to_rule},
|
||||
retcols=["priority_class", "priority"],
|
||||
allow_none=True,
|
||||
)
|
||||
|
||||
if not res:
|
||||
raise RuleNotFoundException(
|
||||
"before/after rule not found: %s" % (relative_to_rule,)
|
||||
)
|
||||
|
||||
base_priority_class = res["priority_class"]
|
||||
base_rule_priority = res["priority"]
|
||||
|
||||
if base_priority_class != priority_class:
|
||||
raise InconsistentRuleException(
|
||||
"Given priority class does not match class of relative rule"
|
||||
)
|
||||
|
||||
if before:
|
||||
# Higher priority rules are executed first, so adding a rule before
|
||||
# a rule means giving it a higher priority than that rule.
|
||||
new_rule_priority = base_rule_priority + 1
|
||||
else:
|
||||
# We increment the priority of the existing rules to make space for
|
||||
# the new rule. Therefore if we want this rule to appear after
|
||||
# an existing rule we give it the priority of the existing rule,
|
||||
# and then increment the priority of the existing rule.
|
||||
new_rule_priority = base_rule_priority
|
||||
|
||||
sql = (
|
||||
"UPDATE push_rules SET priority = priority + 1"
|
||||
" WHERE user_name = ? AND priority_class = ? AND priority >= ?"
|
||||
)
|
||||
|
||||
txn.execute(sql, (user_id, priority_class, new_rule_priority))
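# Worked example (illustrative): suppose relative_to_rule has priority 5.
# With `before`, the new rule gets priority 6 and anything already at >= 6 is
# shifted up, so the new rule runs before the existing one. With `after`, the
# new rule takes priority 5 while the existing rule (and anything at >= 5) is
# shifted to 6+, so the existing rule still runs first and the new rule after.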
|
||||
|
||||
self._upsert_push_rule_txn(
|
||||
txn,
|
||||
stream_id,
|
||||
event_stream_ordering,
|
||||
user_id,
|
||||
rule_id,
|
||||
priority_class,
|
||||
new_rule_priority,
|
||||
conditions_json,
|
||||
actions_json,
|
||||
)

    def _add_push_rule_highest_priority_txn(
        self,
        txn,
        stream_id,
        event_stream_ordering,
        user_id,
        rule_id,
        priority_class,
        conditions_json,
        actions_json,
    ):
        # Lock the table since otherwise we'll have annoying races between the
        # SELECT here and the UPSERT below.
        self.database_engine.lock_table(txn, "push_rules")

        # find the highest priority rule in that class
        sql = (
            "SELECT COUNT(*), MAX(priority) FROM push_rules"
            " WHERE user_name = ? and priority_class = ?"
        )
        txn.execute(sql, (user_id, priority_class))
        res = txn.fetchall()
        (how_many, highest_prio) = res[0]

        new_prio = 0
        if how_many > 0:
            new_prio = highest_prio + 1

        self._upsert_push_rule_txn(
            txn,
            stream_id,
            event_stream_ordering,
            user_id,
            rule_id,
            priority_class,
            new_prio,
            conditions_json,
            actions_json,
        )

    def _upsert_push_rule_txn(
        self,
        txn,
        stream_id,
        event_stream_ordering,
        user_id,
        rule_id,
        priority_class,
        priority,
        conditions_json,
        actions_json,
        update_stream=True,
    ):
        """Specialised version of _simple_upsert_txn that picks a push_rule_id
        using the _push_rule_id_gen if it needs to insert the rule. It assumes
        that the "push_rules" table is locked"""

        sql = (
            "UPDATE push_rules"
            " SET priority_class = ?, priority = ?, conditions = ?, actions = ?"
            " WHERE user_name = ? AND rule_id = ?"
        )

        txn.execute(
            sql,
            (priority_class, priority, conditions_json, actions_json, user_id, rule_id),
        )

        if txn.rowcount == 0:
            # We didn't update a row with the given rule_id so insert one
            push_rule_id = self._push_rule_id_gen.get_next()

            self._simple_insert_txn(
                txn,
                table="push_rules",
                values={
                    "id": push_rule_id,
                    "user_name": user_id,
                    "rule_id": rule_id,
                    "priority_class": priority_class,
                    "priority": priority,
                    "conditions": conditions_json,
                    "actions": actions_json,
                },
            )

        if update_stream:
            self._insert_push_rules_update_txn(
                txn,
                stream_id,
                event_stream_ordering,
                user_id,
                rule_id,
                op="ADD",
                data={
                    "priority_class": priority_class,
                    "priority": priority,
                    "conditions": conditions_json,
                    "actions": actions_json,
                },
            )
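The method above emulates an upsert with an UPDATE followed by a conditional INSERT, which is only race-free because the caller locked the push_rules table earlier in the transaction. A generic sketch of that pattern (hypothetical helper, not Synapse API):

def update_or_insert(txn, update_sql, update_args, do_insert):
    # Try the UPDATE first; txn.rowcount says whether any row matched.
    # Only fall back to inserting when nothing was updated. Without a
    # table lock or a native UPSERT, two concurrent writers could both
    # see rowcount == 0 and insert duplicate rows.
    txn.execute(update_sql, update_args)
    if txn.rowcount == 0:
        do_insert(txn)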

    @defer.inlineCallbacks
    def delete_push_rule(self, user_id, rule_id):
        """
        Delete a push rule. Args specify the row to be deleted and can be
        any of the columns in the push_rules table, but the standard ones
        are listed below.

        Args:
            user_id (str): The matrix ID of the push rule owner
            rule_id (str): The rule_id of the rule to be deleted
        """

        def delete_push_rule_txn(txn, stream_id, event_stream_ordering):
            self._simple_delete_one_txn(
                txn, "push_rules", {"user_name": user_id, "rule_id": rule_id}
            )

            self._insert_push_rules_update_txn(
                txn, stream_id, event_stream_ordering, user_id, rule_id, op="DELETE"
            )

        with self._push_rules_stream_id_gen.get_next() as ids:
            stream_id, event_stream_ordering = ids
            yield self.runInteraction(
                "delete_push_rule",
                delete_push_rule_txn,
                stream_id,
                event_stream_ordering,
            )

    @defer.inlineCallbacks
    def set_push_rule_enabled(self, user_id, rule_id, enabled):
        with self._push_rules_stream_id_gen.get_next() as ids:
            stream_id, event_stream_ordering = ids
            yield self.runInteraction(
                "_set_push_rule_enabled_txn",
                self._set_push_rule_enabled_txn,
                stream_id,
                event_stream_ordering,
                user_id,
                rule_id,
                enabled,
            )

    def _set_push_rule_enabled_txn(
        self, txn, stream_id, event_stream_ordering, user_id, rule_id, enabled
    ):
        new_id = self._push_rules_enable_id_gen.get_next()
        self._simple_upsert_txn(
            txn,
            "push_rules_enable",
            {"user_name": user_id, "rule_id": rule_id},
            {"enabled": 1 if enabled else 0},
            {"id": new_id},
        )

        self._insert_push_rules_update_txn(
            txn,
            stream_id,
            event_stream_ordering,
            user_id,
            rule_id,
            op="ENABLE" if enabled else "DISABLE",
        )

    @defer.inlineCallbacks
    def set_push_rule_actions(self, user_id, rule_id, actions, is_default_rule):
        actions_json = json.dumps(actions)

        def set_push_rule_actions_txn(txn, stream_id, event_stream_ordering):
            if is_default_rule:
                # Add a dummy rule to the rules table with the user specified
                # actions.
                priority_class = -1
                priority = 1
                self._upsert_push_rule_txn(
                    txn,
                    stream_id,
                    event_stream_ordering,
                    user_id,
                    rule_id,
                    priority_class,
                    priority,
                    "[]",
                    actions_json,
                    update_stream=False,
                )
            else:
                self._simple_update_one_txn(
                    txn,
                    "push_rules",
                    {"user_name": user_id, "rule_id": rule_id},
                    {"actions": actions_json},
                )

            self._insert_push_rules_update_txn(
                txn,
                stream_id,
                event_stream_ordering,
                user_id,
                rule_id,
                op="ACTIONS",
                data={"actions": actions_json},
            )

        with self._push_rules_stream_id_gen.get_next() as ids:
            stream_id, event_stream_ordering = ids
            yield self.runInteraction(
                "set_push_rule_actions",
                set_push_rule_actions_txn,
                stream_id,
                event_stream_ordering,
            )

    def _insert_push_rules_update_txn(
        self, txn, stream_id, event_stream_ordering, user_id, rule_id, op, data=None
    ):
        values = {
            "stream_id": stream_id,
            "event_stream_ordering": event_stream_ordering,
            "user_id": user_id,
            "rule_id": rule_id,
            "op": op,
        }
        if data is not None:
            values.update(data)

        self._simple_insert_txn(txn, "push_rules_stream", values=values)

        txn.call_after(self.get_push_rules_for_user.invalidate, (user_id,))
        txn.call_after(self.get_push_rules_enabled_for_user.invalidate, (user_id,))
        txn.call_after(
            self.push_rules_stream_cache.entity_has_changed, user_id, stream_id
        )

    def get_all_push_rule_updates(self, last_id, current_id, limit):
        """Get all the push rules changes that have happened on the server"""
        if last_id == current_id:
            return defer.succeed([])

        def get_all_push_rule_updates_txn(txn):
            sql = (
                "SELECT stream_id, event_stream_ordering, user_id, rule_id,"
                " op, priority_class, priority, conditions, actions"
                " FROM push_rules_stream"
                " WHERE ? < stream_id AND stream_id <= ?"
                " ORDER BY stream_id ASC LIMIT ?"
            )
            txn.execute(sql, (last_id, current_id, limit))
            return txn.fetchall()

        return self.runInteraction(
            "get_all_push_rule_updates", get_all_push_rule_updates_txn
        )

    def get_push_rules_stream_token(self):
        """Get the position of the push rules stream.

        Returns a pair of a stream id for the push_rules stream and the
        room stream ordering it corresponds to."""
        return self._push_rules_stream_id_gen.get_current_token()

    def get_max_push_rules_stream_id(self):
        return self.get_push_rules_stream_token()[0]
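A sketch of how a caller might drain these updates (hypothetical consumer; `handle_row` is a placeholder): page through push_rules_stream from a previously seen stream ID up to the current token, `batch_size` rows at a time.

@defer.inlineCallbacks
def drain_push_rule_updates(store, last_id, handle_row, batch_size=100):
    # Rows come back as (stream_id, event_stream_ordering, user_id, rule_id,
    # op, priority_class, priority, conditions, actions), ordered by stream_id.
    current_id = store.get_max_push_rules_stream_id()
    while last_id < current_id:
        rows = yield store.get_all_push_rule_updates(last_id, current_id, batch_size)
        if not rows:
            break
        for row in rows:
            handle_row(row)
        last_id = rows[-1][0]
    return last_id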
@@ -22,10 +22,9 @@ from canonicaljson import encode_canonical_json, json

from twisted.internet import defer

from synapse.storage._base import SQLBaseStore
from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList

from ._base import SQLBaseStore

logger = logging.getLogger(__name__)

if six.PY2:
@@ -15,7 +15,7 @@

import logging

from ._base import SQLBaseStore
from synapse.storage._base import SQLBaseStore

logger = logging.getLogger(__name__)
385
synapse/storage/data_stores/main/relations.py
Normal file
@@ -0,0 +1,385 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import attr

from synapse.api.constants import RelationTypes
from synapse.storage._base import SQLBaseStore
from synapse.storage.data_stores.main.stream import generate_pagination_where_clause
from synapse.storage.relations import (
    AggregationPaginationToken,
    PaginationChunk,
    RelationPaginationToken,
)
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks

logger = logging.getLogger(__name__)


class RelationsWorkerStore(SQLBaseStore):
    @cached(tree=True)
    def get_relations_for_event(
        self,
        event_id,
        relation_type=None,
        event_type=None,
        aggregation_key=None,
        limit=5,
        direction="b",
        from_token=None,
        to_token=None,
    ):
        """Get a list of relations for an event, ordered by topological ordering.

        Args:
            event_id (str): Fetch events that relate to this event ID.
            relation_type (str|None): Only fetch events with this relation
                type, if given.
            event_type (str|None): Only fetch events with this event type, if
                given.
            aggregation_key (str|None): Only fetch events with this aggregation
                key, if given.
            limit (int): Only fetch the most recent `limit` events.
            direction (str): Whether to fetch the most recent first (`"b"`) or
                the oldest first (`"f"`).
            from_token (RelationPaginationToken|None): Fetch rows from the given
                token, or from the start if None.
            to_token (RelationPaginationToken|None): Fetch rows up to the given
                token, or up to the end if None.

        Returns:
            Deferred[PaginationChunk]: List of event IDs that match relations
            requested. The rows are of the form `{"event_id": "..."}`.
        """

        where_clause = ["relates_to_id = ?"]
        where_args = [event_id]

        if relation_type is not None:
            where_clause.append("relation_type = ?")
            where_args.append(relation_type)

        if event_type is not None:
            where_clause.append("type = ?")
            where_args.append(event_type)

        if aggregation_key:
            where_clause.append("aggregation_key = ?")
            where_args.append(aggregation_key)

        pagination_clause = generate_pagination_where_clause(
            direction=direction,
            column_names=("topological_ordering", "stream_ordering"),
            from_token=attr.astuple(from_token) if from_token else None,
            to_token=attr.astuple(to_token) if to_token else None,
            engine=self.database_engine,
        )

        if pagination_clause:
            where_clause.append(pagination_clause)

        if direction == "b":
            order = "DESC"
        else:
            order = "ASC"

        sql = """
            SELECT event_id, topological_ordering, stream_ordering
            FROM event_relations
            INNER JOIN events USING (event_id)
            WHERE %s
            ORDER BY topological_ordering %s, stream_ordering %s
            LIMIT ?
        """ % (
            " AND ".join(where_clause),
            order,
            order,
        )

        def _get_recent_references_for_event_txn(txn):
            txn.execute(sql, where_args + [limit + 1])

            last_topo_id = None
            last_stream_id = None
            events = []
            for row in txn:
                events.append({"event_id": row[0]})
                last_topo_id = row[1]
                last_stream_id = row[2]

            next_batch = None
            if len(events) > limit and last_topo_id and last_stream_id:
                next_batch = RelationPaginationToken(last_topo_id, last_stream_id)

            return PaginationChunk(
                chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token
            )

        return self.runInteraction(
            "get_recent_references_for_event", _get_recent_references_for_event_txn
        )
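A sketch of paginating through every relation for an event by following the `next_batch` token on the returned PaginationChunk (hypothetical helper built on the method above):

@defer.inlineCallbacks
def iter_all_relations(store, event_id, relation_type=None, page_size=50):
    # Collect related event IDs page by page until no next_batch token
    # is returned, i.e. until the final page has been read.
    event_ids = []
    from_token = None
    while True:
        chunk = yield store.get_relations_for_event(
            event_id,
            relation_type=relation_type,
            limit=page_size,
            from_token=from_token,
        )
        event_ids.extend(row["event_id"] for row in chunk.chunk)
        if not chunk.next_batch:
            break
        from_token = chunk.next_batch
    return event_ids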

    @cached(tree=True)
    def get_aggregation_groups_for_event(
        self,
        event_id,
        event_type=None,
        limit=5,
        direction="b",
        from_token=None,
        to_token=None,
    ):
        """Get a list of annotations on the event, grouped by event type and
        aggregation key, sorted by count.

        This is used e.g. to get which reactions have happened on an event and
        how many of each there are.

        Args:
            event_id (str): Fetch events that relate to this event ID.
            event_type (str|None): Only fetch events with this event type, if
                given.
            limit (int): Only fetch the `limit` groups.
            direction (str): Whether to fetch the highest count first (`"b"`) or
                the lowest count first (`"f"`).
            from_token (AggregationPaginationToken|None): Fetch rows from the
                given token, or from the start if None.
            to_token (AggregationPaginationToken|None): Fetch rows up to the
                given token, or up to the end if None.

        Returns:
            Deferred[PaginationChunk]: List of groups of annotations that
            match. Each row is a dict with `type`, `key` and `count` fields.
        """

        where_clause = ["relates_to_id = ?", "relation_type = ?"]
        where_args = [event_id, RelationTypes.ANNOTATION]

        if event_type:
            where_clause.append("type = ?")
            where_args.append(event_type)

        having_clause = generate_pagination_where_clause(
            direction=direction,
            column_names=("COUNT(*)", "MAX(stream_ordering)"),
            from_token=attr.astuple(from_token) if from_token else None,
            to_token=attr.astuple(to_token) if to_token else None,
            engine=self.database_engine,
        )

        if direction == "b":
            order = "DESC"
        else:
            order = "ASC"

        if having_clause:
            having_clause = "HAVING " + having_clause
        else:
            having_clause = ""

        sql = """
            SELECT type, aggregation_key, COUNT(DISTINCT sender), MAX(stream_ordering)
            FROM event_relations
            INNER JOIN events USING (event_id)
            WHERE {where_clause}
            GROUP BY relation_type, type, aggregation_key
            {having_clause}
            ORDER BY COUNT(*) {order}, MAX(stream_ordering) {order}
            LIMIT ?
        """.format(
            where_clause=" AND ".join(where_clause),
            order=order,
            having_clause=having_clause,
        )

        def _get_aggregation_groups_for_event_txn(txn):
            txn.execute(sql, where_args + [limit + 1])

            next_batch = None
            events = []
            for row in txn:
                events.append({"type": row[0], "key": row[1], "count": row[2]})
                next_batch = AggregationPaginationToken(row[2], row[3])

            if len(events) <= limit:
                next_batch = None

            return PaginationChunk(
                chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token
            )

        return self.runInteraction(
            "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn
        )

    @cachedInlineCallbacks()
    def get_applicable_edit(self, event_id):
        """Get the most recent edit (if any) that has happened for the given
        event.

        Correctly handles checking whether edits were allowed to happen.

        Args:
            event_id (str): The original event ID

        Returns:
            Deferred[EventBase|None]: Returns the most recent edit, if any.
        """

        # We only allow edits for `m.room.message` events that have the same sender
        # and event type. We can't assert these things during regular event auth so
        # we have to do the checks post hoc.

        # Fetches latest edit that has the same type and sender as the
        # original, and is an `m.room.message`.
        sql = """
            SELECT edit.event_id FROM events AS edit
            INNER JOIN event_relations USING (event_id)
            INNER JOIN events AS original ON
                original.event_id = relates_to_id
                AND edit.type = original.type
                AND edit.sender = original.sender
            WHERE
                relates_to_id = ?
                AND relation_type = ?
                AND edit.type = 'm.room.message'
            ORDER by edit.origin_server_ts DESC, edit.event_id DESC
            LIMIT 1
        """

        def _get_applicable_edit_txn(txn):
            txn.execute(sql, (event_id, RelationTypes.REPLACE))
            row = txn.fetchone()
            if row:
                return row[0]

        edit_id = yield self.runInteraction(
            "get_applicable_edit", _get_applicable_edit_txn
        )

        if not edit_id:
            return

        edit_event = yield self.get_event(edit_id, allow_none=True)
        return edit_event
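For context, the kind of event content the query above ends up matching: an `m.replace` edit carries the replacement body under `m.new_content` and points at the original event via `m.relates_to` (illustrative example, not taken from this diff):

edit_event_content = {
    "msgtype": "m.text",
    "body": "* fixed the typo",  # fallback body shown to clients without edit support
    "m.new_content": {"msgtype": "m.text", "body": "fixed the typo"},
    "m.relates_to": {"rel_type": "m.replace", "event_id": "$original_event_id"},
}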

    def has_user_annotated_event(self, parent_id, event_type, aggregation_key, sender):
        """Check if a user has already annotated an event with the same key
        (e.g. already liked an event).

        Args:
            parent_id (str): The event being annotated
            event_type (str): The event type of the annotation
            aggregation_key (str): The aggregation key of the annotation
            sender (str): The sender of the annotation

        Returns:
            Deferred[bool]
        """

        sql = """
            SELECT 1 FROM event_relations
            INNER JOIN events USING (event_id)
            WHERE
                relates_to_id = ?
                AND relation_type = ?
                AND type = ?
                AND sender = ?
                AND aggregation_key = ?
            LIMIT 1;
        """

        def _get_if_user_has_annotated_event(txn):
            txn.execute(
                sql,
                (
                    parent_id,
                    RelationTypes.ANNOTATION,
                    event_type,
                    sender,
                    aggregation_key,
                ),
            )

            return bool(txn.fetchone())

        return self.runInteraction(
            "get_if_user_has_annotated_event", _get_if_user_has_annotated_event
        )
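A sketch of how a handler might use this check to reject duplicate reactions before persisting them (hypothetical caller; the event type and exception are placeholders):

@defer.inlineCallbacks
def assert_not_already_reacted(store, parent_id, sender, key):
    # Reject the annotation if this sender has already reacted to the
    # parent event with the same aggregation key.
    already = yield store.has_user_annotated_event(
        parent_id, "m.reaction", key, sender
    )
    if already:
        raise ValueError("sender has already annotated this event with that key")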


class RelationsStore(RelationsWorkerStore):
    def _handle_event_relations(self, txn, event):
        """Handles inserting relation data during persistence of events

        Args:
            txn
            event (EventBase)
        """
        relation = event.content.get("m.relates_to")
        if not relation:
            # No relations
            return

        rel_type = relation.get("rel_type")
        if rel_type not in (
            RelationTypes.ANNOTATION,
            RelationTypes.REFERENCE,
            RelationTypes.REPLACE,
        ):
            # Unknown relation type
            return

        parent_id = relation.get("event_id")
        if not parent_id:
            # Invalid relation
            return

        aggregation_key = relation.get("key")

        self._simple_insert_txn(
            txn,
            table="event_relations",
            values={
                "event_id": event.event_id,
                "relates_to_id": parent_id,
                "relation_type": rel_type,
                "aggregation_key": aggregation_key,
            },
        )

        txn.call_after(self.get_relations_for_event.invalidate_many, (parent_id,))
        txn.call_after(
            self.get_aggregation_groups_for_event.invalidate_many, (parent_id,)
        )

        if rel_type == RelationTypes.REPLACE:
            txn.call_after(self.get_applicable_edit.invalidate, (parent_id,))
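For reference, the shape of the `m.relates_to` block this method reads for an annotation (a reaction), with `key` carrying the reaction itself (illustrative example, not taken from this diff):

reaction_event_content = {
    "m.relates_to": {
        "rel_type": "m.annotation",
        "event_id": "$parent_event_id",
        "key": "👍",
    }
}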

    def _handle_redaction(self, txn, redacted_event_id):
        """Handles receiving a redaction and checking whether we need to remove
        any redacted relations from the database.

        Args:
            txn
            redacted_event_id (str): The event that was redacted.
        """

        self._simple_delete_txn(
            txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
        )
@@ -25,7 +25,7 @@ from twisted.internet import defer

from synapse.api.errors import StoreError
from synapse.storage._base import SQLBaseStore
from synapse.storage.search import SearchStore
from synapse.storage.data_stores.main.search import SearchStore
from synapse.types import ThirdPartyInstanceID
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
1145
synapse/storage/data_stores/main/roommember.py
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff