Compare commits
37 commits
anoa/fix_e...shhs-v1.2
| Author | SHA1 | Date |
|---|---|---|
| | f61cdc14e7 | |
| | 43cf23475f | |
| | b7962f5bfd | |
| | 9bbf2d23c4 | |
| | 5daee2eb4a | |
| | 14c8b036ea | |
| | 7fcd6c1df9 | |
| | c43c1adb0c | |
| | a025abebe8 | |
| | c1777f51a9 | |
| | 646292cfb1 | |
| | a175e608e9 | |
| | 9b3a63e1c8 | |
| | 3d89feb438 | |
| | 400bc061ca | |
| | a1de642fe7 | |
| | f4343c7d2b | |
| | 4689408a35 | |
| | bed45ab20b | |
| | 0993b05ca5 | |
| | e001115221 | |
| | e60aab14b4 | |
| | e7c1171935 | |
| | 8fe26db968 | |
| | c99c105158 | |
| | d142e51f76 | |
| | d424ba9e5b | |
| | a1b8767da8 | |
| | faee1e9bab | |
| | 12875f995a | |
| | ed38141620 | |
| | bd5f62469c | |
| | c0f57cab68 | |
| | 1d5cf66958 | |
| | 25256f958b | |
| | a32aa2ce71 | |
| | cbc866a607 | |
@@ -31,7 +31,7 @@ steps:
- "python -m pip install tox"
- "scripts-dev/check-newsfragment"
label: ":newspaper: Newsfile"
branches: "!master !develop !release-*"
branches: "!master !develop !release-* !shhs-v*"
plugins:
- docker#v3.0.1:
image: "python:3.6"
@@ -47,17 +47,16 @@ steps:

- wait


- command:
- "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev"
- "python3.5 -m pip install tox"
- "python -m pip install tox"
- "tox -e py35-old,codecov"
label: ":python: 3.5 / SQLite / Old Deps"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "ubuntu:xenial" # We use xenail to get an old sqlite and python
image: "python:3.5"
propagate-environment: true
retry:
automatic:
@@ -70,6 +69,7 @@ steps:
- "python -m pip install tox"
- "tox -e py35,codecov"
label: ":python: 3.5 / SQLite"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 2"
plugins:
@@ -87,6 +87,7 @@ steps:
- "python -m pip install tox"
- "tox -e py36,codecov"
label: ":python: 3.6 / SQLite"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 2"
plugins:
@@ -118,10 +119,9 @@ steps:
limit: 2

- label: ":python: 3.5 / :postgres: 9.5"
agents:
queue: "medium"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 8"
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
plugins:
@@ -137,10 +137,9 @@ steps:
limit: 2

- label: ":python: 3.7 / :postgres: 9.5"
agents:
queue: "medium"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 8"
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
plugins:
@@ -156,10 +155,8 @@ steps:
limit: 2

- label: ":python: 3.7 / :postgres: 11"
agents:
queue: "medium"
env:
TRIAL_FLAGS: "-j 8"
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
plugins:
@@ -176,6 +173,7 @@ steps:


- label: "SyTest - :python: 3.5 / SQLite / Monolith"
branches: "!shhs !shhs-*"
agents:
queue: "medium"
command:
@@ -216,15 +214,14 @@ steps:
limit: 2

- label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers"
branches: "!shhs !shhs-*"
agents:
queue: "medium"
env:
POSTGRES: "1"
WORKERS: "1"
BLACKLIST: "synapse-blacklist-with-workers"
command:
- "bash .buildkite/merge_base_branch.sh"
- "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'"
- "bash /synapse_sytest.sh"
plugins:
- docker#v3.0.1:
@@ -232,9 +229,22 @@ steps:
propagate-environment: true
always-pull: true
workdir: "/src"
soft_fail: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2

- wait

- label: ":docker: x86_64"
agents:
queue: "release"
branches: "shhs-*"
command:
- "docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.7.4 . -t matrixdotorg/synapse:${BUILDKITE_TAG}"
- "docker save matrixdotorg/synapse:${BUILDKITE_TAG} | gzip -9 > docker.tar.gz"
artifact_paths:
- "docker.tar.gz"
@@ -1,34 +0,0 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse when run under worker mode. For more details, see sytest-blacklist.

Message history can be paginated

m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users

m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users

Can re-join room if re-invited

/upgrade creates a new room

The only membership state included in an initial sync is for all the senders in the timeline

Local device key changes get to remote servers

If remote user leaves room we no longer receive device updates

Forgotten room messages cannot be paginated

Inbound federation can get public room list

Members from the gap are included in gappy incr LL sync

Leaves are present in non-gapped incremental syncs

Old leaves are present in gapped incremental syncs

User sees updates to presence from other users in the incremental sync.

Gapped incremental syncs include all state changes

Old members are included in gappy incr LL sync if they start speaking
@@ -8,6 +8,13 @@ jobs:
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
dockerhubuploadreleaseshhs:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} --build-arg PYTHON_VERSION=3.7 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
dockerhubuploadlatest:
machine: true
steps:
@@ -27,6 +34,12 @@ workflows:
only: /v[0-9].[0-9]+.[0-9]+.*/
branches:
ignore: /.*/
- dockerhubuploadreleaseshhs:
filters:
tags:
only: /shhs-v[0-9]+.[0-9]+.*/
branches:
ignore: /.*/
- dockerhubuploadlatest:
filters:
branches:
@@ -1,4 +1,5 @@
comment: off
comment:
layout: "diff"

coverage:
status:
2 .gitignore vendored
@@ -19,6 +19,7 @@ _trial_temp*/
/*.signing.key
/env/
/homeserver*.yaml
/logs
/media_store/
/uploads

@@ -37,4 +38,3 @@ _trial_temp*/
/docs/build/
/htmlcov
/pip-wheel-metadata/
28 CHANGES.md
@@ -1,22 +1,3 @@
Synapse 1.2.1 (2019-07-26)
==========================

Security update
---------------

This release includes *four* security fixes:

- Prevent an attack where a federated server could send redactions for arbitrary events in v1 and v2 rooms. ([\#5767](https://github.com/matrix-org/synapse/issues/5767))
- Prevent a denial-of-service attack where cycles of redaction events would make Synapse spin infinitely. Thanks to `@lrizika:matrix.org` for identifying and responsibly disclosing this issue. ([0f2ecb961](https://github.com/matrix-org/synapse/commit/0f2ecb961))
- Prevent an attack where users could be joined or parted from public rooms without their consent. Thanks to @dylangerdaly for identifying and responsibly disclosing this issue. ([\#5744](https://github.com/matrix-org/synapse/issues/5744))
- Fix a vulnerability where a federated server could spoof read-receipts from users on other servers. Thanks to @dylangerdaly for identifying this issue too. ([\#5743](https://github.com/matrix-org/synapse/issues/5743))

Additionally, the following fix was in Synapse **1.2.0**, but was not correctly identified during the original release:

- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701))

Synapse 1.2.0 (2019-07-25)
==========================

@@ -35,14 +16,6 @@ Bugfixes
Synapse 1.2.0rc1 (2019-07-22)
=============================

Security fixes
--------------

This update included a security fix which was initially incorrectly flagged as a regular bug fix.

- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701))

Features
--------

@@ -68,6 +41,7 @@ Bugfixes
- Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. ([\#5654](https://github.com/matrix-org/synapse/issues/5654))
- Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. ([\#5658](https://github.com/matrix-org/synapse/issues/5658))
- Fix some problems with authenticating redactions in recent room versions. ([\#5699](https://github.com/matrix-org/synapse/issues/5699), [\#5700](https://github.com/matrix-org/synapse/issues/5700), [\#5707](https://github.com/matrix-org/synapse/issues/5707))
- Ignore redactions of m.room.create events. ([\#5701](https://github.com/matrix-org/synapse/issues/5701))

Updates to the Docker image
@@ -7,6 +7,7 @@ include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh
include sytest-blacklist

recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.sql.postgres
@@ -33,7 +34,6 @@ exclude Dockerfile
exclude .dockerignore
exclude test_postgresql.sh
exclude .editorconfig
exclude sytest-blacklist

include pyproject.toml
recursive-include changelog.d *
1 changelog.d/5072.feature Normal file
@@ -0,0 +1 @@
Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events). This option can be used to prevent adverse performance on resource-constrained homeservers.

1 changelog.d/5099.misc Normal file
@@ -0,0 +1 @@
Python 2 has been removed from the CI.
@@ -1 +0,0 @@
|
||||
Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration.
|
||||
@@ -1 +0,0 @@
|
||||
Fix UISIs during homeserver outage.
|
||||
@@ -1 +0,0 @@
|
||||
Make Jaeger fully configurable.
|
||||
@@ -1 +0,0 @@
|
||||
Add precautionary measures to prevent future abuse of `window.opener` in default welcome page.
|
||||
@@ -1 +0,0 @@
|
||||
Reduce database IO usage by optimising queries for current membership.
|
||||
@@ -1 +0,0 @@
|
||||
Improve caching when fetching `get_filtered_current_state_ids`.
|
||||
@@ -1 +0,0 @@
|
||||
Don't accept opentracing data from clients.
|
||||
@@ -1 +0,0 @@
|
||||
Speed up PostgreSQL unit tests in CI.
|
||||
@@ -1 +0,0 @@
|
||||
Update the coding style document.
|
||||
@@ -1 +0,0 @@
|
||||
Improve database query performance when recording retry intervals for remote hosts.
|
||||
@@ -1 +0,0 @@
|
||||
Add a set of opentracing utils.
|
||||
@@ -1 +0,0 @@
|
||||
Fix stack overflow in server key lookup code.
|
||||
@@ -1 +0,0 @@
|
||||
start.sh no longer uses deprecated cli option.
|
||||
@@ -1 +0,0 @@
|
||||
Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration.
|
||||
@@ -1 +0,0 @@
|
||||
Cache result of get_version_string to reduce overhead of `/version` federation requests.
|
||||
@@ -1 +0,0 @@
|
||||
Return 'user_type' in admin API user endpoints results.
|
||||
@@ -1 +0,0 @@
|
||||
Add sd_notify hooks to ease systemd integration and allows usage of Type=Notify.
|
||||
@@ -1 +0,0 @@
|
||||
Don't package the sytest test blacklist file.
|
||||
@@ -1 +0,0 @@
|
||||
Replace uses of returnValue with plain return, as returnValue is not needed on Python 3.
|
||||
@@ -1 +0,0 @@
|
||||
Reduce database IO usage by optimising queries for current membership.
|
||||
@@ -1 +0,0 @@
|
||||
Blacklist some flakey tests in worker mode.
|
||||
@@ -1 +0,0 @@
|
||||
Log when we receive an event receipt from an unexpected origin.
|
||||
@@ -1 +0,0 @@
|
||||
Reduce database IO usage by optimising queries for current membership.
|
||||
@@ -1 +0,0 @@
|
||||
Fix some error cases in the caching layer.
|
||||
@@ -1 +0,0 @@
|
||||
Add a prometheus metric for pending cache lookups.
|
||||
@@ -1 +0,0 @@
|
||||
Reduce database IO usage by optimising queries for current membership.
|
||||
@@ -1 +0,0 @@
|
||||
Stop trying to fetch events with event_id=None.
|
||||
@@ -1 +0,0 @@
|
||||
Convert RedactionTestCase to modern test style.
|
||||
@@ -1 +0,0 @@
|
||||
Reduce database IO usage by optimising queries for current membership.
|
||||
@@ -1 +0,0 @@
|
||||
Reduce database IO usage by optimising queries for current membership.
|
||||
@@ -1 +0,0 @@
|
||||
Fix debian packaging scripts to correctly build sid packages.
|
||||
@@ -1 +0,0 @@
|
||||
Allow looping calls to be given arguments.
|
||||
@@ -1 +0,0 @@
|
||||
Remove non-functional 'expire_access_token' setting.
|
||||
@@ -1 +0,0 @@
|
||||
Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events) over federation. This option can be used to prevent adverse performance on resource-constrained homeservers.
|
||||
@@ -1 +0,0 @@
|
||||
Set the logs emitted when checking typing and presence timeouts to DEBUG level, not INFO.
|
||||
@@ -1 +0,0 @@
|
||||
Remove DelayedCall debugging from the test suite, as it is no longer required in the vast majority of Synapse's tests.
|
||||
@@ -1 +0,0 @@
|
||||
Fix UISIs during homeserver outage.
|
||||
@@ -1 +0,0 @@
|
||||
Reduce database IO usage by optimising queries for current membership.
|
||||
@@ -1 +0,0 @@
|
||||
Reduce database IO usage by optimising queries for current membership.
|
||||
@@ -1 +0,0 @@
|
||||
Improve performance when making `.well-known` requests by sharing the SSL options between requests.
|
||||
@@ -1 +0,0 @@
|
||||
Disable codecov GitHub comments on PRs.
|
||||
@@ -1 +0,0 @@
|
||||
Return 404 instead of 403 when accessing /rooms/{roomId}/event/{eventId} for an event without the appropriate permissions.
|
||||
@@ -4,8 +4,7 @@ After=matrix-synapse.service
BindsTo=matrix-synapse.service

[Service]
Type=notify
NotifyAccess=main
Type=simple
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse

@@ -2,8 +2,7 @@
Description=Synapse Matrix Homeserver

[Service]
Type=notify
NotifyAccess=main
Type=simple
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse

@@ -14,9 +14,7 @@
Description=Synapse Matrix homeserver

[Service]
Type=notify
NotifyAccess=main
ExecReload=/bin/kill -HUP $MAINPID
Type=simple
Restart=on-abort

User=synapse
6 debian/changelog vendored
@@ -1,9 +1,3 @@
matrix-synapse-py3 (1.2.1) stable; urgency=medium

* New synapse release 1.2.1.

-- Synapse Packaging team <packages@matrix.org> Fri, 26 Jul 2019 11:32:47 +0100

matrix-synapse-py3 (1.2.0) stable; urgency=medium

[ Amber Brown ]
@@ -29,7 +29,7 @@ for port in 8080 8081 8082; do
|
||||
|
||||
if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then
|
||||
printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
|
||||
|
||||
|
||||
echo 'enable_registration: true' >> $DIR/etc/$port.config
|
||||
|
||||
# Warning, this heredoc depends on the interaction of tabs and spaces. Please don't
|
||||
@@ -43,7 +43,7 @@ for port in 8080 8081 8082; do
|
||||
tls: true
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
|
||||
|
||||
- port: $port
|
||||
tls: false
|
||||
bind_addresses: ['::1', '127.0.0.1']
|
||||
@@ -68,7 +68,7 @@ for port in 8080 8081 8082; do
|
||||
|
||||
# Generate tls keys
|
||||
openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix"
|
||||
|
||||
|
||||
# Ignore keys from the trusted keys server
|
||||
echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config
|
||||
echo 'trusted_key_servers:' >> $DIR/etc/$port.config
|
||||
@@ -120,6 +120,7 @@ for port in 8080 8081 8082; do
|
||||
python3 -m synapse.app.homeserver \
|
||||
--config-path "$DIR/etc/$port.config" \
|
||||
-D \
|
||||
-vv \
|
||||
|
||||
popd
|
||||
done
|
||||
|
||||
@@ -42,11 +42,6 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
|
||||
###
|
||||
FROM ${distro}
|
||||
|
||||
# Get the distro we want to pull from as a dynamic build variable
|
||||
# (We need to define it in each build stage)
|
||||
ARG distro=""
|
||||
ENV distro ${distro}
|
||||
|
||||
# Install the build dependencies
|
||||
#
|
||||
# NB: keep this list in sync with the list of build-deps in debian/control
|
||||
|
||||
@@ -4,8 +4,7 @@
|
||||
|
||||
set -ex
|
||||
|
||||
# Get the codename from distro env
|
||||
DIST=`cut -d ':' -f2 <<< $distro`
|
||||
DIST=`lsb_release -c -s`
|
||||
|
||||
# we get a read-only copy of the source: make a writeable copy
|
||||
cp -aT /synapse/source /synapse/build
|
||||
|
||||
@@ -1,8 +1,4 @@
|
||||
Code Style
|
||||
==========
|
||||
|
||||
Formatting tools
|
||||
----------------
|
||||
# Code Style
|
||||
|
||||
The Synapse codebase uses a number of code formatting tools in order to
|
||||
quickly and automatically check for formatting (and sometimes logical) errors
|
||||
@@ -10,20 +6,20 @@ in code.
|
||||
|
||||
The necessary tools are detailed below.
|
||||
|
||||
- **black**
|
||||
## Formatting tools
|
||||
|
||||
The Synapse codebase uses `black <https://pypi.org/project/black/>`_ as an
|
||||
opinionated code formatter, ensuring all comitted code is properly
|
||||
formatted.
|
||||
The Synapse codebase uses [black](https://pypi.org/project/black/) as an
|
||||
opinionated code formatter, ensuring all comitted code is properly
|
||||
formatted.
|
||||
|
||||
First install ``black`` with::
|
||||
First install ``black`` with::
|
||||
|
||||
pip install --upgrade black
|
||||
pip install --upgrade black
|
||||
|
||||
Have ``black`` auto-format your code (it shouldn't change any functionality)
|
||||
with::
|
||||
Have ``black`` auto-format your code (it shouldn't change any
|
||||
functionality) with::
|
||||
|
||||
black . --exclude="\.tox|build|env"
|
||||
black . --exclude="\.tox|build|env"
|
||||
|
||||
- **flake8**
|
||||
|
||||
@@ -58,16 +54,17 @@ functionality is supported in your editor for a more convenient development
|
||||
workflow. It is not, however, recommended to run ``flake8`` on save as it
|
||||
takes a while and is very resource intensive.
|
||||
|
||||
General rules
|
||||
-------------
|
||||
## General rules
|
||||
|
||||
- **Naming**:
|
||||
|
||||
- Use camel case for class and type names
|
||||
- Use underscores for functions and variables.
|
||||
|
||||
- **Docstrings**: should follow the `google code style
|
||||
<https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings>`_.
|
||||
- Use double quotes ``"foo"`` rather than single quotes ``'foo'``.
|
||||
|
||||
- **Comments**: should follow the `google code style
|
||||
<http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
|
||||
This is so that we can generate documentation with `sphinx
|
||||
<http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
|
||||
`examples
|
||||
@@ -76,8 +73,6 @@ General rules
|
||||
|
||||
- **Imports**:
|
||||
|
||||
- Imports should be sorted by ``isort`` as described above.
|
||||
|
||||
- Prefer to import classes and functions rather than packages or modules.
|
||||
|
||||
Example::
|
||||
@@ -97,84 +92,25 @@ General rules
|
||||
This goes against the advice in the Google style guide, but it means that
|
||||
errors in the name are caught early (at import time).
|
||||
|
||||
- Multiple imports from the same package can be combined onto one line::
|
||||
|
||||
from synapse.types import GroupID, RoomID, UserID
|
||||
|
||||
An effort should be made to keep the individual imports in alphabetical
|
||||
order.
|
||||
|
||||
If the list becomes long, wrap it with parentheses and split it over
|
||||
multiple lines.
|
||||
|
||||
- As per `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_,
|
||||
imports should be grouped in the following order, with a blank line between
|
||||
each group:
|
||||
|
||||
1. standard library imports
|
||||
2. related third party imports
|
||||
3. local application/library specific imports
|
||||
|
||||
- Imports within each group should be sorted alphabetically by module name.
|
||||
|
||||
- Avoid wildcard imports (``from synapse.types import *``) and relative
|
||||
imports (``from .types import UserID``).
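
For illustration only (not part of this diff), an import block that follows the grouping and alphabetical-ordering rules above might look like::

    # 1. standard library imports
    import logging
    import os

    # 2. related third party imports
    from twisted.internet import defer

    # 3. local application/library specific imports
    from synapse.api.errors import AuthError
    from synapse.types import GroupID, RoomID, UserID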
|
||||
|
||||
Configuration file format
|
||||
-------------------------
|
||||
|
||||
The `sample configuration file <./sample_config.yaml>`_ acts as a reference to
|
||||
Synapse's configuration options for server administrators. Remember that many
|
||||
readers will be unfamiliar with YAML and server administration in general, so
|
||||
that it is important that the file be as easy to understand as possible, which
|
||||
includes following a consistent format.
|
||||
|
||||
Some guidelines follow:
|
||||
|
||||
* Sections should be separated with a heading consisting of a single line
|
||||
prefixed and suffixed with ``##``. There should be **two** blank lines
|
||||
before the section header, and **one** after.
|
||||
|
||||
* Each option should be listed in the file with the following format:
|
||||
|
||||
* A comment describing the setting. Each line of this comment should be
|
||||
prefixed with a hash (``#``) and a space.
|
||||
|
||||
The comment should describe the default behaviour (ie, what happens if
|
||||
the setting is omitted), as well as what the effect will be if the
|
||||
setting is changed.
|
||||
|
||||
Often, the comment end with something like "uncomment the
|
||||
following to \<do action>".
|
||||
|
||||
* A line consisting of only ``#``.
|
||||
|
||||
* A commented-out example setting, prefixed with only ``#``.
|
||||
|
||||
For boolean (on/off) options, convention is that this example should be
|
||||
the *opposite* to the default (so the comment will end with "Uncomment
|
||||
the following to enable [or disable] \<feature\>." For other options,
|
||||
the example should give some non-default value which is likely to be
|
||||
useful to the reader.
|
||||
|
||||
* There should be a blank line between each option.
|
||||
|
||||
* Where several settings are grouped into a single dict, *avoid* the
|
||||
convention where the whole block is commented out, resulting in comment
|
||||
lines starting ``# #``, as this is hard to read and confusing to
|
||||
edit. Instead, leave the top-level config option uncommented, and follow
|
||||
the conventions above for sub-options. Ensure that your code correctly
|
||||
handles the top-level option being set to ``None`` (as it will be if no
|
||||
sub-options are enabled).
|
||||
|
||||
* Lines should be wrapped at 80 characters.
|
||||
|
||||
Example::
|
||||
|
||||
## Frobnication ##
|
||||
|
||||
# The frobnicator will ensure that all requests are fully frobnicated.
|
||||
# To enable it, uncomment the following.
|
||||
#
|
||||
#frobnicator_enabled: true
|
||||
|
||||
# By default, the frobnicator will frobnicate with the default frobber.
|
||||
# The following will make it use an alternative frobber.
|
||||
#
|
||||
#frobincator_frobber: special_frobber
|
||||
|
||||
# Settings for the frobber
|
||||
#
|
||||
frobber:
|
||||
# frobbing speed. Defaults to 1.
|
||||
#
|
||||
#speed: 10
|
||||
|
||||
# frobbing distance. Defaults to 1000.
|
||||
#
|
||||
#distance: 100
|
||||
|
||||
Note that the sample configuration is generated from the synapse code and is
|
||||
maintained by a script, ``scripts-dev/generate_sample_config``. Making sure
|
||||
that the output from this script matches the desired format is left as an
|
||||
exercise for the reader!
|
||||
|
||||
@@ -148,7 +148,7 @@ call any other functions.
d = more_stuff()
result = yield d # also fine, of course

return result
defer.returnValue(result)

def nonInlineCallbacksFun():
logger.debug("just a wrapper really")
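
As an illustrative sketch only (not taken from this diff; ``do_something`` and ``get_thing`` are placeholder names), a complete function written in the ``defer.returnValue`` style shown here would look like::

    from twisted.internet import defer

    @defer.inlineCallbacks
    def do_something():
        thing = yield get_thing()  # wait for a Deferred to resolve
        # Hand the result back to the caller; inside an inlineCallbacks
        # generator this plays the role of a plain "return thing".
        defer.returnValue(thing)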
@@ -280,20 +280,14 @@ listeners:
|
||||
|
||||
# Resource-constrained Homeserver Settings
|
||||
#
|
||||
# If limit_remote_rooms.enabled is True, the room complexity will be
|
||||
# If limit_large_remote_room_joins is True, the room complexity will be
|
||||
# checked before a user joins a new remote room. If it is above
|
||||
# limit_remote_rooms.complexity, it will disallow joining or
|
||||
# limit_large_remote_room_complexity, it will disallow joining or
|
||||
# instantly leave.
|
||||
#
|
||||
# limit_remote_rooms.complexity_error can be set to customise the text
|
||||
# displayed to the user when a room above the complexity threshold has
|
||||
# its join cancelled.
|
||||
#
|
||||
# Uncomment the below lines to enable:
|
||||
#limit_remote_rooms:
|
||||
# enabled: True
|
||||
# complexity: 1.0
|
||||
# complexity_error: "This room is too complex."
|
||||
#limit_large_remote_room_joins: True
|
||||
#limit_large_remote_room_complexity: 1.0
|
||||
|
||||
# Whether to require a user to be in the room to add an alias to it.
|
||||
# Defaults to 'true'.
|
||||
@@ -942,6 +936,10 @@ uploads_path: "DATADIR/uploads"
|
||||
#
|
||||
# macaroon_secret_key: <PRIVATE STRING>
|
||||
|
||||
# Used to enable access token expiration.
|
||||
#
|
||||
#expire_access_token: False
|
||||
|
||||
# a secret which is used to calculate HMACs for form values, to stop
|
||||
# falsification of values. Must be specified for the User Consent
|
||||
# forms to work.
|
||||
@@ -1443,19 +1441,3 @@ opentracing:
|
||||
#
|
||||
#homeserver_whitelist:
|
||||
# - ".*"
|
||||
|
||||
# Jaeger can be configured to sample traces at different rates.
|
||||
# All configuration options provided by Jaeger can be set here.
|
||||
# Jaeger's configuration mostly related to trace sampling which
|
||||
# is documented here:
|
||||
# https://www.jaegertracing.io/docs/1.13/sampling/.
|
||||
#
|
||||
#jaeger_config:
|
||||
# sampler:
|
||||
# type: const
|
||||
# param: 1
|
||||
|
||||
# Logging whether spans were started and reported
|
||||
#
|
||||
# logging:
|
||||
# false
|
||||
|
||||
@@ -35,4 +35,4 @@ try:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
__version__ = "1.2.1"
|
||||
__version__ = "1.2.0"
|
||||
|
||||
@@ -128,7 +128,7 @@ class Auth(object):
|
||||
)
|
||||
|
||||
self._check_joined_room(member, user_id, room_id)
|
||||
return member
|
||||
defer.returnValue(member)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_user_was_in_room(self, room_id, user_id):
|
||||
@@ -156,13 +156,13 @@ class Auth(object):
|
||||
if forgot:
|
||||
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
|
||||
|
||||
return member
|
||||
defer.returnValue(member)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_host_in_room(self, room_id, host):
|
||||
with Measure(self.clock, "check_host_in_room"):
|
||||
latest_event_ids = yield self.store.is_host_joined(room_id, host)
|
||||
return latest_event_ids
|
||||
defer.returnValue(latest_event_ids)
|
||||
|
||||
def _check_joined_room(self, member, user_id, room_id):
|
||||
if not member or member.membership != Membership.JOIN:
|
||||
@@ -219,7 +219,9 @@ class Auth(object):
|
||||
device_id="dummy-device", # stubbed
|
||||
)
|
||||
|
||||
return synapse.types.create_requester(user_id, app_service=app_service)
|
||||
defer.returnValue(
|
||||
synapse.types.create_requester(user_id, app_service=app_service)
|
||||
)
|
||||
|
||||
user_info = yield self.get_user_by_access_token(access_token, rights)
|
||||
user = user_info["user"]
|
||||
@@ -260,8 +262,10 @@ class Auth(object):
|
||||
|
||||
request.authenticated_entity = user.to_string()
|
||||
|
||||
return synapse.types.create_requester(
|
||||
user, token_id, is_guest, device_id, app_service=app_service
|
||||
defer.returnValue(
|
||||
synapse.types.create_requester(
|
||||
user, token_id, is_guest, device_id, app_service=app_service
|
||||
)
|
||||
)
|
||||
except KeyError:
|
||||
raise MissingClientTokenError()
|
||||
@@ -272,25 +276,25 @@ class Auth(object):
|
||||
self.get_access_token_from_request(request)
|
||||
)
|
||||
if app_service is None:
|
||||
return (None, None)
|
||||
defer.returnValue((None, None))
|
||||
|
||||
if app_service.ip_range_whitelist:
|
||||
ip_address = IPAddress(self.hs.get_ip_from_request(request))
|
||||
if ip_address not in app_service.ip_range_whitelist:
|
||||
return (None, None)
|
||||
defer.returnValue((None, None))
|
||||
|
||||
if b"user_id" not in request.args:
|
||||
return (app_service.sender, app_service)
|
||||
defer.returnValue((app_service.sender, app_service))
|
||||
|
||||
user_id = request.args[b"user_id"][0].decode("utf8")
|
||||
if app_service.sender == user_id:
|
||||
return (app_service.sender, app_service)
|
||||
defer.returnValue((app_service.sender, app_service))
|
||||
|
||||
if not app_service.is_interested_in_user(user_id):
|
||||
raise AuthError(403, "Application service cannot masquerade as this user.")
|
||||
if not (yield self.store.get_user_by_id(user_id)):
|
||||
raise AuthError(403, "Application service has not registered this user")
|
||||
return (user_id, app_service)
|
||||
defer.returnValue((user_id, app_service))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_user_by_access_token(self, token, rights="access"):
|
||||
@@ -326,7 +330,7 @@ class Auth(object):
|
||||
msg="Access token has expired", soft_logout=True
|
||||
)
|
||||
|
||||
return r
|
||||
defer.returnValue(r)
|
||||
|
||||
# otherwise it needs to be a valid macaroon
|
||||
try:
|
||||
@@ -374,7 +378,7 @@ class Auth(object):
|
||||
}
|
||||
else:
|
||||
raise RuntimeError("Unknown rights setting %s", rights)
|
||||
return ret
|
||||
defer.returnValue(ret)
|
||||
except (
|
||||
_InvalidMacaroonException,
|
||||
pymacaroons.exceptions.MacaroonException,
|
||||
@@ -410,16 +414,21 @@ class Auth(object):
|
||||
try:
|
||||
user_id = self.get_user_id_from_macaroon(macaroon)
|
||||
|
||||
has_expiry = False
|
||||
guest = False
|
||||
for caveat in macaroon.caveats:
|
||||
if caveat.caveat_id == "guest = true":
|
||||
if caveat.caveat_id.startswith("time "):
|
||||
has_expiry = True
|
||||
elif caveat.caveat_id == "guest = true":
|
||||
guest = True
|
||||
|
||||
self.validate_macaroon(macaroon, rights, user_id=user_id)
|
||||
self.validate_macaroon(
|
||||
macaroon, rights, self.hs.config.expire_access_token, user_id=user_id
|
||||
)
|
||||
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
|
||||
raise InvalidClientTokenError("Invalid macaroon passed.")
|
||||
|
||||
if rights == "access":
|
||||
if not has_expiry and rights == "access":
|
||||
self.token_cache[token] = (user_id, guest)
|
||||
|
||||
return user_id, guest
|
||||
@@ -445,7 +454,7 @@ class Auth(object):
|
||||
return caveat.caveat_id[len(user_prefix) :]
|
||||
raise InvalidClientTokenError("No user caveat in macaroon")
|
||||
|
||||
def validate_macaroon(self, macaroon, type_string, user_id):
|
||||
def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
|
||||
"""
|
||||
validate that a Macaroon is understood by and was signed by this server.
|
||||
|
||||
@@ -453,6 +462,7 @@ class Auth(object):
|
||||
macaroon(pymacaroons.Macaroon): The macaroon to validate
|
||||
type_string(str): The kind of token required (e.g. "access",
|
||||
"delete_pusher")
|
||||
verify_expiry(bool): Whether to verify whether the macaroon has expired.
|
||||
user_id (str): The user_id required
|
||||
"""
|
||||
v = pymacaroons.Verifier()
|
||||
@@ -465,7 +475,19 @@ class Auth(object):
|
||||
v.satisfy_exact("type = " + type_string)
|
||||
v.satisfy_exact("user_id = %s" % user_id)
|
||||
v.satisfy_exact("guest = true")
|
||||
v.satisfy_general(self._verify_expiry)
|
||||
|
||||
# verify_expiry should really always be True, but there exist access
|
||||
# tokens in the wild which expire when they should not, so we can't
|
||||
# enforce expiry yet (so we have to allow any caveat starting with
|
||||
# 'time < ' in access tokens).
|
||||
#
|
||||
# On the other hand, short-term login tokens (as used by CAS login, for
|
||||
# example) have an expiry time which we do want to enforce.
|
||||
|
||||
if verify_expiry:
|
||||
v.satisfy_general(self._verify_expiry)
|
||||
else:
|
||||
v.satisfy_general(lambda c: c.startswith("time < "))
|
||||
|
||||
# access_tokens include a nonce for uniqueness: any value is acceptable
|
||||
v.satisfy_general(lambda c: c.startswith("nonce = "))
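As a rough sketch for orientation only (not part of this diff; the caveat values, the _verify_time_caveat helper, the millisecond expiry format, and the macaroon / secret_key variables are assumptions for the example), the pymacaroons calls above compose like this:

import time
import pymacaroons

def _verify_time_caveat(caveat):
    # Accept "time < <expiry>" caveats only while they are still valid.
    if not caveat.startswith("time < "):
        return False
    expiry = int(caveat[len("time < "):])
    return time.time() * 1000 < expiry

v = pymacaroons.Verifier()
v.satisfy_exact("type = access")
v.satisfy_exact("user_id = @alice:example.com")
v.satisfy_exact("guest = true")
v.satisfy_general(_verify_time_caveat)  # enforce expiry when required
v.satisfy_general(lambda c: c.startswith("nonce = "))  # nonce is free-form
v.verify(macaroon, secret_key)  # raises if any caveat is unsatisfied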
|
||||
@@ -484,7 +506,7 @@ class Auth(object):
|
||||
def _look_up_user_by_access_token(self, token):
|
||||
ret = yield self.store.get_user_by_access_token(token)
|
||||
if not ret:
|
||||
return None
|
||||
defer.returnValue(None)
|
||||
|
||||
# we use ret.get() below because *lots* of unit tests stub out
|
||||
# get_user_by_access_token in a way where it only returns a couple of
|
||||
@@ -496,7 +518,7 @@ class Auth(object):
|
||||
"device_id": ret.get("device_id"),
|
||||
"valid_until_ms": ret.get("valid_until_ms"),
|
||||
}
|
||||
return user_info
|
||||
defer.returnValue(user_info)
|
||||
|
||||
def get_appservice_by_req(self, request):
|
||||
token = self.get_access_token_from_request(request)
|
||||
@@ -521,7 +543,7 @@ class Auth(object):
|
||||
@defer.inlineCallbacks
|
||||
def compute_auth_events(self, event, current_state_ids, for_verification=False):
|
||||
if event.type == EventTypes.Create:
|
||||
return []
|
||||
defer.returnValue([])
|
||||
|
||||
auth_ids = []
|
||||
|
||||
@@ -582,7 +604,7 @@ class Auth(object):
|
||||
if member_event.content["membership"] == Membership.JOIN:
|
||||
auth_ids.append(member_event.event_id)
|
||||
|
||||
return auth_ids
|
||||
defer.returnValue(auth_ids)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_can_change_room_list(self, room_id, user):
|
||||
@@ -596,7 +618,7 @@ class Auth(object):
|
||||
|
||||
is_admin = yield self.is_server_admin(user)
|
||||
if is_admin:
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
|
||||
user_id = user.to_string()
|
||||
yield self.check_joined_room(room_id, user_id)
|
||||
@@ -690,7 +712,7 @@ class Auth(object):
|
||||
# * The user is a guest user, and has joined the room
|
||||
# else it will throw.
|
||||
member_event = yield self.check_user_was_in_room(room_id, user_id)
|
||||
return (member_event.membership, member_event.event_id)
|
||||
defer.returnValue((member_event.membership, member_event.event_id))
|
||||
except AuthError:
|
||||
visibility = yield self.state.get_current_state(
|
||||
room_id, EventTypes.RoomHistoryVisibility, ""
|
||||
@@ -699,7 +721,7 @@ class Auth(object):
|
||||
visibility
|
||||
and visibility.content["history_visibility"] == "world_readable"
|
||||
):
|
||||
return (Membership.JOIN, None)
|
||||
defer.returnValue((Membership.JOIN, None))
|
||||
return
|
||||
raise AuthError(
|
||||
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
|
||||
|
||||
@@ -132,7 +132,7 @@ class Filtering(object):
|
||||
@defer.inlineCallbacks
|
||||
def get_user_filter(self, user_localpart, filter_id):
|
||||
result = yield self.store.get_user_filter(user_localpart, filter_id)
|
||||
return FilterCollection(result)
|
||||
defer.returnValue(FilterCollection(result))
|
||||
|
||||
def add_user_filter(self, user_localpart, user_filter):
|
||||
self.check_valid_filter(user_filter)
|
||||
|
||||
@@ -15,12 +15,10 @@
|
||||
|
||||
import gc
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
import sdnotify
|
||||
from daemonize import Daemonize
|
||||
|
||||
from twisted.internet import defer, error, reactor
|
||||
@@ -244,16 +242,9 @@ def start(hs, listeners=None):
|
||||
if hasattr(signal, "SIGHUP"):
|
||||
|
||||
def handle_sighup(*args, **kwargs):
|
||||
# Tell systemd our state, if we're using it. This will silently fail if
|
||||
# we're not using systemd.
|
||||
sd_channel = sdnotify.SystemdNotifier()
|
||||
sd_channel.notify("RELOADING=1")
|
||||
|
||||
for i in _sighup_callbacks:
|
||||
i(hs)
|
||||
|
||||
sd_channel.notify("READY=1")
|
||||
|
||||
signal.signal(signal.SIGHUP, handle_sighup)
|
||||
|
||||
register_sighup(refresh_certificate)
|
||||
@@ -269,7 +260,6 @@ def start(hs, listeners=None):
|
||||
hs.get_datastore().start_profiling()
|
||||
|
||||
setup_sentry(hs)
|
||||
setup_sdnotify(hs)
|
||||
except Exception:
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
reactor = hs.get_reactor()
|
||||
@@ -302,25 +292,6 @@ def setup_sentry(hs):
|
||||
scope.set_tag("worker_name", name)
|
||||
|
||||
|
||||
def setup_sdnotify(hs):
|
||||
"""Adds process state hooks to tell systemd what we are up to.
|
||||
"""
|
||||
|
||||
# Tell systemd our state, if we're using it. This will silently fail if
|
||||
# we're not using systemd.
|
||||
sd_channel = sdnotify.SystemdNotifier()
|
||||
|
||||
hs.get_reactor().addSystemEventTrigger(
|
||||
"after",
|
||||
"startup",
|
||||
lambda: sd_channel.notify("READY=1\nMAINPID=%s" % (os.getpid())),
|
||||
)
|
||||
|
||||
hs.get_reactor().addSystemEventTrigger(
|
||||
"before", "shutdown", lambda: sd_channel.notify("STOPPING=1")
|
||||
)
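For context, a minimal standalone sketch (not part of this diff) of the sd_notify handshake that the removed setup_sdnotify hook performs; the Type=notify unit files elsewhere in this comparison rely on these messages:

import os
import sdnotify

notifier = sdnotify.SystemdNotifier()
# Report the main PID and signal that startup has completed.
notifier.notify("READY=1\nMAINPID=%s" % (os.getpid(),))
# ... run the server ...
notifier.notify("STOPPING=1")  # signal orderly shutdown to systemd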
|
||||
|
||||
|
||||
def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
|
||||
"""Replaces the resolver with one that limits the number of in flight DNS
|
||||
requests.
|
||||
|
||||
@@ -168,9 +168,7 @@ def start(config_options):
|
||||
)
|
||||
|
||||
ps.setup()
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "startup", _base.start, ps, config.worker_listeners
|
||||
)
|
||||
reactor.callWhenRunning(_base.start, ps, config.worker_listeners)
|
||||
|
||||
_base.start_worker_reactor("synapse-appservice", config)
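As a standalone sketch (not from this diff; start_listening and the listener list are placeholders), the two Twisted startup hooks that this comparison swaps between are used like this:

from twisted.internet import reactor

def start_listening(listeners):
    print("starting listeners:", listeners)

# Form used on one side of the diff: run the callable once the reactor is up.
reactor.callWhenRunning(start_listening, [{"port": 8008}])

# Form used on the other side: register it as a "before startup" trigger.
reactor.addSystemEventTrigger("before", "startup", start_listening, [{"port": 8008}])

# reactor.run() would then start the event loop and fire the registered hook.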
|
||||
|
||||
|
||||
@@ -194,9 +194,7 @@ def start(config_options):
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "startup", _base.start, ss, config.worker_listeners
|
||||
)
|
||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||
|
||||
_base.start_worker_reactor("synapse-client-reader", config)
|
||||
|
||||
|
||||
@@ -193,9 +193,7 @@ def start(config_options):
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "startup", _base.start, ss, config.worker_listeners
|
||||
)
|
||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||
|
||||
_base.start_worker_reactor("synapse-event-creator", config)
|
||||
|
||||
|
||||
@@ -175,9 +175,7 @@ def start(config_options):
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "startup", _base.start, ss, config.worker_listeners
|
||||
)
|
||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||
|
||||
_base.start_worker_reactor("synapse-federation-reader", config)
|
||||
|
||||
|
||||
@@ -198,9 +198,7 @@ def start(config_options):
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "startup", _base.start, ss, config.worker_listeners
|
||||
)
|
||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||
|
||||
_base.start_worker_reactor("synapse-federation-sender", config)
|
||||
|
||||
|
||||
@@ -70,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet):
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
|
||||
return (200, result)
|
||||
defer.returnValue((200, result))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_PUT(self, request, user_id):
|
||||
yield self.auth.get_user_by_req(request)
|
||||
return (200, {})
|
||||
defer.returnValue((200, {}))
|
||||
|
||||
|
||||
class KeyUploadServlet(RestServlet):
|
||||
@@ -126,11 +126,11 @@ class KeyUploadServlet(RestServlet):
|
||||
self.main_uri + request.uri.decode("ascii"), body, headers=headers
|
||||
)
|
||||
|
||||
return (200, result)
|
||||
defer.returnValue((200, result))
|
||||
else:
|
||||
# Just interested in counts.
|
||||
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
|
||||
return (200, {"one_time_key_counts": result})
|
||||
defer.returnValue((200, {"one_time_key_counts": result}))
|
||||
|
||||
|
||||
class FrontendProxySlavedStore(
|
||||
@@ -247,9 +247,7 @@ def start(config_options):
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "startup", _base.start, ss, config.worker_listeners
|
||||
)
|
||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||
|
||||
_base.start_worker_reactor("synapse-frontend-proxy", config)
|
||||
|
||||
|
||||
4
synapse/app/homeserver.py
Normal file → Executable file
4
synapse/app/homeserver.py
Normal file → Executable file
@@ -406,7 +406,7 @@ def setup(config_options):
|
||||
if provision:
|
||||
yield acme.provision_certificate()
|
||||
|
||||
return provision
|
||||
defer.returnValue(provision)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def reprovision_acme():
|
||||
@@ -447,7 +447,7 @@ def setup(config_options):
|
||||
reactor.stop()
|
||||
sys.exit(1)
|
||||
|
||||
reactor.addSystemEventTrigger("before", "startup", start)
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
return hs
|
||||
|
||||
|
||||
@@ -161,9 +161,7 @@ def start(config_options):
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "startup", _base.start, ss, config.worker_listeners
|
||||
)
|
||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||
|
||||
_base.start_worker_reactor("synapse-media-repository", config)
|
||||
|
||||
|
||||
@@ -216,7 +216,7 @@ def start(config_options):
|
||||
_base.start(ps, config.worker_listeners)
|
||||
ps.get_pusherpool().start()
|
||||
|
||||
reactor.addSystemEventTrigger("before", "startup", start)
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
_base.start_worker_reactor("synapse-pusher", config)
|
||||
|
||||
|
||||
@@ -451,9 +451,7 @@ def start(config_options):
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "startup", _base.start, ss, config.worker_listeners
|
||||
)
|
||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||
|
||||
_base.start_worker_reactor("synapse-synchrotron", config)
|
||||
|
||||
|
||||
@@ -224,9 +224,7 @@ def start(config_options):
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "startup", _base.start, ss, config.worker_listeners
|
||||
)
|
||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
||||
|
||||
_base.start_worker_reactor("synapse-user-dir", config)
|
||||
|
||||
|
||||
@@ -175,21 +175,21 @@ class ApplicationService(object):
|
||||
@defer.inlineCallbacks
|
||||
def _matches_user(self, event, store):
|
||||
if not event:
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
|
||||
if self.is_interested_in_user(event.sender):
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
# also check m.room.member state key
|
||||
if event.type == EventTypes.Member and self.is_interested_in_user(
|
||||
event.state_key
|
||||
):
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
|
||||
if not store:
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
|
||||
does_match = yield self._matches_user_in_member_list(event.room_id, store)
|
||||
return does_match
|
||||
defer.returnValue(does_match)
|
||||
|
||||
@cachedInlineCallbacks(num_args=1, cache_context=True)
|
||||
def _matches_user_in_member_list(self, room_id, store, cache_context):
|
||||
@@ -200,8 +200,8 @@ class ApplicationService(object):
|
||||
# check joined member events
|
||||
for user_id in member_list:
|
||||
if self.is_interested_in_user(user_id):
|
||||
return True
|
||||
return False
|
||||
defer.returnValue(True)
|
||||
defer.returnValue(False)
|
||||
|
||||
def _matches_room_id(self, event):
|
||||
if hasattr(event, "room_id"):
|
||||
@@ -211,13 +211,13 @@ class ApplicationService(object):
|
||||
@defer.inlineCallbacks
|
||||
def _matches_aliases(self, event, store):
|
||||
if not store or not event:
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
|
||||
alias_list = yield store.get_aliases_for_room(event.room_id)
|
||||
for alias in alias_list:
|
||||
if self.is_interested_in_alias(alias):
|
||||
return True
|
||||
return False
|
||||
defer.returnValue(True)
|
||||
defer.returnValue(False)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def is_interested(self, event, store=None):
|
||||
@@ -231,15 +231,15 @@ class ApplicationService(object):
|
||||
"""
|
||||
# Do cheap checks first
|
||||
if self._matches_room_id(event):
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
|
||||
if (yield self._matches_aliases(event, store)):
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
|
||||
if (yield self._matches_user(event, store)):
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
|
||||
def is_interested_in_user(self, user_id):
|
||||
return (
|
||||
|
||||
@@ -97,40 +97,40 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
@defer.inlineCallbacks
|
||||
def query_user(self, service, user_id):
|
||||
if service.url is None:
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
|
||||
response = None
|
||||
try:
|
||||
response = yield self.get_json(uri, {"access_token": service.hs_token})
|
||||
if response is not None: # just an empty json object
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
except CodeMessageException as e:
|
||||
if e.code == 404:
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
return
|
||||
logger.warning("query_user to %s received %s", uri, e.code)
|
||||
except Exception as ex:
|
||||
logger.warning("query_user to %s threw exception %s", uri, ex)
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def query_alias(self, service, alias):
|
||||
if service.url is None:
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
|
||||
response = None
|
||||
try:
|
||||
response = yield self.get_json(uri, {"access_token": service.hs_token})
|
||||
if response is not None: # just an empty json object
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
except CodeMessageException as e:
|
||||
logger.warning("query_alias to %s received %s", uri, e.code)
|
||||
if e.code == 404:
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
return
|
||||
except Exception as ex:
|
||||
logger.warning("query_alias to %s threw exception %s", uri, ex)
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def query_3pe(self, service, kind, protocol, fields):
|
||||
@@ -141,7 +141,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
else:
|
||||
raise ValueError("Unrecognised 'kind' argument %r to query_3pe()", kind)
|
||||
if service.url is None:
|
||||
return []
|
||||
defer.returnValue([])
|
||||
|
||||
uri = "%s%s/thirdparty/%s/%s" % (
|
||||
service.url,
|
||||
@@ -155,7 +155,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
logger.warning(
|
||||
"query_3pe to %s returned an invalid response %r", uri, response
|
||||
)
|
||||
return []
|
||||
defer.returnValue([])
|
||||
|
||||
ret = []
|
||||
for r in response:
|
||||
@@ -166,14 +166,14 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
"query_3pe to %s returned an invalid result %r", uri, r
|
||||
)
|
||||
|
||||
return ret
|
||||
defer.returnValue(ret)
|
||||
except Exception as ex:
|
||||
logger.warning("query_3pe to %s threw exception %s", uri, ex)
|
||||
return []
|
||||
defer.returnValue([])
|
||||
|
||||
def get_3pe_protocol(self, service, protocol):
|
||||
if service.url is None:
|
||||
return {}
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _get():
|
||||
@@ -189,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
logger.warning(
|
||||
"query_3pe_protocol to %s did not return a" " valid result", uri
|
||||
)
|
||||
return None
|
||||
defer.returnValue(None)
|
||||
|
||||
for instance in info.get("instances", []):
|
||||
network_id = instance.get("network_id", None)
|
||||
@@ -198,10 +198,10 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
service.id, network_id
|
||||
).to_string()
|
||||
|
||||
return info
|
||||
defer.returnValue(info)
|
||||
except Exception as ex:
|
||||
logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex)
|
||||
return None
|
||||
defer.returnValue(None)
|
||||
|
||||
key = (service.id, protocol)
|
||||
return self.protocol_meta_cache.wrap(key, _get)
|
||||
@@ -209,7 +209,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
@defer.inlineCallbacks
|
||||
def push_bulk(self, service, events, txn_id=None):
|
||||
if service.url is None:
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
|
||||
events = self._serialize(events)
|
||||
|
||||
@@ -229,14 +229,14 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
)
|
||||
sent_transactions_counter.labels(service.id).inc()
|
||||
sent_events_counter.labels(service.id).inc(len(events))
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
return
|
||||
except CodeMessageException as e:
|
||||
logger.warning("push_bulk to %s received %s", uri, e.code)
|
||||
except Exception as ex:
|
||||
logger.warning("push_bulk to %s threw exception %s", uri, ex)
|
||||
failed_transactions_counter.labels(service.id).inc()
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
|
||||
def _serialize(self, events):
|
||||
time_now = self.clock.time_msec()
|
||||
|
||||
@@ -193,7 +193,7 @@ class _TransactionController(object):
|
||||
@defer.inlineCallbacks
|
||||
def _is_service_up(self, service):
|
||||
state = yield self.store.get_appservice_state(service)
|
||||
return state == ApplicationServiceState.UP or state is None
|
||||
defer.returnValue(state == ApplicationServiceState.UP or state is None)
|
||||
|
||||
|
||||
class _Recoverer(object):
|
||||
@@ -208,7 +208,7 @@ class _Recoverer(object):
|
||||
r.service.id,
|
||||
)
|
||||
r.recover()
|
||||
return recoverers
|
||||
defer.returnValue(recoverers)
|
||||
|
||||
def __init__(self, clock, store, as_api, service, callback):
|
||||
self.clock = clock
|
||||
|
||||
@@ -116,6 +116,8 @@ class KeyConfig(Config):
|
||||
seed = bytes(self.signing_key[0])
|
||||
self.macaroon_secret_key = hashlib.sha256(seed).digest()
|
||||
|
||||
self.expire_access_token = config.get("expire_access_token", False)
|
||||
|
||||
# a secret which is used to calculate HMACs for form values, to stop
|
||||
# falsification of values
|
||||
self.form_secret = config.get("form_secret", None)
|
||||
@@ -142,6 +144,10 @@ class KeyConfig(Config):
|
||||
#
|
||||
%(macaroon_secret_key)s
|
||||
|
||||
# Used to enable access token expiration.
|
||||
#
|
||||
#expire_access_token: False
|
||||
|
||||
# a secret which is used to calculate HMACs for form values, to stop
|
||||
# falsification of values. Must be specified for the User Consent
|
||||
# forms to work.
|
||||
|
||||
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import logging.config
import os
@@ -76,8 +75,10 @@ root:

class LoggingConfig(Config):
def read_config(self, config, **kwargs):
self.log_config = self.abspath(config.get("log_config"))
self.verbosity = config.get("verbose", 0)
self.no_redirect_stdio = config.get("no_redirect_stdio", False)
self.log_config = self.abspath(config.get("log_config"))
self.log_file = self.abspath(config.get("log_file"))

def generate_config_section(self, config_dir_path, server_name, **kwargs):
log_config = os.path.join(config_dir_path, server_name + ".log.config")
@@ -93,12 +94,38 @@ class LoggingConfig(Config):
)

def read_arguments(self, args):
if args.verbose is not None:
self.verbosity = args.verbose
if args.no_redirect_stdio is not None:
self.no_redirect_stdio = args.no_redirect_stdio
if args.log_config is not None:
self.log_config = args.log_config
if args.log_file is not None:
self.log_file = args.log_file

@staticmethod
def add_arguments(parser):
logging_group = parser.add_argument_group("logging")
logging_group.add_argument(
"-v",
"--verbose",
dest="verbose",
action="count",
help="The verbosity level. Specify multiple times to increase "
"verbosity. (Ignored if --log-config is specified.)",
)
logging_group.add_argument(
"-f",
"--log-file",
dest="log_file",
help="File to log to. (Ignored if --log-config is specified.)",
)
logging_group.add_argument(
"--log-config",
dest="log_config",
default=None,
help="Python logging config file",
)
logging_group.add_argument(
"-n",
"--no-redirect-stdio",
@@ -126,29 +153,58 @@ def setup_logging(config, use_worker_options=False):
config (LoggingConfig | synapse.config.workers.WorkerConfig):
configuration data

use_worker_options (bool): True to use the 'worker_log_config' option
instead of 'log_config'.
use_worker_options (bool): True to use 'worker_log_config' and
'worker_log_file' options instead of 'log_config' and 'log_file'.

register_sighup (func | None): Function to call to register a
sighup handler.
"""
log_config = config.worker_log_config if use_worker_options else config.log_config
log_file = config.worker_log_file if use_worker_options else config.log_file

log_format = (
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
" - %(message)s"
)

if log_config is None:
log_format = (
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
" - %(message)s"
)
# We don't have a logfile, so fall back to the 'verbosity' param from
# the config or cmdline. (Note that we generate a log config for new
# installs, so this will be an unusual case)
level = logging.INFO
level_for_storage = logging.INFO
if config.verbosity:
level = logging.DEBUG
if config.verbosity > 1:
level_for_storage = logging.DEBUG

logger = logging.getLogger("")
logger.setLevel(logging.INFO)
logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)
logger.setLevel(level)

logging.getLogger("synapse.storage.SQL").setLevel(level_for_storage)

formatter = logging.Formatter(log_format)
if log_file:
# TODO: Customisable file size / backup count
handler = logging.handlers.RotatingFileHandler(
log_file, maxBytes=(1000 * 1000 * 100), backupCount=3, encoding="utf8"
)

def sighup(signum, stack):
logger.info("Closing log file due to SIGHUP")
handler.doRollover()
logger.info("Opened new log file due to SIGHUP")

else:
handler = logging.StreamHandler()

def sighup(*args):
pass

handler = logging.StreamHandler()
handler.setFormatter(formatter)

handler.addFilter(LoggingContextFilter(request=""))

logger.addHandler(handler)
else:

@@ -162,7 +218,8 @@ def setup_logging(config, use_worker_options=False):
logging.info("Reloaded log config from %s due to SIGHUP", log_config)

load_log_config()
appbase.register_sighup(sighup)

appbase.register_sighup(sighup)

# make sure that the first thing we log is a thing we can grep backwards
# for

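The logging hunks above restore the 'log_file' and 'verbose' settings alongside 'log_config', including the rotating file handler and the SIGHUP-triggered rollover. A rough, self-contained sketch of the restored behaviour; the function and its arguments below are illustrative stand-ins, not Synapse's actual LoggingConfig API:

import logging
import logging.handlers
import signal

def setup_simple_logging(log_file=None, verbosity=0):
    # Fall back to INFO, raising to DEBUG when 'verbose' is set,
    # mirroring the verbosity handling restored in the diff above.
    level = logging.DEBUG if verbosity else logging.INFO
    root = logging.getLogger("")
    root.setLevel(level)

    if log_file:
        # Rotate at ~100 MB, keeping 3 backups, as in the restored code.
        handler = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=100 * 1000 * 1000, backupCount=3, encoding="utf8"
        )

        def sighup(signum, frame):
            # Re-open the log file when the process receives SIGHUP.
            handler.doRollover()

        signal.signal(signal.SIGHUP, sighup)
    else:
        handler = logging.StreamHandler()

    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    root.addHandler(handler)
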
@@ -87,13 +87,22 @@ def parse_thumbnail_requirements(thumbnail_sizes):

class ContentRepositoryConfig(Config):
def read_config(self, config, **kwargs):
self.enable_media_repo = config.get("enable_media_repo", True)

self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))

self.media_store_path = self.ensure_directory(
config.get("media_store_path", "media_store")
)
if self.enable_media_repo:
self.media_store_path = self.ensure_directory(
config.get("media_store_path", "media_store")
)
self.uploads_path = self.ensure_directory(
config.get("uploads_path", "uploads")
)
else:
self.media_store_path = None
self.uploads_path = None

backup_media_store_path = config.get("backup_media_store_path")

@@ -150,7 +159,6 @@ class ContentRepositoryConfig(Config):
(provider_class, parsed_config, wrapper_config)
)

self.uploads_path = self.ensure_directory(config.get("uploads_path", "uploads"))
self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
self.thumbnail_requirements = parse_thumbnail_requirements(
config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES)

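The media repository hunk above only creates the media directories when enable_media_repo is turned on, so deployments that disable the repo never touch the store paths. A minimal sketch of that pattern, using hypothetical helper names rather than the real Config methods:

import os

def read_media_config(config):
    # Only touch the filesystem when the media repository is enabled;
    # otherwise leave the paths unset entirely.
    enable_media_repo = config.get("enable_media_repo", True)

    if enable_media_repo:
        media_store_path = config.get("media_store_path", "media_store")
        uploads_path = config.get("uploads_path", "uploads")
        os.makedirs(media_store_path, exist_ok=True)
        os.makedirs(uploads_path, exist_ok=True)
    else:
        media_store_path = None
        uploads_path = None

    return {"media_store_path": media_store_path, "uploads_path": uploads_path}
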
@@ -18,7 +18,6 @@
import logging
import os.path

import attr
from netaddr import IPSet

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
@@ -39,12 +38,6 @@ DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]

DEFAULT_ROOM_VERSION = "4"

ROOM_COMPLEXITY_TOO_GREAT = (
"Your homeserver is unable to join rooms this large or complex. "
"Please speak to your server administrator, or upgrade your instance "
"to join this room."
)


class ServerConfig(Config):
def read_config(self, config, **kwargs):
@@ -254,21 +247,10 @@ class ServerConfig(Config):

self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

@attr.s
class LimitRemoteRoomsConfig(object):
enabled = attr.ib(
validator=attr.validators.instance_of(bool), default=False
)
complexity = attr.ib(
validator=attr.validators.instance_of((int, float)), default=1.0
)
complexity_error = attr.ib(
validator=attr.validators.instance_of(str),
default=ROOM_COMPLEXITY_TOO_GREAT,
)

self.limit_remote_rooms = LimitRemoteRoomsConfig(
**config.get("limit_remote_rooms", {})
# Resource-constrained Homeserver Configuration
self.limit_large_room_joins = config.get("limit_large_remote_room_joins", False)
self.limit_large_room_complexity = config.get(
"limit_large_remote_room_complexity", 1.0
)

bind_port = config.get("bind_port")
@@ -643,20 +625,14 @@ class ServerConfig(Config):

# Resource-constrained Homeserver Settings
#
# If limit_remote_rooms.enabled is True, the room complexity will be
# If limit_large_remote_room_joins is True, the room complexity will be
# checked before a user joins a new remote room. If it is above
# limit_remote_rooms.complexity, it will disallow joining or
# limit_large_remote_room_complexity, it will disallow joining or
# instantly leave.
#
# limit_remote_rooms.complexity_error can be set to customise the text
# displayed to the user when a room above the complexity threshold has
# its join cancelled.
#
# Uncomment the below lines to enable:
#limit_remote_rooms:
# enabled: True
# complexity: 1.0
# complexity_error: "This room is too complex."
#limit_large_remote_room_joins: True
#limit_large_remote_room_complexity: 1.0

# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.

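The server hunks above swap the nested limit_remote_rooms block for two flat options, limit_large_remote_room_joins and limit_large_remote_room_complexity. A sketch of how a join could be gated on them; the helper below and its arguments are assumptions for illustration, not the actual handler code:

def maybe_reject_remote_join(config, room_complexity):
    # room_complexity is expected to be the "v1" complexity figure reported
    # for the remote room, where 1.0 roughly corresponds to a very large room.
    limit_joins = config.get("limit_large_remote_room_joins", False)
    max_complexity = config.get("limit_large_remote_room_complexity", 1.0)

    if limit_joins and room_complexity > max_complexity:
        # Refuse the join (or leave immediately) when the room is too complex.
        raise RuntimeError(
            "Your homeserver is unable to join rooms this large or complex."
        )
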
@@ -23,12 +23,6 @@ class TracerConfig(Config):
opentracing_config = {}

self.opentracer_enabled = opentracing_config.get("enabled", False)

self.jaeger_config = opentracing_config.get(
"jaeger_config",
{"sampler": {"type": "const", "param": 1}, "logging": False},
)

if not self.opentracer_enabled:
return

@@ -62,20 +56,4 @@ class TracerConfig(Config):
#
#homeserver_whitelist:
# - ".*"

# Jaeger can be configured to sample traces at different rates.
# All configuration options provided by Jaeger can be set here.
# Jaeger's configuration mostly related to trace sampling which
# is documented here:
# https://www.jaegertracing.io/docs/1.13/sampling/.
#
#jaeger_config:
# sampler:
# type: const
# param: 1

# Logging whether spans were started and reported
#
# logging:
# false
"""

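The tracer hunk keeps a hard-coded default jaeger_config instead of documenting it in the generated config. The default it reads is equivalent to the following sketch, assuming the jaeger-client style sampler block shown in the removed comments:

# Default Jaeger settings equivalent to the ones read in the hunk above:
# a constant sampler that keeps every trace, with span logging disabled.
DEFAULT_JAEGER_CONFIG = {
    "sampler": {"type": "const", "param": 1},
    "logging": False,
}

def read_tracer_config(config):
    opentracing_config = config.get("opentracing") or {}
    enabled = opentracing_config.get("enabled", False)
    jaeger_config = opentracing_config.get("jaeger_config", DEFAULT_JAEGER_CONFIG)
    return enabled, jaeger_config
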
@@ -31,6 +31,7 @@ class WorkerConfig(Config):
self.worker_listeners = config.get("worker_listeners", [])
self.worker_daemonize = config.get("worker_daemonize")
self.worker_pid_file = config.get("worker_pid_file")
self.worker_log_file = config.get("worker_log_file")
self.worker_log_config = config.get("worker_log_config")

# The host used to connect to the main synapse
@@ -77,5 +78,9 @@ class WorkerConfig(Config):

if args.daemonize is not None:
self.worker_daemonize = args.daemonize
if args.log_config is not None:
self.worker_log_config = args.log_config
if args.log_file is not None:
self.worker_log_file = args.log_file
if args.manhole is not None:
self.worker_manhole = args.worker_manhole

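This worker hunk reintroduces worker_log_file next to worker_log_config and lets --log-file override it on the command line. A small sketch of that precedence; the argparse attribute name here is an assumption:

def resolve_worker_log_file(config, args):
    # Command-line flags win over the worker config file, which in turn
    # wins over having no file-based logging at all.
    log_file = config.get("worker_log_file")
    if getattr(args, "log_file", None) is not None:
        log_file = args.log_file
    return log_file
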
@@ -31,7 +31,6 @@ from twisted.internet.ssl import (
platformTrust,
)
from twisted.python.failure import Failure
from twisted.web.iweb import IPolicyForHTTPS

logger = logging.getLogger(__name__)

@@ -75,7 +74,6 @@ class ServerContextFactory(ContextFactory):
return self._context


@implementer(IPolicyForHTTPS)
class ClientTLSOptionsFactory(object):
"""Factory for Twisted SSLClientConnectionCreators that are used to make connections
to remote servers for federation.
@@ -148,12 +146,6 @@ class ClientTLSOptionsFactory(object):
f = Failure()
tls_protocol.failVerification(f)

def creatorForNetloc(self, hostname, port):
"""Implements the IPolicyForHTTPS interace so that this can be passed
directly to agents.
"""
return self.get_options(hostname)


@implementer(IOpenSSLClientConnectionCreator)
class SSLClientConnectionCreator(object):

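The context-factory hunk removes the @implementer(IPolicyForHTTPS) declaration and the creatorForNetloc method from ClientTLSOptionsFactory, reverting it to a plain options factory. For reference, a minimal implementer of that Twisted interface looks roughly like this; it is a sketch, not the removed Synapse code:

from zope.interface import implementer
from twisted.internet.ssl import optionsForClientTLS
from twisted.web.iweb import IPolicyForHTTPS

@implementer(IPolicyForHTTPS)
class SimpleTLSPolicy(object):
    """Policy object that twisted.web agents consult once per destination."""

    def creatorForNetloc(self, hostname, port):
        # Return an IOpenSSLClientConnectionCreator for this host; here we
        # simply defer to Twisted's default client TLS options.
        return optionsForClientTLS(hostname.decode("ascii"))
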
@@ -238,9 +238,27 @@ class Keyring(object):
"""

try:
ctx = LoggingContext.current_context()
# create a deferred for each server we're going to look up the keys
# for; we'll resolve them once we have completed our lookups.
# These will be passed into wait_for_previous_lookups to block
# any other lookups until we have finished.
# The deferreds are called with no logcontext.
server_to_deferred = {
rq.server_name: defer.Deferred() for rq in verify_requests
}

# map from server name to a set of outstanding request ids
# We want to wait for any previous lookups to complete before
# proceeding.
yield self.wait_for_previous_lookups(server_to_deferred)

# Actually start fetching keys.
self._get_server_verify_keys(verify_requests)

# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
#
# map from server name to a set of request ids
server_to_request_ids = {}

for verify_request in verify_requests:
@@ -248,61 +266,40 @@ class Keyring(object):
request_id = id(verify_request)
server_to_request_ids.setdefault(server_name, set()).add(request_id)

# Wait for any previous lookups to complete before proceeding.
yield self.wait_for_previous_lookups(server_to_request_ids.keys())

# take out a lock on each of the servers by sticking a Deferred in
# key_downloads
for server_name in server_to_request_ids.keys():
self.key_downloads[server_name] = defer.Deferred()
logger.debug("Got key lookup lock on %s", server_name)

# When we've finished fetching all the keys for a given server_name,
# drop the lock by resolving the deferred in key_downloads.
def drop_server_lock(server_name):
d = self.key_downloads.pop(server_name)
d.callback(None)

def lookup_done(res, verify_request):
def remove_deferreds(res, verify_request):
server_name = verify_request.server_name
server_requests = server_to_request_ids[server_name]
server_requests.remove(id(verify_request))

# if there are no more requests for this server, we can drop the lock.
if not server_requests:
with PreserveLoggingContext(ctx):
logger.debug("Releasing key lookup lock on %s", server_name)

# ... but not immediately, as that can cause stack explosions if
# we get a long queue of lookups.
self.clock.call_later(0, drop_server_lock, server_name)

request_id = id(verify_request)
server_to_request_ids[server_name].discard(request_id)
if not server_to_request_ids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res

for verify_request in verify_requests:
verify_request.key_ready.addBoth(lookup_done, verify_request)

# Actually start fetching keys.
self._get_server_verify_keys(verify_requests)
verify_request.key_ready.addBoth(remove_deferreds, verify_request)
except Exception:
logger.exception("Error starting key lookups")

@defer.inlineCallbacks
def wait_for_previous_lookups(self, server_names):
def wait_for_previous_lookups(self, server_to_deferred):
"""Waits for any previous key lookups for the given servers to finish.

Args:
server_names (Iterable[str]): list of servers which we want to look up
server_to_deferred (dict[str, Deferred]): server_name to deferred which gets
resolved once we've finished looking up keys for that server.
The Deferreds should be regular twisted ones which call their
callbacks with no logcontext.

Returns:
Deferred[None]: resolves once all key lookups for the given servers have
completed. Follows the synapse rules of logcontext preservation.
Returns: a Deferred which resolves once all key lookups for the given
servers have completed. Follows the synapse rules of logcontext
preservation.
"""
loop_count = 1
while True:
wait_on = [
(server_name, self.key_downloads[server_name])
for server_name in server_names
for server_name in server_to_deferred.keys()
if server_name in self.key_downloads
]
if not wait_on:
@@ -317,6 +314,19 @@ class Keyring(object):

loop_count += 1

ctx = LoggingContext.current_context()

def rm(r, server_name_):
with PreserveLoggingContext(ctx):
logger.debug("Releasing key lookup lock on %s", server_name_)
self.key_downloads.pop(server_name_, None)
return r

for server_name, deferred in server_to_deferred.items():
logger.debug("Got key lookup lock on %s", server_name)
self.key_downloads[server_name] = deferred
deferred.addBoth(rm, server_name)

def _get_server_verify_keys(self, verify_requests):
"""Tries to find at least one key for each verify request

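The keyring hunks above go back to handing wait_for_previous_lookups a dict of per-server deferreds, which double as locks stored in key_downloads. Reduced to its essentials, the locking pattern is roughly the following; this is illustrative only, and the real class tracks much more state and releases the lock via the clock to avoid deep call stacks:

from twisted.internet import defer

class LookupLocks(object):
    """One Deferred per server acts as a simple (non-FIFO) lock."""

    def __init__(self):
        self.key_downloads = {}

    @defer.inlineCallbacks
    def run_locked(self, server_name, work):
        # Wait for any lookup already in flight for this server.
        while server_name in self.key_downloads:
            yield self.key_downloads[server_name]

        # Install our own deferred as the lock, as the diff above does by
        # storing the per-server deferred in key_downloads.
        lock = defer.Deferred()
        self.key_downloads[server_name] = lock
        try:
            result = yield defer.maybeDeferred(work)
            defer.returnValue(result)
        finally:
            # Drop the lock (if it is still ours) and wake anyone waiting.
            if self.key_downloads.get(server_name) is lock:
                del self.key_downloads[server_name]
            lock.callback(None)
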
@@ -462,7 +472,7 @@ class StoreKeyFetcher(KeyFetcher):
keys = {}
for (server_name, key_id), key in res.items():
keys.setdefault(server_name, {})[key_id] = key
return keys
defer.returnValue(keys)


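From this point on, most hunks make the same mechanical change: 'return value' inside an @defer.inlineCallbacks generator becomes defer.returnValue(value), which is the spelling required on Python 2 and older Twisted, where a generator cannot return a value directly. A minimal illustration of the two styles; get_json below is a stand-in for any deferred-returning call:

from twisted.internet import defer, task

@defer.inlineCallbacks
def fetch_old_style(get_json):
    # Works on Python 2 and 3: the generator's "return value" is expressed
    # by raising through defer.returnValue().
    content = yield get_json()
    defer.returnValue(content)

@defer.inlineCallbacks
def fetch_new_style(get_json):
    # Only valid on Python 3.3+, where generators may 'return value';
    # this is the style the diff is reverting away from.
    content = yield get_json()
    return content

def main(reactor):
    get_json = lambda: defer.succeed({"ok": True})
    d = fetch_old_style(get_json)
    d.addCallback(print)
    return d

if __name__ == "__main__":
    task.react(main)
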
class BaseV2KeyFetcher(object):
@@ -566,7 +576,7 @@ class BaseV2KeyFetcher(object):
).addErrback(unwrapFirstError)
)

return verify_keys
defer.returnValue(verify_keys)


class PerspectivesKeyFetcher(BaseV2KeyFetcher):
@@ -588,7 +598,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
result = yield self.get_server_verify_key_v2_indirect(
keys_to_fetch, key_server
)
return result
defer.returnValue(result)
except KeyLookupError as e:
logger.warning(
"Key lookup failed from %r: %s", key_server.server_name, e
@@ -601,7 +611,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
str(e),
)

return {}
defer.returnValue({})

results = yield make_deferred_yieldable(
defer.gatherResults(
@@ -615,7 +625,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
for server_name, keys in result.items():
union_of_keys.setdefault(server_name, {}).update(keys)

return union_of_keys
defer.returnValue(union_of_keys)

@defer.inlineCallbacks
def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
@@ -701,7 +711,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
perspective_name, time_now_ms, added_keys
)

return keys
defer.returnValue(keys)

def _validate_perspectives_response(self, key_server, response):
"""Optionally check the signature on the result of a /key/query request
@@ -843,7 +853,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
)
keys.update(response_keys)

return keys
defer.returnValue(keys)


@defer.inlineCallbacks

@@ -144,13 +144,15 @@ class EventBuilder(object):
if self._origin_server_ts is not None:
event_dict["origin_server_ts"] = self._origin_server_ts

return create_local_event_from_event_dict(
clock=self._clock,
hostname=self._hostname,
signing_key=self._signing_key,
format_version=self.format_version,
event_dict=event_dict,
internal_metadata_dict=self.internal_metadata.get_dict(),
defer.returnValue(
create_local_event_from_event_dict(
clock=self._clock,
hostname=self._hostname,
signing_key=self._signing_key,
format_version=self.format_version,
event_dict=event_dict,
internal_metadata_dict=self.internal_metadata.get_dict(),
)
)


@@ -133,17 +133,19 @@ class EventContext(object):
else:
prev_state_id = None

return {
"prev_state_id": prev_state_id,
"event_type": event.type,
"event_state_key": event.state_key if event.is_state() else None,
"state_group": self.state_group,
"rejected": self.rejected,
"prev_group": self.prev_group,
"delta_ids": _encode_state_dict(self.delta_ids),
"prev_state_events": self.prev_state_events,
"app_service_id": self.app_service.id if self.app_service else None,
}
defer.returnValue(
{
"prev_state_id": prev_state_id,
"event_type": event.type,
"event_state_key": event.state_key if event.is_state() else None,
"state_group": self.state_group,
"rejected": self.rejected,
"prev_group": self.prev_group,
"delta_ids": _encode_state_dict(self.delta_ids),
"prev_state_events": self.prev_state_events,
"app_service_id": self.app_service.id if self.app_service else None,
}
)

@staticmethod
def deserialize(store, input):
@@ -200,7 +202,7 @@ class EventContext(object):

yield make_deferred_yieldable(self._fetching_state_deferred)

return self._current_state_ids
defer.returnValue(self._current_state_ids)

@defer.inlineCallbacks
def get_prev_state_ids(self, store):
@@ -220,7 +222,7 @@ class EventContext(object):

yield make_deferred_yieldable(self._fetching_state_deferred)

return self._prev_state_ids
defer.returnValue(self._prev_state_ids)

def get_cached_current_state_ids(self):
"""Gets the current state IDs if we have them already cached.

@@ -51,7 +51,7 @@ class ThirdPartyEventRules(object):
defer.Deferred[bool]: True if the event should be allowed, False if not.
"""
if self.third_party_rules is None:
return True
defer.returnValue(True)

prev_state_ids = yield context.get_prev_state_ids(self.store)

@@ -61,7 +61,7 @@ class ThirdPartyEventRules(object):
state_events[key] = yield self.store.get_event(event_id, allow_none=True)

ret = yield self.third_party_rules.check_event_allowed(event, state_events)
return ret
defer.returnValue(ret)

@defer.inlineCallbacks
def on_create_room(self, requester, config, is_requester_admin):
@@ -98,7 +98,7 @@ class ThirdPartyEventRules(object):
"""

if self.third_party_rules is None:
return True
defer.returnValue(True)

state_ids = yield self.store.get_filtered_current_state_ids(room_id)
room_state_events = yield self.store.get_events(state_ids.values())
@@ -110,4 +110,4 @@ class ThirdPartyEventRules(object):
ret = yield self.third_party_rules.check_threepid_can_be_invited(
medium, address, state_events
)
return ret
defer.returnValue(ret)

@@ -360,7 +360,7 @@ class EventClientSerializer(object):
"""
# To handle the case of presence events and the like
if not isinstance(event, EventBase):
return event
defer.returnValue(event)

event_id = event.event_id
serialized_event = serialize_event(event, time_now, **kwargs)
@@ -406,7 +406,7 @@ class EventClientSerializer(object):
"sender": edit.sender,
}

return serialized_event
defer.returnValue(serialized_event)

def serialize_events(self, events, time_now, **kwargs):
"""Serializes multiple events.

@@ -106,7 +106,7 @@ class FederationBase(object):
"Failed to find copy of %s with valid signature", pdu.event_id
)

return res
defer.returnValue(res)

handle = preserve_fn(handle_check_result)
deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
@@ -116,9 +116,9 @@ class FederationBase(object):
).addErrback(unwrapFirstError)

if include_none:
return valid_pdus
defer.returnValue(valid_pdus)
else:
return [p for p in valid_pdus if p]
defer.returnValue([p for p in valid_pdus if p])

def _check_sigs_and_hash(self, room_version, pdu):
return make_deferred_yieldable(

@@ -213,7 +213,7 @@ class FederationClient(FederationBase):
).addErrback(unwrapFirstError)
)

return pdus
defer.returnValue(pdus)

@defer.inlineCallbacks
@log_function
@@ -245,7 +245,7 @@ class FederationClient(FederationBase):

ev = self._get_pdu_cache.get(event_id)
if ev:
return ev
defer.returnValue(ev)

pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})

@@ -307,7 +307,7 @@ class FederationClient(FederationBase):
if signed_pdu:
self._get_pdu_cache[event_id] = signed_pdu

return signed_pdu
defer.returnValue(signed_pdu)

@defer.inlineCallbacks
@log_function
@@ -355,7 +355,7 @@ class FederationClient(FederationBase):

auth_chain.sort(key=lambda e: e.depth)

return (pdus, auth_chain)
defer.returnValue((pdus, auth_chain))
except HttpResponseException as e:
if e.code == 400 or e.code == 404:
logger.info("Failed to use get_room_state_ids API, falling back")
@@ -404,7 +404,7 @@ class FederationClient(FederationBase):

signed_auth.sort(key=lambda e: e.depth)

return (signed_pdus, signed_auth)
defer.returnValue((signed_pdus, signed_auth))

@defer.inlineCallbacks
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
@@ -429,7 +429,7 @@ class FederationClient(FederationBase):
missing_events.discard(k)

if not missing_events:
return (signed_events, failed_to_fetch)
defer.returnValue((signed_events, failed_to_fetch))

logger.debug(
"Fetching unknown state/auth events %s for room %s",
@@ -465,7 +465,7 @@ class FederationClient(FederationBase):
# We removed all events we successfully fetched from `batch`
failed_to_fetch.update(batch)

return (signed_events, failed_to_fetch)
defer.returnValue((signed_events, failed_to_fetch))

@defer.inlineCallbacks
@log_function
@@ -485,7 +485,7 @@ class FederationClient(FederationBase):

signed_auth.sort(key=lambda e: e.depth)

return signed_auth
defer.returnValue(signed_auth)

@defer.inlineCallbacks
def _try_destination_list(self, description, destinations, callback):
@@ -521,7 +521,7 @@ class FederationClient(FederationBase):

try:
res = yield callback(destination)
return res
defer.returnValue(res)
except InvalidResponseError as e:
logger.warn("Failed to %s via %s: %s", description, destination, e)
except HttpResponseException as e:
@@ -615,7 +615,7 @@ class FederationClient(FederationBase):
event_dict=pdu_dict,
)

return (destination, ev, event_format)
defer.returnValue((destination, ev, event_format))

return self._try_destination_list(
"make_" + membership, destinations, send_request
@@ -728,11 +728,13 @@ class FederationClient(FederationBase):

check_authchain_validity(signed_auth)

return {
"state": signed_state,
"auth_chain": signed_auth,
"origin": destination,
}
defer.returnValue(
{
"state": signed_state,
"auth_chain": signed_auth,
"origin": destination,
}
)

return self._try_destination_list("send_join", destinations, send_request)

@@ -756,7 +758,7 @@ class FederationClient(FederationBase):

# FIXME: We should handle signature failures more gracefully.

return pdu
defer.returnValue(pdu)

@defer.inlineCallbacks
def _do_send_invite(self, destination, pdu, room_version):
@@ -784,7 +786,7 @@ class FederationClient(FederationBase):
"invite_room_state": pdu.unsigned.get("invite_room_state", []),
},
)
return content
defer.returnValue(content)
except HttpResponseException as e:
if e.code in [400, 404]:
err = e.to_synapse_error()
@@ -819,7 +821,7 @@ class FederationClient(FederationBase):
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
return content
defer.returnValue(content)

def send_leave(self, destinations, pdu):
"""Sends a leave event to one of a list of homeservers.
@@ -854,7 +856,7 @@ class FederationClient(FederationBase):
)

logger.debug("Got content: %s", content)
return None
defer.returnValue(None)

return self._try_destination_list("send_leave", destinations, send_request)

@@ -915,7 +917,7 @@ class FederationClient(FederationBase):
"missing": content.get("missing", []),
}

return ret
defer.returnValue(ret)

@defer.inlineCallbacks
def get_missing_events(
@@ -972,7 +974,7 @@ class FederationClient(FederationBase):
# get_missing_events
signed_events = []

return signed_events
defer.returnValue(signed_events)

@defer.inlineCallbacks
def forward_third_party_invite(self, destinations, room_id, event_dict):
@@ -984,7 +986,7 @@ class FederationClient(FederationBase):
yield self.transport_layer.exchange_third_party_invite(
destination=destination, room_id=room_id, event_dict=event_dict
)
return None
defer.returnValue(None)
except CodeMessageException:
raise
except Exception as e:

@@ -99,7 +99,7 @@ class FederationServer(FederationBase):

res = self._transaction_from_pdus(pdus).get_dict()

return (200, res)
defer.returnValue((200, res))

@defer.inlineCallbacks
@log_function
@@ -126,7 +126,7 @@ class FederationServer(FederationBase):
origin, transaction, request_time
)

return result
defer.returnValue(result)

@defer.inlineCallbacks
def _handle_incoming_transaction(self, origin, transaction, request_time):
@@ -147,7 +147,8 @@ class FederationServer(FederationBase):
"[%s] We've already responded to this request",
transaction.transaction_id,
)
return response
defer.returnValue(response)
return

logger.debug("[%s] Transaction is new", transaction.transaction_id)

@@ -162,7 +163,7 @@ class FederationServer(FederationBase):
yield self.transaction_actions.set_response(
origin, transaction, 400, response
)
return (400, response)
defer.returnValue((400, response))

received_pdus_counter.inc(len(transaction.pdus))

@@ -264,7 +265,7 @@ class FederationServer(FederationBase):
logger.debug("Returning: %s", str(response))

yield self.transaction_actions.set_response(origin, transaction, 200, response)
return (200, response)
defer.returnValue((200, response))

@defer.inlineCallbacks
def received_edu(self, origin, edu_type, content):
@@ -297,7 +298,7 @@ class FederationServer(FederationBase):
event_id,
)

return (200, resp)
defer.returnValue((200, resp))

@defer.inlineCallbacks
def on_state_ids_request(self, origin, room_id, event_id):
@@ -314,7 +315,9 @@ class FederationServer(FederationBase):
state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)

return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})
defer.returnValue(
(200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})
)

@defer.inlineCallbacks
def _on_context_state_request_compute(self, room_id, event_id):
@@ -333,10 +336,12 @@ class FederationServer(FederationBase):
)
)

return {
"pdus": [pdu.get_pdu_json() for pdu in pdus],
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
}
defer.returnValue(
{
"pdus": [pdu.get_pdu_json() for pdu in pdus],
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
}
)

@defer.inlineCallbacks
@log_function
@@ -344,15 +349,15 @@ class FederationServer(FederationBase):
pdu = yield self.handler.get_persisted_pdu(origin, event_id)

if pdu:
return (200, self._transaction_from_pdus([pdu]).get_dict())
defer.returnValue((200, self._transaction_from_pdus([pdu]).get_dict()))
else:
return (404, "")
defer.returnValue((404, ""))

@defer.inlineCallbacks
def on_query_request(self, query_type, args):
received_queries_counter.labels(query_type).inc()
resp = yield self.registry.on_query(query_type, args)
return (200, resp)
defer.returnValue((200, resp))

@defer.inlineCallbacks
def on_make_join_request(self, origin, room_id, user_id, supported_versions):
@@ -364,9 +369,11 @@ class FederationServer(FederationBase):
logger.warn("Room version %s not in %s", room_version, supported_versions)
raise IncompatibleRoomVersionError(room_version=room_version)

pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
pdu = yield self.handler.on_make_join_request(room_id, user_id)
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
defer.returnValue(
{"event": pdu.get_pdu_json(time_now), "room_version": room_version}
)

@defer.inlineCallbacks
def on_invite_request(self, origin, content, room_version):
@@ -384,7 +391,7 @@ class FederationServer(FederationBase):
yield self.check_server_matches_acl(origin_host, pdu.room_id)
ret_pdu = yield self.handler.on_invite_request(origin, pdu)
time_now = self._clock.time_msec()
return {"event": ret_pdu.get_pdu_json(time_now)}
defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)})

@defer.inlineCallbacks
def on_send_join_request(self, origin, content, room_id):
@@ -400,26 +407,30 @@ class FederationServer(FederationBase):
logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
res_pdus = yield self.handler.on_send_join_request(origin, pdu)
time_now = self._clock.time_msec()
return (
200,
{
"state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
"auth_chain": [
p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
],
},
defer.returnValue(
(
200,
{
"state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
"auth_chain": [
p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
],
},
)
)

@defer.inlineCallbacks
def on_make_leave_request(self, origin, room_id, user_id):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
pdu = yield self.handler.on_make_leave_request(room_id, user_id)

room_version = yield self.store.get_room_version(room_id)

time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
defer.returnValue(
{"event": pdu.get_pdu_json(time_now), "room_version": room_version}
)

@defer.inlineCallbacks
def on_send_leave_request(self, origin, content, room_id):
@@ -434,7 +445,7 @@ class FederationServer(FederationBase):

logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
yield self.handler.on_send_leave_request(origin, pdu)
return (200, {})
defer.returnValue((200, {}))

@defer.inlineCallbacks
def on_event_auth(self, origin, room_id, event_id):
@@ -445,7 +456,7 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
auth_pdus = yield self.handler.on_event_auth(event_id)
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
return (200, res)
defer.returnValue((200, res))

@defer.inlineCallbacks
def on_query_auth_request(self, origin, content, room_id, event_id):
@@ -498,7 +509,7 @@ class FederationServer(FederationBase):
"missing": ret.get("missing", []),
}

return (200, send_content)
defer.returnValue((200, send_content))

@log_function
def on_query_client_keys(self, origin, content):
@@ -537,7 +548,7 @@ class FederationServer(FederationBase):
),
)

return {"one_time_keys": json_result}
defer.returnValue({"one_time_keys": json_result})

@defer.inlineCallbacks
@log_function
@@ -569,7 +580,9 @@ class FederationServer(FederationBase):

time_now = self._clock.time_msec()

return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
defer.returnValue(
{"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
)

@log_function
def on_openid_userinfo(self, token):
@@ -663,14 +676,14 @@ class FederationServer(FederationBase):
ret = yield self.handler.exchange_third_party_invite(
sender_user_id, target_user_id, room_id, signed
)
return ret
defer.returnValue(ret)

@defer.inlineCallbacks
def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
ret = yield self.handler.on_exchange_third_party_invite_request(
origin, room_id, event_dict
)
return ret
defer.returnValue(ret)

@defer.inlineCallbacks
def check_server_matches_acl(self, server_name, room_id):

@@ -374,7 +374,7 @@ class PerDestinationQueue(object):

assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"

return (edus, now_stream_id)
defer.returnValue((edus, now_stream_id))

@defer.inlineCallbacks
def _get_to_device_message_edus(self, limit):
@@ -393,4 +393,4 @@ class PerDestinationQueue(object):
for content in contents
]

return (edus, stream_id)
defer.returnValue((edus, stream_id))

@@ -133,4 +133,4 @@ class TransactionManager(object):
)
success = False

return success
defer.returnValue(success)

@@ -187,7 +187,7 @@ class TransportLayerClient(object):
try_trailing_slash_on_400=True,
)

return response
defer.returnValue(response)

@defer.inlineCallbacks
@log_function
@@ -205,7 +205,7 @@ class TransportLayerClient(object):
ignore_backoff=ignore_backoff,
)

return content
defer.returnValue(content)

@defer.inlineCallbacks
@log_function
@@ -263,7 +263,7 @@ class TransportLayerClient(object):
ignore_backoff=ignore_backoff,
)

return content
defer.returnValue(content)

@defer.inlineCallbacks
@log_function
@@ -274,7 +274,7 @@ class TransportLayerClient(object):
destination=destination, path=path, data=content
)

return response
defer.returnValue(response)

@defer.inlineCallbacks
@log_function
@@ -292,7 +292,7 @@ class TransportLayerClient(object):
ignore_backoff=True,
)

return response
defer.returnValue(response)

@defer.inlineCallbacks
@log_function
@@ -303,7 +303,7 @@ class TransportLayerClient(object):
destination=destination, path=path, data=content, ignore_backoff=True
)

return response
defer.returnValue(response)

@defer.inlineCallbacks
@log_function
@@ -314,7 +314,7 @@ class TransportLayerClient(object):
destination=destination, path=path, data=content, ignore_backoff=True
)

return response
defer.returnValue(response)

@defer.inlineCallbacks
@log_function
@@ -343,7 +343,7 @@ class TransportLayerClient(object):
destination=remote_server, path=path, args=args, ignore_backoff=True
)

return response
defer.returnValue(response)

@defer.inlineCallbacks
@log_function
@@ -354,7 +354,7 @@ class TransportLayerClient(object):
destination=destination, path=path, data=event_dict
)

return response
defer.returnValue(response)

@defer.inlineCallbacks
@log_function
@@ -363,7 +363,7 @@ class TransportLayerClient(object):

content = yield self.client.get_json(destination=destination, path=path)

return content
defer.returnValue(content)

@defer.inlineCallbacks
@log_function
@@ -374,7 +374,7 @@ class TransportLayerClient(object):
destination=destination, path=path, data=content
)

return content
defer.returnValue(content)

@defer.inlineCallbacks
@log_function
@@ -406,7 +406,7 @@ class TransportLayerClient(object):
content = yield self.client.post_json(
destination=destination, path=path, data=query_content, timeout=timeout
)
return content
defer.returnValue(content)

@defer.inlineCallbacks
@log_function
@@ -430,7 +430,7 @@ class TransportLayerClient(object):
content = yield self.client.get_json(
destination=destination, path=path, timeout=timeout
)
return content
defer.returnValue(content)

@defer.inlineCallbacks
@log_function
@@ -464,7 +464,7 @@ class TransportLayerClient(object):
content = yield self.client.post_json(
destination=destination, path=path, data=query_content, timeout=timeout
)
return content
defer.returnValue(content)

@defer.inlineCallbacks
@log_function
@@ -492,7 +492,7 @@ class TransportLayerClient(object):
timeout=timeout,
)

return content
defer.returnValue(content)

@log_function
def get_group_profile(self, destination, group_id, requester_user_id):

@@ -157,7 +157,7 @@ class GroupAttestionRenewer(object):

yield self.store.update_remote_attestion(group_id, user_id, attestation)

return {}
defer.returnValue({})

def _start_renew_attestations(self):
return run_as_background_process("renew_attestations", self._renew_attestations)

@@ -85,7 +85,7 @@ class GroupsServerHandler(object):
if not is_admin:
raise SynapseError(403, "User is not admin in group")

return group
defer.returnValue(group)

@defer.inlineCallbacks
def get_group_summary(self, group_id, requester_user_id):
@@ -151,20 +151,22 @@ class GroupsServerHandler(object):
group_id, requester_user_id
)

return {
"profile": profile,
"users_section": {
"users": users,
"roles": roles,
"total_user_count_estimate": 0, # TODO
},
"rooms_section": {
"rooms": rooms,
"categories": categories,
"total_room_count_estimate": 0, # TODO
},
"user": membership_info,
}
defer.returnValue(
{
"profile": profile,
"users_section": {
"users": users,
"roles": roles,
"total_user_count_estimate": 0, # TODO
},
"rooms_section": {
"rooms": rooms,
"categories": categories,
"total_room_count_estimate": 0, # TODO
},
"user": membership_info,
}
)

@defer.inlineCallbacks
def update_group_summary_room(
@@ -190,7 +192,7 @@ class GroupsServerHandler(object):
is_public=is_public,
)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def delete_group_summary_room(
@@ -206,7 +208,7 @@ class GroupsServerHandler(object):
group_id=group_id, room_id=room_id, category_id=category_id
)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def set_group_join_policy(self, group_id, requester_user_id, content):
@@ -226,7 +228,7 @@ class GroupsServerHandler(object):

yield self.store.set_group_join_policy(group_id, join_policy=join_policy)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def get_group_categories(self, group_id, requester_user_id):
@@ -235,7 +237,7 @@ class GroupsServerHandler(object):
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

categories = yield self.store.get_group_categories(group_id=group_id)
return {"categories": categories}
defer.returnValue({"categories": categories})

@defer.inlineCallbacks
def get_group_category(self, group_id, requester_user_id, category_id):
@@ -247,7 +249,7 @@ class GroupsServerHandler(object):
group_id=group_id, category_id=category_id
)

return res
defer.returnValue(res)

@defer.inlineCallbacks
def update_group_category(self, group_id, requester_user_id, category_id, content):
@@ -267,7 +269,7 @@ class GroupsServerHandler(object):
profile=profile,
)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def delete_group_category(self, group_id, requester_user_id, category_id):
@@ -281,7 +283,7 @@ class GroupsServerHandler(object):
group_id=group_id, category_id=category_id
)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def get_group_roles(self, group_id, requester_user_id):
@@ -290,7 +292,7 @@ class GroupsServerHandler(object):
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

roles = yield self.store.get_group_roles(group_id=group_id)
return {"roles": roles}
defer.returnValue({"roles": roles})

@defer.inlineCallbacks
def get_group_role(self, group_id, requester_user_id, role_id):
@@ -299,7 +301,7 @@ class GroupsServerHandler(object):
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
return res
defer.returnValue(res)

@defer.inlineCallbacks
def update_group_role(self, group_id, requester_user_id, role_id, content):
@@ -317,7 +319,7 @@ class GroupsServerHandler(object):
group_id=group_id, role_id=role_id, is_public=is_public, profile=profile
)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def delete_group_role(self, group_id, requester_user_id, role_id):
@@ -329,7 +331,7 @@ class GroupsServerHandler(object):

yield self.store.remove_group_role(group_id=group_id, role_id=role_id)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def update_group_summary_user(
@@ -353,7 +355,7 @@ class GroupsServerHandler(object):
is_public=is_public,
)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id):
@@ -367,7 +369,7 @@ class GroupsServerHandler(object):
group_id=group_id, user_id=user_id, role_id=role_id
)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def get_group_profile(self, group_id, requester_user_id):
@@ -389,7 +391,7 @@ class GroupsServerHandler(object):
group_description = {key: group[key] for key in cols}
group_description["is_openly_joinable"] = group["join_policy"] == "open"

return group_description
defer.returnValue(group_description)
else:
raise SynapseError(404, "Unknown group")

@@ -459,7 +461,9 @@ class GroupsServerHandler(object):

# TODO: If admin add lists of users whose attestations have timed out

return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
defer.returnValue(
{"chunk": chunk, "total_user_count_estimate": len(user_results)}
)

@defer.inlineCallbacks
def get_invited_users_in_group(self, group_id, requester_user_id):
@@ -490,7 +494,9 @@ class GroupsServerHandler(object):
logger.warn("Error getting profile for %s: %s", user_id, e)
user_profiles.append(user_profile)

return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
defer.returnValue(
{"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
)

@defer.inlineCallbacks
def get_rooms_in_group(self, group_id, requester_user_id):
@@ -527,7 +533,9 @@ class GroupsServerHandler(object):

chunk.sort(key=lambda e: -e["num_joined_members"])

return {"chunk": chunk, "total_room_count_estimate": len(room_results)}
defer.returnValue(
{"chunk": chunk, "total_room_count_estimate": len(room_results)}
)

@defer.inlineCallbacks
def add_room_to_group(self, group_id, requester_user_id, room_id, content):
@@ -543,7 +551,7 @@ class GroupsServerHandler(object):

yield self.store.add_room_to_group(group_id, room_id, is_public=is_public)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def update_room_in_group(
@@ -566,7 +574,7 @@ class GroupsServerHandler(object):
else:
raise SynapseError(400, "Uknown config option")

return {}
defer.returnValue({})

@defer.inlineCallbacks
def remove_room_from_group(self, group_id, requester_user_id, room_id):
@@ -578,7 +586,7 @@ class GroupsServerHandler(object):

yield self.store.remove_room_from_group(group_id, room_id)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def invite_to_group(self, group_id, user_id, requester_user_id, content):
@@ -636,9 +644,9 @@ class GroupsServerHandler(object):
)
elif res["state"] == "invite":
yield self.store.add_group_invite(group_id, user_id)
return {"state": "invite"}
defer.returnValue({"state": "invite"})
elif res["state"] == "reject":
return {"state": "reject"}
defer.returnValue({"state": "reject"})
else:
raise SynapseError(502, "Unknown state returned by HS")

@@ -671,7 +679,7 @@ class GroupsServerHandler(object):
remote_attestation=remote_attestation,
)

return local_attestation
defer.returnValue(local_attestation)

@defer.inlineCallbacks
def accept_invite(self, group_id, requester_user_id, content):
@@ -691,7 +699,7 @@ class GroupsServerHandler(object):

local_attestation = yield self._add_user(group_id, requester_user_id, content)

return {"state": "join", "attestation": local_attestation}
defer.returnValue({"state": "join", "attestation": local_attestation})

@defer.inlineCallbacks
def join_group(self, group_id, requester_user_id, content):
@@ -708,7 +716,7 @@ class GroupsServerHandler(object):

local_attestation = yield self._add_user(group_id, requester_user_id, content)

return {"state": "join", "attestation": local_attestation}
defer.returnValue({"state": "join", "attestation": local_attestation})

@defer.inlineCallbacks
def knock(self, group_id, requester_user_id, content):
@@ -761,7 +769,7 @@ class GroupsServerHandler(object):
if not self.hs.is_mine_id(user_id):
yield self.store.maybe_delete_remote_profile_cache(user_id)

return {}
defer.returnValue({})

@defer.inlineCallbacks
def create_group(self, group_id, requester_user_id, content):
@@ -837,7 +845,7 @@ class GroupsServerHandler(object):
avatar_url=user_profile.get("avatar_url"),
)

return {"group_id": group_id}
defer.returnValue({"group_id": group_id})

@defer.inlineCallbacks
def delete_group(self, group_id, requester_user_id):

@@ -51,8 +51,8 @@ class AccountDataEventSource(object):
{"type": account_data_type, "content": content, "room_id": room_id}
)

return (results, current_stream_id)
defer.returnValue((results, current_stream_id))

@defer.inlineCallbacks
def get_pagination_rows(self, user, config, key):
return ([], config.to_id)
defer.returnValue(([], config.to_id))
Some files were not shown because too many files have changed in this diff.