Compare commits
134 Commits
Only the SHA1 column of the comparison table survived:

| SHA1 |
|---|
| 44848d6b77 |
| c891774a0e |
| a36d1ec5ae |
| ad8e2cc36c |
| 585ae3884c |
| 12d130738f |
| e2cd6b54f7 |
| 8832e048bc |
| 83d2f62263 |
| 2978ae1b16 |
| df647d64c3 |
| 6783198b5f |
| 006576fc1c |
| 84fad5ecf9 |
| 2a3b35885f |
| bfc08191a6 |
| f70c7aa89a |
| 1de1225767 |
| 5be9e61c1e |
| 0d754ef4cf |
| d9bd181a3f |
| 3efd98aa1c |
| c1dbe84c3d |
| 1d5f0e3529 |
| 1fc97ee876 |
| a7044e5c0f |
| 3efde8b69a |
| e300ef64b1 |
| 0b3112123d |
| f946450184 |
| abc814dcbf |
| 0277b8f3e6 |
| 48a1f4db31 |
| 2ca4e349e9 |
| 64f4f506c5 |
| 9e167d9c53 |
| 24c58ebfc9 |
| 88b9414e32 |
| be0e722fe1 |
| 3a569fb200 |
| 77e56deffc |
| 04ff88139a |
| 9278eb701e |
| 3ada9b4264 |
| abade34633 |
| 906065c75b |
| 5edd91caec |
| cb657eb2f8 |
| 452991527a |
| 48d44ab142 |
| 0d87c6bd12 |
| 04819239ba |
| 44bb881096 |
| 024f121b74 |
| 0ef321ff3b |
| 5688a74cf3 |
| 1d8863c67d |
| a888cbdd31 |
| fc8695d621 |
| d959d28730 |
| e7b769aea1 |
| e2b8a90897 |
| 4609e58970 |
| 33548f37aa |
| bb0fe02a52 |
| 35c5ef2d24 |
| e32294f54b |
| 5fe38e07e7 |
| 5ff8eb97c6 |
| 670564446c |
| ac99774dac |
| 4dabcf026e |
| f02663c4dd |
| 963f4309fe |
| 3a446c21f8 |
| 78e48f61bf |
| f380bb77d1 |
| 01dd90b0f0 |
| 7dcf3fd221 |
| da75d2ea1f |
| 4bbd535450 |
| 5fdff97719 |
| fc53a606e4 |
| ad8690a26c |
| 0a778c135f |
| 7c8402ddb8 |
| b5efcb577e |
| 019010964d |
| 262ed05f5b |
| 548c4a6587 |
| c6f8e8086c |
| 12d6184713 |
| d7d4232a2d |
| d4c4798a25 |
| e5801db830 |
| fae81f2f68 |
| c602ba8336 |
| c2d4bd62a2 |
| 4c3827f2c1 |
| c73cc2c2ad |
| 4655d2221e |
| 83de0be4b0 |
| af387cf52a |
| 7e8dc9934e |
| e550ab17ad |
| 0caf2a338e |
| 4ecba9bd5c |
| b7748d3c00 |
| 5b268997bd |
| 4612302399 |
| d66f9070cd |
| d600d4506b |
| e09838c78f |
| b6ed4f55ac |
| 592d6305fd |
| 0b56481caa |
| 066068f034 |
| 0e35584734 |
| 201178db1a |
| 9b0e3009fa |
| 004234f03a |
| 066c703729 |
| 8dd2ea65a9 |
| dd71eb0f8a |
| 405aeb0b2c |
| 7b06f85c0e |
| cc324d53fe |
| 73dbce5523 |
| ad721fc559 |
| 567f88f835 |
| b449af0379 |
| 27d2820c33 |
| dd5e5dc1d6 |
| 8000cf1315 |
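If you have a local checkout of the repository, an equivalent listing (including authors, dates and commit messages) can be regenerated with `git log`. A minimal sketch — the tag names are an assumption based on the release notes below:

```bash
# List the 134 commits in this comparison, newest first, with an
# abbreviated SHA and the subject line of each commit message.
git log --oneline v1.31.0..v1.32.0rc1

# Sanity-check the count against the "134 Commits" figure above.
git rev-list --count v1.31.0..v1.32.0rc1
```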
```diff
@@ -1,16 +1,16 @@
-#!/bin/bash
+#!/usr/bin/env bash

-# this script is run by buildkite in a plain `xenial` container; it installs the
-# minimal requirements for tox and hands over to the py35-old tox environment.
+# this script is run by buildkite in a plain `bionic` container; it installs the
+# minimal requirements for tox and hands over to the py3-old tox environment.

 set -ex

 apt-get update
-apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
+apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox

 export LANG="C.UTF-8"

 # Prevent virtualenv from auto-updating pip to an incompatible version
 export VIRTUALENV_NO_DOWNLOAD=1

-exec tox -e py35-old,combine
+exec tox -e py3-old,combine
```

```diff
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 #
 # Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
 # with additional dependencies needed for the test (such as coverage or the PostgreSQL
```
`.github/workflows/tests.yml` (vendored, new file, 322 lines):

```yaml
name: Tests

on:
  push:
    branches: ["develop", "release-*"]
  pull_request:

jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        toxenv:
          - "check-sampleconfig"
          - "check_codestyle"
          - "check_isort"
          - "mypy"
          - "packaging"

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: pip install tox
      - run: tox -e ${{ matrix.toxenv }}

  lint-crlf:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Check line endings
        run: scripts-dev/check_line_terminators.sh

  lint-newsfile:
    if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: pip install tox
      - name: Patch Buildkite-specific test script
        run: |
          sed -i -e 's/\$BUILDKITE_PULL_REQUEST/${{ github.event.number }}/' \
            scripts-dev/check-newsfragment
      - run: scripts-dev/check-newsfragment

  lint-sdist:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: "3.x"
      - run: pip install wheel
      - run: python setup.py sdist bdist_wheel
      - uses: actions/upload-artifact@v2
        with:
          name: Python Distributions
          path: dist/*

  # Dummy step to gate other tests on without repeating the whole list
  linting-done:
    if: ${{ always() }} # Run this even if prior jobs were skipped
    needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
    runs-on: ubuntu-latest
    steps:
      - run: "true"

  trial:
    if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.6", "3.7", "3.8", "3.9"]
        database: ["sqlite"]
        include:
          # Newest Python without optional deps
          - python-version: "3.9"
            toxenv: "py-noextras,combine"

          # Oldest Python with PostgreSQL
          - python-version: "3.6"
            database: "postgres"
            postgres-version: "9.6"

          # Newest Python with PostgreSQL
          - python-version: "3.9"
            database: "postgres"
            postgres-version: "13"

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
        if: ${{ matrix.postgres-version }}
        run: |
          docker run -d -p 5432:5432 \
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.postgres-version }}
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install tox
      - name: Await PostgreSQL
        if: ${{ matrix.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done
      - run: tox -e py,combine
        env:
          TRIAL_FLAGS: "--jobs=2"
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: localhost
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
      - name: Dump logs
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  trial-olddeps:
    if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Test with old deps
        uses: docker://ubuntu:bionic # For old python and sqlite
        with:
          workdir: /github/workspace
          entrypoint: .buildkite/scripts/test_old_deps.sh
        env:
          TRIAL_FLAGS: "--jobs=2"
      - name: Dump logs
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  trial-pypy:
    # Very slow; only run if the branch name includes 'pypy'
    if: ${{ contains(github.ref, 'pypy') && !failure() }}
    needs: linting-done
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["pypy-3.6"]

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install tox
      - run: tox -e py,combine
        env:
          TRIAL_FLAGS: "--jobs=2"
      - name: Dump logs
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  sytest:
    if: ${{ !failure() }}
    needs: linting-done
    runs-on: ubuntu-latest
    container:
      image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
      volumes:
        - ${{ github.workspace }}:/src
      env:
        BUILDKITE_BRANCH: ${{ github.head_ref }}
        POSTGRES: ${{ matrix.postgres && 1 }}
        MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1 }}
        WORKERS: ${{ matrix.workers && 1 }}
        REDIS: ${{ matrix.redis && 1 }}
        BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}

    strategy:
      fail-fast: false
      matrix:
        include:
          - sytest-tag: bionic

          - sytest-tag: bionic
            postgres: postgres

          - sytest-tag: testing
            postgres: postgres

          - sytest-tag: bionic
            postgres: multi-postgres
            workers: workers

          - sytest-tag: buster
            postgres: multi-postgres
            workers: workers

          - sytest-tag: buster
            postgres: postgres
            workers: workers
            redis: redis

    steps:
      - uses: actions/checkout@v2
      - name: Prepare test blacklist
        run: cat sytest-blacklist .buildkite/worker-blacklist > synapse-blacklist-with-workers
      - name: Run SyTest
        run: /bootstrap.sh synapse
        working-directory: /src
      - name: Dump results.tap
        if: ${{ always() }}
        run: cat /logs/results.tap
      - name: Upload SyTest logs
        uses: actions/upload-artifact@v2
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
          path: |
            /logs/results.tap
            /logs/**/*.log*

  portdb:
    if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - python-version: "3.6"
            postgres-version: "9.6"

          - python-version: "3.9"
            postgres-version: "13"

    services:
      postgres:
        image: postgres:${{ matrix.postgres-version }}
        ports:
          - 5432:5432
        env:
          POSTGRES_PASSWORD: "postgres"
          POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Patch Buildkite-specific test scripts
        run: |
          sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/create_postgres_db.py
          sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
          sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
          sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
      - run: .buildkite/scripts/test_synapse_port_db.sh

  complement:
    if: ${{ !failure() }}
    needs: linting-done
    runs-on: ubuntu-latest
    container:
      # https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile
      image: matrixdotorg/complement:latest
      env:
        CI: true
      ports:
        - 8448:8448
      volumes:
        - /var/run/docker.sock:/var/run/docker.sock

    steps:
      - name: Run actions/checkout@v2 for synapse
        uses: actions/checkout@v2
        with:
          path: synapse

      - name: Run actions/checkout@v2 for complement
        uses: actions/checkout@v2
        with:
          repository: "matrix-org/complement"
          path: complement

      # Build initial Synapse image
      - run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
        working-directory: synapse

      # Build a ready-to-run Synapse image based on the initial image above.
      # This new image includes a config file, keys for signing and TLS, and
      # other settings to make it suitable for testing under Complement.
      - run: docker build -t complement-synapse -f Synapse.Dockerfile .
        working-directory: complement/dockerfiles

      # Run Complement
      - run: go test -v -tags synapse_blacklist ./tests
        env:
          COMPLEMENT_BASE_IMAGE: complement-synapse:latest
        working-directory: complement
```
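Everything in this workflow ultimately runs through `tox`, so the jobs can be approximated locally. A rough sketch, assuming a supported Python, `tox` on the path, and (optionally) a local PostgreSQL matching the `SYNAPSE_POSTGRES_*` values used above:

```bash
pip install tox

# Mirror the `lint` job's matrix
tox -e check-sampleconfig,check_codestyle,check_isort,mypy,packaging

# Mirror the `trial` job against SQLite
TRIAL_FLAGS="--jobs=2" tox -e py,combine

# Mirror the `trial` job against a locally running PostgreSQL
SYNAPSE_POSTGRES=1 \
SYNAPSE_POSTGRES_HOST=localhost \
SYNAPSE_POSTGRES_USER=postgres \
SYNAPSE_POSTGRES_PASSWORD=postgres \
TRIAL_FLAGS="--jobs=2" tox -e py,combine
```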
`CHANGES.md` (187 lines changed, hunk `@@ -1,3 +1,190 @@`):

Synapse 1.32.0rc1 (2021-04-13)
==============================

**Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+.

This release removes the deprecated `GET /_synapse/admin/v1/users/<user_id>` admin API. Please use the [v2 API](https://github.com/matrix-org/synapse/blob/develop/docs/admin_api/user_admin_api.rst#query-user-account) instead, which has improved capabilities.

This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date.

Features
--------

- Add a Synapse module for routing presence updates between users. ([\#9491](https://github.com/matrix-org/synapse/issues/9491))
- Add an admin API to manage ratelimit for a specific user. ([\#9648](https://github.com/matrix-org/synapse/issues/9648))
- Include request information in structured logging output. ([\#9654](https://github.com/matrix-org/synapse/issues/9654))
- Add `order_by` to the admin API `GET /_synapse/admin/v2/users`. Contributed by @dklimpel. ([\#9691](https://github.com/matrix-org/synapse/issues/9691))
- Replace the `room_invite_state_types` configuration setting with `room_prejoin_state`. ([\#9700](https://github.com/matrix-org/synapse/issues/9700))
- Add experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. ([\#9717](https://github.com/matrix-org/synapse/issues/9717), [\#9735](https://github.com/matrix-org/synapse/issues/9735))
- Update experimental support for Spaces: include `m.room.create` in the room state sent with room-invites. ([\#9710](https://github.com/matrix-org/synapse/issues/9710))
- Synapse now requires Python 3.6 or later. It also requires Postgres 9.6 or later or SQLite 3.22 or later. ([\#9766](https://github.com/matrix-org/synapse/issues/9766))


Bugfixes
--------

- Prevent `synapse_forward_extremities` and `synapse_excess_extremity_events` Prometheus metrics from initially reporting zero-values after startup. ([\#8926](https://github.com/matrix-org/synapse/issues/8926))
- Fix recently added ratelimits to correctly honour the application service `rate_limited` flag. ([\#9711](https://github.com/matrix-org/synapse/issues/9711))
- Fix longstanding bug which caused `duplicate key value violates unique constraint "remote_media_cache_thumbnails_media_origin_media_id_thumbna_key"` errors. ([\#9725](https://github.com/matrix-org/synapse/issues/9725))
- Fix bug where sharded federation senders could get stuck repeatedly querying the DB in a loop, using lots of CPU. ([\#9770](https://github.com/matrix-org/synapse/issues/9770))
- Fix duplicate logging of exceptions thrown during federation transaction processing. ([\#9780](https://github.com/matrix-org/synapse/issues/9780))


Updates to the Docker image
---------------------------

- Move opencontainers labels to the final Docker image such that users can inspect them. ([\#9765](https://github.com/matrix-org/synapse/issues/9765))


Improved Documentation
----------------------

- Make the `allowed_local_3pids` regex example in the sample config stricter. ([\#9719](https://github.com/matrix-org/synapse/issues/9719))


Deprecations and Removals
-------------------------

- Remove old admin API `GET /_synapse/admin/v1/users/<user_id>`. ([\#9401](https://github.com/matrix-org/synapse/issues/9401))
- Make `/_matrix/client/r0/register` expect a type of `m.login.application_service` when an Application Service registers a user, to align with [the relevant spec](https://spec.matrix.org/unstable/application-service-api/#server-admin-style-permissions). ([\#9548](https://github.com/matrix-org/synapse/issues/9548))


Internal Changes
----------------

- Replace deprecated `imp` module with successor `importlib`. Contributed by Cristina Muñoz. ([\#9718](https://github.com/matrix-org/synapse/issues/9718))
- Experiment with GitHub Actions for CI. ([\#9661](https://github.com/matrix-org/synapse/issues/9661))
- Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9682](https://github.com/matrix-org/synapse/issues/9682))
- Update `scripts-dev/complement.sh` to use a local checkout of Complement, allow running a subset of tests and have it use Synapse's Complement test blacklist. ([\#9685](https://github.com/matrix-org/synapse/issues/9685))
- Improve Jaeger tracing for `to_device` messages. ([\#9686](https://github.com/matrix-org/synapse/issues/9686))
- Add release helper script for automating part of the Synapse release process. ([\#9713](https://github.com/matrix-org/synapse/issues/9713))
- Add type hints to expiring cache. ([\#9730](https://github.com/matrix-org/synapse/issues/9730))
- Convert various testcases to `HomeserverTestCase`. ([\#9736](https://github.com/matrix-org/synapse/issues/9736))
- Start linting mypy with `no_implicit_optional`. ([\#9742](https://github.com/matrix-org/synapse/issues/9742))
- Add missing type hints to federation handler and server. ([\#9743](https://github.com/matrix-org/synapse/issues/9743))
- Check that a `ConfigError` is raised, rather than simply `Exception`, when appropriate in homeserver config file generation tests. ([\#9753](https://github.com/matrix-org/synapse/issues/9753))
- Fix incompatibility with `tox` 2.5. ([\#9769](https://github.com/matrix-org/synapse/issues/9769))
- Enable Complement tests for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary API. ([\#9771](https://github.com/matrix-org/synapse/issues/9771))
- Use mock from the standard library instead of a separate package. ([\#9772](https://github.com/matrix-org/synapse/issues/9772))
- Update Black configuration to target Python 3.6. ([\#9781](https://github.com/matrix-org/synapse/issues/9781))
- Add option to skip unit tests when building Debian packages. ([\#9793](https://github.com/matrix-org/synapse/issues/9793))


Synapse 1.31.0 (2021-04-06)
===========================

**Note:** As announced in v1.25.0, and in line with the deprecation policy for platform dependencies, this is the last release to support Python 3.5 and PostgreSQL 9.5. Future versions of Synapse will require Python 3.6+ and PostgreSQL 9.6+, as per our [deprecation policy](docs/deprecation_policy.md).

This is also the last release that the Synapse team will be publishing packages for Debian Stretch and Ubuntu Xenial.


Improved Documentation
----------------------

- Add a document describing the deprecation policy for platform dependencies. ([\#9723](https://github.com/matrix-org/synapse/issues/9723))


Internal Changes
----------------

- Revert using `dmypy run` in lint script. ([\#9720](https://github.com/matrix-org/synapse/issues/9720))
- Pin flake8-bugbear's version. ([\#9734](https://github.com/matrix-org/synapse/issues/9734))


Synapse 1.31.0rc1 (2021-03-30)
==============================

Features
--------

- Add support to OpenID Connect login for requiring attributes on the `userinfo` response. Contributed by Hubbe King. ([\#9609](https://github.com/matrix-org/synapse/issues/9609))
- Add initial experimental support for a "space summary" API. ([\#9643](https://github.com/matrix-org/synapse/issues/9643), [\#9652](https://github.com/matrix-org/synapse/issues/9652), [\#9653](https://github.com/matrix-org/synapse/issues/9653))
- Add support for the busy presence state as described in [MSC3026](https://github.com/matrix-org/matrix-doc/pull/3026). ([\#9644](https://github.com/matrix-org/synapse/issues/9644))
- Add support for credentials for proxy authentication in the `HTTPS_PROXY` environment variable. ([\#9657](https://github.com/matrix-org/synapse/issues/9657))


Bugfixes
--------

- Fix a longstanding bug that could cause issues when editing a reply to a message. ([\#9585](https://github.com/matrix-org/synapse/issues/9585))
- Fix the `/capabilities` endpoint to return `m.change_password` as disabled if the local password database is not used for authentication. Contributed by @dklimpel. ([\#9588](https://github.com/matrix-org/synapse/issues/9588))
- Check if local passwords are enabled before setting them for the user. ([\#9636](https://github.com/matrix-org/synapse/issues/9636))
- Fix a bug where federation sending can stall due to `concurrent access` database exceptions when it falls behind. ([\#9639](https://github.com/matrix-org/synapse/issues/9639))
- Fix a bug introduced in Synapse 1.30.1 which meant the suggested `pip` incantation to install an updated `cryptography` was incorrect. ([\#9699](https://github.com/matrix-org/synapse/issues/9699))


Updates to the Docker image
---------------------------

- Speed up Docker builds and make it nicer to test against Complement while developing (install all dependencies before copying the project). ([\#9610](https://github.com/matrix-org/synapse/issues/9610))
- Include [opencontainers labels](https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys) in the Docker image. ([\#9612](https://github.com/matrix-org/synapse/issues/9612))


Improved Documentation
----------------------

- Clarify that `register_new_matrix_user` is present also when installed via non-pip package. ([\#9074](https://github.com/matrix-org/synapse/issues/9074))
- Update source install documentation to mention platform prerequisites before the source install steps. ([\#9667](https://github.com/matrix-org/synapse/issues/9667))
- Improve worker documentation for fallback/web auth endpoints. ([\#9679](https://github.com/matrix-org/synapse/issues/9679))
- Update the sample configuration for OIDC authentication. ([\#9695](https://github.com/matrix-org/synapse/issues/9695))


Internal Changes
----------------

- Preparatory steps for removing redundant `outlier` data from `event_json.internal_metadata` column. ([\#9411](https://github.com/matrix-org/synapse/issues/9411))
- Add type hints to the caching module. ([\#9442](https://github.com/matrix-org/synapse/issues/9442))
- Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9499](https://github.com/matrix-org/synapse/issues/9499), [\#9659](https://github.com/matrix-org/synapse/issues/9659))
- Add additional type hints to the Homeserver object. ([\#9631](https://github.com/matrix-org/synapse/issues/9631), [\#9638](https://github.com/matrix-org/synapse/issues/9638), [\#9675](https://github.com/matrix-org/synapse/issues/9675), [\#9681](https://github.com/matrix-org/synapse/issues/9681))
- Only save remote cross-signing and device keys if they're different from the current ones. ([\#9634](https://github.com/matrix-org/synapse/issues/9634))
- Rename storage function to fix spelling and not conflict with another function's name. ([\#9637](https://github.com/matrix-org/synapse/issues/9637))
- Improve performance of federation catch up by sending the latest events in the room to the remote, rather than just the last event sent by the local server. ([\#9640](https://github.com/matrix-org/synapse/issues/9640), [\#9664](https://github.com/matrix-org/synapse/issues/9664))
- In the `federation_client` commandline client, stop automatically adding the URL prefix, so that servlets on other prefixes can be tested. ([\#9645](https://github.com/matrix-org/synapse/issues/9645))
- In the `federation_client` commandline client, handle inline `signing_key`s in `homeserver.yaml`. ([\#9647](https://github.com/matrix-org/synapse/issues/9647))
- Fixed some antipattern issues to improve code quality. ([\#9649](https://github.com/matrix-org/synapse/issues/9649))
- Add a storage method for pulling all current user presence state from the database. ([\#9650](https://github.com/matrix-org/synapse/issues/9650))
- Import `HomeServer` from the proper module. ([\#9665](https://github.com/matrix-org/synapse/issues/9665))
- Increase default join ratelimiting burst rate. ([\#9674](https://github.com/matrix-org/synapse/issues/9674))
- Add type hints to third party event rules and visibility modules. ([\#9676](https://github.com/matrix-org/synapse/issues/9676))
- Bump mypy-zope to 0.2.13 to fix "Cannot determine consistent method resolution order (MRO)" errors when running mypy a second time. ([\#9678](https://github.com/matrix-org/synapse/issues/9678))
- Use interpreter from `$PATH` via `/usr/bin/env` instead of absolute paths in various scripts. ([\#9689](https://github.com/matrix-org/synapse/issues/9689))
- Make it possible to use `dmypy`. ([\#9692](https://github.com/matrix-org/synapse/issues/9692))
- Suppress "CryptographyDeprecationWarning: int_from_bytes is deprecated". ([\#9698](https://github.com/matrix-org/synapse/issues/9698))
- Use `dmypy run` in lint script for improved performance in type-checking while developing. ([\#9701](https://github.com/matrix-org/synapse/issues/9701))
- Fix undetected mypy error when using Python 3.6. ([\#9703](https://github.com/matrix-org/synapse/issues/9703))
- Fix type-checking CI on develop. ([\#9709](https://github.com/matrix-org/synapse/issues/9709))


Synapse 1.30.1 (2021-03-26)
===========================

This release is identical to Synapse 1.30.0, with the exception of explicitly setting a minimum version of Python's Cryptography library to ensure that users of Synapse are protected from the recent [OpenSSL security advisories](https://mta.openssl.org/pipermail/openssl-announce/2021-March/000198.html), especially CVE-2021-3449.

Note that Cryptography defaults to bundling its own statically linked copy of OpenSSL, which means that you may not be protected by your operating system's security updates.

It's also worth noting that Cryptography no longer supports Python 3.5, so admins deploying to older environments may not be protected against this or future vulnerabilities. Synapse will be dropping support for Python 3.5 at the end of March.


Updates to the Docker image
---------------------------

- Ensure that the docker container has up to date versions of openssl. ([\#9697](https://github.com/matrix-org/synapse/issues/9697))


Internal Changes
----------------

- Enforce that `cryptography` dependency is up to date to ensure it has the most recent openssl patches. ([\#9697](https://github.com/matrix-org/synapse/issues/9697))


Synapse 1.30.0 (2021-03-22)
===========================
`INSTALL.md` (38 lines changed):

````diff
@@ -6,7 +6,7 @@ There are 3 steps to follow under **Installation Instructions**.
 - [Choosing your server name](#choosing-your-server-name)
 - [Installing Synapse](#installing-synapse)
   - [Installing from source](#installing-from-source)
-    - [Platform-Specific Instructions](#platform-specific-instructions)
+    - [Platform-specific prerequisites](#platform-specific-prerequisites)
       - [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
       - [ArchLinux](#archlinux)
       - [CentOS/Fedora](#centosfedora)
@@ -38,6 +38,7 @@ There are 3 steps to follow under **Installation Instructions**.
   - [URL previews](#url-previews)
+- [Troubleshooting Installation](#troubleshooting-installation)
 
 
 ## Choosing your server name
 
 It is important to choose the name for your server before you install Synapse,
@@ -60,17 +61,14 @@ that your email address is probably `user@example.com` rather than
 
 (Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
 
+When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.
+
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
 - Python 3.5.2 or later, up to Python 3.9.
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
-Synapse is written in Python but some of the libraries it uses are written in
-C. So before we can install Synapse itself we need a working C compiler and the
-header files for Python C extensions. See [Platform-Specific
-Instructions](#platform-specific-instructions) for information on installing
-these on various platforms.
-
 To install the Synapse homeserver run:
@@ -128,7 +126,11 @@ source env/bin/activate
 synctl start
 ```
 
-#### Platform-Specific Instructions
+#### Platform-specific prerequisites
+
+Synapse is written in Python but some of the libraries it uses are written in
+C. So before we can install Synapse itself we need a working C compiler and the
+header files for Python C extensions.
 
 ##### Debian/Ubuntu/Raspbian
@@ -526,14 +528,24 @@ email will be disabled.
 
 The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
 
-Alternatively you can do so from the command line if you have installed via pip.
-
-This can be done as follows:
+Alternatively, you can do so from the command line. This can be done as follows:
 
-```sh
-$ source ~/synapse/env/bin/activate
-$ synctl start # if not already running
-$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
-```
+1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
+   installed via a prebuilt package, `register_new_matrix_user` should already be
+   on the search path):
+   ```sh
+   cd ~/synapse
+   source env/bin/activate
+   synctl start # if not already running
+   ```
+2. Run the following command:
+   ```sh
+   register_new_matrix_user -c homeserver.yaml http://localhost:8008
+   ```
 
 This will prompt you to add details for the new user, and will then connect to
 the running Synapse to create the new user. For example:
 ```
 New user localpart: erikj
 Password:
 Confirm password:
````
`README.rst` (18 lines changed):

```diff
@@ -314,6 +314,15 @@ Testing with SyTest is recommended for verifying that changes related to the
 Client-Server API are functioning correctly. See the `installation instructions
 <https://github.com/matrix-org/sytest#installing>`_ for details.
 
 
+Platform dependencies
+=====================
+
+Synapse uses a number of platform dependencies such as Python and PostgreSQL,
+and aims to follow supported upstream versions. See the
+`<docs/deprecation_policy.md>`_ document for more details.
+
+
 Troubleshooting
 ===============
 
@@ -384,12 +393,17 @@ massive excess of outgoing federation requests (see `discussion
 indicate that your server is also issuing far more outgoing federation
 requests than can be accounted for by your users' activity, this is a
 likely cause. The misbehavior can be worked around by setting
-``use_presence: false`` in the Synapse config file.
+the following in the Synapse config file:
+
+.. code-block:: yaml
+
+   presence:
+     enabled: false
 
 People can't accept room invitations from me
 --------------------------------------------
 
 The typical failure mode here is that you send an invitation to someone
 to join a room or direct chat, but when they go to accept it, they get an
 error (typically along the lines of "Invalid signature"). They might see
 something like the following in their logs::
```
`UPGRADE.rst` (22 lines changed):

```diff
@@ -85,6 +85,19 @@ for example:
       wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
       dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.32.0
+====================
+
+Removal of old List Accounts Admin API
+--------------------------------------
+
+The deprecated v1 "list accounts" admin API (``GET /_synapse/admin/v1/users/<user_id>``) has been removed in this version.
+
+The `v2 list accounts API <https://github.com/matrix-org/synapse/blob/master/docs/admin_api/user_admin_api.rst#list-accounts>`_
+has been available since Synapse 1.7.0 (2019-12-13), and is accessible under ``GET /_synapse/admin/v2/users``.
+
+The deprecation of the old endpoint was announced with Synapse 1.28.0 (released on 2021-02-25).
+
 Upgrading to v1.29.0
 ====================
 
@@ -98,9 +111,12 @@ will log a warning on each received request.
 
 To avoid the warning, administrators using a reverse proxy should ensure that
 the reverse proxy sets `X-Forwarded-Proto` header to `https` or `http` to
-indicate the protocol used by the client. See the `reverse proxy documentation
-<docs/reverse_proxy.md>`_, where the example configurations have been updated to
-show how to set this header.
+indicate the protocol used by the client.
+
+Synapse also requires the `Host` header to be preserved.
+
+See the `reverse proxy documentation <docs/reverse_proxy.md>`_, where the
+example configurations have been updated to show how to set these headers.
 
 (Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
 sets `X-Forwarded-Proto` by default.)
```
`changelog.d/9803.doc` (new file, 1 line):

Add hardened systemd files as proposed in [#9760](https://github.com/matrix-org/synapse/issues/9760) to `contrib/`, and change the docs to reflect the presence of these files.
```diff
@@ -24,6 +24,7 @@ import sys
 import time
 import urllib
 from http import TwistedHttpClient
+from typing import Optional
 
 import nacl.encoding
 import nacl.signing
@@ -718,7 +719,7 @@ class SynapseCmd(cmd.Cmd):
         method,
         path,
         data=None,
-        query_params={"access_token": None},
+        query_params: Optional[dict] = None,
         alt_text=None,
     ):
         """Runs an HTTP request and pretty prints the output.
@@ -729,6 +730,8 @@ class SynapseCmd(cmd.Cmd):
             data: Raw JSON data if any
             query_params: dict of query parameters to add to the url
         """
+        query_params = query_params or {"access_token": None}
+
         url = self._url() + path
         if "access_token" in query_params:
             query_params["access_token"] = self._tok()
```

```diff
@@ -16,6 +16,7 @@
 import json
 import urllib
 from pprint import pformat
+from typing import Optional
 
 from twisted.internet import defer, reactor
 from twisted.web.client import Agent, readBody
@@ -85,8 +86,9 @@ class TwistedHttpClient(HttpClient):
         body = yield readBody(response)
         defer.returnValue(json.loads(body))
 
-    def _create_put_request(self, url, json_data, headers_dict={}):
+    def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
         """Wrapper of _create_request to issue a PUT request"""
+        headers_dict = headers_dict or {}
 
         if "Content-Type" not in headers_dict:
             raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
@@ -95,14 +97,22 @@ class TwistedHttpClient(HttpClient):
             "PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
         )
 
-    def _create_get_request(self, url, headers_dict={}):
+    def _create_get_request(self, url, headers_dict: Optional[dict] = None):
         """Wrapper of _create_request to issue a GET request"""
-        return self._create_request("GET", url, headers_dict=headers_dict)
+        return self._create_request("GET", url, headers_dict=headers_dict or {})
 
     @defer.inlineCallbacks
     def do_request(
-        self, method, url, data=None, qparams=None, jsonreq=True, headers={}
+        self,
+        method,
+        url,
+        data=None,
+        qparams=None,
+        jsonreq=True,
+        headers: Optional[dict] = None,
     ):
+        headers = headers or {}
+
         if qparams:
             url = "%s?%s" % (url, urllib.urlencode(qparams, True))
 
@@ -123,8 +133,12 @@ class TwistedHttpClient(HttpClient):
         defer.returnValue(json.loads(body))
 
     @defer.inlineCallbacks
-    def _create_request(self, method, url, producer=None, headers_dict={}):
+    def _create_request(
+        self, method, url, producer=None, headers_dict: Optional[dict] = None
+    ):
         """Creates and sends a request to the given url"""
+        headers_dict = headers_dict or {}
+
         headers_dict["User-Agent"] = ["Synapse Cmd Client"]
 
         retries_left = 5
```
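All of the `Optional[dict] = None` changes above fix the same Python pitfall: a mutable default such as `headers_dict={}` is evaluated once, at definition time, and then shared by every call. A minimal, self-contained illustration of the bug and of the `None`-plus-fallback idiom the diff adopts (the function names here are illustrative, not from the Synapse code):

```python
from typing import Optional


def broken(item, bucket=[]):
    # BUG: the same list object is reused on every call,
    # so items "leak" between unrelated calls.
    bucket.append(item)
    return bucket


def fixed(item, bucket: Optional[list] = None):
    # A fresh list is created per call unless one is passed in,
    # mirroring the `x = x or default` pattern used in the diff.
    bucket = bucket or []
    bucket.append(item)
    return bucket


assert broken("a") == ["a"]
assert broken("b") == ["a", "b"]   # surprising shared state
assert fixed("a") == ["a"]
assert fixed("b") == ["b"]         # independent calls
```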
```diff
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # this script will use the api:
 # https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
```

```diff
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 DOMAIN=yourserver.tld
 # add this user as admin in your home server:
```
`contrib/systemd/override-hardened.conf` (new file, 71 lines):

```ini
[Service]
# The following directives give the synapse service R/W access to:
# - /run/matrix-synapse
# - /var/lib/matrix-synapse
# - /var/log/matrix-synapse

RuntimeDirectory=matrix-synapse
StateDirectory=matrix-synapse
LogsDirectory=matrix-synapse

######################
## Security Sandbox ##
######################

# Make sure that the service has its own unshared tmpfs at /tmp and that it
# cannot see or change any real devices
PrivateTmp=true
PrivateDevices=true

# We give no capabilities to a service by default
CapabilityBoundingSet=
AmbientCapabilities=

# Protect the following from modification:
# - The entire filesystem
# - sysctl settings and loaded kernel modules
# - No modifications allowed to Control Groups
# - Hostname
# - System Clock
ProtectSystem=strict
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true
ProtectClock=true
ProtectHostname=true

# Prevent access to the following:
# - /home directory
# - Kernel logs
ProtectHome=tmpfs
ProtectKernelLogs=true

# Make sure that the process can only see PIDs and process details of itself,
# and the second option disables seeing details of things like system load and
# I/O etc
ProtectProc=invisible
ProcSubset=pid

# While not needed, we set these options explicitly
# - This process has been given access to the host network
# - It can also communicate with any IP Address
PrivateNetwork=false
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
IPAddressAllow=any

# Restrict system calls to a sane bunch
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallFilter=~@privileged @resources @obsolete

# Misc restrictions
# - Since the process is a python process it needs to be able to write and
#   execute memory regions, so we set MemoryDenyWriteExecute to false
RestrictSUIDSGID=true
RemoveIPC=true
NoNewPrivileges=true
RestrictRealtime=true
RestrictNamespaces=true
LockPersonality=true
PrivateUsers=true
MemoryDenyWriteExecute=false
```
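A unit override like this is installed as a systemd drop-in. A sketch of applying it — the unit name `matrix-synapse.service` and the drop-in path are assumptions matching the Debian packaging, so adjust as needed:

```bash
# Install the override as a drop-in for the service...
sudo mkdir -p /etc/systemd/system/matrix-synapse.service.d
sudo cp contrib/systemd/override-hardened.conf \
    /etc/systemd/system/matrix-synapse.service.d/

# ...then reload systemd and restart the service.
sudo systemctl daemon-reload
sudo systemctl restart matrix-synapse.service

# Optionally, review the resulting sandboxing score.
systemd-analyze security matrix-synapse.service
```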
`debian/build_virtualenv` (vendored, 23 lines changed):

```diff
@@ -50,15 +50,24 @@ PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
 VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
 TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
 
-# we copy the tests to a temporary directory so that we can put them on the
-# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
-tmpdir=`mktemp -d`
-trap "rm -r $tmpdir" EXIT
-
-cp -r tests "$tmpdir"
-
-PYTHONPATH="$tmpdir" \
-    "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
+case "$DEB_BUILD_OPTIONS" in
+    *nocheck*)
+        # Skip running tests if "nocheck" present in $DEB_BUILD_OPTIONS
+        ;;
+
+    *)
+        # Copy tests to a temporary directory so that we can put them on the
+        # PYTHONPATH without putting the uninstalled synapse on the pythonpath.
+        tmpdir=`mktemp -d`
+        trap "rm -r $tmpdir" EXIT
+
+        cp -r tests "$tmpdir"
+
+        PYTHONPATH="$tmpdir" \
+            "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
+
+        ;;
+esac
 
 # build the config file
 "${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
```
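With this change the package build honours the standard Debian `nocheck` build option. A sketch of a binary-only build that skips the `twisted.trial` run (assuming the usual `dpkg-dev` tooling):

```bash
# Build without running the test suite; omit DEB_BUILD_OPTIONS to
# restore the default behaviour of running trial during the build.
DEB_BUILD_OPTIONS="nocheck" dpkg-buildpackage -us -uc -b
```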
`debian/changelog` (vendored, 18 lines changed):

```diff
@@ -1,3 +1,21 @@
+matrix-synapse-py3 (1.31.0+nmu1) UNRELEASED; urgency=medium
+
+  * Skip tests when DEB_BUILD_OPTIONS contains "nocheck".
+
+ -- Dan Callahan <danc@element.io>  Mon, 12 Apr 2021 13:07:36 +0000
+
+matrix-synapse-py3 (1.31.0) stable; urgency=medium
+
+  * New synapse release 1.31.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Apr 2021 13:08:29 +0100
+
+matrix-synapse-py3 (1.30.1) stable; urgency=medium
+
+  * New synapse release 1.30.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 26 Mar 2021 12:01:28 +0000
+
 matrix-synapse-py3 (1.30.0) stable; urgency=medium
 
   * New synapse release 1.30.0.
```
```diff
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 set -e
 
```

```diff
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 DIR="$( cd "$( dirname "$0" )" && pwd )"
 
```

```diff
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 DIR="$( cd "$( dirname "$0" )" && pwd )"
```
```diff
@@ -28,33 +28,32 @@ RUN apt-get update && apt-get install -y \
     libwebp-dev \
     libxml++2.6-dev \
     libxslt1-dev \
     openssl \
     rustc \
     zlib1g-dev \
  && rm -rf /var/lib/apt/lists/*
 
 # Build dependencies that are not available as wheels, to speed up rebuilds
 RUN pip install --prefix="/install" --no-warn-script-location \
     cryptography \
     frozendict \
     jaeger-client \
     opentracing \
     # Match the version constraints of Synapse
     "prometheus_client>=0.4.0" \
     psycopg2 \
     pycparser \
     pyrsistent \
     pyyaml \
     simplejson \
     threadloop \
     thrift
 
-# now install synapse and all of the python deps to /install.
-COPY synapse /synapse/synapse/
+# Copy just what we need to pip install
 COPY scripts /synapse/scripts/
 COPY MANIFEST.in README.rst setup.py synctl /synapse/
+COPY synapse/__init__.py /synapse/synapse/__init__.py
+COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
 
+# To speed up rebuilds, install all of the dependencies before we copy over
+# the whole synapse project, so that this layer in the Docker cache can be
+# used while you develop on the source
+#
+# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
 RUN pip install --prefix="/install" --no-warn-script-location \
     /synapse[all]
+
+# Copy over the rest of the project
+COPY synapse /synapse/synapse/
+
+# Install the synapse package itself and all of its children packages.
+#
+# This is aiming at installing only the `packages=find_packages(...)` from `setup.py`
+RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
 
 ###
 ### Stage 1: runtime
@@ -62,6 +61,11 @@ RUN pip install --prefix="/install" --no-warn-script-location \
 
 FROM docker.io/python:${PYTHON_VERSION}-slim
 
+LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
+LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
+LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
+LABEL org.opencontainers.image.licenses='Apache-2.0'
+
 RUN apt-get update && apt-get install -y \
     curl \
     gosu \
@@ -70,7 +74,9 @@ RUN apt-get update && apt-get install -y \
     libwebp6 \
     xmlsec1 \
     libjemalloc2 \
+    libssl-dev \
+    openssl \
  && rm -rf /var/lib/apt/lists/*
 
 COPY --from=builder /install /usr/local
 COPY ./docker/start.py /start.py
@@ -83,4 +89,4 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
 ENTRYPOINT ["/start.py"]
 
 HEALTHCHECK --interval=1m --timeout=5s \
     CMD curl -fSs http://localhost:8008/health || exit 1
```
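The reordering above keeps the dependency layers cacheable while the Synapse sources change. A sketch of the resulting build-and-run cycle (the `/data` volume layout is the image's documented convention; the host paths are assumptions):

```bash
# Build the image from the repository root (same command the
# complement CI job uses above)
docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .

# Run it with a config directory mounted at /data
docker run -d --name synapse \
    -v "$(pwd)/data:/data" \
    -p 8008:8008 \
    matrixdotorg/synapse:latest

# The HEALTHCHECK added above drives the container's health status
docker inspect --format '{{.State.Health.Status}}' synapse
```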
```diff
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # The script to build the Debian package, as ran inside the Docker image.
 
```
```diff
@@ -173,18 +173,10 @@ report_stats: False
 
 ## API Configuration ##
 
-room_invite_state_types:
-  - "m.room.join_rules"
-  - "m.room.canonical_alias"
-  - "m.room.avatar"
-  - "m.room.name"
-
 {% if SYNAPSE_APPSERVICES %}
 app_service_config_files:
 {% for appservice in SYNAPSE_APPSERVICES %}  - "{{ appservice }}"
 {% endfor %}
 {% else %}
 app_service_config_files: []
 {% endif %}
 
 macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
```
```diff
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # This script runs the PostgreSQL tests inside a Docker container. It expects
 # the relevant source files to be mounted into /src (done automatically by the
```
```diff
@@ -111,35 +111,16 @@ List Accounts
 =============
 
 This API returns all local user accounts.
+By default, the response is ordered by ascending user ID.
 
-The api is::
+The API is::
 
     GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
 
 To use it, you will need to authenticate by providing an ``access_token`` for a
 server admin: see `README.rst <README.rst>`_.
 
-The parameter ``from`` is optional but used for pagination, denoting the
-offset in the returned results. This should be treated as an opaque value and
-not explicitly set to anything other than the return value of ``next_token``
-from a previous call.
-
-The parameter ``limit`` is optional but is used for pagination, denoting the
-maximum number of items to return in this call. Defaults to ``100``.
-
-The parameter ``user_id`` is optional and filters to only return users with user IDs
-that contain this value. This parameter is ignored when using the ``name`` parameter.
-
-The parameter ``name`` is optional and filters to only return users with user ID localparts
-**or** displaynames that contain this value.
-
-The parameter ``guests`` is optional and if ``false`` will **exclude** guest users.
-Defaults to ``true`` to include guest users.
-
-The parameter ``deactivated`` is optional and if ``true`` will **include** deactivated users.
-Defaults to ``false`` to exclude deactivated users.
-
-A JSON body is returned with the following shape:
+A response body like the following is returned:
 
 .. code:: json
```
```diff
@@ -175,6 +156,66 @@ with ``from`` set to the value of ``next_token``. This will return a new page.
 If the endpoint does not return a ``next_token`` then there are no more users
 to paginate through.
 
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - Is optional and filters to only return users with user IDs
+  that contain this value. This parameter is ignored when using the ``name`` parameter.
+- ``name`` - Is optional and filters to only return users with user ID localparts
+  **or** displaynames that contain this value.
+- ``guests`` - string representing a bool - Is optional and if ``false`` will **exclude** guest users.
+  Defaults to ``true`` to include guest users.
+- ``deactivated`` - string representing a bool - Is optional and if ``true`` will **include** deactivated users.
+  Defaults to ``false`` to exclude deactivated users.
+- ``limit`` - string representing a positive integer - Is optional but is used for pagination,
+  denoting the maximum number of items to return in this call. Defaults to ``100``.
+- ``from`` - string representing a positive integer - Is optional but used for pagination,
+  denoting the offset in the returned results. This should be treated as an opaque value and
+  not explicitly set to anything other than the return value of ``next_token`` from a previous call.
+  Defaults to ``0``.
+- ``order_by`` - The method by which to sort the returned list of users.
+  If the ordered field has duplicates, the second order is always by ascending ``name``,
+  which guarantees a stable ordering. Valid values are:
+
+  - ``name`` - Users are ordered alphabetically by ``name``. This is the default.
+  - ``is_guest`` - Users are ordered by ``is_guest`` status.
+  - ``admin`` - Users are ordered by ``admin`` status.
+  - ``user_type`` - Users are ordered alphabetically by ``user_type``.
+  - ``deactivated`` - Users are ordered by ``deactivated`` status.
+  - ``shadow_banned`` - Users are ordered by ``shadow_banned`` status.
+  - ``displayname`` - Users are ordered alphabetically by ``displayname``.
+  - ``avatar_url`` - Users are ordered alphabetically by avatar URL.
+
+- ``dir`` - Direction of user order. Either ``f`` for forwards or ``b`` for backwards.
+  Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
+
+Caution: the database only has indexes on the columns ``name`` and ``created_ts``.
+This means that if a different sort order is used (``is_guest``, ``admin``,
+``user_type``, ``deactivated``, ``shadow_banned``, ``avatar_url`` or ``displayname``),
+this can cause a large load on the database, especially for large environments.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- ``users`` - An array of objects, each containing information about a user.
+  User objects contain the following fields:
+
+  - ``name`` - string - Fully-qualified user ID (ex. ``@user:server.com``).
+  - ``is_guest`` - bool - Status if that user is a guest account.
+  - ``admin`` - bool - Status if that user is a server administrator.
+  - ``user_type`` - string - Type of the user. Normal users are type ``None``.
+    This allows user type specific behaviour. There are also types ``support`` and ``bot``.
+  - ``deactivated`` - bool - Status if that user has been marked as deactivated.
+  - ``shadow_banned`` - bool - Status if that user has been marked as shadow banned.
+  - ``displayname`` - string - The user's display name if they have set one.
+  - ``avatar_url`` - string - The user's avatar URL if they have set one.
+
+- ``next_token``: string representing a positive integer - Indication for pagination. See above.
+- ``total`` - integer - Total number of users.
 
 Query current sessions for a user
 =================================
```
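For example, the first page of an ordered listing can be requested as follows — a sketch assuming Synapse on `localhost:8008` and an admin access token in `$TOKEN`:

```bash
# First page of 10 non-guest users, ordered by display name; repeat
# with from=<next_token> to page through the remaining results.
curl --header "Authorization: Bearer $TOKEN" \
    'http://localhost:8008/_synapse/admin/v2/users?from=0&limit=10&guests=false&order_by=displayname&dir=f'
```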
@@ -823,3 +864,118 @@ The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
||||
be local.
|
||||
|
||||
Override ratelimiting for users
===============================

This API allows you to override or disable ratelimiting for a specific user.
There are specific APIs to set, get and delete a ratelimit.

Get status of ratelimit
-----------------------

The API is::

    GET /_synapse/admin/v1/users/<user_id>/override_ratelimit

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "messages_per_second": 0,
        "burst_count": 0
    }

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
  be local.

**Response**

The following fields are returned in the JSON response body:

- ``messages_per_second`` - integer - The number of actions that can
  be performed in a second. ``0`` means that ratelimiting is disabled for this user.
- ``burst_count`` - integer - How many actions can be performed before
  being limited.

If **no** custom ratelimit is set, an empty JSON dict is returned.

.. code:: json

    {}

Set ratelimit
-------------

The API is::

    POST /_synapse/admin/v1/users/<user_id>/override_ratelimit

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "messages_per_second": 0,
        "burst_count": 0
    }

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
  be local.

Body parameters:

- ``messages_per_second`` - positive integer, optional. The number of actions that can
  be performed in a second. Defaults to ``0``.
- ``burst_count`` - positive integer, optional. How many actions can be performed
  before being limited. Defaults to ``0``.

To disable ratelimiting for a user, set both values to ``0``.

**Response**

The following fields are returned in the JSON response body:

- ``messages_per_second`` - integer - The number of actions that can
  be performed in a second.
- ``burst_count`` - integer - How many actions can be performed before
  being limited.

Delete ratelimit
----------------

The API is::

    DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

An empty JSON dict is returned.

.. code:: json

    {}

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
  be local.

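For convenience, here is a hedged sketch of exercising all three endpoints with
the third-party ``requests`` library; the homeserver URL, token and user ID are
placeholders:

.. code:: python

    import requests

    base_url = "https://synapse.example.com"  # placeholder homeserver
    headers = {"Authorization": "Bearer ADMIN_ACCESS_TOKEN"}  # placeholder token
    url = f"{base_url}/_synapse/admin/v1/users/@user:server.com/override_ratelimit"

    # Disable ratelimiting for the user: 0/0 means "not limited".
    r = requests.post(
        url, json={"messages_per_second": 0, "burst_count": 0}, headers=headers
    )
    print(r.json())  # {"messages_per_second": 0, "burst_count": 0}

    # Inspect the current override; an empty dict means no override is set.
    print(requests.get(url, headers=headers).json())

    # Remove the override again, restoring the configured defaults.
    requests.delete(url, headers=headers).raise_for_status()
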
@@ -128,6 +128,9 @@ Some guidelines follow:
  will be if no sub-options are enabled).
- Lines should be wrapped at 80 characters.
- Use two-space indents.
- `true` and `false` are spelt thus (as opposed to `True`, etc.)
- Use single quotes (`'`) rather than double-quotes (`"`) or backticks
  (`` ` ``) to refer to configuration options.

Example:

33 docs/deprecation_policy.md Normal file
@@ -0,0 +1,33 @@
Deprecation Policy for Platform Dependencies
============================================

Synapse has a number of platform dependencies, including Python and PostgreSQL.
This document outlines the policy towards which versions we support, and when we
drop support for versions in the future.


Policy
------

Synapse follows the upstream support life cycles for Python and PostgreSQL,
i.e. when a version reaches End of Life Synapse will withdraw support for that
version in future releases.

Details on the upstream support life cycles for Python and PostgreSQL are
documented at https://endoflife.date/python and
https://endoflife.date/postgresql.


Context
-------

It is important for system admins to have a clear understanding of the platform
requirements of Synapse and its deprecation policies so that they can
effectively plan upgrading their infrastructure ahead of time. This is
especially important in contexts where upgrading the infrastructure requires
auditing and approval from a security team, or where upgrading is otherwise a
long process.

By following the upstream support life cycles Synapse can ensure that its
dependencies continue to get security patches, while not requiring system admins
to constantly update their platform dependencies to the latest versions.
235 docs/presence_router_module.md Normal file
@@ -0,0 +1,235 @@
# Presence Router Module

Synapse supports configuring a module that can specify additional users
(local or remote) who should receive certain presence updates from local
users.

Note that routing presence via Application Service transactions is not
currently supported.

The presence routing module is implemented as a Python class, which will
be imported by the running Synapse.

## Python Presence Router Class

The Python class is instantiated with two objects:

* A configuration object of some type (see below).
* An instance of `synapse.module_api.ModuleApi`.

It then implements methods related to presence routing.

Note that one method of `ModuleApi` that may be useful is:

```python
async def ModuleApi.send_local_online_presence_to(users: Iterable[str]) -> None
```

which can be given a list of local or remote MXIDs to broadcast known, online user
presence to (for those users that the receiving user is considered interested in).
It does not include state for users who are currently offline, and it can only be
called on workers that support sending federation.

### Module structure

Below is a list of possible methods that can be implemented, and whether they are
required.

#### `parse_config`

```python
def parse_config(config_dict: dict) -> Any
```

**Required.** A static method that is passed a dictionary of config options, and
should return a validated config object. This method is described further in
[Configuration](#configuration).

#### `get_users_for_states`

```python
async def get_users_for_states(
    self,
    state_updates: Iterable[UserPresenceState],
) -> Dict[str, Set[UserPresenceState]]:
```

**Required.** An asynchronous method that is passed an iterable of user presence
state. This method can determine whether a given presence update should be sent to certain
users. It does this by returning a dictionary with keys representing local or remote
Matrix User IDs, and values being a python set
of `synapse.handlers.presence.UserPresenceState` instances.

Synapse will then attempt to send the specified presence updates to each user when
possible.

#### `get_interested_users`

```python
async def get_interested_users(self, user_id: str) -> Union[Set[str], str]
```

**Required.** An asynchronous method that is passed a single Matrix User ID. This
method is expected to return the users whose presence the passed-in user may be
interested in. Returned users may be local or remote. The presence routed as a result of
what this method returns is sent in addition to the updates already sent between users
that share a room together. Presence updates are deduplicated.

This method should return a python set of Matrix User IDs, or the object
`synapse.events.presence_router.PresenceRouter.ALL_USERS` to indicate that the passed
user should receive presence information for *all* known users.

For clarity, if the user `@alice:example.org` is passed to this method, and the Set
`{"@bob:example.com", "@charlie:somewhere.org"}` is returned, this signifies that Alice
should receive presence updates sent by Bob and Charlie, regardless of whether these
users share a room.

### Example

Below is an example implementation of a presence router class.

```python
from typing import Dict, Iterable, List, Set, Union

from synapse.events.presence_router import PresenceRouter
from synapse.handlers.presence import UserPresenceState
from synapse.module_api import ModuleApi

class PresenceRouterConfig:
    def __init__(self):
        # Config options with their defaults
        # A list of users to always send all user presence updates to
        self.always_send_to_users = []  # type: List[str]

        # A list of users to ignore presence updates for. Does not affect
        # shared-room presence relationships
        self.blacklisted_users = []  # type: List[str]

class ExamplePresenceRouter:
    """An example implementation of synapse.presence_router.PresenceRouter.
    Supports routing all presence to a configured set of users, or a subset
    of presence from certain users to members of certain rooms.

    Args:
        config: A configuration object.
        module_api: An instance of Synapse's ModuleApi.
    """
    def __init__(self, config: PresenceRouterConfig, module_api: ModuleApi):
        self._config = config
        self._module_api = module_api

    @staticmethod
    def parse_config(config_dict: dict) -> PresenceRouterConfig:
        """Parse a configuration dictionary from the homeserver config, do
        some validation and return a typed PresenceRouterConfig.

        Args:
            config_dict: The configuration dictionary.

        Returns:
            A validated config object.
        """
        # Initialise a typed config object
        config = PresenceRouterConfig()
        always_send_to_users = config_dict.get("always_send_to_users")
        blacklisted_users = config_dict.get("blacklisted_users")

        # Do some validation of config options... otherwise raise a
        # synapse.config.ConfigError.
        config.always_send_to_users = always_send_to_users
        config.blacklisted_users = blacklisted_users

        return config

    async def get_users_for_states(
        self,
        state_updates: Iterable[UserPresenceState],
    ) -> Dict[str, Set[UserPresenceState]]:
        """Given an iterable of user presence updates, determine where each one
        needs to go. Returned results will not affect presence updates that are
        sent between users who share a room.

        Args:
            state_updates: An iterable of user presence state updates.

        Returns:
            A dictionary of user_id -> set of UserPresenceState that the user should
            receive.
        """
        destination_users = {}  # type: Dict[str, Set[UserPresenceState]]

        # Ignore any updates for blacklisted users
        desired_updates = set()
        for update in state_updates:
            if update.state_key not in self._config.blacklisted_users:
                desired_updates.add(update)

        # Send all presence updates to specific users
        for user_id in self._config.always_send_to_users:
            destination_users[user_id] = desired_updates

        return destination_users

    async def get_interested_users(
        self,
        user_id: str,
    ) -> Union[Set[str], str]:
        """
        Retrieve a list of users that `user_id` is interested in receiving the
        presence of. This will be in addition to those they share a room with.
        Optionally, the object PresenceRouter.ALL_USERS can be returned to indicate
        that this user should receive all incoming local and remote presence updates.

        Note that this method will only be called for local users.

        Args:
            user_id: A user requesting presence updates.

        Returns:
            A set of user IDs to return additional presence updates for, or
            PresenceRouter.ALL_USERS to return presence updates for all other users.
        """
        if user_id in self._config.always_send_to_users:
            return PresenceRouter.ALL_USERS

        return set()
```

#### A note on `get_users_for_states` and `get_interested_users`

Both of these methods are effectively two different sides of the same coin. The logic
regarding which users should receive updates for other users should be the same
between them.

`get_users_for_states` is called when presence updates come in from either federation
or local users, and is used to either direct local presence to remote users, or to
wake up the sync streams of local users to collect remote presence.

In contrast, `get_interested_users` is used to determine the users that presence should
be fetched for when a local user is syncing. This presence is then retrieved, before
being fed through `get_users_for_states` once again, with only the syncing user's
routing information pulled from the resulting dictionary.

Their routing logic should thus line up, else you may run into unintended behaviour.

## Configuration

Once you've crafted your module and installed it into the same Python environment as
Synapse, amend your homeserver config file with the following.

```yaml
presence:
  routing_module:
    module: my_module.ExamplePresenceRouter
    config:
      # Any configuration options for your module. The below is an example
      # of setting options for ExamplePresenceRouter.
      always_send_to_users: ["@presence_gobbler:example.org"]
      blacklisted_users:
        - "@alice:example.com"
        - "@bob:example.com"
      ...
```

The contents of `config` will be passed as a Python dictionary to the static
`parse_config` method of your class. The object returned by this method will
then be passed to the `__init__` method of your module as `config`.
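For completeness, a hedged sketch of the lifecycle implied above, using the
`ExamplePresenceRouter` from the example; `module_api` is supplied by the
running Synapse and is assumed to already exist here:

```python
# Synapse first calls the static parse_config() with the `config` dict from
# the homeserver YAML, then instantiates the class with the result.
config = ExamplePresenceRouter.parse_config(
    {
        "always_send_to_users": ["@presence_gobbler:example.org"],
        "blacklisted_users": ["@alice:example.com"],
    }
)
router = ExamplePresenceRouter(config, module_api)  # module_api assumed
```
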
@@ -104,10 +104,11 @@ example.com:8448 {
```
<VirtualHost *:443>
    SSLEngine on
    ServerName matrix.example.com;
    ServerName matrix.example.com

    RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
    AllowEncodedSlashes NoDecode
    ProxyPreserveHost on
    ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
    ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
    ProxyPass /_synapse/client http://127.0.0.1:8008/_synapse/client nocanon
@@ -116,7 +117,7 @@ example.com:8448 {

<VirtualHost *:8448>
    SSLEngine on
    ServerName example.com;
    ServerName example.com

    RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
    AllowEncodedSlashes NoDecode
@@ -135,6 +136,8 @@ example.com:8448 {
</IfModule>
```

**NOTE 3**: Missing `ProxyPreserveHost on` can lead to a redirect loop.

### HAProxy

```

@@ -82,9 +82,28 @@ pid_file: DATADIR/homeserver.pid
#
#soft_file_limit: 0

# Set to false to disable presence tracking on this homeserver.
# Presence tracking allows users to see the state (e.g. online/offline)
# of other local and remote users.
#
#use_presence: false
presence:
  # Uncomment to disable presence tracking on this homeserver. This option
  # replaces the previous top-level 'use_presence' option.
  #
  #enabled: false

  # Presence routers are third-party modules that can specify additional logic
  # to where presence updates from users are routed.
  #
  presence_router:
    # The custom module's class. Uncomment to use a custom presence router module.
    #
    #module: "my_custom_router.PresenceRouter"

    # Configuration options of the custom module. Refer to your module's
    # documentation for available options.
    #
    #config:
    #  example_option: 'something'

# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. Defaults to
@@ -869,10 +888,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
#rc_joins:
#  local:
#    per_second: 0.1
#    burst_count: 3
#    burst_count: 10
#  remote:
#    per_second: 0.01
#    burst_count: 3
#    burst_count: 10
#
#rc_3pid_validation:
#  per_second: 0.003
@@ -1246,9 +1265,9 @@ account_validity:
#
#allowed_local_3pids:
#  - medium: email
#    pattern: '.*@matrix\.org'
#    pattern: '^[^@]+@matrix\.org$'
#  - medium: email
#    pattern: '.*@vector\.im'
#    pattern: '^[^@]+@vector\.im$'
#  - medium: msisdn
#    pattern: '\+44'

@@ -1451,14 +1470,31 @@ metrics_flags:

## API Configuration ##

# A list of event types that will be included in the room_invite_state
# Controls for the state that is shared with users who receive an invite
# to a room
#
#room_invite_state_types:
#  - "m.room.join_rules"
#  - "m.room.canonical_alias"
#  - "m.room.avatar"
#  - "m.room.encryption"
#  - "m.room.name"
room_prejoin_state:
  # By default, the following state event types are shared with users who
  # receive invites to the room:
  #
  # - m.room.join_rules
  # - m.room.canonical_alias
  # - m.room.avatar
  # - m.room.encryption
  # - m.room.name
  #
  # Uncomment the following to disable these defaults (so that only the event
  # types listed in 'additional_event_types' are shared). Defaults to 'false'.
  #
  #disable_default_event_types: true

  # Additional state event types to share with users when they are invited
  # to a room.
  #
  # By default, this list is empty (so only the default event types are shared).
  #
  #additional_event_types:
  #  - org.example.custom.event.type


# A list of application service config files to use
@@ -1758,6 +1794,9 @@ saml2_config:
# Note that, if this is changed, users authenticating via that provider
# will no longer be recognised as the same user!
#
# (Use "oidc" here if you are migrating from an old "oidc_config"
# configuration.)
#
# idp_name: A user-facing name for this identity provider, which is used to
# offer the user a choice of login mechanisms.
#
@@ -1873,6 +1912,24 @@ saml2_config:
# which is set to the claims returned by the UserInfo Endpoint and/or
# in the ID Token.
#
# It is possible to configure Synapse to only allow logins if certain attributes
# match particular values in the OIDC userinfo. The requirements can be listed under
# `attribute_requirements` as shown below. All of the listed attributes must
# match for the login to be permitted. Additional attributes can be added to
# userinfo by expanding the `scopes` section of the OIDC config to retrieve
# additional information from the OIDC provider.
#
# If the OIDC claim is a list, then the attribute must match any value in the list.
# Otherwise, it must exactly match the value of the claim. Using the example
# below, the `family_name` claim MUST be "Stephensson", but the `groups`
# claim MUST contain "admin".
#
# attribute_requirements:
#   - attribute: family_name
#     value: "Stephensson"
#   - attribute: groups
#     value: "admin"
#
# See https://github.com/matrix-org/synapse/blob/master/docs/openid.md
# for information on how to configure these options.
#
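To make the matching rule above concrete, here is a hedged sketch in plain
Python (not Synapse's actual implementation): list-valued claims match if they
contain the required value, scalar claims must match exactly.

```python
def claim_matches(userinfo: dict, attribute: str, value: str) -> bool:
    claim = userinfo.get(attribute)
    if isinstance(claim, list):
        # A list claim matches if any element equals the required value.
        return value in claim
    # Otherwise the claim must match exactly.
    return claim == value

userinfo = {"family_name": "Stephensson", "groups": ["admin", "staff"]}
assert claim_matches(userinfo, "family_name", "Stephensson")
assert claim_matches(userinfo, "groups", "admin")
assert not claim_matches(userinfo, "groups", "superadmin")
```
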
@@ -1905,34 +1962,9 @@ oidc_providers:
#       localpart_template: "{{ user.login }}"
#       display_name_template: "{{ user.name }}"
#       email_template: "{{ user.email }}"

# For use with Keycloak
#
#- idp_id: keycloak
#  idp_name: Keycloak
#  issuer: "https://127.0.0.1:8443/auth/realms/my_realm_name"
#  client_id: "synapse"
#  client_secret: "copy secret generated in Keycloak UI"
#  scopes: ["openid", "profile"]

# For use with Github
#
#- idp_id: github
#  idp_name: Github
#  idp_brand: github
#  discover: false
#  issuer: "https://github.com/"
#  client_id: "your-client-id" # TO BE FILLED
#  client_secret: "your-client-secret" # TO BE FILLED
#  authorization_endpoint: "https://github.com/login/oauth/authorize"
#  token_endpoint: "https://github.com/login/oauth/access_token"
#  userinfo_endpoint: "https://api.github.com/user"
#  scopes: ["read:user"]
#  user_mapping_provider:
#    config:
#      subject_claim: "id"
#      localpart_template: "{{ user.login }}"
#      display_name_template: "{{ user.name }}"
#  attribute_requirements:
#    - attribute: userGroup
#      value: "synapseUsers"


# Enable Central Authentication Service (CAS) for registration and login.

@@ -65,3 +65,33 @@ systemctl restart matrix-synapse-worker@federation_reader.service
systemctl enable matrix-synapse-worker@federation_writer.service
systemctl restart matrix-synapse.target
```

## Hardening

**Optional:** If further hardening is desired, the file
`override-hardened.conf` may be copied from
`contrib/systemd/override-hardened.conf` in this repository to the location
`/etc/systemd/system/matrix-synapse.service.d/override-hardened.conf` (the
directory may have to be created). It enables certain sandboxing features in
systemd to further secure the synapse service. You may read the comments to
understand what the override file is doing. The same file will need to be
copied to
`/etc/systemd/system/matrix-synapse-worker@.service.d/override-hardened-worker.conf`
(this directory may also have to be created) in order to apply the same
hardening options to any worker processes.

Once these files have been copied to their appropriate locations, simply
reload systemd's manager config files and restart all Synapse services to
apply the hardening options. They will automatically be applied at every
restart as long as the override files are present at the specified locations.

```sh
systemctl daemon-reload

# Restart services
systemctl restart matrix-synapse.target
```

To see their effect, you may run `systemd-analyze security
matrix-synapse.service` before and after applying the hardening options, and
compare the two reports at a glance.

@@ -232,7 +232,6 @@ expressions:
    # Registration/login requests
    ^/_matrix/client/(api/v1|r0|unstable)/login$
    ^/_matrix/client/(r0|unstable)/register$
    ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$

    # Event sending requests
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact
@@ -276,7 +275,7 @@ using):

Ensure that all SSO logins go to a single process.
For multiple workers not handling the SSO endpoints properly, see
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#9427](https://github.com/matrix-org/synapse/issues/9427).

Note that an HTTP listener with `client` and `federation` resources must be

9 mypy.ini
@@ -1,12 +1,14 @@
[mypy]
namespace_packages = True
plugins = mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
follow_imports = silent
follow_imports = normal
check_untyped_defs = True
show_error_codes = True
show_traceback = True
mypy_path = stubs
warn_unreachable = True
local_partial_types = True
no_implicit_optional = True

# To find all folders that pass mypy you run:
#
@@ -20,8 +22,9 @@ files =
  synapse/crypto,
  synapse/event_auth.py,
  synapse/events/builder.py,
  synapse/events/validator.py,
  synapse/events/spamcheck.py,
  synapse/events/third_party_rules.py,
  synapse/events/validator.py,
  synapse/federation,
  synapse/groups,
  synapse/handlers,
@@ -38,6 +41,7 @@ files =
  synapse/push,
  synapse/replication,
  synapse/rest,
  synapse/secrets.py,
  synapse/server.py,
  synapse/server_notices,
  synapse/spam_checker_api,
@@ -71,6 +75,7 @@ files =
  synapse/util/metrics.py,
  synapse/util/macaroons.py,
  synapse/util/stringutils.py,
  synapse/visibility.py,
  tests/replication,
  tests/test_utils,
  tests/handlers/test_password_providers.py,

@@ -35,7 +35,7 @@
showcontent = true

[tool.black]
target-version = ['py35']
target-version = ['py36']
exclude = '''

(

@@ -18,11 +18,9 @@ import threading
from concurrent.futures import ThreadPoolExecutor

DISTS = (
    "debian:stretch",
    "debian:buster",
    "debian:bullseye",
    "debian:sid",
    "ubuntu:xenial",
    "ubuntu:bionic",
    "ubuntu:focal",
    "ubuntu:groovy",
@@ -43,7 +41,7 @@ class Builder(object):
        self._lock = threading.Lock()
        self._failed = False

    def run_build(self, dist):
    def run_build(self, dist, skip_tests=False):
        """Build deb for a single distribution"""

        if self._failed:
@@ -51,13 +49,13 @@ class Builder(object):
            raise Exception("failed")

        try:
            self._inner_build(dist)
            self._inner_build(dist, skip_tests)
        except Exception as e:
            print("build of %s failed: %s" % (dist, e), file=sys.stderr)
            self._failed = True
            raise

    def _inner_build(self, dist):
    def _inner_build(self, dist, skip_tests=False):
        projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        os.chdir(projdir)

@@ -101,6 +99,7 @@ class Builder(object):
                "--volume=" + debsdir + ":/debs",
                "-e", "TARGET_USERID=%i" % (os.getuid(), ),
                "-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
                "-e", "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
                "dh-venv-builder:" + tag,
            ], stdout=stdout, stderr=subprocess.STDOUT)

@@ -124,7 +123,7 @@ class Builder(object):
            self.active_containers.remove(c)


def run_builds(dists, jobs=1):
def run_builds(dists, jobs=1, skip_tests=False):
    builder = Builder(redirect_stdout=(jobs > 1))

    def sig(signum, _frame):
@@ -133,7 +132,7 @@ def run_builds(dists, jobs=1):
    signal.signal(signal.SIGINT, sig)

    with ThreadPoolExecutor(max_workers=jobs) as e:
        res = e.map(builder.run_build, dists)
        res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)

        # make sure we consume the iterable so that exceptions are raised.
        for r in res:
@@ -148,9 +147,13 @@ if __name__ == '__main__':
        '-j', '--jobs', type=int, default=1,
        help='specify the number of builds to run in parallel',
    )
    parser.add_argument(
        '--no-check', action='store_true',
        help='skip running tests after building',
    )
    parser.add_argument(
        'dist', nargs='*', default=DISTS,
        help='a list of distributions to build for. Default: %(default)s',
    )
    args = parser.parse_args()
    run_builds(dists=args.dist, jobs=args.jobs)
    run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# A script which checks that an appropriate news file has been added on this
# branch.

@@ -1,22 +1,49 @@
#! /bin/bash -eu
#!/usr/bin/env bash
# This script is designed for developers who want to test their code
# against Complement.
#
# It makes a Synapse image which represents the current checkout,
# then downloads Complement and runs it with that image.
# builds a synapse-complement image on top, then runs tests with it.
#
# By default the script will fetch the latest Complement master branch and
# run tests with that. This can be overridden to use a custom Complement
# checkout by setting the COMPLEMENT_DIR environment variable to the
# filepath of a local Complement checkout.
#
# A regular expression of test method names can be supplied as the first
# argument to the script. Complement will then only run those tests. If
# no regex is supplied, all tests are run. For example:
#
#   ./complement.sh "TestOutboundFederation(Profile|Send)"
#

# Exit if a line returns a non-zero exit code
set -e

# Change to the repository root
cd "$(dirname $0)/.."

# Check for a user-specified Complement checkout
if [[ -z "$COMPLEMENT_DIR" ]]; then
  echo "COMPLEMENT_DIR not set. Fetching the latest Complement checkout..."
  wget -Nq https://github.com/matrix-org/complement/archive/master.tar.gz
  tar -xzf master.tar.gz
  COMPLEMENT_DIR=complement-master
  echo "Checkout available at 'complement-master'"
fi

# Build the base Synapse image from the local checkout
docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
# Build the Synapse monolith image from Complement, based on the above image we just built
docker build -t complement-synapse -f "$COMPLEMENT_DIR/dockerfiles/Synapse.Dockerfile" "$COMPLEMENT_DIR/dockerfiles"

# Download Complement
wget -N https://github.com/matrix-org/complement/archive/master.tar.gz
tar -xzf master.tar.gz
cd complement-master
cd "$COMPLEMENT_DIR"

# Build the Synapse image from Complement, based on the above image we just built
docker build -t complement-synapse -f dockerfiles/Synapse.Dockerfile ./dockerfiles
EXTRA_COMPLEMENT_ARGS=""
if [[ -n "$1" ]]; then
  # A test name regex has been set, supply it to Complement
  EXTRA_COMPLEMENT_ARGS+="-run $1 "
fi

# Run the tests on the resulting image!
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -count=1 ./tests
# Run the tests!
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Find linting errors in Synapse's default config file.
# Exits with 0 if there are no problems, or another code otherwise.

@@ -22,8 +22,8 @@ import sys
from typing import Any, Optional
from urllib import parse as urlparse

import nacl.signing
import requests
import signedjson.key
import signedjson.types
import srvlookup
import yaml
@@ -44,18 +44,6 @@ def encode_base64(input_bytes):
    return output_string


def decode_base64(input_string):
    """Decode a base64 string to bytes inferring padding from the length of the
    string."""

    input_bytes = input_string.encode("ascii")
    input_len = len(input_bytes)
    padding = b"=" * (3 - ((input_len + 3) % 4))
    output_len = 3 * ((input_len + 2) // 4) + (input_len + 2) % 4 - 2
    output_bytes = base64.b64decode(input_bytes + padding)
    return output_bytes[:output_len]


def encode_canonical_json(value):
    return json.dumps(
        value,
@@ -88,42 +76,6 @@ def sign_json(
    return json_object


NACL_ED25519 = "ed25519"


def decode_signing_key_base64(algorithm, version, key_base64):
    """Decode a base64 encoded signing key
    Args:
        algorithm (str): The algorithm the key is for (currently "ed25519").
        version (str): Identifies this key out of the keys for this entity.
        key_base64 (str): Base64 encoded bytes of the key.
    Returns:
        A SigningKey object.
    """
    if algorithm == NACL_ED25519:
        key_bytes = decode_base64(key_base64)
        key = nacl.signing.SigningKey(key_bytes)
        key.version = version
        key.alg = NACL_ED25519
        return key
    else:
        raise ValueError("Unsupported algorithm %s" % (algorithm,))


def read_signing_keys(stream):
    """Reads a list of keys from a stream
    Args:
        stream : A stream to iterate for keys.
    Returns:
        list of SigningKey objects.
    """
    keys = []
    for line in stream:
        algorithm, version, key_base64 = line.split()
        keys.append(decode_signing_key_base64(algorithm, version, key_base64))
    return keys


def request(
    method: Optional[str],
    origin_name: str,
@@ -223,23 +175,28 @@ def main():
    parser.add_argument("--body", help="Data to send as the body of the HTTP request")

    parser.add_argument(
        "path", help="request path. We will add '/_matrix/federation/v1/' to this."
        "path", help="request path, including the '/_matrix/federation/...' prefix."
    )

    args = parser.parse_args()

    if not args.server_name or not args.signing_key_path:
    args.signing_key = None
    if args.signing_key_path:
        with open(args.signing_key_path) as f:
            args.signing_key = f.readline()

    if not args.server_name or not args.signing_key:
        read_args_from_config(args)

    with open(args.signing_key_path) as f:
        key = read_signing_keys(f)[0]
    algorithm, version, key_base64 = args.signing_key.split()
    key = signedjson.key.decode_signing_key_base64(algorithm, version, key_base64)

    result = request(
        args.method,
        args.server_name,
        key,
        args.destination,
        "/_matrix/federation/v1/" + args.path,
        args.path,
        content=args.body,
    )

@@ -255,10 +212,16 @@ def main():
def read_args_from_config(args):
    with open(args.config, "r") as fh:
        config = yaml.safe_load(fh)

        if not args.server_name:
            args.server_name = config["server_name"]
        if not args.signing_key_path:
            args.signing_key_path = config["signing_key_path"]

        if not args.signing_key:
            if "signing_key" in config:
                args.signing_key = config["signing_key"]
            else:
                with open(config["signing_key_path"]) as f:
                    args.signing_key = f.readline()


class MatrixConnectionAdapter(HTTPAdapter):

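The rewritten `main()` above leans on `signedjson.key.decode_signing_key_base64`
in place of the removed local helpers. A hedged sketch of that call, assuming a
standard Synapse signing-key line of the form `algorithm version base64-key`
(the key material below is made up for illustration):

```python
import signedjson.key

# One line from a Synapse signing key file; hypothetical values.
line = "ed25519 a_abc123 YJDBA9Xnr2sVqXD9Vj7XVunYZX0FsV6NEvLSr65Qhnk"
algorithm, version, key_base64 = line.split()
key = signedjson.key.decode_signing_key_base64(algorithm, version, key_base64)
print(key.alg, key.version)  # -> ed25519 a_abc123
```
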
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Update/check the docs/sample_config.yaml

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Runs linting scripts over the local Synapse checkout
# isort - sorts import statements

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# This script generates SQL files for creating a brand new Synapse DB with the latest
# schema, on both SQLite3 and Postgres.

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

set -e

@@ -6,4 +6,4 @@ set -e
# next PR number.
CURRENT_NUMBER=`curl -s "https://api.github.com/repos/matrix-org/synapse/issues?state=all&per_page=1" | jq -r ".[0].number"`
CURRENT_NUMBER=$((CURRENT_NUMBER+1))
echo $CURRENT_NUMBER
echo $CURRENT_NUMBER
244 scripts-dev/release.py Executable file
@@ -0,0 +1,244 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""An interactive script for doing a release. See `run()` below.
"""

import subprocess
import sys
from typing import Optional

import click
import git
from packaging import version
from redbaron import RedBaron


@click.command()
def run():
    """An interactive script to walk through the initial stages of creating a
    release, including creating the release branch, updating the changelog and
    pushing to GitHub.

    Requires the dev dependencies to be installed, which can be done via:

        pip install -e .[dev]

    """

    # Make sure we're in a git repo.
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        raise click.ClickException("Not in Synapse repo.")

    if repo.is_dirty():
        raise click.ClickException("Uncommitted changes exist.")

    click.secho("Updating git repo...")
    repo.remote().fetch()

    # Parse the AST and load the `__version__` node so that we can edit it
    # later.
    with open("synapse/__init__.py") as f:
        red = RedBaron(f.read())

    version_node = None
    for node in red:
        if node.type != "assignment":
            continue

        if node.target.type != "name":
            continue

        if node.target.value != "__version__":
            continue

        version_node = node
        break

    if not version_node:
        print("Failed to find '__version__' definition in synapse/__init__.py")
        sys.exit(1)

    # Parse the current version.
    current_version = version.parse(version_node.value.value.strip('"'))
    assert isinstance(current_version, version.Version)

    # Figure out what sort of release we're doing and calculate the new version.
    rc = click.confirm("RC", default=True)
    if current_version.pre:
        # If the current version is an RC we don't need to bump any of the
        # version numbers (other than the RC number).
        base_version = "{}.{}.{}".format(
            current_version.major,
            current_version.minor,
            current_version.micro,
        )

        if rc:
            new_version = "{}.{}.{}rc{}".format(
                current_version.major,
                current_version.minor,
                current_version.micro,
                current_version.pre[1] + 1,
            )
        else:
            new_version = base_version
    else:
        # If this is a new release cycle then we need to know if it's a major
        # version bump or a hotfix.
        release_type = click.prompt(
            "Release type",
            type=click.Choice(("major", "hotfix")),
            show_choices=True,
            default="major",
        )

        if release_type == "major":
            base_version = new_version = "{}.{}.{}".format(
                current_version.major,
                current_version.minor + 1,
                0,
            )
            if rc:
                new_version = "{}.{}.{}rc1".format(
                    current_version.major,
                    current_version.minor + 1,
                    0,
                )

        else:
            base_version = new_version = "{}.{}.{}".format(
                current_version.major,
                current_version.minor,
                current_version.micro + 1,
            )
            if rc:
                new_version = "{}.{}.{}rc1".format(
                    current_version.major,
                    current_version.minor,
                    current_version.micro + 1,
                )

    # Confirm the calculated version is OK.
    if not click.confirm(f"Create new version: {new_version}?", default=True):
        click.get_current_context().abort()

    # Switch to the release branch.
    release_branch_name = f"release-v{base_version}"
    release_branch = find_ref(repo, release_branch_name)
    if release_branch:
        if release_branch.is_remote():
            # If the release branch only exists on the remote we check it out
            # locally.
            repo.git.checkout(release_branch_name)
            release_branch = repo.active_branch
    else:
        # If a branch doesn't exist we create one. We ask which branch it
        # should be based on, defaulting to sensible values depending on the
        # release type.
        if current_version.is_prerelease:
            default = release_branch_name
        elif release_type == "major":
            default = "develop"
        else:
            default = "master"

        branch_name = click.prompt(
            "Which branch should the release be based on?", default=default
        )

        base_branch = find_ref(repo, branch_name)
        if not base_branch:
            print(f"Could not find base branch {branch_name}!")
            click.get_current_context().abort()

        # Check out the base branch and ensure it's up to date
        repo.head.reference = base_branch
        repo.head.reset(index=True, working_tree=True)
        if not base_branch.is_remote():
            update_branch(repo)

        # Create the new release branch
        release_branch = repo.create_head(release_branch_name, commit=base_branch)

    # Switch to the release branch and ensure it's up to date.
    repo.git.checkout(release_branch_name)
    update_branch(repo)

    # Update the `__version__` variable and write it back to the file.
    version_node.value = '"' + new_version + '"'
    with open("synapse/__init__.py", "w") as f:
        f.write(red.dumps())

    # Generate changelogs
    subprocess.run("python3 -m towncrier", shell=True)

    # Generate debian changelogs if it's not an RC.
    if not rc:
        subprocess.run(
            f'dch -M -v {new_version} "New synapse release {new_version}."', shell=True
        )
        subprocess.run('dch -M -r -D stable ""', shell=True)

    # Show the user the changes and ask if they want to edit the change log.
    repo.git.add("-u")
    subprocess.run("git diff --cached", shell=True)

    if click.confirm("Edit changelog?", default=False):
        click.edit(filename="CHANGES.md")

    # Commit the changes.
    repo.git.add("-u")
    repo.git.commit(f"-m {new_version}")

    # We give the option to bail here in case the user wants to make sure things
    # are OK before pushing.
    if not click.confirm("Push branch to github?", default=True):
        print("")
        print("Run when ready to push:")
        print("")
        print(f"\tgit push -u {repo.remote().name} {repo.active_branch.name}")
        print("")
        sys.exit(0)

    # Otherwise, push and open the changelog in the browser.
    repo.git.push("-u", repo.remote().name, repo.active_branch.name)

    click.launch(
        f"https://github.com/matrix-org/synapse/blob/{repo.active_branch.name}/CHANGES.md"
    )


def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
    """Find the branch/ref, looking first locally then in the remote."""
    if ref_name in repo.refs:
        return repo.refs[ref_name]
    elif ref_name in repo.remote().refs:
        return repo.remote().refs[ref_name]
    else:
        return None


def update_branch(repo: git.Repo):
    """Ensure branch is up to date if it has a remote"""
    if repo.active_branch.tracking_branch():
        repo.git.merge(repo.active_branch.tracking_branch().name)


if __name__ == "__main__":
    run()
@@ -51,7 +51,7 @@ def main(src_repo, dest_repo):
        parts = line.split("|")
        if len(parts) != 2:
            print("Unable to parse input line %s" % line, file=sys.stderr)
            exit(1)
            sys.exit(1)

        move_media(parts[0], parts[1], src_paths, dest_paths)

@@ -18,15 +18,15 @@ ignore =
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
ignore=W503,W504,E203,E731,E501
# B007: Subsection of the bugbear suite (TODO: add in remaining fixes)
ignore=W503,W504,E203,E731,E501,B007

[isort]
line_length = 88
sections=FUTURE,STDLIB,COMPAT,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER
sections=FUTURE,STDLIB,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER
default_section=THIRDPARTY
known_first_party = synapse
known_tests=tests
known_compat = mock
known_twisted=twisted,OpenSSL
multi_line_output=3
include_trailing_comma=true

15 setup.py
@@ -99,17 +99,25 @@ CONDITIONAL_REQUIREMENTS["lint"] = [
"isort==5.7.0",
|
||||
"black==20.8b1",
|
||||
"flake8-comprehensions",
|
||||
"flake8-bugbear==21.3.2",
|
||||
"flake8",
|
||||
]
|
||||
|
||||
CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.11"]
|
||||
CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [
|
||||
# The following are used by the release script
|
||||
"click==7.1.2",
|
||||
"redbaron==0.9.2",
|
||||
"GitPython==3.1.14",
|
||||
]
|
||||
|
||||
CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"]
|
||||
|
||||
# Dependencies which are exclusively required by unit test code. This is
|
||||
# NOT a list of all modules that are necessary to run the unit tests.
|
||||
# Tests assume that all optional dependencies are installed.
|
||||
#
|
||||
# parameterized_class decorator was introduced in parameterized 0.7.0
|
||||
CONDITIONAL_REQUIREMENTS["test"] = ["mock>=2.0", "parameterized>=0.7.0"]
|
||||
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"]
|
||||
|
||||
setup(
|
||||
name="matrix-synapse",
|
||||
@@ -122,13 +130,12 @@ setup(
|
||||
zip_safe=False,
|
||||
long_description=long_description,
|
||||
long_description_content_type="text/x-rst",
|
||||
python_requires="~=3.5",
|
||||
python_requires="~=3.6",
|
||||
classifiers=[
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
"Topic :: Communications :: Chat",
|
||||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python :: 3 :: Only",
|
||||
"Programming Language :: Python :: 3.5",
|
||||
"Programming Language :: Python :: 3.6",
|
||||
"Programming Language :: Python :: 3.7",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
|
||||
@@ -48,7 +48,7 @@ try:
except ImportError:
    pass

__version__ = "1.30.0"
__version__ = "1.32.0rc1"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when

@@ -558,6 +558,9 @@ class Auth:
        Returns:
            bool: False if no access_token was given, True otherwise.
        """
        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        query_params = request.args.get(b"access_token")
        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
        return bool(query_params) or bool(auth_headers)
@@ -574,6 +577,8 @@ class Auth:
            MissingClientTokenError: If there isn't a single access_token in the
                request
        """
        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
        query_params = request.args.get(b"access_token")

@@ -51,6 +51,7 @@ class PresenceState:
    OFFLINE = "offline"
    UNAVAILABLE = "unavailable"
    ONLINE = "online"
    BUSY = "org.matrix.msc3026.busy"


class JoinRules:
@@ -58,6 +59,8 @@ class JoinRules:
    KNOCK = "knock"
    INVITE = "invite"
    PRIVATE = "private"
    # As defined for MSC3083.
    MSC3083_RESTRICTED = "restricted"


class LoginType:
@@ -70,6 +73,11 @@ class LoginType:
    DUMMY = "m.login.dummy"


# This is used in the `type` parameter for /register when called by
# an appservice to register a new user.
APP_SERVICE_REGISTRATION_TYPE = "m.login.application_service"


class EventTypes:
    Member = "m.room.member"
    Create = "m.room.create"
@@ -100,6 +108,9 @@ class EventTypes:

    Dummy = "org.matrix.dummy_event"

    MSC1772_SPACE_CHILD = "org.matrix.msc1772.space.child"
    MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent"


class EduTypes:
    Presence = "m.presence"
@@ -160,6 +171,9 @@ class EventContentFields:
    # cf https://github.com/matrix-org/matrix-doc/pull/2228
    SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"

    # cf https://github.com/matrix-org/matrix-doc/pull/1772
    MSC1772_ROOM_TYPE = "org.matrix.msc1772.type"


class RoomEncryptionAlgorithms:
    MEGOLM_V1_AES_SHA2 = "m.megolm.v1.aes-sha2"

|
||||
from typing import Hashable, Optional, Tuple
|
||||
|
||||
from synapse.api.errors import LimitExceededError
|
||||
from synapse.storage.databases.main import DataStore
|
||||
from synapse.types import Requester
|
||||
from synapse.util import Clock
|
||||
|
||||
@@ -31,10 +32,13 @@ class Ratelimiter:
|
||||
burst_count: How many actions that can be performed before being limited.
|
||||
"""
|
||||
|
||||
def __init__(self, clock: Clock, rate_hz: float, burst_count: int):
|
||||
def __init__(
|
||||
self, store: DataStore, clock: Clock, rate_hz: float, burst_count: int
|
||||
):
|
||||
self.clock = clock
|
||||
self.rate_hz = rate_hz
|
||||
self.burst_count = burst_count
|
||||
self.store = store
|
||||
|
||||
# A ordered dictionary keeping track of actions, when they were last
|
||||
# performed and how often. Each entry is a mapping from a key of arbitrary type
|
||||
@@ -46,45 +50,10 @@ class Ratelimiter:
|
||||
OrderedDict()
|
||||
) # type: OrderedDict[Hashable, Tuple[float, int, float]]
|
||||
|
||||
def can_requester_do_action(
|
||||
async def can_do_action(
|
||||
self,
|
||||
requester: Requester,
|
||||
rate_hz: Optional[float] = None,
|
||||
burst_count: Optional[int] = None,
|
||||
update: bool = True,
|
||||
_time_now_s: Optional[int] = None,
|
||||
) -> Tuple[bool, float]:
|
||||
"""Can the requester perform the action?
|
||||
|
||||
Args:
|
||||
requester: The requester to key off when rate limiting. The user property
|
||||
will be used.
|
||||
rate_hz: The long term number of actions that can be performed in a second.
|
||||
Overrides the value set during instantiation if set.
|
||||
burst_count: How many actions that can be performed before being limited.
|
||||
Overrides the value set during instantiation if set.
|
||||
update: Whether to count this check as performing the action
|
||||
_time_now_s: The current time. Optional, defaults to the current time according
|
||||
to self.clock. Only used by tests.
|
||||
|
||||
Returns:
|
||||
A tuple containing:
|
||||
* A bool indicating if they can perform the action now
|
||||
* The reactor timestamp for when the action can be performed next.
|
||||
-1 if rate_hz is less than or equal to zero
|
||||
"""
|
||||
        # Disable rate limiting of users belonging to any AS that is configured
        # not to be rate limited in its registration file (rate_limited: true|false).
        if requester.app_service and not requester.app_service.is_rate_limited():
            return True, -1.0

        return self.can_do_action(
            requester.user.to_string(), rate_hz, burst_count, update, _time_now_s
        )

    def can_do_action(
        self,
        key: Hashable,
        requester: Optional[Requester],
        key: Optional[Hashable] = None,
        rate_hz: Optional[float] = None,
        burst_count: Optional[int] = None,
        update: bool = True,
@@ -92,9 +61,16 @@ class Ratelimiter:
    ) -> Tuple[bool, float]:
        """Can the entity (e.g. user or IP address) perform the action?

        Checks if the user has ratelimiting disabled in the database by looking
        for null/zero values in the `ratelimit_override` table. (Non-zero
        values aren't honoured, as they're specific to the event sending
        ratelimiter, rather than all ratelimiters)

        Args:
            key: The key we should use when rate limiting. Can be a user ID
                (when sending events), an IP address, etc.
            requester: The requester that is doing the action, if any. Used to check
                if the user has ratelimits disabled in the database.
            key: An arbitrary key used to classify an action. Defaults to the
                requester's user ID.
            rate_hz: The long term number of actions that can be performed in a second.
                Overrides the value set during instantiation if set.
            burst_count: How many actions that can be performed before being limited.
@@ -109,6 +85,30 @@ class Ratelimiter:
                * The reactor timestamp for when the action can be performed next.
                  -1 if rate_hz is less than or equal to zero
        """
        if key is None:
            if not requester:
                raise ValueError("Must supply at least one of `requester` or `key`")

            key = requester.user.to_string()

        if requester:
            # Disable rate limiting of users belonging to any AS that is configured
            # not to be rate limited in its registration file (rate_limited: true|false).
            if requester.app_service and not requester.app_service.is_rate_limited():
                return True, -1.0

            # Check if ratelimiting has been disabled for the user.
            #
            # Note that we don't use the returned rate/burst count, as the table
            # is specifically for the event sending ratelimiter. Instead, we
            # only use it to (somewhat cheekily) infer whether the user should
            # be subject to any rate limiting or not.
            override = await self.store.get_ratelimit_for_user(
                requester.authenticated_entity
            )
            if override and not override.messages_per_second:
                return True, -1.0

        # Override default values if set
        time_now_s = _time_now_s if _time_now_s is not None else self.clock.time()
        rate_hz = rate_hz if rate_hz is not None else self.rate_hz
@@ -175,9 +175,10 @@ class Ratelimiter:
            else:
                del self.actions[key]

    def ratelimit(
    async def ratelimit(
        self,
        key: Hashable,
        requester: Optional[Requester],
        key: Optional[Hashable] = None,
        rate_hz: Optional[float] = None,
        burst_count: Optional[int] = None,
        update: bool = True,
@@ -185,8 +186,16 @@ class Ratelimiter:
    ):
        """Checks if an action can be performed. If not, raises a LimitExceededError

        Checks if the user has ratelimiting disabled in the database by looking
        for null/zero values in the `ratelimit_override` table. (Non-zero
        values aren't honoured, as they're specific to the event sending
        ratelimiter, rather than all ratelimiters)

        Args:
            key: An arbitrary key used to classify an action
            requester: The requester that is doing the action, if any. Used to check
                if the user has ratelimits disabled.
            key: An arbitrary key used to classify an action. Defaults to the
                requester's user ID.
            rate_hz: The long term number of actions that can be performed in a second.
                Overrides the value set during instantiation if set.
            burst_count: How many actions that can be performed before being limited.
@@ -201,7 +210,8 @@ class Ratelimiter:
        """
        time_now_s = _time_now_s if _time_now_s is not None else self.clock.time()

        allowed, time_allowed = self.can_do_action(
        allowed, time_allowed = await self.can_do_action(
            requester,
            key,
            rate_hz=rate_hz,
            burst_count=burst_count,

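A minimal, self-contained sketch of the new calling convention (hypothetical stand-ins, not Synapse's real classes): `requester` now comes first, `key` is optional and defaults to the requester's user ID, and the check must be awaited because the per-user override is read from the database.

import asyncio
from typing import Hashable, Optional, Tuple


class FakeRequester:
    # hypothetical stand-in for synapse.types.Requester
    def __init__(self, user_id: str):
        self.user_id = user_id


async def can_do_action(
    requester: Optional[FakeRequester],
    key: Optional[Hashable] = None,
) -> Tuple[bool, float]:
    # mirrors the dispatch at the top of Ratelimiter.can_do_action above
    if key is None:
        if not requester:
            raise ValueError("Must supply at least one of `requester` or `key`")
        key = requester.user_id
    # ... a real implementation would consult the token bucket for `key` here ...
    return True, -1.0


async def main() -> None:
    # rate limit by user, or by an arbitrary key such as an IP address
    print(await can_do_action(FakeRequester("@alice:example.com")))
    print(await can_do_action(None, key="10.0.0.1"))


asyncio.run(main())
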
@@ -57,7 +57,7 @@ class RoomVersion:
    state_res = attr.ib(type=int)  # one of the StateResolutionVersions
    enforce_key_validity = attr.ib(type=bool)

    # bool: before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules
    # Before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules
    special_case_aliases_auth = attr.ib(type=bool)
    # Strictly enforce canonicaljson, do not allow:
    # * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1]
@@ -69,6 +69,8 @@ class RoomVersion:
    limit_notifications_power_levels = attr.ib(type=bool)
    # MSC2174/MSC2176: Apply updated redaction rules algorithm.
    msc2176_redaction_rules = attr.ib(type=bool)
    # MSC3083: Support the 'restricted' join_rule.
    msc3083_join_rules = attr.ib(type=bool)


class RoomVersions:
@@ -82,6 +84,7 @@ class RoomVersions:
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
    )
    V2 = RoomVersion(
        "2",
@@ -93,6 +96,7 @@ class RoomVersions:
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
    )
    V3 = RoomVersion(
        "3",
@@ -104,6 +108,7 @@ class RoomVersions:
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
    )
    V4 = RoomVersion(
        "4",
@@ -115,6 +120,7 @@ class RoomVersions:
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
    )
    V5 = RoomVersion(
        "5",
@@ -126,6 +132,7 @@ class RoomVersions:
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
    )
    V6 = RoomVersion(
        "6",
@@ -137,6 +144,7 @@ class RoomVersions:
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
    )
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
@@ -148,6 +156,19 @@ class RoomVersions:
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2176_redaction_rules=True,
        msc3083_join_rules=False,
    )
    MSC3083 = RoomVersion(
        "org.matrix.msc3083",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
    )


@@ -162,4 +183,5 @@ KNOWN_ROOM_VERSIONS = {
        RoomVersions.V6,
        RoomVersions.MSC2176,
    )
    # Note that we do not include MSC3083 here unless it is enabled in the config.
}  # type: Dict[str, RoomVersion]

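Each RoomVersion gains a `msc3083_join_rules` capability flag so that callers branch on the flag rather than on the version string; new room versions then inherit behaviour simply by setting flags. A short sketch of how such a flag is meant to be consumed (assuming the attribute added above):

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS


def supports_restricted_joins(room_version_id: str) -> bool:
    room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
    # Unknown versions get the conservative answer.
    return room_version is not None and room_version.msc3083_join_rules
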
@@ -22,7 +22,9 @@ logger = logging.getLogger(__name__)
try:
    python_dependencies.check_requirements()
except python_dependencies.DependencyException as e:
    sys.stderr.writelines(e.message)
    sys.stderr.writelines(
        e.message  # noqa: B306, DependencyException.message is a property
    )
    sys.exit(1)

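The `# noqa: B306` annotations here and in the config files below are needed because flake8-bugbear's B306 check flags any `.message` access on a caught exception (`BaseException.message` was removed in Python 3). On `DependencyException` the attribute is a legitimate property defined by the subclass, so the warning is suppressed. A minimal sketch of the same shape:

class DependencyException(Exception):
    def __init__(self, dependencies):
        self.dependencies = dependencies

    @property
    def message(self) -> str:
        return "Missing dependencies: %s" % (", ".join(self.dependencies),)


try:
    raise DependencyException(["lxml"])
except DependencyException as e:
    print(e.message)  # noqa: B306, message is a property on the subclass
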
@@ -21,8 +21,10 @@ import signal
import socket
import sys
import traceback
import warnings
from typing import Awaitable, Callable, Iterable

from cryptography.utils import CryptographyDeprecationWarning
from typing_extensions import NoReturn

from twisted.internet import defer, error, reactor
@@ -195,6 +197,25 @@ def listen_metrics(bind_addresses, port):
        start_http_server(port, addr=host, registry=RegistryProxy)


def listen_manhole(bind_addresses: Iterable[str], port: int, manhole_globals: dict):
    # twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing
    # warning. It's fixed by https://github.com/twisted/twisted/pull/1522), so
    # suppress the warning for now.
    warnings.filterwarnings(
        action="ignore",
        category=CryptographyDeprecationWarning,
        message="int_from_bytes is deprecated",
    )

    from synapse.util.manhole import manhole

    listen_tcp(
        bind_addresses,
        port,
        manhole(username="matrix", password="rabbithole", globals=manhole_globals),
    )


def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
    """
    Create a TCP socket for a port and several addresses

@@ -147,7 +147,6 @@ from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.generic_worker")
@@ -282,6 +281,7 @@ class GenericWorkerPresence(BasePresenceHandler):
        self.hs = hs
        self.is_mine_id = hs.is_mine_id

        self.presence_router = hs.get_presence_router()
        self._presence_enabled = hs.config.use_presence

        # The number of ongoing syncs on this process, by user id.
@@ -302,6 +302,8 @@ class GenericWorkerPresence(BasePresenceHandler):
            self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
        )

        self._busy_presence_enabled = hs.config.experimental.msc3026_enabled

        hs.get_reactor().addSystemEventTrigger(
            "before",
            "shutdown",
@@ -394,7 +396,7 @@ class GenericWorkerPresence(BasePresenceHandler):
        return _user_syncing()

    async def notify_from_replication(self, states, stream_id):
        parties = await get_interested_parties(self.store, states)
        parties = await get_interested_parties(self.store, self.presence_router, states)
        room_ids_to_states, users_to_states = parties

        self.notifier.on_new_event(
@@ -439,8 +441,12 @@ class GenericWorkerPresence(BasePresenceHandler):
            PresenceState.ONLINE,
            PresenceState.UNAVAILABLE,
            PresenceState.OFFLINE,
            PresenceState.BUSY,
        )
        if presence not in valid_presence:

        if presence not in valid_presence or (
            presence == PresenceState.BUSY and not self._busy_presence_enabled
        ):
            raise SynapseError(400, "Invalid presence state")

        user_id = target_user.to_string()
@@ -634,12 +640,8 @@ class GenericWorkerServer(HomeServer):
            if listener.type == "http":
                self._listen_http(listener)
            elif listener.type == "manhole":
                _base.listen_tcp(
                    listener.bind_addresses,
                    listener.port,
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                _base.listen_manhole(
                    listener.bind_addresses, listener.port, manhole_globals={"hs": self}
                )
            elif listener.type == "metrics":
                if not self.get_config().enable_metrics:
@@ -786,13 +788,6 @@ class FederationSenderHandler:

        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def wake_destination(self, server: str):
        self.federation_sender.wake_destination(server)

@@ -67,7 +67,6 @@ from synapse.storage import DataStore
from synapse.storage.engines import IncorrectDatabaseSetup
from synapse.storage.prepare_database import UpgradeDatabaseException
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.module_loader import load_module
from synapse.util.versionstring import get_version_string

@@ -288,12 +287,8 @@ class SynapseHomeServer(HomeServer):
            if listener.type == "http":
                self._listening_services.extend(self._listener_http(config, listener))
            elif listener.type == "manhole":
                listen_tcp(
                    listener.bind_addresses,
                    listener.port,
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                _base.listen_manhole(
                    listener.bind_addresses, listener.port, manhole_globals={"hs": self}
                )
            elif listener.type == "replication":
                services = listen_tcp(

@@ -49,7 +49,7 @@ This is all tied together by the AppServiceScheduler which DIs the required
components.
"""
import logging
from typing import List
from typing import List, Optional

from synapse.appservice import ApplicationService, ApplicationServiceState
from synapse.events import EventBase
@@ -191,11 +191,11 @@ class _TransactionController:
        self,
        service: ApplicationService,
        events: List[EventBase],
        ephemeral: List[JsonDict] = [],
        ephemeral: Optional[List[JsonDict]] = None,
    ):
        try:
            txn = await self.store.create_appservice_txn(
                service=service, events=events, ephemeral=ephemeral
                service=service, events=events, ephemeral=ephemeral or []
            )
            service_is_up = await self._is_service_up(service)
            if service_is_up:

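The `ephemeral` change above is the standard fix for Python's mutable-default-argument pitfall: a default list is created once at function definition time and then shared by every call. A small standalone demonstration of the bug the `Optional[...] = None` idiom avoids:

def broken(item, acc=[]):  # one shared list for every call!
    acc.append(item)
    return acc


def fixed(item, acc=None):
    acc = acc or []  # fresh list per call
    acc.append(item)
    return acc


assert broken(1) == [1]
assert broken(2) == [1, 2]  # surprise: state leaked from the first call
assert fixed(1) == [1]
assert fixed(2) == [2]
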
@@ -1,4 +1,4 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2015-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,38 +12,131 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.api.constants import EventTypes
import logging
from typing import Iterable

from ._base import Config
from synapse.api.constants import EventTypes
from synapse.config._base import Config, ConfigError
from synapse.config._util import validate_config
from synapse.types import JsonDict

logger = logging.getLogger(__name__)


class ApiConfig(Config):
    section = "api"

    def read_config(self, config, **kwargs):
        self.room_invite_state_types = config.get(
            "room_invite_state_types",
            [
                EventTypes.JoinRules,
                EventTypes.CanonicalAlias,
                EventTypes.RoomAvatar,
                EventTypes.RoomEncryption,
                EventTypes.Name,
            ],
    def read_config(self, config: JsonDict, **kwargs):
        validate_config(_MAIN_SCHEMA, config, ())
        self.room_prejoin_state = list(self._get_prejoin_state_types(config))

    def generate_config_section(cls, **kwargs) -> str:
        formatted_default_state_types = "\n".join(
            "           # - %s" % (t,) for t in _DEFAULT_PREJOIN_STATE_TYPES
        )

    def generate_config_section(cls, **kwargs):
        return """\
        ## API Configuration ##

        # A list of event types that will be included in the room_invite_state
        # Controls for the state that is shared with users who receive an invite
        # to a room
        #
        #room_invite_state_types:
        #  - "{JoinRules}"
        #  - "{CanonicalAlias}"
        #  - "{RoomAvatar}"
        #  - "{RoomEncryption}"
        #  - "{Name}"
        """.format(
            **vars(EventTypes)
        )
        room_prejoin_state:
           # By default, the following state event types are shared with users who
           # receive invites to the room:
           #
%(formatted_default_state_types)s
           #
           # Uncomment the following to disable these defaults (so that only the event
           # types listed in 'additional_event_types' are shared). Defaults to 'false'.
           #
           #disable_default_event_types: true

           # Additional state event types to share with users when they are invited
           # to a room.
           #
           # By default, this list is empty (so only the default event types are shared).
           #
           #additional_event_types:
           #  - org.example.custom.event.type
        """ % {
            "formatted_default_state_types": formatted_default_state_types
        }

    def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:
        """Get the event types to include in the prejoin state

        Parses the config and returns an iterable of the event types to be included.
        """
        room_prejoin_state_config = config.get("room_prejoin_state") or {}

        # backwards-compatibility support for room_invite_state_types
        if "room_invite_state_types" in config:
            # if both "room_invite_state_types" and "room_prejoin_state" are set, then
            # we don't really know what to do.
            if room_prejoin_state_config:
                raise ConfigError(
                    "Can't specify both 'room_invite_state_types' and 'room_prejoin_state' "
                    "in config"
                )

            logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING)

            yield from config["room_invite_state_types"]
            return

        if not room_prejoin_state_config.get("disable_default_event_types"):
            yield from _DEFAULT_PREJOIN_STATE_TYPES

        if self.spaces_enabled:
            # MSC1772 suggests adding m.room.create to the prejoin state
            yield EventTypes.Create

        yield from room_prejoin_state_config.get("additional_event_types", [])


_ROOM_INVITE_STATE_TYPES_WARNING = """\
WARNING: The 'room_invite_state_types' configuration setting is now deprecated,
and replaced with 'room_prejoin_state'. New features may not work correctly
unless 'room_invite_state_types' is removed. See the sample configuration file for
details of 'room_prejoin_state'.
--------------------------------------------------------------------------------
"""

_DEFAULT_PREJOIN_STATE_TYPES = [
    EventTypes.JoinRules,
    EventTypes.CanonicalAlias,
    EventTypes.RoomAvatar,
    EventTypes.RoomEncryption,
    EventTypes.Name,
]


# room_prejoin_state can either be None (as it is in the default config), or
# an object containing other config settings
_ROOM_PREJOIN_STATE_CONFIG_SCHEMA = {
    "oneOf": [
        {
            "type": "object",
            "properties": {
                "disable_default_event_types": {"type": "boolean"},
                "additional_event_types": {
                    "type": "array",
                    "items": {"type": "string"},
                },
            },
        },
        {"type": "null"},
    ]
}

# the legacy room_invite_state_types setting
_ROOM_INVITE_STATE_TYPES_SCHEMA = {"type": "array", "items": {"type": "string"}}

_MAIN_SCHEMA = {
    "type": "object",
    "properties": {
        "room_prejoin_state": _ROOM_PREJOIN_STATE_CONFIG_SCHEMA,
        "room_invite_state_types": _ROOM_INVITE_STATE_TYPES_SCHEMA,
    },
}

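A condensed, standalone sketch of how `_get_prejoin_state_types` resolves a parsed config (plain strings stand in for the `EventTypes` constants, and the legacy/spaces branches are omitted): the defaults are included unless disabled, then any additional types are appended.

from typing import Iterable

_DEFAULTS = ["m.room.join_rules", "m.room.canonical_alias", "m.room.avatar",
             "m.room.encryption", "m.room.name"]


def prejoin_state_types(config: dict) -> Iterable[str]:
    prejoin = config.get("room_prejoin_state") or {}
    if not prejoin.get("disable_default_event_types"):
        yield from _DEFAULTS
    yield from prejoin.get("additional_event_types", [])


# Only the custom type is shared once the defaults are disabled:
cfg = {
    "room_prejoin_state": {
        "disable_default_event_types": True,
        "additional_event_types": ["org.example.custom.event.type"],
    }
}
assert list(prejoin_state_types(cfg)) == ["org.example.custom.event.type"]
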
@@ -24,7 +24,7 @@ from ._base import Config, ConfigError
_CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR"

# Map from canonicalised cache name to cache.
_CACHES = {}
_CACHES = {}  # type: Dict[str, Callable[[float], None]]

# a lock on the contents of _CACHES
_CACHES_LOCK = threading.Lock()
@@ -59,7 +59,9 @@ def _canonicalise_cache_name(cache_name: str) -> str:
    return cache_name.lower()


def add_resizable_cache(cache_name: str, cache_resize_callback: Callable):
def add_resizable_cache(
    cache_name: str, cache_resize_callback: Callable[[float], None]
):
    """Register a cache whose size can dynamically change

    Args:

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config._base import Config
from synapse.types import JsonDict

@@ -27,3 +28,11 @@ class ExperimentalConfig(Config):

        # MSC2858 (multiple SSO identity providers)
        self.msc2858_enabled = experimental.get("msc2858_enabled", False)  # type: bool

        # Spaces (MSC1772, MSC2946, MSC3083, etc)
        self.spaces_enabled = experimental.get("spaces_enabled", False)  # type: bool
        if self.spaces_enabled:
            KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083

        # MSC3026 (busy presence state)
        self.msc3026_enabled = experimental.get("msc3026_enabled", False)  # type: bool

@@ -404,7 +404,11 @@ def _parse_key_servers(key_servers, federation_verify_certificates):
    try:
        jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA)
    except jsonschema.ValidationError as e:
        raise ConfigError("Unable to parse 'trusted_key_servers': " + e.message)
        raise ConfigError(
            "Unable to parse 'trusted_key_servers': {}".format(
                e.message  # noqa: B306, jsonschema.ValidationError.message is a valid attribute
            )
        )

    for server in key_servers:
        server_name = server["server_name"]

@@ -56,7 +56,9 @@ class MetricsConfig(Config):
            try:
                check_requirements("sentry")
            except DependencyException as e:
                raise ConfigError(e.message)
                raise ConfigError(
                    e.message  # noqa: B306, DependencyException.message is a property
                )

            self.sentry_dsn = config["sentry"].get("dsn")
            if not self.sentry_dsn:

@@ -15,11 +15,12 @@
# limitations under the License.

from collections import Counter
from typing import Iterable, Mapping, Optional, Tuple, Type
from typing import Iterable, List, Mapping, Optional, Tuple, Type

import attr

from synapse.config._util import validate_config
from synapse.config.sso import SsoAttributeRequirement
from synapse.python_dependencies import DependencyException, check_requirements
from synapse.types import Collection, JsonDict
from synapse.util.module_loader import load_module
@@ -41,7 +42,9 @@ class OIDCConfig(Config):
        try:
            check_requirements("oidc")
        except DependencyException as e:
            raise ConfigError(e.message) from e
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            ) from e

        # check we don't have any duplicate idp_ids now. (The SSO handler will also
        # check for duplicates when the REST listeners get registered, but that happens
@@ -76,6 +79,9 @@ class OIDCConfig(Config):
        #       Note that, if this is changed, users authenticating via that provider
        #       will no longer be recognised as the same user!
        #
        #       (Use "oidc" here if you are migrating from an old "oidc_config"
        #       configuration.)
        #
        #   idp_name: A user-facing name for this identity provider, which is used to
        #       offer the user a choice of login mechanisms.
        #
@@ -191,6 +197,24 @@ class OIDCConfig(Config):
        #       which is set to the claims returned by the UserInfo Endpoint and/or
        #       in the ID Token.
        #
        # It is possible to configure Synapse to only allow logins if certain attributes
        # match particular values in the OIDC userinfo. The requirements can be listed under
        # `attribute_requirements` as shown below. All of the listed attributes must
        # match for the login to be permitted. Additional attributes can be added to
        # userinfo by expanding the `scopes` section of the OIDC config to retrieve
        # additional information from the OIDC provider.
        #
        # If the OIDC claim is a list, then the attribute must match any value in the list.
        # Otherwise, it must exactly match the value of the claim. Using the example
        # below, the `family_name` claim MUST be "Stephensson", but the `groups`
        # claim MUST contain "admin".
        #
        #   attribute_requirements:
        #     - attribute: family_name
        #       value: "Stephensson"
        #     - attribute: groups
        #       value: "admin"
        #
        # See https://github.com/matrix-org/synapse/blob/master/docs/openid.md
        # for information on how to configure these options.
        #
@@ -223,34 +247,9 @@ class OIDCConfig(Config):
        #       localpart_template: "{{{{ user.login }}}}"
        #       display_name_template: "{{{{ user.name }}}}"
        #       email_template: "{{{{ user.email }}}}"

        # For use with Keycloak
        #
        #- idp_id: keycloak
        #  idp_name: Keycloak
        #  issuer: "https://127.0.0.1:8443/auth/realms/my_realm_name"
        #  client_id: "synapse"
        #  client_secret: "copy secret generated in Keycloak UI"
        #  scopes: ["openid", "profile"]

        # For use with Github
        #
        #- idp_id: github
        #  idp_name: Github
        #  idp_brand: github
        #  discover: false
        #  issuer: "https://github.com/"
        #  client_id: "your-client-id" # TO BE FILLED
        #  client_secret: "your-client-secret" # TO BE FILLED
        #  authorization_endpoint: "https://github.com/login/oauth/authorize"
        #  token_endpoint: "https://github.com/login/oauth/access_token"
        #  userinfo_endpoint: "https://api.github.com/user"
        #  scopes: ["read:user"]
        #  user_mapping_provider:
        #    config:
        #      subject_claim: "id"
        #      localpart_template: "{{{{ user.login }}}}"
        #      display_name_template: "{{{{ user.name }}}}"
        #  attribute_requirements:
        #    - attribute: userGroup
        #      value: "synapseUsers"
        """.format(
            mapping_provider=DEFAULT_USER_MAPPING_PROVIDER
        )
@@ -329,6 +328,10 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
        },
        "allow_existing_users": {"type": "boolean"},
        "user_mapping_provider": {"type": ["object", "null"]},
        "attribute_requirements": {
            "type": "array",
            "items": SsoAttributeRequirement.JSON_SCHEMA,
        },
    },
}

@@ -465,6 +468,11 @@ def _parse_oidc_config_dict(
            jwt_header=client_secret_jwt_key_config["jwt_header"],
            jwt_payload=client_secret_jwt_key_config.get("jwt_payload", {}),
        )
    # parse attribute_requirements from config (list of dicts) into a list of SsoAttributeRequirement
    attribute_requirements = [
        SsoAttributeRequirement(**x)
        for x in oidc_config.get("attribute_requirements", [])
    ]

    return OidcProviderConfig(
        idp_id=idp_id,
@@ -488,6 +496,7 @@ def _parse_oidc_config_dict(
        allow_existing_users=oidc_config.get("allow_existing_users", False),
        user_mapping_provider_class=user_mapping_provider_class,
        user_mapping_provider_config=user_mapping_provider_config,
        attribute_requirements=attribute_requirements,
    )


@@ -577,3 +586,6 @@ class OidcProviderConfig:

    # the config of the user mapping provider
    user_mapping_provider_config = attr.ib()

    # required attributes to require in userinfo to allow login/registration
    attribute_requirements = attr.ib(type=List[SsoAttributeRequirement])

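A standalone sketch of the matching rule documented in the sample config above: when the userinfo claim is a list, the required value must appear in it; otherwise the claim must equal the value exactly. (This mirrors the described behaviour; the real check lives in Synapse's SSO handling, not in this file.)

def attribute_requirement_met(userinfo: dict, attribute: str, value: str) -> bool:
    claim = userinfo.get(attribute)
    if isinstance(claim, list):
        return value in claim
    return claim == value


userinfo = {"family_name": "Stephensson", "groups": ["admin", "staff"]}
assert attribute_requirement_met(userinfo, "family_name", "Stephensson")
assert attribute_requirement_met(userinfo, "groups", "admin")
assert not attribute_requirement_met(userinfo, "groups", "root")
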
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict
from typing import Dict, Optional

from ._base import Config

@@ -21,8 +21,10 @@ class RateLimitConfig:
    def __init__(
        self,
        config: Dict[str, float],
        defaults={"per_second": 0.17, "burst_count": 3.0},
        defaults: Optional[Dict[str, float]] = None,
    ):
        defaults = defaults or {"per_second": 0.17, "burst_count": 3.0}

        self.per_second = config.get("per_second", defaults["per_second"])
        self.burst_count = int(config.get("burst_count", defaults["burst_count"]))

@@ -95,11 +97,11 @@ class RatelimitConfig(Config):

        self.rc_joins_local = RateLimitConfig(
            config.get("rc_joins", {}).get("local", {}),
            defaults={"per_second": 0.1, "burst_count": 3},
            defaults={"per_second": 0.1, "burst_count": 10},
        )
        self.rc_joins_remote = RateLimitConfig(
            config.get("rc_joins", {}).get("remote", {}),
            defaults={"per_second": 0.01, "burst_count": 3},
            defaults={"per_second": 0.01, "burst_count": 10},
        )

        # Ratelimit cross-user key requests:
@@ -187,10 +189,10 @@ class RatelimitConfig(Config):
        #rc_joins:
        #  local:
        #    per_second: 0.1
        #    burst_count: 3
        #    burst_count: 10
        #  remote:
        #    per_second: 0.01
        #    burst_count: 3
        #    burst_count: 10
        #
        #rc_3pid_validation:
        #  per_second: 0.003

@@ -298,9 +298,9 @@ class RegistrationConfig(Config):
        #
        #allowed_local_3pids:
        #  - medium: email
        #    pattern: '.*@matrix\\.org'
        #    pattern: '^[^@]+@matrix\\.org$'
        #  - medium: email
        #    pattern: '.*@vector\\.im'
        #    pattern: '^[^@]+@vector\\.im$'
        #  - medium: msisdn
        #    pattern: '\\+44'

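The sample patterns gain `^...$` anchors because an unanchored pattern like `.*@matrix\.org` also matches addresses that merely contain that suffix. A quick demonstration of the difference:

import re

loose = re.compile(r".*@matrix\.org")
strict = re.compile(r"^[^@]+@matrix\.org$")

evil = "attacker@matrix.org.evil.example"
assert loose.search(evil)        # the loose pattern is fooled
assert not strict.search(evil)   # the anchored pattern is not
assert strict.search("alice@matrix.org")
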
@@ -176,7 +176,9 @@ class ContentRepositoryConfig(Config):
                check_requirements("url_preview")

            except DependencyException as e:
                raise ConfigError(e.message)
                raise ConfigError(
                    e.message  # noqa: B306, DependencyException.message is a property
                )

            if "url_preview_ip_range_blacklist" not in config:
                raise ConfigError(

@@ -76,7 +76,9 @@ class SAML2Config(Config):
        try:
            check_requirements("saml2")
        except DependencyException as e:
            raise ConfigError(e.message)
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            )

        self.saml2_enabled = True

@@ -27,6 +27,7 @@ import yaml
from netaddr import AddrFormatError, IPNetwork, IPSet

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_server_name

from ._base import Config, ConfigError
@@ -238,7 +239,20 @@ class ServerConfig(Config):
        self.public_baseurl = config.get("public_baseurl")

        # Whether to enable user presence.
        self.use_presence = config.get("use_presence", True)
        presence_config = config.get("presence") or {}
        self.use_presence = presence_config.get("enabled")
        if self.use_presence is None:
            self.use_presence = config.get("use_presence", True)

        # Custom presence router module
        self.presence_router_module_class = None
        self.presence_router_config = None
        presence_router_config = presence_config.get("presence_router")
        if presence_router_config:
            (
                self.presence_router_module_class,
                self.presence_router_config,
            ) = load_module(presence_router_config, ("presence", "presence_router"))

        # Whether to update the user directory or not. This should be set to
        # false only if we are updating the user directory in a worker
@@ -834,9 +848,28 @@ class ServerConfig(Config):
        #
        #soft_file_limit: 0

        # Set to false to disable presence tracking on this homeserver.
        # Presence tracking allows users to see the state (e.g online/offline)
        # of other local and remote users.
        #
        #use_presence: false
        presence:
          # Uncomment to disable presence tracking on this homeserver. This option
          # replaces the previous top-level 'use_presence' option.
          #
          #enabled: false

          # Presence routers are third-party modules that can specify additional logic
          # to where presence updates from users are routed.
          #
          presence_router:
            # The custom module's class. Uncomment to use a custom presence router module.
            #
            #module: "my_custom_router.PresenceRouter"

            # Configuration options of the custom module. Refer to your module's
            # documentation for available options.
            #
            #config:
            #  example_option: 'something'

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API. Defaults to

@@ -270,7 +270,7 @@ class TlsConfig(Config):
        tls_certificate_path,
        tls_private_key_path,
        acme_domain,
        **kwargs
        **kwargs,
    ):
        """If the acme_domain is specified acme will be enabled.
        If the TLS paths are not specified the default will be certs in the

@@ -39,7 +39,9 @@ class TracerConfig(Config):
        try:
            check_requirements("opentracing")
        except DependencyException as e:
            raise ConfigError(e.message)
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            )

        # The tracer is enabled so sanitize the config

@@ -191,7 +191,7 @@ def _context_info_cb(ssl_connection, where, ret):
        # ... we further assume that SSLClientConnectionCreator has set the
        # '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
        tls_protocol._synapse_tls_verifier.verify_context_info_cb(ssl_connection, where)
    except:  # noqa: E722, taken from the twisted implementation
    except BaseException:  # taken from the twisted implementation
        logger.exception("Error during info_callback")
        f = Failure()
        tls_protocol.failVerification(f)
@@ -219,7 +219,7 @@ class SSLClientConnectionCreator:
        # ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
        # tls_protocol so that the SSL context's info callback has something to
        # call to do the cert verification.
        setattr(tls_protocol, "_synapse_tls_verifier", self._verifier)
        tls_protocol._synapse_tls_verifier = self._verifier
        return connection

@@ -57,7 +57,7 @@ from synapse.util.metrics import Measure
from synapse.util.retryutils import NotRetryingDestination

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

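The corrected import uses the standard `TYPE_CHECKING` idiom: the name is only imported for type checkers, which avoids an import cycle at runtime, and the annotation is written as a string. A minimal sketch of the pattern (the handler class here is a hypothetical example, not Synapse code):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from synapse.server import HomeServer  # only seen by mypy, never at runtime


class SomeHandler:  # hypothetical example class
    def __init__(self, hs: "HomeServer"):
        self.clock = hs.get_clock()
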
@@ -162,7 +162,7 @@ def check(
    logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])

    if event.type == EventTypes.Member:
        _is_membership_change_allowed(event, auth_events)
        _is_membership_change_allowed(room_version_obj, event, auth_events)
        logger.debug("Allowing! %s", event)
        return

@@ -220,8 +220,19 @@ def _can_federate(event: EventBase, auth_events: StateMap[EventBase]) -> bool:


def _is_membership_change_allowed(
    event: EventBase, auth_events: StateMap[EventBase]
    room_version: RoomVersion, event: EventBase, auth_events: StateMap[EventBase]
) -> None:
    """
    Confirms that the event which changes membership is an allowed change.

    Args:
        room_version: The version of the room.
        event: The event to check.
        auth_events: The current auth events of the room.

    Raises:
        AuthError if the event is not allowed.
    """
    membership = event.content["membership"]

    # Check if this is the room creator joining:
@@ -315,14 +326,19 @@ def _is_membership_change_allowed(
        if user_level < invite_level:
            raise AuthError(403, "You don't have permission to invite users")
    elif Membership.JOIN == membership:
        # Joins are valid iff caller == target and they were:
        #     invited: They are accepting the invitation
        #     joined: It's a NOOP
        # Joins are valid iff caller == target and:
        #     * They are not banned.
        #     * They are accepting a previously sent invitation.
        #     * They are already joined (it's a NOOP).
        #     * The room is public or restricted.
        if event.user_id != target_user_id:
            raise AuthError(403, "Cannot force another user to join.")
        elif target_banned:
            raise AuthError(403, "You are banned from this room")
        elif join_rule == JoinRules.PUBLIC:
        elif join_rule == JoinRules.PUBLIC or (
            room_version.msc3083_join_rules
            and join_rule == JoinRules.MSC3083_RESTRICTED
        ):
            pass
        elif join_rule == JoinRules.INVITE:
            if not caller_in_room and not caller_invited:

@@ -98,7 +98,7 @@ class DefaultDictProperty(DictProperty):


class _EventInternalMetadata:
    __slots__ = ["_dict", "stream_ordering"]
    __slots__ = ["_dict", "stream_ordering", "outlier"]

    def __init__(self, internal_metadata_dict: JsonDict):
        # we have to copy the dict, because it turns out that the same dict is
@@ -108,7 +108,10 @@ class _EventInternalMetadata:
        # the stream ordering of this event. None, until it has been persisted.
        self.stream_ordering = None  # type: Optional[int]

    outlier = DictProperty("outlier")  # type: bool
        # whether this event is an outlier (ie, whether we have the state at that point
        # in the DAG)
        self.outlier = False

    out_of_band_membership = DictProperty("out_of_band_membership")  # type: bool
    send_on_behalf_of = DictProperty("send_on_behalf_of")  # type: str
    recheck_redaction = DictProperty("recheck_redaction")  # type: bool
@@ -129,7 +132,7 @@ class _EventInternalMetadata:
        return dict(self._dict)

    def is_outlier(self) -> bool:
        return self._dict.get("outlier", False)
        return self.outlier

    def is_out_of_band_membership(self) -> bool:
        """Whether this is an out of band membership, like an invite or an invite
@@ -327,9 +330,11 @@ class FrozenEvent(EventBase):
        self,
        event_dict: JsonDict,
        room_version: RoomVersion,
        internal_metadata_dict: JsonDict = {},
        internal_metadata_dict: Optional[JsonDict] = None,
        rejected_reason: Optional[str] = None,
    ):
        internal_metadata_dict = internal_metadata_dict or {}

        event_dict = dict(event_dict)

        # Signatures is a dict of dicts, and this is faster than doing a
@@ -383,9 +388,11 @@ class FrozenEventV2(EventBase):
        self,
        event_dict: JsonDict,
        room_version: RoomVersion,
        internal_metadata_dict: JsonDict = {},
        internal_metadata_dict: Optional[JsonDict] = None,
        rejected_reason: Optional[str] = None,
    ):
        internal_metadata_dict = internal_metadata_dict or {}

        event_dict = dict(event_dict)

        # Signatures is a dict of dicts, and this is faster than doing a
@@ -504,9 +511,11 @@ def _event_type_from_format_version(format_version: int) -> Type[EventBase]:
def make_event_from_dict(
    event_dict: JsonDict,
    room_version: RoomVersion = RoomVersions.V1,
    internal_metadata_dict: JsonDict = {},
    internal_metadata_dict: Optional[JsonDict] = None,
    rejected_reason: Optional[str] = None,
) -> EventBase:
    """Construct an EventBase from the given event dict"""
    event_type = _event_type_from_format_version(room_version.event_format)
    return event_type(event_dict, room_version, internal_metadata_dict, rejected_reason)
    return event_type(
        event_dict, room_version, internal_metadata_dict or {}, rejected_reason
    )

synapse/events/presence_router.py (new file, 104 lines)
@@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING, Dict, Iterable, Set, Union

from synapse.api.presence import UserPresenceState

if TYPE_CHECKING:
    from synapse.server import HomeServer


class PresenceRouter:
    """
    A module that the homeserver will call upon to help route user presence updates to
    additional destinations. If a custom presence router is configured, calls will be
    passed to that instead.
    """

    ALL_USERS = "ALL"

    def __init__(self, hs: "HomeServer"):
        self.custom_presence_router = None

        # Check whether a custom presence router module has been configured
        if hs.config.presence_router_module_class:
            # Initialise the module
            self.custom_presence_router = hs.config.presence_router_module_class(
                config=hs.config.presence_router_config, module_api=hs.get_module_api()
            )

            # Ensure the module has implemented the required methods
            required_methods = ["get_users_for_states", "get_interested_users"]
            for method_name in required_methods:
                if not hasattr(self.custom_presence_router, method_name):
                    raise Exception(
                        "PresenceRouter module '%s' must implement all required methods: %s"
                        % (
                            hs.config.presence_router_module_class.__name__,
                            ", ".join(required_methods),
                        )
                    )

    async def get_users_for_states(
        self,
        state_updates: Iterable[UserPresenceState],
    ) -> Dict[str, Set[UserPresenceState]]:
        """
        Given an iterable of user presence updates, determine where each one
        needs to go.

        Args:
            state_updates: An iterable of user presence state updates.

        Returns:
            A dictionary of user_id -> set of UserPresenceState, indicating which
            presence updates each user should receive.
        """
        if self.custom_presence_router is not None:
            # Ask the custom module
            return await self.custom_presence_router.get_users_for_states(
                state_updates=state_updates
            )

        # Don't include any extra destinations for presence updates
        return {}

    async def get_interested_users(self, user_id: str) -> Union[Set[str], ALL_USERS]:
        """
        Retrieve a list of users that `user_id` is interested in receiving the
        presence of. This will be in addition to those they share a room with.
        Optionally, the object PresenceRouter.ALL_USERS can be returned to indicate
        that this user should receive all incoming local and remote presence updates.

        Note that this method will only be called for local users, but can return users
        that are local or remote.

        Args:
            user_id: A user requesting presence updates.

        Returns:
            A set of user IDs to return presence updates for, or ALL_USERS to return all
            known updates.
        """
        if self.custom_presence_router is not None:
            # Ask the custom module for interested users
            return await self.custom_presence_router.get_interested_users(
                user_id=user_id
            )

        # A custom presence router is not defined.
        # Don't report any additional interested users
        return set()

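For illustration, a minimal custom module satisfying the interface that `PresenceRouter.__init__` checks for above (both required methods present). This is a hypothetical example, not a module shipped with Synapse; it would be enabled by pointing `presence.presence_router.module` at it in the homeserver config. It routes every presence update to a single fixed observer account.

from typing import Dict, Iterable, Set

from synapse.api.presence import UserPresenceState
from synapse.events.presence_router import PresenceRouter


class ExamplePresenceRouter:
    """Routes every presence update to one fixed observer account."""

    def __init__(self, config: dict, module_api):
        self._config = config
        self._api = module_api

    async def get_users_for_states(
        self, state_updates: Iterable[UserPresenceState]
    ) -> Dict[str, Set[UserPresenceState]]:
        # Send every update to the observer, on top of normal routing.
        return {"@observer:example.com": set(state_updates)}

    async def get_interested_users(self, user_id: str):
        # The observer wants all presence; everyone else gets no extras.
        if user_id == "@observer:example.com":
            return PresenceRouter.ALL_USERS
        return set()
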
@@ -13,12 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable, Union
from typing import TYPE_CHECKING, Union

from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.types import Requester, StateMap

if TYPE_CHECKING:
    from synapse.server import HomeServer


class ThirdPartyEventRules:
    """Allows server admins to provide a Python module implementing an extra
@@ -28,7 +31,7 @@ class ThirdPartyEventRules:
    behaviours.
    """

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.third_party_rules = None

        self.store = hs.get_datastore()
@@ -95,10 +98,9 @@ class ThirdPartyEventRules:
        if self.third_party_rules is None:
            return True

        ret = await self.third_party_rules.on_create_room(
        return await self.third_party_rules.on_create_room(
            requester, config, is_requester_admin
        )
        return ret

    async def check_threepid_can_be_invited(
        self, medium: str, address: str, room_id: str
@@ -119,10 +121,9 @@ class ThirdPartyEventRules:

        state_events = await self._get_state_map_for_room(room_id)

        ret = await self.third_party_rules.check_threepid_can_be_invited(
        return await self.third_party_rules.check_threepid_can_be_invited(
            medium, address, state_events
        )
        return ret

    async def check_visibility_can_be_modified(
        self, room_id: str, new_visibility: str
@@ -143,7 +144,7 @@ class ThirdPartyEventRules:
        check_func = getattr(
            self.third_party_rules, "check_visibility_can_be_modified", None
        )
        if not check_func or not isinstance(check_func, Callable):
        if not check_func or not callable(check_func):
            return True

        state_events = await self._get_state_map_for_room(room_id)

@@ -22,6 +22,7 @@ from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.util.async_helpers import yieldable_gather_results
from synapse.util.frozenutils import unfreeze

from . import EventBase

@@ -54,6 +55,8 @@ def prune_event(event: EventBase) -> EventBase:
            event.internal_metadata.stream_ordering
        )

    pruned_event.internal_metadata.outlier = event.internal_metadata.outlier

    # Mark the event as redacted
    pruned_event.internal_metadata.redacted = True

@@ -400,10 +403,19 @@ class EventClientSerializer:
            # If there is an edit replace the content, preserving existing
            # relations.

            # Ensure we take copies of the edit content, otherwise we risk modifying
            # the original event.
            edit_content = edit.content.copy()

            # Unfreeze the event content if necessary, so that we may modify it below
            edit_content = unfreeze(edit_content)
            serialized_event["content"] = edit_content.get("m.new_content", {})

            # Check for existing relations
            relations = event.content.get("m.relates_to")
            serialized_event["content"] = edit.content.get("m.new_content", {})
            if relations:
                serialized_event["content"]["m.relates_to"] = relations
                # Keep the relations, ensuring we use a dict copy of the original
                serialized_event["content"]["m.relates_to"] = relations.copy()
            else:
                serialized_event["content"].pop("m.relates_to", None)

@@ -27,11 +27,13 @@ from typing import (
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
    Union,
)

import attr
from prometheus_client import Counter

from twisted.internet import defer
@@ -62,7 +64,7 @@ from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -100,7 +102,7 @@ class FederationClient(FederationBase):
            max_len=1000,
            expiry_ms=120 * 1000,
            reset_expiry_on_get=False,
        )
        )  # type: ExpiringCache[str, EventBase]

    def _clear_tried_cache(self):
        """Clear pdu_destination_tried cache"""
@@ -455,6 +457,7 @@ class FederationClient(FederationBase):
        description: str,
        destinations: Iterable[str],
        callback: Callable[[str], Awaitable[T]],
        failover_on_unknown_endpoint: bool = False,
    ) -> T:
        """Try an operation on a series of servers, until it succeeds

@@ -474,6 +477,10 @@ class FederationClient(FederationBase):
                next server tried. Normally the stacktrace is logged but this is
                suppressed if the exception is an InvalidResponseError.

            failover_on_unknown_endpoint: if True, we will try other servers if it looks
                like a server doesn't support the endpoint. This is typically useful
                if the endpoint in question is new or experimental.

        Returns:
            The result of callback, if it succeeds

@@ -493,16 +500,31 @@ class FederationClient(FederationBase):
            except UnsupportedRoomVersionError:
                raise
            except HttpResponseException as e:
                if not 500 <= e.code < 600:
                    raise e.to_synapse_error()
                else:
                    logger.warning(
                        "Failed to %s via %s: %i %s",
                        description,
                        destination,
                        e.code,
                        e.args[0],
                    )
                synapse_error = e.to_synapse_error()
                failover = False

                if 500 <= e.code < 600:
                    failover = True

                elif failover_on_unknown_endpoint:
                    # there is no good way to detect an "unknown" endpoint. Dendrite
                    # returns a 404 (with no body); synapse returns a 400
                    # with M_UNRECOGNISED.
                    if e.code == 404 or (
                        e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
                    ):
                        failover = True

                if not failover:
                    raise synapse_error from e

                logger.warning(
                    "Failed to %s via %s: %i %s",
                    description,
                    destination,
                    e.code,
                    e.args[0],
                )
            except Exception:
                logger.warning(
                    "Failed to %s via %s", description, destination, exc_info=True
@@ -1042,3 +1064,141 @@ class FederationClient(FederationBase):
        # If we don't manage to find it, return None. It's not an error if a
        # server doesn't give it to us.
        return None

    async def get_space_summary(
        self,
        destinations: Iterable[str],
        room_id: str,
        suggested_only: bool,
        max_rooms_per_space: Optional[int],
        exclude_rooms: List[str],
    ) -> "FederationSpaceSummaryResult":
        """
        Call other servers to get a summary of the given space


        Args:
            destinations: The remote servers. We will try them in turn, omitting any
                that have been blacklisted.

            room_id: ID of the space to be queried

            suggested_only: If true, ask the remote server to only return children
                with the "suggested" flag set

            max_rooms_per_space: A limit on the number of children to return for each
                space

            exclude_rooms: A list of room IDs to tell the remote server to skip

        Returns:
            a parsed FederationSpaceSummaryResult

        Raises:
            SynapseError if we were unable to get a valid summary from any of the
                remote servers
        """

        async def send_request(destination: str) -> FederationSpaceSummaryResult:
            res = await self.transport_layer.get_space_summary(
                destination=destination,
                room_id=room_id,
                suggested_only=suggested_only,
                max_rooms_per_space=max_rooms_per_space,
                exclude_rooms=exclude_rooms,
            )

            try:
                return FederationSpaceSummaryResult.from_json_dict(res)
            except ValueError as e:
                raise InvalidResponseError(str(e))

        return await self._try_destination_list(
            "fetch space summary",
            destinations,
            send_request,
            failover_on_unknown_endpoint=True,
        )


@attr.s(frozen=True, slots=True)
class FederationSpaceSummaryEventResult:
    """Represents a single event in the result of a successful get_space_summary call.

    It's essentially just a serialised event object, but we do a bit of parsing and
    validation in `from_json_dict` and store some of the validated properties in
    object attributes.
    """

    event_type = attr.ib(type=str)
    state_key = attr.ib(type=str)
    via = attr.ib(type=Sequence[str])

    # the raw data, including the above keys
    data = attr.ib(type=JsonDict)

    @classmethod
    def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryEventResult":
        """Parse an event within the result of a /spaces/ request

        Args:
            d: json object to be parsed

        Raises:
            ValueError if d is not a valid event
        """

        event_type = d.get("type")
        if not isinstance(event_type, str):
            raise ValueError("Invalid event: 'event_type' must be a str")

        state_key = d.get("state_key")
        if not isinstance(state_key, str):
            raise ValueError("Invalid event: 'state_key' must be a str")

        content = d.get("content")
        if not isinstance(content, dict):
            raise ValueError("Invalid event: 'content' must be a dict")

        via = content.get("via")
        if not isinstance(via, Sequence):
            raise ValueError("Invalid event: 'via' must be a list")
        if any(not isinstance(v, str) for v in via):
            raise ValueError("Invalid event: 'via' must be a list of strings")

        return cls(event_type, state_key, via, d)


@attr.s(frozen=True, slots=True)
class FederationSpaceSummaryResult:
    """Represents the data returned by a successful get_space_summary call."""

    rooms = attr.ib(type=Sequence[JsonDict])
    events = attr.ib(type=Sequence[FederationSpaceSummaryEventResult])

    @classmethod
    def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryResult":
        """Parse the result of a /spaces/ request

        Args:
            d: json object to be parsed

        Raises:
            ValueError if d is not a valid /spaces/ response
        """
        rooms = d.get("rooms")
        if not isinstance(rooms, Sequence):
            raise ValueError("'rooms' must be a list")
        if any(not isinstance(r, dict) for r in rooms):
            raise ValueError("Invalid room in 'rooms' list")

        events = d.get("events")
        if not isinstance(events, Sequence):
            raise ValueError("'events' must be a list")
        if any(not isinstance(e, dict) for e in events):
            raise ValueError("Invalid event in 'events' list")
        parsed_events = [
            FederationSpaceSummaryEventResult.from_json_dict(e) for e in events
        ]

        return cls(rooms, parsed_events)

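A worked example (assuming the two result classes defined above) of feeding a minimal /spaces/ response through the parser. A malformed payload raises ValueError, which `get_space_summary` converts into an InvalidResponseError so that `_try_destination_list` can fail over to the next server:

response = {
    "rooms": [{"room_id": "!space:example.com", "name": "A space"}],
    "events": [
        {
            "type": "m.space.child",
            "state_key": "!child:example.com",
            "content": {"via": ["example.com"]},
        }
    ],
}
summary = FederationSpaceSummaryResult.from_json_dict(response)
assert summary.events[0].via == ["example.com"]
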
@@ -35,7 +35,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.constants import EduTypes, EventTypes
from synapse.api.errors import (
    AuthError,
    Codes,
@@ -63,7 +63,7 @@ from synapse.replication.http.federation import (
    ReplicationFederationSendEduRestServlet,
    ReplicationGetQueryRestServlet,
)
from synapse.types import JsonDict, get_domain_from_id
from synapse.types import JsonDict
from synapse.util import glob_to_regex, json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
@@ -727,27 +727,6 @@ class FederationServer(FederationBase):
            if the event was unacceptable for any other reason (eg, too large,
            too many prev_events, couldn't find the prev_events)
        """
        # check that it's actually being sent from a valid destination to
        # workaround bug #1753 in 0.18.5 and 0.18.6
        if origin != get_domain_from_id(pdu.sender):
            # We continue to accept join events from any server; this is
            # necessary for the federation join dance to work correctly.
            # (When we join over federation, the "helper" server is
            # responsible for sending out the join event, rather than the
            # origin. See bug #1893. This is also true for some third party
            # invites).
            if not (
                pdu.type == "m.room.member"
                and pdu.content
                and pdu.content.get("membership", None)
                in (Membership.JOIN, Membership.INVITE)
            ):
                logger.info(
                    "Discarding PDU %s from invalid origin %s", pdu.event_id, origin
                )
                return
            else:
                logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)

        # We've already checked that we know the room version by this point
        room_version = await self.store.get_room_version(pdu.room_id)
@@ -760,22 +739,20 @@ class FederationServer(FederationBase):

        await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)

    def __str__(self):
    def __str__(self) -> str:
        return "<ReplicationLayer(%s)>" % self.server_name

    async def exchange_third_party_invite(
        self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict
    ):
        ret = await self.handler.exchange_third_party_invite(
    ) -> None:
        await self.handler.exchange_third_party_invite(
            sender_user_id, target_user_id, room_id, signed
        )
        return ret

    async def on_exchange_third_party_invite_request(self, event_dict: Dict):
        ret = await self.handler.on_exchange_third_party_invite_request(event_dict)
        return ret
    async def on_exchange_third_party_invite_request(self, event_dict: Dict) -> None:
        await self.handler.on_exchange_third_party_invite_request(event_dict)

    async def check_server_matches_acl(self, server_name: str, room_id: str):
    async def check_server_matches_acl(self, server_name: str, room_id: str) -> None:
        """Check if the given server is allowed by the server ACLs in the room

        Args:
@@ -891,6 +868,7 @@ class FederationHandlerRegistry:

        # A rate limiter for incoming room key requests per origin.
        self._room_key_request_rate_limiter = Ratelimiter(
            store=hs.get_datastore(),
            clock=self.clock,
            rate_hz=self.config.rc_key_requests.per_second,
            burst_count=self.config.rc_key_requests.burst_count,
@@ -898,7 +876,7 @@ class FederationHandlerRegistry:

    def register_edu_handler(
        self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
    ):
    ) -> None:
        """Sets the handler callable that will be used to handle an incoming
        federation EDU of the given type.

@@ -917,7 +895,7 @@ class FederationHandlerRegistry:

    def register_query_handler(
        self, query_type: str, handler: Callable[[dict], Awaitable[JsonDict]]
    ):
    ) -> None:
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.

@@ -935,15 +913,17 @@ class FederationHandlerRegistry:

        self.query_handlers[query_type] = handler

    def register_instance_for_edu(self, edu_type: str, instance_name: str):
    def register_instance_for_edu(self, edu_type: str, instance_name: str) -> None:
        """Register that the EDU handler is on a different instance than master."""
        self._edu_type_to_instance[edu_type] = [instance_name]

    def register_instances_for_edu(self, edu_type: str, instance_names: List[str]):
    def register_instances_for_edu(
        self, edu_type: str, instance_names: List[str]
    ) -> None:
        """Register that the EDU handler is on multiple instances."""
        self._edu_type_to_instance[edu_type] = instance_names

    async def on_edu(self, edu_type: str, origin: str, content: dict):
    async def on_edu(self, edu_type: str, origin: str, content: dict) -> None:
        if not self.config.use_presence and edu_type == EduTypes.Presence:
            return

@@ -951,7 +931,9 @@ class FederationHandlerRegistry:
        # the limit, drop them.
        if (
            edu_type == EduTypes.RoomKeyRequest
            and not self._room_key_request_rate_limiter.can_do_action(origin)
            and not await self._room_key_request_rate_limiter.can_do_action(
                None, origin
            )
        ):
            return

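The hunks above are part of a migration that recurs throughout this commit range: `Ratelimiter` gains a datastore argument (`store=...`), and `can_do_action` becomes a coroutine whose first argument is a requester, or `None` when only a raw key (here, the origin server name) is available. A minimal sketch of that calling convention, assuming a plain leaky bucket rather than Synapse's actual implementation:

import time
from typing import Dict, Hashable, Optional, Tuple

class SketchRatelimiter:
    """Toy leaky-bucket limiter mirroring the (requester, key) call shape."""

    def __init__(self, rate_hz: float, burst_count: int):
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        self._actions = {}  # type: Dict[Hashable, Tuple[float, float]]

    async def can_do_action(self, requester, key: Optional[Hashable] = None) -> bool:
        # In Synapse the store would be consulted here for per-user
        # overrides; this sketch just derives a bucket key.
        if key is None:
            key = requester.user.to_string()
        now = time.monotonic()
        count, last = self._actions.get(key, (0.0, now))
        # Tokens leak away at rate_hz since the last action.
        count = max(0.0, count - (now - last) * self.rate_hz)
        if count >= self.burst_count:
            return False  # over the limit; the EDU above is simply dropped
        self._actions[key] = (count + 1.0, now)
        return True
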
@@ -31,25 +31,39 @@ Events are replicated via a separate events stream.

import logging
from collections import namedtuple
from typing import Dict, List, Tuple, Type
from typing import (
    TYPE_CHECKING,
    Dict,
    Hashable,
    Iterable,
    List,
    Optional,
    Sized,
    Tuple,
    Type,
)

from sortedcontainers import SortedDict

from twisted.internet import defer

from synapse.api.presence import UserPresenceState
from synapse.federation.sender import AbstractFederationSender, FederationSender
from synapse.metrics import LaterGauge
from synapse.replication.tcp.streams.federation import FederationStream
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
from synapse.util.metrics import Measure

from .units import Edu

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class FederationRemoteSendQueue:
class FederationRemoteSendQueue(AbstractFederationSender):
    """A drop in replacement for FederationSender"""

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()
@@ -58,7 +72,7 @@ class FederationRemoteSendQueue:
        # We may have multiple federation sender instances, so we need to track
        # their positions separately.
        self._sender_instances = hs.config.worker.federation_shard_config.instances
        self._sender_positions = {}
        self._sender_positions = {}  # type: Dict[str, int]

        # Pending presence map user_id -> UserPresenceState
        self.presence_map = {}  # type: Dict[str, UserPresenceState]
@@ -71,7 +85,7 @@ class FederationRemoteSendQueue:
        # Stream position -> (user_id, destinations)
        self.presence_destinations = (
            SortedDict()
        )  # type: SortedDict[int, Tuple[str, List[str]]]
        )  # type: SortedDict[int, Tuple[str, Iterable[str]]]

        # (destination, key) -> EDU
        self.keyed_edu = {}  # type: Dict[Tuple[str, tuple], Edu]
@@ -94,7 +108,7 @@ class FederationRemoteSendQueue:
        # we make a new function, so we need to make a new function so the inner
        # lambda binds to the queue rather than to the name of the queue which
        # changes. ARGH.
        def register(name, queue):
        def register(name: str, queue: Sized) -> None:
            LaterGauge(
                "synapse_federation_send_queue_%s_size" % (queue_name,),
                "",
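The "ARGH" comment in that hunk alludes to Python's late-binding closures: a lambda created in a loop captures the loop variable itself, not its value at that iteration. A standalone demonstration of the pitfall and of the helper-function fix the code uses:

fns = [lambda: name for name in ["a", "b"]]
print([f() for f in fns])  # ['b', 'b'] -- both closures see the final value

def make_fn(name):
    # `name` is now a parameter, bound per call, so each closure keeps its own value
    return lambda: name

fns = [make_fn(name) for name in ["a", "b"]]
print([f() for f in fns])  # ['a', 'b']
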
@@ -115,13 +129,13 @@ class FederationRemoteSendQueue:

        self.clock.looping_call(self._clear_queue, 30 * 1000)

    def _next_pos(self):
    def _next_pos(self) -> int:
        pos = self.pos
        self.pos += 1
        self.pos_time[self.clock.time_msec()] = pos
        return pos

    def _clear_queue(self):
    def _clear_queue(self) -> None:
        """Clear the queues for anything older than N minutes"""

        FIVE_MINUTES_AGO = 5 * 60 * 1000
@@ -138,7 +152,7 @@ class FederationRemoteSendQueue:

        self._clear_queue_before_pos(position_to_delete)

    def _clear_queue_before_pos(self, position_to_delete):
    def _clear_queue_before_pos(self, position_to_delete: int) -> None:
        """Clear all the queues from before a given position"""
        with Measure(self.clock, "send_queue._clear"):
            # Delete things out of presence maps
@@ -188,13 +202,18 @@ class FederationRemoteSendQueue:
            for key in keys[:i]:
                del self.edus[key]

    def notify_new_events(self, max_token):
    def notify_new_events(self, max_token: RoomStreamToken) -> None:
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
        # stream.
        pass
        # This should never get called.
        raise NotImplementedError()

    def build_and_send_edu(self, destination, edu_type, content, key=None):
    def build_and_send_edu(
        self,
        destination: str,
        edu_type: str,
        content: JsonDict,
        key: Optional[Hashable] = None,
    ) -> None:
        """As per FederationSender"""
        if destination == self.server_name:
            logger.info("Not sending EDU to ourselves")
@@ -218,38 +237,39 @@ class FederationRemoteSendQueue:

        self.notifier.on_new_replication_data()

    def send_read_receipt(self, receipt):
    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
        """As per FederationSender

        Args:
            receipt (synapse.types.ReadReceipt):
            receipt:
        """
        # nothing to do here: the replication listener will handle it.
        return defer.succeed(None)

    def send_presence(self, states):
    def send_presence(self, states: List[UserPresenceState]) -> None:
        """As per FederationSender

        Args:
            states (list(UserPresenceState))
            states
        """
        pos = self._next_pos()

        # We only want to send presence for our own users, so lets always just
        # filter here just in case.
        local_states = list(filter(lambda s: self.is_mine_id(s.user_id), states))
        local_states = [s for s in states if self.is_mine_id(s.user_id)]

        self.presence_map.update({state.user_id: state for state in local_states})
        self.presence_changed[pos] = [state.user_id for state in local_states]

        self.notifier.on_new_replication_data()

    def send_presence_to_destinations(self, states, destinations):
    def send_presence_to_destinations(
        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
    ) -> None:
        """As per FederationSender

        Args:
            states (list[UserPresenceState])
            destinations (list[str])
            states
            destinations
        """
        for state in states:
            pos = self._next_pos()
@@ -258,15 +278,18 @@ class FederationRemoteSendQueue:

        self.notifier.on_new_replication_data()

    def send_device_messages(self, destination):
    def send_device_messages(self, destination: str) -> None:
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
        # stream.

    def get_current_token(self):
    def wake_destination(self, server: str) -> None:
        pass

    def get_current_token(self) -> int:
        return self.pos - 1

    def federation_ack(self, instance_name, token):
    def federation_ack(self, instance_name: str, token: int) -> None:
        if self._sender_instances:
            # If we have configured multiple federation sender instances we need
            # to track their positions separately, and only clear the queue up
@@ -504,13 +527,16 @@ ParsedFederationStreamData = namedtuple(
)


def process_rows_for_federation(transaction_queue, rows):
def process_rows_for_federation(
    transaction_queue: FederationSender,
    rows: List[FederationStream.FederationStreamRow],
) -> None:
    """Parse a list of rows from the federation stream and put them in the
    transaction queue ready for sending to the relevant homeservers.

    Args:
        transaction_queue (FederationSender)
        rows (list(synapse.replication.tcp.streams.federation.FederationStream.FederationStreamRow))
        transaction_queue
        rows
    """

    # The federation stream contains a bunch of different types of

@@ -13,14 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import logging
from typing import Dict, Hashable, Iterable, List, Optional, Set, Tuple
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set, Tuple

from prometheus_client import Counter

from twisted.internet import defer

import synapse
import synapse.metrics
from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
@@ -40,9 +40,13 @@ from synapse.metrics import (
    events_processed_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import ReadReceipt, RoomStreamToken
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
from synapse.util.metrics import Measure, measure_func

if TYPE_CHECKING:
    from synapse.events.presence_router import PresenceRouter
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

sent_pdus_destination_dist_count = Counter(
@@ -65,8 +69,91 @@ CATCH_UP_STARTUP_DELAY_SEC = 15
CATCH_UP_STARTUP_INTERVAL_SEC = 5


class FederationSender:
    def __init__(self, hs: "synapse.server.HomeServer"):
class AbstractFederationSender(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def notify_new_events(self, max_token: RoomStreamToken) -> None:
        """This gets called when we have some new events we might want to
        send out to other servers.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
        """Send a RR to any other servers in the room

        Args:
            receipt: receipt to be sent
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def send_presence(self, states: List[UserPresenceState]) -> None:
        """Send the new presence states to the appropriate destinations.

        This actually queues up the presence states ready for sending and
        triggers a background task to process them and send out the transactions.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def send_presence_to_destinations(
        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
    ) -> None:
        """Send the given presence states to the given destinations.

        Args:
            destinations:
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def build_and_send_edu(
        self,
        destination: str,
        edu_type: str,
        content: JsonDict,
        key: Optional[Hashable] = None,
    ) -> None:
        """Construct an Edu object, and queue it for sending

        Args:
            destination: name of server to send to
            edu_type: type of EDU to send
            content: content of EDU
            key: clobbering key for this edu
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def send_device_messages(self, destination: str) -> None:
        raise NotImplementedError()

    @abc.abstractmethod
    def wake_destination(self, destination: str) -> None:
        """Called when we want to retry sending transactions to a remote.

        This is mainly useful if the remote server has been down and we think it
        might have come back.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_current_token(self) -> int:
        raise NotImplementedError()

    @abc.abstractmethod
    def federation_ack(self, instance_name: str, token: int) -> None:
        raise NotImplementedError()

    @abc.abstractmethod
    async def get_replication_rows(
        self, instance_name: str, from_token: int, to_token: int, target_row_count: int
    ) -> Tuple[List[Tuple[int, Tuple]], int, bool]:
        raise NotImplementedError()


class FederationSender(AbstractFederationSender):
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.server_name = hs.hostname

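For readers unfamiliar with the `metaclass=abc.ABCMeta` pattern introduced above: declaring the interface this way makes Python refuse to instantiate any subclass that leaves an `@abc.abstractmethod` unimplemented, which is the point of giving FederationSender and FederationRemoteSendQueue a common base. A self-contained sketch:

import abc

class AbstractGreeter(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def greet(self, name: str) -> str:
        raise NotImplementedError()

class Greeter(AbstractGreeter):
    def greet(self, name: str) -> str:
        return "hello %s" % name

class BrokenGreeter(AbstractGreeter):
    pass  # greet() not implemented

print(Greeter().greet("world"))  # fine
try:
    BrokenGreeter()              # rejected at instantiation time
except TypeError as e:
    print(e)
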
@@ -76,6 +163,7 @@ class FederationSender:
        self.clock = hs.get_clock()
        self.is_mine_id = hs.is_mine_id

        self._presence_router = None  # type: Optional[PresenceRouter]
        self._transaction_manager = TransactionManager(hs)

        self._instance_name = hs.get_instance_name()
@@ -432,7 +520,7 @@ class FederationSender:
            queue.flush_read_receipts_for_room(room_id)

    @preserve_fn  # the caller should not yield on this
    async def send_presence(self, states: List[UserPresenceState]):
    async def send_presence(self, states: List[UserPresenceState]) -> None:
        """Send the new presence states to the appropriate destinations.

        This actually queues up the presence states ready for sending and
@@ -494,11 +582,26 @@ class FederationSender:
            self._get_per_destination_queue(destination).send_presence(states)

    @measure_func("txnqueue._process_presence")
    async def _process_presence_inner(self, states: List[UserPresenceState]):
    async def _process_presence_inner(self, states: List[UserPresenceState]) -> None:
        """Given a list of states populate self.pending_presence_by_dest and
        poke to send a new transaction to each destination
        """
        hosts_and_states = await get_interested_remotes(self.store, states, self.state)
        # We pull the presence router here instead of __init__
        # to prevent a dependency cycle:
        #
        # AuthHandler -> Notifier -> FederationSender
        # -> PresenceRouter -> ModuleApi -> AuthHandler
        if self._presence_router is None:
            self._presence_router = self.hs.get_presence_router()

        assert self._presence_router is not None

        hosts_and_states = await get_interested_remotes(
            self.store,
            self._presence_router,
            states,
            self.state,
        )

        for destinations, states in hosts_and_states:
            for destination in destinations:
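The lazy lookup of the presence router above is a standard way to break a constructor-time dependency cycle: defer resolving the collaborator until first use, by which point the whole object graph exists. A generic sketch of the idiom (names are illustrative, not Synapse's):

from typing import Any, Dict, Optional

class Sender:
    def __init__(self, registry: Dict[str, Any]):
        # Resolving "router" here could recurse back into our own
        # construction; stash the registry and look it up lazily instead.
        self._registry = registry
        self._router = None  # type: Optional[Any]

    def _get_router(self) -> Any:
        if self._router is None:
            # Safe now: the registry finished wiring after __init__ ran.
            self._router = self._registry["router"]
        return self._router
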
@@ -516,9 +619,9 @@ class FederationSender:
        self,
        destination: str,
        edu_type: str,
        content: dict,
        content: JsonDict,
        key: Optional[Hashable] = None,
    ):
    ) -> None:
        """Construct an Edu object, and queue it for sending

        Args:
@@ -545,7 +648,7 @@ class FederationSender:

        self.send_edu(edu, key)

    def send_edu(self, edu: Edu, key: Optional[Hashable]):
    def send_edu(self, edu: Edu, key: Optional[Hashable]) -> None:
        """Queue an EDU for sending

        Args:
@@ -563,7 +666,7 @@ class FederationSender:
        else:
            queue.send_edu(edu)

    def send_device_messages(self, destination: str):
    def send_device_messages(self, destination: str) -> None:
        if destination == self.server_name:
            logger.warning("Not sending device update to ourselves")
            return
@@ -575,7 +678,7 @@ class FederationSender:

        self._get_per_destination_queue(destination).attempt_new_transaction()

    def wake_destination(self, destination: str):
    def wake_destination(self, destination: str) -> None:
        """Called when we want to retry sending transactions to a remote.

        This is mainly useful if the remote server has been down and we think it
@@ -599,6 +702,10 @@ class FederationSender:
        # to a worker.
        return 0

    def federation_ack(self, instance_name: str, token: int) -> None:
        # It is not expected that this gets called on FederationSender.
        raise NotImplementedError()

    @staticmethod
    async def get_replication_rows(
        instance_name: str, from_token: int, to_token: int, target_row_count: int
@@ -607,7 +714,7 @@ class FederationSender:
        # to a worker.
        return [], 0, False

    async def _wake_destinations_needing_catchup(self):
    async def _wake_destinations_needing_catchup(self) -> None:
        """
        Wakes up destinations that need catch-up and are not currently being
        backed off from.
@@ -627,16 +734,18 @@ class FederationSender:
                self._catchup_after_startup_timer = None
                break

            last_processed = destinations_to_wake[-1]

            destinations_to_wake = [
                d
                for d in destinations_to_wake
                if self._federation_shard_config.should_handle(self._instance_name, d)
            ]

            for last_processed in destinations_to_wake:
            for destination in destinations_to_wake:
                logger.info(
                    "Destination %s has outstanding catch-up, waking up.",
                    last_processed,
                )
                self.wake_destination(last_processed)
                self.wake_destination(destination)
                await self.clock.sleep(CATCH_UP_STARTUP_INTERVAL_SEC)

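The last hunk above fixes a shadowing bug: `last_processed` was both the checkpoint for the next pass and the loop variable, so filtering the batch per-instance could silently rewind or skip the checkpoint. A toy model of the corrected shape, with hypothetical names:

def wake_batch(destinations, should_handle):
    last_processed = destinations[-1]  # checkpoint the full batch first
    mine = [d for d in destinations if should_handle(d)]
    for destination in mine:           # distinct name: checkpoint survives
        print("waking", destination)
    return last_processed

print(wake_batch(["a.org", "b.org", "c.org"], lambda d: d != "b.org"))  # c.org
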
@@ -15,7 +15,7 @@
# limitations under the License.
import datetime
import logging
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, cast
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple

import attr
from prometheus_client import Counter
@@ -29,6 +29,7 @@ from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
from synapse.federation.units import Edu
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import ReadReceipt
@@ -77,6 +78,7 @@ class PerDestinationQueue:
        self._transaction_manager = transaction_manager
        self._instance_name = hs.get_instance_name()
        self._federation_shard_config = hs.config.worker.federation_shard_config
        self._state = hs.get_state_handler()

        self._should_send_on_this_instance = True
        if not self._federation_shard_config.should_handle(
@@ -415,22 +417,97 @@ class PerDestinationQueue:
                "This should not happen." % event_ids
            )

        if logger.isEnabledFor(logging.INFO):
            rooms = [p.room_id for p in catchup_pdus]
            logger.info("Catching up rooms to %s: %r", self._destination, rooms)
        # We send transactions with events from one room only, as its likely
        # that the remote will have to do additional processing, which may
        # take some time. It's better to give it small amounts of work
        # rather than risk the request timing out and repeatedly being
        # retried, and not making any progress.
        #
        # Note: `catchup_pdus` will have exactly one PDU per room.
        for pdu in catchup_pdus:
            # The PDU from the DB will be the last PDU in the room from
            # *this server* that wasn't sent to the remote. However, other
            # servers may have sent lots of events since then, and we want
            # to try and tell the remote only about the *latest* events in
            # the room. This is so that it doesn't get inundated by events
            # from various parts of the DAG, which all need to be processed.
            #
            # Note: this does mean that in large rooms a server coming back
            # online will get sent the same events from all the different
            # servers, but the remote will correctly deduplicate them and
            # handle it only once.

        await self._transaction_manager.send_new_transaction(
            self._destination, catchup_pdus, []
        )
            # Step 1, fetch the current extremities
            extrems = await self._store.get_prev_events_for_room(pdu.room_id)

        sent_transactions_counter.inc()
        final_pdu = catchup_pdus[-1]
        self._last_successful_stream_ordering = cast(
            int, final_pdu.internal_metadata.stream_ordering
        )
        await self._store.set_destination_last_successful_stream_ordering(
            self._destination, self._last_successful_stream_ordering
        )
            if pdu.event_id in extrems:
                # If the event is in the extremities, then great! We can just
                # use that without having to do further checks.
                room_catchup_pdus = [pdu]
            else:
                # If not, fetch the extremities and figure out which we can
                # send.
                extrem_events = await self._store.get_events_as_list(extrems)

                new_pdus = []
                for p in extrem_events:
                    # We pulled this from the DB, so it'll be non-null
                    assert p.internal_metadata.stream_ordering

                    # Filter out events that happened before the remote went
                    # offline
                    if (
                        p.internal_metadata.stream_ordering
                        < self._last_successful_stream_ordering
                    ):
                        continue

                    # Filter out events where the server is not in the room,
                    # e.g. it may have left/been kicked. *Ideally* we'd pull
                    # out the kick and send that, but it's a rare edge case
                    # so we don't bother for now (the server that sent the
                    # kick should send it out if its online).
                    hosts = await self._state.get_hosts_in_room_at_events(
                        p.room_id, [p.event_id]
                    )
                    if self._destination not in hosts:
                        continue

                    new_pdus.append(p)

                # If we've filtered out all the extremities, fall back to
                # sending the original event. This should ensure that the
                # server gets at least some of missed events (especially if
                # the other sending servers are up).
                if new_pdus:
                    room_catchup_pdus = new_pdus
                else:
                    room_catchup_pdus = [pdu]

            logger.info(
                "Catching up rooms to %s: %r", self._destination, pdu.room_id
            )

            await self._transaction_manager.send_new_transaction(
                self._destination, room_catchup_pdus, []
            )

            sent_transactions_counter.inc()

            # We pulled this from the DB, so it'll be non-null
            assert pdu.internal_metadata.stream_ordering

            # Note that we mark the last successful stream ordering as that
            # from the *original* PDU, rather than the PDU(s) we actually
            # send. This is because we use it to mark our position in the
            # queue of missed PDUs to process.
            self._last_successful_stream_ordering = (
                pdu.internal_metadata.stream_ordering
            )

            await self._store.set_destination_last_successful_stream_ordering(
                self._destination, self._last_successful_stream_ordering
            )

    def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
        if not self._pending_rrs:
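A compressed, runnable model of the per-room catch-up selection that the new code performs (Pdu here is a stand-in dataclass, not Synapse's EventBase): prefer current forward extremities that post-date the destination's last successful stream ordering and are visible to that destination, falling back to the original missed PDU when nothing survives the filter.

from dataclasses import dataclass
from typing import FrozenSet, List

@dataclass
class Pdu:
    event_id: str
    stream_ordering: int
    hosts: FrozenSet[str]  # servers in the room at this event

def pick_catchup_pdus(
    missed: Pdu, extremities: List[Pdu], destination: str, last_successful: int
) -> List[Pdu]:
    if any(e.event_id == missed.event_id for e in extremities):
        # The missed event is still an extremity: send it as-is.
        return [missed]
    survivors = [
        e
        for e in extremities
        if e.stream_ordering >= last_successful and destination in e.hosts
    ]
    return survivors or [missed]  # fall back to the original event

missed = Pdu("$old", 5, frozenset({"remote.example"}))
ext = [Pdu("$e1", 9, frozenset({"remote.example"})), Pdu("$e2", 2, frozenset())]
print(pick_catchup_pdus(missed, ext, "remote.example", last_successful=5))
# only $e1 survives: it is recent and visible to the destination
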
@@ -481,6 +558,13 @@ class PerDestinationQueue:
        contents, stream_id = await self._store.get_new_device_msgs_for_remote(
            self._destination, last_device_stream_id, to_device_stream_id, limit
        )
        for content in contents:
            message_id = content.get("message_id")
            if not message_id:
                continue

            set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)

        edus = [
            Edu(
                origin=self._server_name,

@@ -16,7 +16,7 @@

import logging
import urllib
from typing import Any, Dict, Optional
from typing import Any, Dict, List, Optional

from synapse.api.constants import Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
@@ -26,6 +26,7 @@ from synapse.api.urls import (
    FEDERATION_V2_PREFIX,
)
from synapse.logging.utils import log_function
from synapse.types import JsonDict

logger = logging.getLogger(__name__)

@@ -978,6 +979,38 @@ class TransportLayerClient:

        return self.client.get_json(destination=destination, path=path)

    async def get_space_summary(
        self,
        destination: str,
        room_id: str,
        suggested_only: bool,
        max_rooms_per_space: Optional[int],
        exclude_rooms: List[str],
    ) -> JsonDict:
        """
        Args:
            destination: The remote server
            room_id: The room ID to ask about.
            suggested_only: if True, only suggested rooms will be returned
            max_rooms_per_space: an optional limit to the number of children to be
                returned per space
            exclude_rooms: a list of any rooms we can skip
        """
        path = _create_path(
            FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/spaces/%s", room_id
        )

        params = {
            "suggested_only": suggested_only,
            "exclude_rooms": exclude_rooms,
        }
        if max_rooms_per_space is not None:
            params["max_rooms_per_space"] = max_rooms_per_space

        return await self.client.post_json(
            destination=destination, path=path, data=params
        )


def _create_path(federation_prefix, path, *args):
    """

@@ -18,7 +18,7 @@
import functools
import logging
import re
from typing import Optional, Tuple, Type
from typing import Container, Mapping, Optional, Sequence, Tuple, Type

import synapse
from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
@@ -29,7 +29,7 @@ from synapse.api.urls import (
    FEDERATION_V1_PREFIX,
    FEDERATION_V2_PREFIX,
)
from synapse.http.server import JsonResource
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
    parse_boolean_from_args,
    parse_integer_from_args,
@@ -44,7 +44,8 @@ from synapse.logging.opentracing import (
    whitelisted_homeserver,
)
from synapse.server import HomeServer
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
from synapse.types import JsonDict, ThirdPartyInstanceID, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.stringutils import parse_and_validate_server_name
from synapse.util.versionstring import get_version_string

@@ -424,13 +425,9 @@ class FederationSendServlet(BaseFederationServlet):
            logger.exception(e)
            return 400, {"error": "Invalid transaction"}

        try:
            code, response = await self.handler.on_incoming_transaction(
                origin, transaction_data
            )
        except Exception:
            logger.exception("on_incoming_transaction failed")
            raise
        code, response = await self.handler.on_incoming_transaction(
            origin, transaction_data
        )

        return code, response

@@ -619,8 +616,8 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
    PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"

    async def on_PUT(self, origin, content, query, room_id):
        content = await self.handler.on_exchange_third_party_invite_request(content)
        return 200, content
        await self.handler.on_exchange_third_party_invite_request(content)
        return 200, {}


class FederationClientKeysQueryServlet(BaseFederationServlet):
@@ -1376,6 +1373,40 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
        return 200, new_content


class FederationSpaceSummaryServlet(BaseFederationServlet):
    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
    PATH = "/spaces/(?P<room_id>[^/]*)"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Mapping[bytes, Sequence[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        suggested_only = content.get("suggested_only", False)
        if not isinstance(suggested_only, bool):
            raise SynapseError(
                400, "'suggested_only' must be a boolean", Codes.BAD_JSON
            )

        exclude_rooms = content.get("exclude_rooms", [])
        if not isinstance(exclude_rooms, list) or any(
            not isinstance(x, str) for x in exclude_rooms
        ):
            raise SynapseError(400, "bad value for 'exclude_rooms'", Codes.BAD_JSON)

        max_rooms_per_space = content.get("max_rooms_per_space")
        if max_rooms_per_space is not None and not isinstance(max_rooms_per_space, int):
            raise SynapseError(
                400, "bad value for 'max_rooms_per_space'", Codes.BAD_JSON
            )

        return 200, await self.handler.federation_space_summary(
            room_id, suggested_only, max_rooms_per_space, exclude_rooms
        )


class RoomComplexityServlet(BaseFederationServlet):
    """
    Indicates to other servers how complex (and therefore likely
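The new servlet validates each field of the untrusted request body before use. The same pattern, extracted into a standalone function (BadJson stands in for SynapseError(400, ..., Codes.BAD_JSON)):

from typing import Any, Mapping

class BadJson(Exception):
    pass

def parse_space_summary_body(content: Mapping[str, Any]) -> tuple:
    suggested_only = content.get("suggested_only", False)
    if not isinstance(suggested_only, bool):
        raise BadJson("'suggested_only' must be a boolean")

    exclude_rooms = content.get("exclude_rooms", [])
    if not isinstance(exclude_rooms, list) or any(
        not isinstance(x, str) for x in exclude_rooms
    ):
        raise BadJson("bad value for 'exclude_rooms'")

    max_rooms = content.get("max_rooms_per_space")  # None means "no limit"
    if max_rooms is not None and not isinstance(max_rooms, int):
        raise BadJson("bad value for 'max_rooms_per_space'")

    return suggested_only, exclude_rooms, max_rooms

print(parse_space_summary_body({"suggested_only": True, "exclude_rooms": []}))
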
@@ -1474,18 +1505,24 @@ DEFAULT_SERVLET_GROUPS = (
)


def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=None):
def register_servlets(
    hs: HomeServer,
    resource: HttpServer,
    authenticator: Authenticator,
    ratelimiter: FederationRateLimiter,
    servlet_groups: Optional[Container[str]] = None,
):
    """Initialize and register servlet classes.

    Will by default register all servlets. For custom behaviour, pass in
    a list of servlet_groups to register.

    Args:
        hs (synapse.server.HomeServer): homeserver
        resource (JsonResource): resource class to register to
        authenticator (Authenticator): authenticator to use
        ratelimiter (util.ratelimitutils.FederationRateLimiter): ratelimiter to use
        servlet_groups (list[str], optional): List of servlet groups to register.
        hs: homeserver
        resource: resource class to register to
        authenticator: authenticator to use
        ratelimiter: ratelimiter to use
        servlet_groups: List of servlet groups to register.
            Defaults to ``DEFAULT_SERVLET_GROUPS``.
    """
    if not servlet_groups:
@@ -1500,6 +1537,14 @@ def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=None):
            server_name=hs.hostname,
        ).register(resource)

    if hs.config.experimental.spaces_enabled:
        FederationSpaceSummaryServlet(
            handler=hs.get_space_summary_handler(),
            authenticator=authenticator,
            ratelimiter=ratelimiter,
            server_name=hs.hostname,
        ).register(resource)

    if "openid" in servlet_groups:
        for servletclass in OPENID_SERVLET_CLASSES:
            servletclass(

@@ -18,6 +18,7 @@ server protocol.
"""

import logging
from typing import Optional

import attr

@@ -98,7 +99,7 @@ class Transaction(JsonEncodedObject):
        "pdus",
    ]

    def __init__(self, transaction_id=None, pdus=[], **kwargs):
    def __init__(self, transaction_id=None, pdus: Optional[list] = None, **kwargs):
        """If we include a list of pdus then we decode then as PDU's
        automatically.
        """
@@ -107,7 +108,7 @@ class Transaction(JsonEncodedObject):
        if "edus" in kwargs and not kwargs["edus"]:
            del kwargs["edus"]

        super().__init__(transaction_id=transaction_id, pdus=pdus, **kwargs)
        super().__init__(transaction_id=transaction_id, pdus=pdus or [], **kwargs)

    @staticmethod
    def create_new(pdus, **kwargs):
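The Transaction hunk above replaces `pdus=[]` with `pdus: Optional[list] = None` plus `pdus or []` because Python evaluates default values once, at function definition time, so a mutable default is shared between calls. A toy demonstration of the pitfall and the fix:

def broken(item, acc=[]):
    acc.append(item)
    return acc

print(broken(1))  # [1]
print(broken(2))  # [1, 2] -- the same list leaked across calls

def fixed(item, acc=None):
    acc = acc if acc is not None else []  # fresh list per call
    acc.append(item)
    return acc

print(fixed(1))  # [1]
print(fixed(2))  # [2]
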
@@ -46,7 +46,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import JsonDict, get_domain_from_id

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

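This hunk and the many like it below only touch imports guarded by `TYPE_CHECKING`, which the typing module sets to False at runtime: the import is seen by type checkers but never executed, so it cannot create an import cycle. A generic, runnable sketch of the pattern:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only evaluated by mypy and friends, never at runtime, so it
    # cannot introduce an import cycle.
    from collections import OrderedDict

def first_key(d: "OrderedDict") -> object:
    # The annotation is a string, so no runtime import is needed.
    return next(iter(d))

print(first_key({"a": 1}))  # a
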

@@ -25,7 +25,7 @@ from synapse.types import GroupID, JsonDict, RoomID, UserID, get_domain_from_id
from synapse.util.async_helpers import concurrently_execute

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


@@ -24,7 +24,7 @@ from synapse.api.ratelimiting import Ratelimiter
from synapse.types import UserID

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -49,7 +49,7 @@ class BaseHandler:

        # The rate_hz and burst_count are overridden on a per-user basis
        self.request_ratelimiter = Ratelimiter(
            clock=self.clock, rate_hz=0, burst_count=0
            store=self.store, clock=self.clock, rate_hz=0, burst_count=0
        )
        self._rc_message = self.hs.config.rc_message

@@ -57,6 +57,7 @@ class BaseHandler:
        # by the presence of rate limits in the config
        if self.hs.config.rc_admin_redaction:
            self.admin_redaction_ratelimiter = Ratelimiter(
                store=self.store,
                clock=self.clock,
                rate_hz=self.hs.config.rc_admin_redaction.per_second,
                burst_count=self.hs.config.rc_admin_redaction.burst_count,
@@ -91,11 +92,6 @@ class BaseHandler:
        if app_service is not None:
            return  # do not ratelimit app service senders

        # Disable rate limiting of users belonging to any AS that is configured
        # not to be rate limited in its registration file (rate_limited: true|false).
        if requester.app_service and not requester.app_service.is_rate_limited():
            return

        messages_per_second = self._rc_message.per_second
        burst_count = self._rc_message.burst_count

@@ -113,11 +109,11 @@ class BaseHandler:
        if is_admin_redaction and self.admin_redaction_ratelimiter:
            # If we have separate config for admin redactions, use a separate
            # ratelimiter as to not have user_ids clash
            self.admin_redaction_ratelimiter.ratelimit(user_id, update=update)
            await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
        else:
            # Override rate and burst count per-user
            self.request_ratelimiter.ratelimit(
                user_id,
            await self.request_ratelimiter.ratelimit(
                requester,
                rate_hz=messages_per_second,
                burst_count=burst_count,
                update=update,

@@ -25,7 +25,7 @@ from synapse.replication.http.account_data import (
from synapse.types import JsonDict, UserID

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer


class AccountDataHandler:

@@ -18,7 +18,7 @@ import email.utils
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import TYPE_CHECKING, List
from typing import TYPE_CHECKING, List, Optional

from synapse.api.errors import StoreError, SynapseError
from synapse.logging.context import make_deferred_yieldable
@@ -27,7 +27,7 @@ from synapse.types import UserID
from synapse.util import stringutils

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -241,7 +241,10 @@ class AccountValidityHandler:
        return True

    async def renew_account_for_user(
        self, user_id: str, expiration_ts: int = None, email_sent: bool = False
        self,
        user_id: str,
        expiration_ts: Optional[int] = None,
        email_sent: bool = False,
    ) -> int:
        """Renews the account attached to a given user by pushing back the
        expiration date by the current validity period in the server's

@@ -24,7 +24,7 @@ from twisted.web.resource import Resource
from synapse.app import check_bind_error

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


@@ -25,7 +25,7 @@ from synapse.visibility import filter_events_for_client
from ._base import BaseHandler

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


@@ -38,7 +38,7 @@ from synapse.types import Collection, JsonDict, RoomAlias, RoomStreamToken, User
from synapse.util.metrics import Measure

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -182,7 +182,7 @@ class ApplicationServicesHandler:
        self,
        stream_key: str,
        new_token: Optional[int],
        users: Collection[Union[str, UserID]] = [],
        users: Optional[Collection[Union[str, UserID]]] = None,
    ):
        """This is called by the notifier in the background
        when a ephemeral event handled by the homeserver.
@@ -215,7 +215,7 @@ class ApplicationServicesHandler:
            # We only start a new background process if necessary rather than
            # optimistically (to cut down on overhead).
            self._notify_interested_services_ephemeral(
                services, stream_key, new_token, users
                services, stream_key, new_token, users or []
            )

    @wrap_as_background_process("notify_interested_services_ephemeral")

@@ -70,7 +70,7 @@ from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.threepids import canonicalise_email

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -238,6 +238,7 @@ class AuthHandler(BaseHandler):
        # Ratelimiter for failed auth during UIA. Uses same ratelimit config
        # as per `rc_login.failed_attempts`.
        self._failed_uia_attempts_ratelimiter = Ratelimiter(
            store=self.store,
            clock=self.clock,
            rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
            burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
@@ -248,6 +249,7 @@ class AuthHandler(BaseHandler):

        # Ratelimitier for failed /login attempts
        self._failed_login_attempts_ratelimiter = Ratelimiter(
            store=self.store,
            clock=hs.get_clock(),
            rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
            burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
@@ -352,7 +354,7 @@ class AuthHandler(BaseHandler):
        requester_user_id = requester.user.to_string()

        # Check if we should be ratelimited due to too many previous failed attempts
        self._failed_uia_attempts_ratelimiter.ratelimit(requester_user_id, update=False)
        await self._failed_uia_attempts_ratelimiter.ratelimit(requester, update=False)

        # build a list of supported flows
        supported_ui_auth_types = await self._get_available_ui_auth_types(
@@ -373,7 +375,9 @@ class AuthHandler(BaseHandler):
            )
        except LoginError:
            # Update the ratelimiter to say we failed (`can_do_action` doesn't raise).
            self._failed_uia_attempts_ratelimiter.can_do_action(requester_user_id)
            await self._failed_uia_attempts_ratelimiter.can_do_action(
                requester,
            )
            raise

        # find the completed login type
@@ -886,6 +890,19 @@ class AuthHandler(BaseHandler):
        )
        return result

    def can_change_password(self) -> bool:
        """Get whether users on this server are allowed to change or set a password.

        Both `config.password_enabled` and `config.password_localdb_enabled` must be true.

        Note that any account (even SSO accounts) are allowed to add passwords if the above
        is true.

        Returns:
            Whether users on this server are allowed to change or set a password
        """
        return self._password_enabled and self._password_localdb_enabled

    def get_supported_login_types(self) -> Iterable[str]:
        """Get a the login types supported for the /login API

@@ -969,8 +986,8 @@ class AuthHandler(BaseHandler):
            # We also apply account rate limiting using the 3PID as a key, as
            # otherwise using 3PID bypasses the ratelimiting based on user ID.
            if ratelimit:
                self._failed_login_attempts_ratelimiter.ratelimit(
                    (medium, address), update=False
                await self._failed_login_attempts_ratelimiter.ratelimit(
                    None, (medium, address), update=False
                )

            # Check for login providers that support 3pid login types
@@ -1003,8 +1020,8 @@ class AuthHandler(BaseHandler):
                # this code path, which is fine as then the per-user ratelimit
                # will kick in below.
                if ratelimit:
                    self._failed_login_attempts_ratelimiter.can_do_action(
                        (medium, address)
                    await self._failed_login_attempts_ratelimiter.can_do_action(
                        None, (medium, address)
                    )
                raise LoginError(403, "", errcode=Codes.FORBIDDEN)

@@ -1026,8 +1043,8 @@ class AuthHandler(BaseHandler):

        # Check if we've hit the failed ratelimit (but don't update it)
        if ratelimit:
            self._failed_login_attempts_ratelimiter.ratelimit(
                qualified_user_id.lower(), update=False
            await self._failed_login_attempts_ratelimiter.ratelimit(
                None, qualified_user_id.lower(), update=False
            )

        try:
@@ -1038,8 +1055,8 @@ class AuthHandler(BaseHandler):
            # exception and masking the LoginError. The actual ratelimiting
            # should have happened above.
            if ratelimit:
                self._failed_login_attempts_ratelimiter.can_do_action(
                    qualified_user_id.lower()
                await self._failed_login_attempts_ratelimiter.can_do_action(
                    None, qualified_user_id.lower()
                )
            raise

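The auth hunks above lean on two limiter entry points with different contracts: `ratelimit(...)` raises when over the limit (used as a gate before the attempt), while `can_do_action(...)` merely records an attempt and returns a bool (used to count a failure without masking the original LoginError, as the inline comment notes), and `update=False` peeks without consuming. A minimal sketch of that split, assuming a bare counter rather than Synapse's implementation:

from typing import Dict, Hashable, Optional

class LimitExceededError(Exception):
    pass

class SketchLimiter:
    def __init__(self, burst_count: int):
        self.burst_count = burst_count
        self._counts = {}  # type: Dict[Hashable, int]

    async def can_do_action(
        self, requester, key: Optional[Hashable] = None, update: bool = True
    ) -> bool:
        # Bool variant: record the attempt (unless peeking) and report.
        key = key if key is not None else requester
        allowed = self._counts.get(key, 0) < self.burst_count
        if update and allowed:
            self._counts[key] = self._counts.get(key, 0) + 1
        return allowed

    async def ratelimit(self, requester, key=None, update: bool = True) -> None:
        # Gate variant: same bookkeeping, but failure becomes an exception.
        if not await self.can_do_action(requester, key, update=update):
            raise LimitExceededError()
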
@@ -27,7 +27,7 @@ from synapse.http.site import SynapseRequest
from synapse.types import UserID, map_username_to_mxid_localpart

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


@@ -23,7 +23,7 @@ from synapse.types import Requester, UserID, create_requester
from ._base import BaseHandler

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


@@ -45,7 +45,7 @@ from synapse.util.retryutils import NotRetryingDestination
from ._base import BaseHandler

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -166,7 +166,7 @@ class DeviceWorkerHandler(BaseHandler):

        # Fetch the current state at the time.
        try:
            event_ids = await self.store.get_forward_extremeties_for_room(
            event_ids = await self.store.get_forward_extremities_for_room_at_stream_ordering(
                room_id, stream_ordering=stream_ordering
            )
        except errors.StoreError:
@@ -631,7 +631,7 @@ class DeviceListUpdater:
            max_len=10000,
            expiry_ms=30 * 60 * 1000,
            iterable=True,
        )
        )  # type: ExpiringCache[str, Set[str]]

        # Attempt to resync out of sync device lists every 30s.
        self._resync_retry_in_progress = False
@@ -760,7 +760,7 @@ class DeviceListUpdater:
        """Given a list of updates for a user figure out if we need to do a full
        resync, or whether we have enough data that we can just apply the delta.
        """
        seen_updates = self._seen_updates.get(user_id, set())
        seen_updates = self._seen_updates.get(user_id, set())  # type: Set[str]

        extremity = await self.store.get_device_list_last_stream_id_for_remote(user_id)

@@ -907,6 +907,7 @@ class DeviceListUpdater:
        master_key = result.get("master_key")
        self_signing_key = result.get("self_signing_key")

        ignore_devices = False
        # If the remote server has more than ~1000 devices for this user
        # we assume that something is going horribly wrong (e.g. a bot
        # that logs in and creates a new device every time it tries to
@@ -925,6 +926,12 @@ class DeviceListUpdater:
                len(devices),
            )
            devices = []
            ignore_devices = True
        else:
            cached_devices = await self.store.get_cached_devices_for_user(user_id)
            if cached_devices == {d["device_id"]: d for d in devices}:
                devices = []
                ignore_devices = True

        for device in devices:
            logger.debug(
@@ -934,7 +941,10 @@ class DeviceListUpdater:
                stream_id,
            )

        await self.store.update_remote_device_list_cache(user_id, devices, stream_id)
        if not ignore_devices:
            await self.store.update_remote_device_list_cache(
                user_id, devices, stream_id
            )
        device_ids = [device["device_id"] for device in devices]

        # Handle cross-signing keys.
@@ -945,7 +955,8 @@ class DeviceListUpdater:
        )
        device_ids = device_ids + cross_signing_device_ids

        await self.device_handler.notify_device_update(user_id, device_ids)
        if device_ids:
            await self.device_handler.notify_device_update(user_id, device_ids)

        # We clobber the seen updates since we've re-synced from a given
        # point.
@@ -973,14 +984,17 @@ class DeviceListUpdater:
        """
        device_ids = []

        if master_key:
        current_keys_map = await self.store.get_e2e_cross_signing_keys_bulk([user_id])
        current_keys = current_keys_map.get(user_id) or {}

        if master_key and master_key != current_keys.get("master"):
            await self.store.set_e2e_cross_signing_key(user_id, "master", master_key)
            _, verify_key = get_verify_key_from_cross_signing_key(master_key)
            # verify_key is a VerifyKey from signedjson, which uses
            # .version to denote the portion of the key ID after the
            # algorithm and colon, which is the device ID
            device_ids.append(verify_key.version)
        if self_signing_key:
        if self_signing_key and self_signing_key != current_keys.get("self_signing"):
            await self.store.set_e2e_cross_signing_key(
                user_id, "self_signing", self_signing_key
            )

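The device-list hunks above all make the updater compare incoming data against what is already cached and skip the write-and-notify path when nothing changed. A generic sketch of that idempotency guard:

def apply_update(cache: dict, user_id: str, new_keys: dict) -> bool:
    """Returns True only if something actually changed (and so peers
    should be notified)."""
    if cache.get(user_id) == new_keys:
        return False  # no-op update: don't clobber or re-notify
    cache[user_id] = new_keys
    return True

cache = {}
print(apply_update(cache, "@alice:example.org", {"master": "key1"}))  # True
print(apply_update(cache, "@alice:example.org", {"master": "key1"}))  # False
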
@@ -21,10 +21,10 @@ from synapse.api.errors import SynapseError
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
from synapse.logging.context import run_in_background
|
||||
from synapse.logging.opentracing import (
|
||||
SynapseTags,
|
||||
get_active_span_text_map,
|
||||
log_kv,
|
||||
set_tag,
|
||||
start_active_span,
|
||||
)
|
||||
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
|
||||
from synapse.types import JsonDict, Requester, UserID, get_domain_from_id
|
||||
@@ -32,7 +32,7 @@ from synapse.util import json_encoder
|
||||
from synapse.util.stringutils import random_string
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.app.homeserver import HomeServer
|
||||
from synapse.server import HomeServer
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -81,6 +81,7 @@ class DeviceMessageHandler:
|
||||
)
|
||||
|
||||
self._ratelimiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=hs.get_clock(),
|
||||
rate_hz=hs.config.rc_key_requests.per_second,
|
||||
burst_count=hs.config.rc_key_requests.burst_count,
|
||||
@@ -182,7 +183,10 @@ class DeviceMessageHandler:
|
||||
) -> None:
|
||||
sender_user_id = requester.user.to_string()
|
||||
|
||||
set_tag("number_of_messages", len(messages))
|
||||
message_id = random_string(16)
|
||||
set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)
|
||||
|
||||
log_kv({"number_of_to_device_messages": len(messages)})
|
||||
set_tag("sender", sender_user_id)
|
||||
local_messages = {}
|
||||
remote_messages = {} # type: Dict[str, Dict[str, Dict[str, JsonDict]]]
|
||||
@@ -191,8 +195,8 @@ class DeviceMessageHandler:
|
||||
if (
|
||||
message_type == EduTypes.RoomKeyRequest
|
||||
and user_id != sender_user_id
|
||||
and self._ratelimiter.can_do_action(
|
||||
(sender_user_id, requester.device_id)
|
||||
and await self._ratelimiter.can_do_action(
|
||||
requester, (sender_user_id, requester.device_id)
|
||||
)
|
||||
):
|
||||
continue
|
||||
@@ -204,32 +208,35 @@ class DeviceMessageHandler:
|
||||
"content": message_content,
|
||||
"type": message_type,
|
||||
"sender": sender_user_id,
|
||||
"message_id": message_id,
|
||||
}
|
||||
for device_id, message_content in by_device.items()
|
||||
}
|
||||
if messages_by_device:
|
||||
local_messages[user_id] = messages_by_device
|
||||
log_kv(
|
||||
{
|
||||
"user_id": user_id,
|
||||
"device_id": list(messages_by_device),
|
||||
}
|
||||
)
|
||||
else:
|
||||
destination = get_domain_from_id(user_id)
|
||||
remote_messages.setdefault(destination, {})[user_id] = by_device
|
||||
|
||||
message_id = random_string(16)
|
||||
|
||||
context = get_active_span_text_map()
|
||||
|
||||
remote_edu_contents = {}
|
||||
for destination, messages in remote_messages.items():
|
||||
with start_active_span("to_device_for_user"):
|
||||
set_tag("destination", destination)
|
||||
remote_edu_contents[destination] = {
|
||||
"messages": messages,
|
||||
"sender": sender_user_id,
|
||||
"type": message_type,
|
||||
"message_id": message_id,
|
||||
"org.matrix.opentracing_context": json_encoder.encode(context),
|
||||
}
|
||||
log_kv({"destination": destination})
|
||||
remote_edu_contents[destination] = {
|
||||
"messages": messages,
|
||||
"sender": sender_user_id,
|
||||
"type": message_type,
|
||||
"message_id": message_id,
|
||||
"org.matrix.opentracing_context": json_encoder.encode(context),
|
||||
}
|
||||
|
||||
log_kv({"local_messages": local_messages})
|
||||
stream_id = await self.store.add_messages_to_device_inbox(
|
||||
local_messages, remote_edu_contents
|
||||
)
|
||||
@@ -238,7 +245,6 @@ class DeviceMessageHandler:
|
||||
"to_device_key", stream_id, users=local_messages.keys()
|
||||
)
|
||||
|
||||
log_kv({"remote_messages": remote_messages})
|
||||
if self.federation_sender:
|
||||
for destination in remote_messages.keys():
|
||||
# Enqueue a new federation transaction to send the new
|
||||
|
||||
@@ -38,11 +38,10 @@ from synapse.types import (
|
||||
)
|
||||
from synapse.util import json_decoder, unwrapFirstError
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.caches.expiringcache import ExpiringCache
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.app.homeserver import HomeServer
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -1008,7 +1007,7 @@ class E2eKeysHandler:
|
||||
return signature_list, failures
|
||||
|
||||
async def _get_e2e_cross_signing_verify_key(
|
||||
self, user_id: str, key_type: str, from_user_id: str = None
|
||||
self, user_id: str, key_type: str, from_user_id: Optional[str] = None
|
||||
) -> Tuple[JsonDict, str, VerifyKey]:
|
||||
"""Fetch locally or remotely query for a cross-signing public key.
|
||||
|
||||
@@ -1292,17 +1291,6 @@ class SigningKeyEduUpdater:
|
||||
# user_id -> list of updates waiting to be handled.
|
||||
self._pending_updates = {} # type: Dict[str, List[Tuple[JsonDict, JsonDict]]]
|
||||
|
||||
# Recently seen stream ids. We don't bother keeping these in the DB,
|
||||
# but they're useful to have them about to reduce the number of spurious
|
||||
# resyncs.
|
||||
self._seen_updates = ExpiringCache(
|
||||
cache_name="signing_key_update_edu",
|
||||
clock=self.clock,
|
||||
max_len=10000,
|
||||
expiry_ms=30 * 60 * 1000,
|
||||
iterable=True,
|
||||
)
|
||||
|
||||
async def incoming_signing_key_update(
|
||||
self, origin: str, edu_content: JsonDict
|
||||
) -> None:
|
||||
|
||||
@@ -29,7 +29,7 @@ from synapse.types import JsonDict
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.app.homeserver import HomeServer
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -21,7 +21,17 @@ import itertools
import logging
from collections.abc import Container
from http import HTTPStatus
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from typing import (
    TYPE_CHECKING,
    Dict,
    Iterable,
    List,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

import attr
from signedjson.key import decode_verify_key_bytes

@@ -171,15 +181,17 @@ class FederationHandler(BaseHandler):

        self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages

    async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None:
    async def on_receive_pdu(
        self, origin: str, pdu: EventBase, sent_to_us_directly: bool = False
    ) -> None:
        """Process a PDU received via a federation /send/ transaction, or
        via backfill of missing prev_events

        Args:
            origin (str): server which initiated the /send/ transaction. Will
            origin: server which initiated the /send/ transaction. Will
                be used to fetch missing events or state.
            pdu (FrozenEvent): received PDU
            sent_to_us_directly (bool): True if this event was pushed to us; False if
            pdu: received PDU
            sent_to_us_directly: True if this event was pushed to us; False if
                we pulled it as the result of a missing prev_event.
        """

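This hunk is representative of the whole commit: parameter types move out of the `Args:` docstring and into the signature, so the checker enforces what the docs used to merely state. A before/after sketch with a hypothetical handler function, not Synapse's:

    # Before the change, types lived only in the docstring (unchecked);
    # after it, the signature enforces them.
    async def on_event(origin: str, pdu: dict, pushed: bool = False) -> None:
        """Process one received event.

        Args:
            origin: server which sent the event
            pdu: received event
            pushed: True if it was pushed to us; False if we pulled it
        """
        ...
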
@@ -411,13 +423,15 @@ class FederationHandler(BaseHandler):

        await self._process_received_pdu(origin, pdu, state=state)

    async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth):
    async def _get_missing_events_for_pdu(
        self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
    ) -> None:
        """
        Args:
            origin (str): Origin of the pdu. Will be called to get the missing events
            origin: Origin of the pdu. Will be called to get the missing events
            pdu: received pdu
            prevs (set(str)): List of event ids which we are missing
            min_depth (int): Minimum depth of events to return.
            prevs: List of event ids which we are missing
            min_depth: Minimum depth of events to return.
        """

        room_id = pdu.room_id

@@ -778,7 +792,7 @@ class FederationHandler(BaseHandler):
        origin: str,
        event: EventBase,
        state: Optional[Iterable[EventBase]],
    ):
    ) -> None:
        """Called when we have a new pdu. We need to do auth checks and put it
        through the StateHandler.

@@ -887,7 +901,9 @@ class FederationHandler(BaseHandler):
                logger.exception("Failed to resync device for %s", sender)

    @log_function
    async def backfill(self, dest, room_id, limit, extremities):
    async def backfill(
        self, dest: str, room_id: str, limit: int, extremities: List[str]
    ) -> List[EventBase]:
        """Trigger a backfill request to `dest` for the given `room_id`

        This will attempt to get more events from the remote. If the other side

@@ -1142,16 +1158,15 @@ class FederationHandler(BaseHandler):

        curr_state = await self.state_handler.get_current_state(room_id)

        def get_domains_from_state(state):
        def get_domains_from_state(state: StateMap[EventBase]) -> List[Tuple[str, int]]:
            """Get joined domains from state

            Args:
                state (dict[tuple, FrozenEvent]): State map from type/state
                    key to event.
                state: State map from type/state key to event.

            Returns:
                list[tuple[str, int]]: Returns a list of servers with the
                    lowest depth of their joins. Sorted by lowest depth first.
                Returns a list of servers with the lowest depth of their joins.
                Sorted by lowest depth first.
            """
            joined_users = [
                (state_key, int(event.depth))

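`get_domains_from_state` now advertises exactly what it returns: `(domain, depth)` pairs sorted so the earliest-joined servers come first. A standalone sketch of that computation over a simplified state map (event objects reduced to a bare join depth; not Synapse's actual types):

    from typing import Dict, List, Tuple

    def get_domains_from_state(
        state: Dict[Tuple[str, str], int],  # (event_type, state_key) -> join depth
    ) -> List[Tuple[str, int]]:
        """Return (server domain, lowest join depth), sorted by depth."""
        joined = [
            (state_key.split(":", 1)[1], depth)
            for (etype, state_key), depth in state.items()
            if etype == "m.room.member"
        ]
        lowest: Dict[str, int] = {}
        for domain, depth in joined:
            lowest[domain] = min(depth, lowest.get(domain, depth))
        return sorted(lowest.items(), key=lambda pair: pair[1])

    # {("m.room.member", "@a:one.example"): 7, ("m.room.member", "@b:two.example"): 3}
    # -> [("two.example", 3), ("one.example", 7)]
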
@@ -1179,7 +1194,7 @@ class FederationHandler(BaseHandler):
            domain for domain, depth in curr_domains if domain != self.server_name
        ]

        async def try_backfill(domains):
        async def try_backfill(domains: List[str]) -> bool:
            # TODO: Should we try multiple of these at a time?
            for dom in domains:
                try:

@@ -1258,21 +1273,25 @@ class FederationHandler(BaseHandler):
        }

        for e_id, _ in sorted_extremeties_tuple:
            likely_domains = get_domains_from_state(states[e_id])
            likely_extremeties_domains = get_domains_from_state(states[e_id])

            success = await try_backfill(
                [dom for dom, _ in likely_domains if dom not in tried_domains]
                [
                    dom
                    for dom, _ in likely_extremeties_domains
                    if dom not in tried_domains
                ]
            )
            if success:
                return True

            tried_domains.update(dom for dom, _ in likely_domains)
            tried_domains.update(dom for dom, _ in likely_extremeties_domains)

        return False

    async def _get_events_and_persist(
        self, destination: str, room_id: str, events: Iterable[str]
    ):
    ) -> None:
        """Fetch the given events from a server, and persist them as outliers.

        This function *does not* recursively get missing auth events of the

@@ -1348,7 +1367,7 @@ class FederationHandler(BaseHandler):
            event_infos,
        )

    def _sanity_check_event(self, ev):
    def _sanity_check_event(self, ev: EventBase) -> None:
        """
        Do some early sanity checks of a received event

@@ -1357,9 +1376,7 @@ class FederationHandler(BaseHandler):
        or cascade of event fetches.

        Args:
            ev (synapse.events.EventBase): event to be checked

        Returns: None
            ev: event to be checked

        Raises:
            SynapseError if the event does not pass muster

@@ -1380,7 +1397,7 @@ class FederationHandler(BaseHandler):
            )
            raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")

    async def send_invite(self, target_host, event):
    async def send_invite(self, target_host: str, event: EventBase) -> EventBase:
        """Sends the invite to the remote server for signing.

        Invites must be signed by the invitee's server before distribution.

@@ -1528,12 +1545,13 @@ class FederationHandler(BaseHandler):

        run_in_background(self._handle_queued_pdus, room_queue)

    async def _handle_queued_pdus(self, room_queue):
    async def _handle_queued_pdus(
        self, room_queue: List[Tuple[EventBase, str]]
    ) -> None:
        """Process PDUs which got queued up while we were busy send_joining.

        Args:
            room_queue (list[FrozenEvent, str]): list of PDUs to be processed
                and the servers that sent them
            room_queue: list of PDUs to be processed and the servers that sent them
        """
        for p, origin in room_queue:
            try:

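`_handle_queued_pdus` now declares its queue as `List[Tuple[EventBase, str]]`: pairs of a PDU and the server that sent it. A hedged sketch of draining such a queue, where an error on one PDU does not abort the rest (simplified stand-ins for Synapse's types; `process_pdu` is a hypothetical helper):

    import logging
    from typing import List, Tuple

    logger = logging.getLogger(__name__)

    async def process_pdu(origin: str, pdu: dict) -> None:
        """Stand-in for the real per-PDU processing."""
        ...

    async def handle_queued_pdus(room_queue: List[Tuple[dict, str]]) -> None:
        # Each entry pairs a queued PDU with the origin server that sent it.
        for pdu, origin in room_queue:
            try:
                await process_pdu(origin, pdu)
            except Exception:
                # One bad PDU should not stop the rest of the queue draining.
                logger.exception("Error handling queued PDU from %s", origin)
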
@@ -1612,7 +1630,7 @@ class FederationHandler(BaseHandler):

        return event

    async def on_send_join_request(self, origin, pdu):
    async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict:
        """We have received a join event for a room. Fully process it and
        respond with the current state and auth chains.
        """

@@ -1668,7 +1686,7 @@ class FederationHandler(BaseHandler):

    async def on_invite_request(
        self, origin: str, event: EventBase, room_version: RoomVersion
    ):
    ) -> EventBase:
        """We've got an invite event. Process and persist it. Sign it.

        Respond with the now signed event.

@@ -1711,7 +1729,7 @@ class FederationHandler(BaseHandler):
        member_handler = self.hs.get_room_member_handler()
        # We don't rate limit based on room ID, as that should be done by
        # sending server.
        member_handler.ratelimit_invite(None, event.state_key)
        await member_handler.ratelimit_invite(None, None, event.state_key)

        # keep a record of the room version, if we don't yet know it.
        # (this may get overwritten if we later get a different room version in a

@@ -1772,7 +1790,7 @@ class FederationHandler(BaseHandler):
        room_id: str,
        user_id: str,
        membership: str,
        content: JsonDict = {},
        content: JsonDict,
        params: Optional[Dict[str, Union[str, Iterable[str]]]] = None,
    ) -> Tuple[str, EventBase, RoomVersion]:
        (

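Dropping the `content: JsonDict = {}` default above removes a classic Python pitfall: a mutable default is created once, at function definition time, and shared across every call, so one caller's mutation leaks into the next. A minimal demonstration with a hypothetical function:

    from typing import Any, Dict, Optional

    def bad(content: Dict[str, Any] = {}) -> Dict[str, Any]:
        # The same dict object is reused on every call!
        content["calls"] = content.get("calls", 0) + 1
        return content

    def good(content: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        # A fresh dict per call; callers cannot contaminate each other.
        content = {} if content is None else content
        content["calls"] = content.get("calls", 0) + 1
        return content

    assert bad() is bad()        # shared state across calls
    assert good() is not good()  # independent dicts
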
@@ -1841,7 +1859,7 @@ class FederationHandler(BaseHandler):

        return event

    async def on_send_leave_request(self, origin, pdu):
    async def on_send_leave_request(self, origin: str, pdu: EventBase) -> None:
        """ We have received a leave event for a room. Fully process it."""
        event = pdu

@@ -1969,12 +1987,17 @@ class FederationHandler(BaseHandler):
        else:
            return None

    async def get_min_depth_for_context(self, context):
    async def get_min_depth_for_context(self, context: str) -> int:
        return await self.store.get_min_depth(context)

    async def _handle_new_event(
        self, origin, event, state=None, auth_events=None, backfilled=False
    ):
        self,
        origin: str,
        event: EventBase,
        state: Optional[Iterable[EventBase]] = None,
        auth_events: Optional[MutableStateMap[EventBase]] = None,
        backfilled: bool = False,
    ) -> EventContext:
        context = await self._prep_event(
            origin, event, state=state, auth_events=auth_events, backfilled=backfilled
        )

@@ -2280,40 +2303,14 @@ class FederationHandler(BaseHandler):
            logger.warning("Soft-failing %r because %s", event, e)
            event.internal_metadata.soft_failed = True

    async def on_query_auth(
        self, origin, event_id, room_id, remote_auth_chain, rejects, missing
    ):
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        event = await self.store.get_event(event_id, check_room_id=room_id)

        # Just go through and process each event in `remote_auth_chain`. We
        # don't want to fall into the trap of `missing` being wrong.
        for e in remote_auth_chain:
            try:
                await self._handle_new_event(origin, e)
            except AuthError:
                pass

        # Now get the current auth_chain for the event.
        local_auth_chain = await self.store.get_auth_chain(
            room_id, list(event.auth_event_ids()), include_given=True
        )

        # TODO: Check if we would now reject event_id. If so we need to tell
        # everyone.

        ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain)

        logger.debug("on_query_auth returning: %s", ret)

        return ret

    async def on_get_missing_events(
        self, origin, room_id, earliest_events, latest_events, limit
    ):
        self,
        origin: str,
        room_id: str,
        earliest_events: List[str],
        latest_events: List[str],
        limit: int,
    ) -> List[EventBase]:
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

@@ -2617,8 +2614,8 @@ class FederationHandler(BaseHandler):
        assumes that we have already processed all events in remote_auth

        Params:
            local_auth (list)
            remote_auth (list)
            local_auth
            remote_auth

        Returns:
            dict

@@ -2742,8 +2739,8 @@ class FederationHandler(BaseHandler):

    @log_function
    async def exchange_third_party_invite(
        self, sender_user_id, target_user_id, room_id, signed
    ):
        self, sender_user_id: str, target_user_id: str, room_id: str, signed: JsonDict
    ) -> None:
        third_party_invite = {"signed": signed}

        event_dict = {

@@ -2835,8 +2832,12 @@ class FederationHandler(BaseHandler):
        await member_handler.send_membership_event(None, event, context)

    async def add_display_name_to_third_party_invite(
        self, room_version, event_dict, event, context
    ):
        self,
        room_version: str,
        event_dict: JsonDict,
        event: EventBase,
        context: EventContext,
    ) -> Tuple[EventBase, EventContext]:
        key = (
            EventTypes.ThirdPartyInvite,
            event.content["third_party_invite"]["signed"]["token"],

@@ -2872,13 +2873,13 @@ class FederationHandler(BaseHandler):
        EventValidator().validate_new(event, self.config)
        return (event, context)

    async def _check_signature(self, event, context):
    async def _check_signature(self, event: EventBase, context: EventContext) -> None:
        """
        Checks that the signature in the event is consistent with its invite.

        Args:
            event (Event): The m.room.member event to check
            context (EventContext):
            event: The m.room.member event to check
            context:

        Raises:
            AuthError: if signature didn't match any keys, or key has been

@@ -2964,13 +2965,13 @@ class FederationHandler(BaseHandler):

        raise last_exception

    async def _check_key_revocation(self, public_key, url):
    async def _check_key_revocation(self, public_key: str, url: str) -> None:
        """
        Checks whether public_key has been revoked.

        Args:
            public_key (str): base-64 encoded public key.
            url (str): Key revocation URL.
            public_key: base-64 encoded public key.
            url: Key revocation URL.

        Raises:
            AuthError: if they key has been revoked.

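`_check_key_revocation` validates a third-party-invite public key against its revocation URL. The sketch below shows only the general shape of such a check — fetch the URL and reject if the key is no longer reported valid — using `requests` and an assumed `{"valid": ...}` response format, not Synapse's exact wire format or its async HTTP client:

    import requests  # synchronous sketch; Synapse uses its own async client

    class AuthError(Exception):
        """Simplified stand-in for synapse.api.errors.AuthError."""

        def __init__(self, code: int, msg: str) -> None:
            super().__init__(msg)
            self.code = code

    def check_key_revocation(public_key: str, url: str) -> None:
        """Raise AuthError if `public_key` is no longer valid according to `url`."""
        try:
            response = requests.get(url, params={"public_key": public_key}, timeout=10)
            body = response.json()
        except (requests.RequestException, ValueError) as e:
            raise AuthError(502, "Could not reach key validity server") from e
        if not body.get("valid"):
            raise AuthError(403, "Third party certificate was invalid")
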
@@ -21,7 +21,7 @@ from synapse.api.errors import HttpResponseException, RequestSendFailed, Synapse
from synapse.types import GroupID, JsonDict, get_domain_from_id

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
Some files were not shown because too many files have changed in this diff.