Compare commits

199 commits: v1.64.0 ... release-v1
| SHA1 |
|---|
| a5d7d6b40d |
| 80bb098d87 |
| 4b678b20a2 |
| 9d11842562 |
| a7c71686ca |
| ec2fe7bb53 |
| 571f565c1f |
| cdf7fb737b |
| 32fc3b7ba4 |
| 8edf3f66d5 |
| c7b18d9d44 |
| 8cb9261598 |
| 898fef2789 |
| ad7fc8e92f |
| 877bdfa889 |
| 36b184b782 |
| 4fee4a339d |
| 0fdb685c2b |
| 044900af6c |
| 48a5c47a9f |
| 390b7ce946 |
| f48f4dd59e |
| 9d2823ab70 |
| c913e440c0 |
| dcfb006f8a |
| 2318603772 |
| e8130f219b |
| 18e4092801 |
| 0e99f07952 |
| 737968b8e0 |
| 84ddcd7bbf |
| 6f80fe1e1b |
| 838d722eba |
| c01f21d31d |
| d1fb46fbc9 |
| 42b11d5565 |
| 7bc110a19e |
| 90c99fb3aa |
| a160406d24 |
| 5634267d33 |
| ef88bc0775 |
| d48b70fd37 |
| b9924df264 |
| 61b37ddd37 |
| 92c5817e34 |
| 20c76cecb9 |
| 372136d3a8 |
| 4249082eed |
| 31f2a3fbc3 |
| e761e8b475 |
| 8f6aa015a8 |
| 1c26acd815 |
| 303b40b988 |
| 20df96a7a7 |
| 1eea73b413 |
| 682dfcfc0d |
| 51d732db3b |
| 4f6de33f41 |
| c4e29b6908 |
| 5e5c8150d7 |
| 998e211836 |
| 967d7bad6c |
| 978666a088 |
| d092e6f32a |
| a2ce614447 |
| a282446502 |
| 0bf180cbb4 |
| c406d50d2d |
| 1a209efdb2 |
| d58615c82c |
| b93bd95e8a |
| c807b814ae |
| 371db86a86 |
| be4250c7a8 |
| 2e2040c93e |
| b687010f89 |
| ba882c0357 |
| 7af07f9716 |
| a25a37002c |
| f7ddfe17a3 |
| 05c9c7363b |
| bdfff9c36e |
| ca3d19b05f |
| aec87a0f93 |
| ea85a2bf6c |
| 956e015413 |
| 5e7847dc92 |
| 79281f517d |
| f8b9abdcdb |
| d6f5699737 |
| f0b23927fc |
| 37f329c9ad |
| 9385c41ba4 |
| 3dd175b628 |
| 94375f7a91 |
| 06df5d4250 |
| f9f03426de |
| 40e3e68cd7 |
| f3fba4914d |
| 3a245f6cfe |
| 2c42673a9b |
| b251cff819 |
| d64653d062 |
| 22ea51faf9 |
| 84169a82dc |
| 49d04e43df |
| 8bdf2bd31e |
| 82a0752f32 |
| 436e0eb39a |
| ba8938b090 |
| b71b41c7bd |
| d75512d19e |
| c6ee9c0ee4 |
| 088bcb7ecb |
| 2c8cfd6d85 |
| 0a4efbc1dd |
| 5ace5d7b15 |
| 06a2733881 |
| 738c11729a |
| f4ab6a4a96 |
| 14e673ef9d |
| c3516e9dec |
| 5442891cbc |
| d642ce4b32 |
| 73c83c6411 |
| 344a2f767c |
| 19e5d44886 |
| 46bd7f4ed9 |
| f383b9b3ec |
| 434fd82d5f |
| 2c5e2ae898 |
| e825f7366b |
| 953df2ad88 |
| 4390121684 |
| 05bc059e0d |
| 507c1cb330 |
| 7a25b4302c |
| 12abd72497 |
| 2281427175 |
| 51c01d450a |
| 1b09b0832e |
| 1595052b26 |
| 54fb517c28 |
| 3d1b860f90 |
| 5ce2887653 |
| c962f87d6f |
| 827f0669bf |
| 70d3e70009 |
| 0f954466c4 |
| c97042f7ee |
| 7a19995120 |
| ab18441573 |
| e2ed1b7155 |
| 3d2cabf966 |
| 026ac4486c |
| b6a6bb4027 |
| 860fdd9098 |
| ec24813220 |
| 96d92156d0 |
| e9e6aacfbe |
| 41320a0554 |
| 6dd7fa12dc |
| afbdbe0634 |
| 166fafdf8d |
| a91078200d |
| 845732be45 |
| a648a06d52 |
| 92d21faf12 |
| 78a3111c41 |
| 503a95804e |
| 668597214f |
| fb7a2cc4cc |
| d6e94ad9d9 |
| 570bf32bbb |
| 5eccfdfafd |
| ec6758d472 |
| 1c910e2216 |
| 8d317f6da5 |
| a2a867b521 |
| e17e5c97e0 |
| f8e7a9418a |
| 224d792dd7 |
| 05aeeb3a80 |
| b817574be7 |
| 23768ccb4d |
| d548d8f18d |
| 24ef1460f6 |
| 583f22780f |
| 922b771337 |
| 502f075e96 |
| 39be5bc550 |
| 4f3082d6bf |
| bf3115584c |
| 543dc9c93e |
| 6236afc621 |
| 57d334a13d |
| ca3db044a3 |
| 335ebb21cc |
| 8b603299bf |
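The range above can also be inspected locally; a sketch, assuming the `v1.64.0` tag and the `release-v1` branch have been fetched (GitHub's three-dot compare corresponds roughly to the two-dot log below):

```bash
git log --oneline v1.64.0..release-v1   # lists the 199 commits shown above
```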
```diff
@@ -27,10 +27,10 @@ which is under the Unlicense licence.
 {{- . -}}{{- "\n" -}}
 {{- end -}}
 {{- with .TestCases -}}
-{{- /* Failing tests are first */ -}}
+{{- /* Passing tests are first */ -}}
 {{- range . -}}
-{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
-::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
+{{- if eq .Result "PASS" -}}
+::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
 {{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
 {{- with .Coverage -}}
 , coverage: {{ . }}%
@@ -47,7 +47,6 @@ which is under the Unlicense licence.
 {{- end -}}
 {{- end -}}
 
-
 {{- /* Then skipped tests are second */ -}}
 {{- range . -}}
 {{- if eq .Result "SKIP" -}}
@@ -68,11 +67,10 @@ which is under the Unlicense licence.
 {{- end -}}
 {{- end -}}
 
-
-{{- /* Then passing tests are last */ -}}
+{{- /* and failing tests are last */ -}}
 {{- range . -}}
-{{- if eq .Result "PASS" -}}
-::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
+{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
+::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
 {{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
 {{- with .Coverage -}}
 , coverage: {{ . }}%
```
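For context, the `::group::`/`::endgroup::` pairs this template emits are GitHub Actions log-grouping commands (each group renders collapsed in the CI log), and the `\033[0;31m`/`\033[0;32m` escapes are ordinary ANSI red/green colour codes. A minimal sketch of the raw output for one failing test, with an illustrative test name:

```bash
printf '::group::\033[0;31m❌ TestExample\n'   # collapsed group header, red cross in the UI
printf 'detailed test output, shown when the group is expanded\n'
printf '::endgroup::\n'
```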
**.ci/scripts/calculate_jobs.py** (new executable file, 128 lines):
```python
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Calculate the trial jobs to run based on if we're in a PR or not.

import json
import os

IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

# First calculate the various trial jobs.
#
# For each type of test we only run on Py3.7 on PRs

trial_sqlite_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "all",
    }
]

if not IS_PR:
    trial_sqlite_tests.extend(
        {
            "python-version": version,
            "database": "sqlite",
            "extras": "all",
        }
        for version in ("3.8", "3.9", "3.10")
    )


trial_postgres_tests = [
    {
        "python-version": "3.7",
        "database": "postgres",
        "postgres-version": "10",
        "extras": "all",
    }
]

if not IS_PR:
    trial_postgres_tests.append(
        {
            "python-version": "3.10",
            "database": "postgres",
            "postgres-version": "14",
            "extras": "all",
        }
    )

trial_no_extra_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "",
    }
]

print("::group::Calculated trial jobs")
print(
    json.dumps(
        trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests, indent=4
    )
)
print("::endgroup::")

test_matrix = json.dumps(
    trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
print(f"::set-output name=trial_test_matrix::{test_matrix}")


# First calculate the various sytest jobs.
#
# For each type of test we only run on focal on PRs


sytest_tests = [
    {
        "sytest-tag": "focal",
    },
    {
        "sytest-tag": "focal",
        "postgres": "postgres",
    },
    {
        "sytest-tag": "focal",
        "postgres": "multi-postgres",
        "workers": "workers",
    },
]

if not IS_PR:
    sytest_tests.extend(
        [
            {
                "sytest-tag": "testing",
                "postgres": "postgres",
            },
            {
                "sytest-tag": "buster",
                "postgres": "multi-postgres",
                "workers": "workers",
            },
        ]
    )


print("::group::Calculated sytest jobs")
print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")

test_matrix = json.dumps(sytest_tests)
print(f"::set-output name=sytest_test_matrix::{test_matrix}")
```
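Because the script's only input is the `GITHUB_REF` environment variable, it can be dry-run locally to preview both matrices; a sketch with illustrative ref values:

```bash
# PR build: only the Python 3.7 trial jobs and the focal sytest jobs are emitted
GITHUB_REF="refs/pull/13713/merge" python .ci/scripts/calculate_jobs.py

# Non-PR build (e.g. a push to develop): the full matrices are emitted
GITHUB_REF="refs/heads/develop" python .ci/scripts/calculate_jobs.py
```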
**.ci/scripts/gotestfmt** (new executable file, 21 lines):
```bash
#!/bin/bash
#
# wraps `gotestfmt`, hiding output from successful packages unless
# all tests passed.

set -o pipefail
set -e

# tee the test results to a log, whilst also piping them into gotestfmt,
# telling it to hide successful results, so that we can clearly see
# unsuccessful results.
tee complement.log | gotestfmt -hide successful-packages

# gotestfmt will exit non-zero if there were any failures, so if we got to this
# point, we must have had a successful result.
echo "All tests successful; showing all test results"

# Pipe the test results back through gotestfmt, showing all results.
# The log file consists of JSON lines giving the test results, interspersed
# with regular stdout lines (including reports of downloaded packages).
grep '^{"Time":' complement.log | gotestfmt
```
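The wrapper is a plain stdin filter: it expects `go test -json`-style output (which `complement.sh -json` produces) and leaves `complement.log` behind in the working directory. A stand-alone invocation might look like the following; the package path is illustrative rather than taken from the CI config:

```bash
go test -json ./tests/... 2>&1 | .ci/scripts/gotestfmt
```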
**.github/workflows/latest_deps.yml** (37 changes, vendored):
```diff
@@ -135,11 +135,42 @@ jobs:
           /logs/**/*.log*
 
 
-  # TODO: run complement (as with twisted trunk, see #12473).
-
-  # open an issue if the build fails, so we know about it.
+  complement:
+    if: "${{ !failure() && !cancelled() }}"
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - arrangement: monolith
+            database: SQLite
+
+          - arrangement: monolith
+            database: Postgres
+
+          - arrangement: workers
+            database: Postgres
+
+    steps:
+      - name: Run actions/checkout@v2 for synapse
+        uses: actions/checkout@v2
+        with:
+          path: synapse
+
+      - name: Prepare Complement's Prerequisites
+        run: synapse/.ci/scripts/setup_complement_prerequisites.sh
+
+      - run: |
+          set -o pipefail
+          TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
+        shell: bash
+        name: Run Complement Tests
+
+  # Open an issue if the build fails, so we know about it.
+  # Only do this if we're not experimenting with this action in a PR.
   open-issue:
-    if: failure()
+    if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
    needs:
       # TODO: should mypy be included here? It feels more brittle than the other two.
       - mypy
```
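The `&& 1 || ''` constructs in the `run` step are the GitHub Actions expression-ternary idiom. For the workers/Postgres matrix entry the command effectively expands as sketched below, while for the monolith/SQLite entry both variables expand to the empty string:

```bash
TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=1 WORKERS=1 \
  COMPLEMENT_DIR=`pwd`/complement \
  synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
```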
**.github/workflows/tests.yml** (128 changes, vendored):
```diff
@@ -53,61 +53,68 @@ jobs:
     env:
       PULL_REQUEST_NUMBER: ${{ github.event.number }}
 
+  lint-pydantic:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+          fetch-depth: 0
+      - uses: matrix-org/setup-python-poetry@v1
+        with:
+          extras: "all"
+      - run: poetry run scripts-dev/check_pydantic_models.py
+
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
-    needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig, check-schema-delta]
+    needs: [lint, lint-crlf, lint-newsfile, lint-pydantic, check-sampleconfig, check-schema-delta]
     runs-on: ubuntu-latest
     steps:
       - run: "true"
 
-  trial:
+  calculate-test-jobs:
     if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
     needs: linting-done
     runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - id: get-matrix
+        run: .ci/scripts/calculate_jobs.py
+    outputs:
+      trial_test_matrix: ${{ steps.get-matrix.outputs.trial_test_matrix }}
+      sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }}
+
+  trial:
+    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
+    needs: calculate-test-jobs
+    runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.7", "3.8", "3.9", "3.10"]
-        database: ["sqlite"]
-        extras: ["all"]
-        include:
-          # Newest Python without optional deps
-          - python-version: "3.10"
-            extras: ""
-
-          # Oldest Python with PostgreSQL
-          - python-version: "3.7"
-            database: "postgres"
-            postgres-version: "10"
-            extras: "all"
-
-          # Newest Python with newest PostgreSQL
-          - python-version: "3.10"
-            database: "postgres"
-            postgres-version: "14"
-            extras: "all"
+        job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
 
     steps:
       - uses: actions/checkout@v2
       - run: sudo apt-get -qq install xmlsec1
-      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
-        if: ${{ matrix.postgres-version }}
+      - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
+        if: ${{ matrix.job.postgres-version }}
         run: |
           docker run -d -p 5432:5432 \
             -e POSTGRES_PASSWORD=postgres \
             -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
-            postgres:${{ matrix.postgres-version }}
+            postgres:${{ matrix.job.postgres-version }}
       - uses: matrix-org/setup-python-poetry@v1
         with:
-          python-version: ${{ matrix.python-version }}
-          extras: ${{ matrix.extras }}
+          python-version: ${{ matrix.job.python-version }}
+          extras: ${{ matrix.job.extras }}
       - name: Await PostgreSQL
-        if: ${{ matrix.postgres-version }}
+        if: ${{ matrix.job.postgres-version }}
         timeout-minutes: 2
         run: until pg_isready -h localhost; do sleep 1; done
       - run: poetry run trial --jobs=2 tests
         env:
-          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
+          SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
           SYNAPSE_POSTGRES_HOST: localhost
           SYNAPSE_POSTGRES_USER: postgres
           SYNAPSE_POSTGRES_PASSWORD: postgres
@@ -186,45 +193,24 @@ jobs:
 
   sytest:
     if: ${{ !failure() && !cancelled() }}
-    needs: linting-done
+    needs: calculate-test-jobs
     runs-on: ubuntu-latest
     container:
-      image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
+      image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }}
       volumes:
         - ${{ github.workspace }}:/src
       env:
         SYTEST_BRANCH: ${{ github.head_ref }}
-        POSTGRES: ${{ matrix.postgres && 1}}
-        MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
-        WORKERS: ${{ matrix.workers && 1 }}
-        REDIS: ${{ matrix.redis && 1 }}
-        BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
+        POSTGRES: ${{ matrix.job.postgres && 1}}
+        MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
+        WORKERS: ${{ matrix.job.workers && 1 }}
+        BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
         TOP: ${{ github.workspace }}
 
     strategy:
       fail-fast: false
       matrix:
-        include:
-          - sytest-tag: focal
-
-          - sytest-tag: focal
-            postgres: postgres
-
-          - sytest-tag: testing
-            postgres: postgres
-
-          - sytest-tag: focal
-            postgres: multi-postgres
-            workers: workers
-
-          - sytest-tag: buster
-            postgres: multi-postgres
-            workers: workers
-
-          - sytest-tag: buster
-            postgres: postgres
-            workers: workers
-            redis: redis
+        job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
 
     steps:
       - uses: actions/checkout@v2
@@ -240,7 +226,7 @@ jobs:
         uses: actions/upload-artifact@v2
         if: ${{ always() }}
         with:
-          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
+          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
           path: |
             /logs/results.tap
             /logs/**/*.log*
@@ -271,7 +257,6 @@ jobs:
       - run: sudo apt-get -qq install xmlsec1
       - uses: matrix-org/setup-python-poetry@v1
         with:
-          python-version: ${{ matrix.python-version }}
           extras: "postgres"
       - run: .ci/scripts/test_export_data_command.sh
 
@@ -328,29 +313,8 @@ jobs:
           - arrangement: monolith
             database: Postgres
 
-    steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
-        with:
-          path: synapse
-
-      - name: Prepare Complement's Prerequisites
-        run: synapse/.ci/scripts/setup_complement_prerequisites.sh
-
-      - run: |
-          set -o pipefail
-          POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
-        shell: bash
-        name: Run Complement Tests
-
-  # XXX When complement with workers is stable, move this back into the standard
-  # "complement" matrix above.
-  #
-  # See https://github.com/matrix-org/synapse/issues/13161
-  complement-workers:
-    if: "${{ !failure() && !cancelled() }}"
-    needs: linting-done
-    runs-on: ubuntu-latest
+          - arrangement: workers
+            database: Postgres
 
     steps:
       - name: Run actions/checkout@v2 for synapse
@@ -363,7 +327,7 @@ jobs:
 
       - run: |
           set -o pipefail
-          POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+          POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
         shell: bash
         name: Run Complement Tests
```
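The matrix plumbing above has three stages: `calculate-test-jobs` runs the Python script, whose `::set-output` workflow command turns the JSON into a step output; the job republishes it under `outputs:`; and `trial`/`sytest` expand it with `fromJson(...)`, so each JSON object becomes one `matrix.job`. A hand-written equivalent of what the script prints (a single-entry matrix for brevity):

```bash
echo '::set-output name=trial_test_matrix::[{"python-version": "3.7", "database": "sqlite", "extras": "all"}]'
```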
**.github/workflows/triage-incoming.yml** (new file, 28 lines, vendored):
```yaml
name: Move new issues into the issue triage board

on:
  issues:
    types: [ opened ]

jobs:
  add_new_issues:
    name: Add new issues to the triage board
    runs-on: ubuntu-latest
    steps:
      - uses: octokit/graphql-action@v2.x
        id: add_to_project
        with:
          headers: '{"GraphQL-Features": "projects_next_graphql"}'
          query: |
            mutation add_to_project($projectid:ID!,$contentid:ID!) {
              addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) {
                item {
                  id
                }
              }
            }
          projectid: ${{ env.PROJECT_ID }}
          contentid: ${{ github.event.issue.node_id }}
        env:
          PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
          GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
```
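For debugging, roughly the same mutation can be issued by hand with the GitHub CLI; a sketch assuming a token with project permissions, where the project ID is the one hard-coded in the workflow and the issue node ID is a placeholder:

```bash
gh api graphql \
  -H 'GraphQL-Features: projects_next_graphql' \
  -f query='mutation add_to_project($projectid: ID!, $contentid: ID!) {
    addProjectV2ItemById(input: {projectId: $projectid, contentId: $contentid}) {
      item { id }
    }
  }' \
  -f projectid="PVT_kwDOAIB0Bs4AFDdZ" \
  -f contentid="<issue node id>"
```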
**.github/workflows/triage_labelled.yml** (new file, 44 lines, vendored):
```yaml
name: Move labelled issues to correct projects

on:
  issues:
    types: [ labeled ]

jobs:
  move_needs_info:
    name: Move X-Needs-Info on the triage board
    runs-on: ubuntu-latest
    if: >
      contains(github.event.issue.labels.*.name, 'X-Needs-Info')
    steps:
      - uses: octokit/graphql-action@v2.x
        id: add_to_project
        with:
          headers: '{"GraphQL-Features": "projects_next_graphql"}'
          query: |
            mutation {
              updateProjectV2ItemFieldValue(
                input: {
                  projectId: $projectid
                  itemId: $contentid
                  fieldId: $fieldid
                  value: {
                    singleSelectOptionId: "Todo"
                  }
                }
              ) {
                projectV2Item {
                  id
                }
              }
            }

          projectid: ${{ env.PROJECT_ID }}
          contentid: ${{ github.event.issue.node_id }}
          fieldid: ${{ env.FIELD_ID }}
          optionid: ${{ env.OPTION_ID }}
        env:
          PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
          GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
          FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
          OPTION_ID: "ba22e43c"
```
**.github/workflows/twisted_trunk.yml** (2 changes, vendored):
```diff
@@ -137,7 +137,7 @@ jobs:
 
       - run: |
           set -o pipefail
-          TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+          TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
         shell: bash
         name: Run Complement Tests
```
**CHANGES.md** (319 changes):
`@@ -1,3 +1,308 @@`

Synapse 1.67.0 (2022-09-13)
===========================

This release removes support for the deprecated direct TCP replication
configuration for workers. Server admins should use Redis instead. See the
[upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).

The minimum version of `poetry` supported for managing source checkouts is now
1.2.0.

**Notice:** from the next major release (1.68.0), installing Synapse from a source
checkout will require a recent Rust compiler. Those using packages or
`pip install matrix-synapse` will not be affected. See the [upgrade
notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).

**Notice:** from the next major release (1.68.0), running Synapse with a SQLite
database will require SQLite version 3.27.0 or higher. (The [current minimum
version is SQLite 3.22.0](https://github.com/matrix-org/synapse/blob/release-v1.67/synapse/storage/engines/sqlite.py#L69-L78).)
See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670) for more details.

No significant changes since 1.67.0rc1.
Synapse 1.67.0rc1 (2022-09-06)
==============================

Features
--------

- Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. ([\#13614](https://github.com/matrix-org/synapse/issues/13614))
- Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. ([\#13615](https://github.com/matrix-org/synapse/issues/13615))
- Improve performance of sending messages in rooms with thousands of local users. ([\#13634](https://github.com/matrix-org/synapse/issues/13634))


Bugfixes
--------

- Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a Sqlite database. ([\#13509](https://github.com/matrix-org/synapse/issues/13509))
- Fix a bug where a user could not `/forget` a room after its last member had left. ([\#13546](https://github.com/matrix-org/synapse/issues/13546))
- Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. ([\#13583](https://github.com/matrix-org/synapse/issues/13583))
- Fix the current stream position being loaded behind the actual position. ([\#13585](https://github.com/matrix-org/synapse/issues/13585))
- Fix a longstanding bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. ([\#13616](https://github.com/matrix-org/synapse/issues/13616))
- Fix the running of [MSC1763](https://github.com/matrix-org/matrix-spec-proposals/pull/1763) retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632))
- Fix a long-standing bug where downloaded media for URL previews was not deleted while database background updates were running. ([\#13657](https://github.com/matrix-org/synapse/issues/13657))
- Fix the [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658))
- Fix a bug introduced in v1.22.0 where media plugins could wedge if clients disconnected early. ([\#13660](https://github.com/matrix-org/synapse/issues/13660))
- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. ([\#13683](https://github.com/matrix-org/synapse/issues/13683))
- Fix a bug introduced in Synapse v1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))


Updates to the Docker image
---------------------------

- Update docker image to use a stable version of poetry. ([\#13688](https://github.com/matrix-org/synapse/issues/13688))


Improved Documentation
----------------------

- Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. ([\#13602](https://github.com/matrix-org/synapse/issues/13602))
- Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. ([\#13617](https://github.com/matrix-org/synapse/issues/13617))
- Improve documentation around user registration. ([\#13640](https://github.com/matrix-org/synapse/issues/13640))
- Remove documentation of the legacy `frontend_proxy` worker app. ([\#13645](https://github.com/matrix-org/synapse/issues/13645))
- Clarify documentation that HTTP replication traffic can be protected with a shared secret. ([\#13656](https://github.com/matrix-org/synapse/issues/13656))
- Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. ([\#13665](https://github.com/matrix-org/synapse/issues/13665))
- Update docs to make enabling metrics clearer. ([\#13678](https://github.com/matrix-org/synapse/issues/13678))
- Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. ([\#13701](https://github.com/matrix-org/synapse/issues/13701))


Deprecations and Removals
-------------------------

- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241))
- Remove the redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569))
- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse v1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692))


Internal Changes
----------------

- Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. ([\#13483](https://github.com/matrix-org/synapse/issues/13483))
- Add an experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13540](https://github.com/matrix-org/synapse/issues/13540))
- Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). ([\#13573](https://github.com/matrix-org/synapse/issues/13573), [\#13600](https://github.com/matrix-org/synapse/issues/13600))
- Optimize how Synapse calculates domains to fetch from during backfill. ([\#13575](https://github.com/matrix-org/synapse/issues/13575))
- Comment about a better future where we can get the state diff between two events. ([\#13586](https://github.com/matrix-org/synapse/issues/13586))
- Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. ([\#13588](https://github.com/matrix-org/synapse/issues/13588))
- Improve performance of `@cachedList`. ([\#13591](https://github.com/matrix-org/synapse/issues/13591))
- Minor speed-up of fetching large numbers of push rules. ([\#13592](https://github.com/matrix-org/synapse/issues/13592))
- Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13597](https://github.com/matrix-org/synapse/issues/13597))
- Rename `event_map` to `unpersisted_events` when computing the auth differences. ([\#13603](https://github.com/matrix-org/synapse/issues/13603))
- Refactor `get_users_in_room(room_id)` mis-use with a dedicated `get_current_hosts_in_room(room_id)` function. ([\#13605](https://github.com/matrix-org/synapse/issues/13605))
- Use a dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. ([\#13606](https://github.com/matrix-org/synapse/issues/13606))
- Refactor `get_users_in_room(room_id)` mis-use to look up a single local user with a dedicated `check_local_user_in_room(...)` function. ([\#13608](https://github.com/matrix-org/synapse/issues/13608))
- Drop the unused column `application_services_state.last_txn`. ([\#13627](https://github.com/matrix-org/synapse/issues/13627))
- Improve readability of Complement CI logs by printing failure results last. ([\#13639](https://github.com/matrix-org/synapse/issues/13639))
- Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. ([\#13662](https://github.com/matrix-org/synapse/issues/13662))
- Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone-home stats. ([\#13671](https://github.com/matrix-org/synapse/issues/13671))
- Add some logging to help track down #13444. ([\#13679](https://github.com/matrix-org/synapse/issues/13679))
- Update the poetry lock file for v1.2.0. ([\#13689](https://github.com/matrix-org/synapse/issues/13689))
- Add a cache to `is_partial_state_room`. ([\#13693](https://github.com/matrix-org/synapse/issues/13693))
- Update the Grafana dashboard that is included with Synapse in the `contrib` directory. ([\#13697](https://github.com/matrix-org/synapse/issues/13697))
- Only run trial CI on all Python versions on non-PRs. ([\#13698](https://github.com/matrix-org/synapse/issues/13698))
- Fix typechecking with the latest types-jsonschema. ([\#13712](https://github.com/matrix-org/synapse/issues/13712))
- Reduce the number of CI checks we run for PRs. ([\#13713](https://github.com/matrix-org/synapse/issues/13713))
Synapse 1.66.0 (2022-08-31)
===========================

No significant changes since 1.66.0rc2.

This release removes the ability for homeservers to delegate email ownership
verification and password reset confirmation to identity servers. This removal
was originally planned for Synapse 1.64, but was later deferred until now. See
the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.

Deployments with multiple workers should note that the direct TCP replication
configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse
v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners)
type (not to be confused with the `replication` resource on the `http` listener
type) and the `worker_replication_port` config option will be removed.

To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration),
then remove the TCP `replication` listener from the config of the master and
`worker_replication_port` from the worker config. Note that an HTTP listener with a
`replication` resource is still required. See the
[worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html)
for more details.
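A minimal sketch of that migration, assuming the shared-configuration key described in the linked worker documentation (illustrative, not an official snippet; adjust paths and Redis settings for your deployment):

```bash
# 1. Enable Redis-based replication in the shared configuration:
cat >> homeserver.yaml <<'EOF'
redis:
  enabled: true
EOF

# 2. Then delete the TCP `replication` listener from the main process config and
#    `worker_replication_port` from each worker's config; keep the HTTP listener
#    with the `replication` resource.
```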
Synapse 1.66.0rc2 (2022-08-30)
==============================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.66.0rc1 where the new rate limit metrics were misreported (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). ([\#13649](https://github.com/matrix-org/synapse/issues/13649))


Synapse 1.66.0rc1 (2022-08-23)
==============================

Features
--------

- Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). ([\#13188](https://github.com/matrix-org/synapse/issues/13188), [\#13563](https://github.com/matrix-org/synapse/issues/13563))
- Add forgotten status to the [Room Details Admin API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#room-details-api). ([\#13503](https://github.com/matrix-org/synapse/issues/13503))
- Add an experimental implementation for [MSC3852 (Expose user agents on `Device`)](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
- Add the `org.matrix.msc2716v4` experimental room version with updated content fields. Part of [MSC2716 (Importing history)](https://github.com/matrix-org/matrix-spec-proposals/pull/2716). ([\#13551](https://github.com/matrix-org/synapse/issues/13551))
- Add support for compression to federation responses. ([\#13537](https://github.com/matrix-org/synapse/issues/13537))
- Improve performance of sending messages in rooms with thousands of local users. ([\#13522](https://github.com/matrix-org/synapse/issues/13522), [\#13547](https://github.com/matrix-org/synapse/issues/13547))


Bugfixes
--------

- Faster room joins: make `/joined_members` block whilst the room is partially stated. ([\#13514](https://github.com/matrix-org/synapse/issues/13514))
- Fix a bug introduced in Synapse 1.21.0 where the [`/event_reports` Admin API](https://matrix-org.github.io/synapse/develop/admin_api/event_reports.html) could return a total count which was larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525))
- Fix a bug introduced in Synapse 1.52.0 where sending server notices would fail if `max_avatar_size` or `allowed_avatar_mimetypes` was set and `system_mxid_avatar_url` was not. ([\#13566](https://github.com/matrix-org/synapse/issues/13566))
- Fix a bug where the `opentracing.force_tracing_for_users` config option would not apply to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574))


Improved Documentation
----------------------

- Add an `openssl` example for generating the registration HMAC digest. ([\#13472](https://github.com/matrix-org/synapse/issues/13472))
- Tidy up Synapse's README. ([\#13491](https://github.com/matrix-org/synapse/issues/13491))
- Document that event purging related to the `redaction_retention_period` config option is executed only every 5 minutes. ([\#13492](https://github.com/matrix-org/synapse/issues/13492))
- Add a warning to the retention documentation regarding the possibility of database corruption. ([\#13497](https://github.com/matrix-org/synapse/issues/13497))
- Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. ([\#13515](https://github.com/matrix-org/synapse/issues/13515))
- Add missing links in the `user_consent` section of the configuration manual. ([\#13536](https://github.com/matrix-org/synapse/issues/13536))
- Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). ([\#13538](https://github.com/matrix-org/synapse/issues/13538))


Deprecations and Removals
-------------------------

- Remove the ability for homeservers to delegate email ownership verification
  and password reset confirmation to identity servers. See the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.


Internal Changes
----------------

### Faster room joins

- Update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459))
- Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477))
- Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531))

### Metrics and tracing

- Allow use of both `@trace` and `@tag_args` stacked on the same function. ([\#13453](https://github.com/matrix-org/synapse/issues/13453))
- Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. ([\#13489](https://github.com/matrix-org/synapse/issues/13489))
- Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. ([\#13499](https://github.com/matrix-org/synapse/issues/13499), [\#13554](https://github.com/matrix-org/synapse/issues/13554))
- Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). ([\#13533](https://github.com/matrix-org/synapse/issues/13533))
- Add metrics to track how the rate limiter is affecting requests (sleep/reject). ([\#13534](https://github.com/matrix-org/synapse/issues/13534), [\#13541](https://github.com/matrix-org/synapse/issues/13541))
- Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). ([\#13535](https://github.com/matrix-org/synapse/issues/13535), [\#13584](https://github.com/matrix-org/synapse/issues/13584))
- Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). ([\#13544](https://github.com/matrix-org/synapse/issues/13544))
- Update metrics to track `/messages` response time by room size. ([\#13545](https://github.com/matrix-org/synapse/issues/13545))

### Everything else

- Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. ([\#13024](https://github.com/matrix-org/synapse/issues/13024))
- Clean up tests for notifications. ([\#13471](https://github.com/matrix-org/synapse/issues/13471))
- Add some miscellaneous comments to document sync, especially around `compute_state_delta`. ([\#13474](https://github.com/matrix-org/synapse/issues/13474))
- Use literals in place of `HTTPStatus` constants in tests. ([\#13479](https://github.com/matrix-org/synapse/issues/13479), [\#13488](https://github.com/matrix-org/synapse/issues/13488))
- Add comments about how event push actions are rotated. ([\#13485](https://github.com/matrix-org/synapse/issues/13485))
- Modify HTML template content to better support mobile devices' screen sizes. ([\#13493](https://github.com/matrix-org/synapse/issues/13493))
- Add a linter script which will reject non-strict types in Pydantic models. ([\#13502](https://github.com/matrix-org/synapse/issues/13502))
- Reduce the number of tests using legacy TCP replication. ([\#13543](https://github.com/matrix-org/synapse/issues/13543))
- Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
- Make `HomeServerTestCase` load any configured homeserver modules automatically. ([\#13558](https://github.com/matrix-org/synapse/issues/13558))
Synapse 1.65.0 (2022-08-16)
===========================

No significant changes since 1.65.0rc2.


Synapse 1.65.0rc2 (2022-08-11)
==============================

Internal Changes
----------------

- Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. ([\#13365](https://github.com/matrix-org/synapse/issues/13365))' to give more time for clients to update. ([\#13501](https://github.com/matrix-org/synapse/issues/13501))


Synapse 1.65.0rc1 (2022-08-09)
==============================

Features
--------

- Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13273](https://github.com/matrix-org/synapse/issues/13273))
- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in [MSC3848](https://github.com/matrix-org/matrix-spec-proposals/pull/3848). ([\#13343](https://github.com/matrix-org/synapse/issues/13343))
- Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). ([\#13370](https://github.com/matrix-org/synapse/issues/13370))
- Add a new module API method to translate a room alias into a room ID. ([\#13428](https://github.com/matrix-org/synapse/issues/13428))
- Add a new module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429))
- Add remote join capability to the module API's `update_room_membership` method (in a backwards-compatible manner). ([\#13441](https://github.com/matrix-org/synapse/issues/13441))


Bugfixes
--------

- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470))
- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374))
- Fix a bug in the handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392))
- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408))
- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353))
- Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413))
- Faster room joins: fix an error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. ([\#13432](https://github.com/matrix-org/synapse/issues/13432))


Updates to the Docker image
---------------------------

- Make Docker images build on armv7 by installing cryptography dependencies in the 'requirements' stage. Contributed by Jasper Spaans. ([\#13372](https://github.com/matrix-org/synapse/issues/13372))


Improved Documentation
----------------------

- Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s. ([\#11897](https://github.com/matrix-org/synapse/issues/11897))
- Document which HTTP resources support gzip compression. ([\#13221](https://github.com/matrix-org/synapse/issues/13221))
- Add steps describing how to elevate an existing user to administrator by manipulating the database. ([\#13230](https://github.com/matrix-org/synapse/issues/13230))
- Fix the wrong headline for `url_preview_accept_language` in the documentation. ([\#13437](https://github.com/matrix-org/synapse/issues/13437))
- Remove the redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. ([\#13438](https://github.com/matrix-org/synapse/issues/13438))
- Update documentation for the config setting `macaroon_secret_key`. ([\#13443](https://github.com/matrix-org/synapse/issues/13443))
- Update outdated information in the `sso_mapping_providers` documentation. ([\#13449](https://github.com/matrix-org/synapse/issues/13449))
- Fix example code in the module documentation of `password_auth_provider_callbacks`. ([\#13450](https://github.com/matrix-org/synapse/issues/13450))
- Make the configuration for the cache clearer. ([\#13481](https://github.com/matrix-org/synapse/issues/13481))


Internal Changes
----------------

- Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. ([\#12978](https://github.com/matrix-org/synapse/issues/12978))
- Make minor clarifications to the error messages given when we fail to join a room via any server. ([\#13160](https://github.com/matrix-org/synapse/issues/13160))
- Enable Complement CI tests in the 'latest deps' test run. ([\#13213](https://github.com/matrix-org/synapse/issues/13213))
- Fix long-standing bugged logic, which was never hit, in `get_pdu` asking every remote destination even after it finds an event. ([\#13346](https://github.com/matrix-org/synapse/issues/13346))
- Faster room joins: avoid blocking when pulling events with partially missing prev events. ([\#13355](https://github.com/matrix-org/synapse/issues/13355))
- Instrument `/messages` for understandable traces in Jaeger. ([\#13368](https://github.com/matrix-org/synapse/issues/13368))
- Remove an unused argument to `get_relations_for_event`. ([\#13383](https://github.com/matrix-org/synapse/issues/13383))
- Add a `merge-back` command to the release script, which automates merging the correct branches after a release. ([\#13393](https://github.com/matrix-org/synapse/issues/13393))
- Add missing type hints to tests. ([\#13397](https://github.com/matrix-org/synapse/issues/13397))
- Faster Room Joins: don't leave a stuck room partial-state flag if the join fails. ([\#13403](https://github.com/matrix-org/synapse/issues/13403))
- Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. ([\#13404](https://github.com/matrix-org/synapse/issues/13404), [\#13431](https://github.com/matrix-org/synapse/issues/13431))
- Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. ([\#13416](https://github.com/matrix-org/synapse/issues/13416))
- Re-enable running Complement tests against Synapse with workers. ([\#13420](https://github.com/matrix-org/synapse/issues/13420))
- Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13435](https://github.com/matrix-org/synapse/issues/13435))
- Add some tracing to give more insight into local room joins. ([\#13439](https://github.com/matrix-org/synapse/issues/13439))
- Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. ([\#13442](https://github.com/matrix-org/synapse/issues/13442))
- Add some comments about how event push actions are stored. ([\#13445](https://github.com/matrix-org/synapse/issues/13445), [\#13455](https://github.com/matrix-org/synapse/issues/13455))
- Improve rebuild speed for the "synapse-workers" docker image. ([\#13447](https://github.com/matrix-org/synapse/issues/13447))
- Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). ([\#13452](https://github.com/matrix-org/synapse/issues/13452))
- Update the type of `EventContext.rejected`. ([\#13460](https://github.com/matrix-org/synapse/issues/13460))
- Use literals in place of `HTTPStatus` constants in tests. ([\#13463](https://github.com/matrix-org/synapse/issues/13463), [\#13469](https://github.com/matrix-org/synapse/issues/13469))
- Correct a misnamed argument in state res v2 internals. ([\#13467](https://github.com/matrix-org/synapse/issues/13467))
Synapse 1.64.0 (2022-08-02)
===========================

`@@ -211,6 +516,20 @@ No significant changes since 1.62.0rc3.`

Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse.

## Security advisory

The following issue is fixed in 1.62.0.

* [GHSA-jhjh-776m-4765](https://github.com/matrix-org/synapse/security/advisories/GHSA-jhjh-776m-4765) / [CVE-2022-31152](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31152)

  Synapse instances prior to 1.62.0 did not implement the Matrix [event authorization rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules) correctly. An attacker could craft events which would be accepted by Synapse but not a spec-conformant server, potentially causing divergence in the room state between servers.

  Homeservers with federation disabled via the [`federation_domain_whitelist`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_domain_whitelist) config option are unaffected.

  Administrators of homeservers with federation enabled are advised to upgrade to v1.62.0 or higher.

  Fixed by [#13087](https://github.com/matrix-org/synapse/pull/13087) and [#13088](https://github.com/matrix-org/synapse/pull/13088).

Synapse 1.62.0rc3 (2022-07-04)
==============================
**README.rst** (476 changes):
@@ -2,152 +2,70 @@

Synapse |support| |development| |documentation| |license| |pypi| |python|
=========================================================================

Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
maintained by the Matrix.org Foundation. We began rapid development in 2014,
reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
in earnest today.

Briefly, Matrix is an open standard for communications on the internet, supporting
federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
<https://spec.matrix.org/>`_ describes the technical details.

.. contents::

Introduction
============
Installing and configuration
============================

Matrix is an ambitious new ecosystem for open federated Instant Messaging and
VoIP. The basics you need to know to get up and running are:

- Everything in Matrix happens in a room. Rooms are distributed and do not
  exist on any single server. Rooms can be located using convenience aliases
  like ``#matrix:matrix.org`` or ``#test:localhost:8448``.

- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
  you will normally refer to yourself and others using a third party identifier
  (3PID): email address, phone number, etc. rather than manipulating Matrix user IDs)

The overall architecture is::

    client <----> homeserver <=====================> homeserver <----> client
           https://somewhere.org/_matrix       https://elsewhere.net/_matrix

``#matrix:matrix.org`` is the official support room for Matrix, and can be
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
via IRC bridge at irc://irc.libera.chat/matrix.

Synapse is currently in rapid development, but as of version 0.5 we believe it
is sufficiently stable to be run as an internet-facing service for real usage!

About Matrix
============

Matrix specifies a set of pragmatic RESTful HTTP JSON APIs as an open standard,
which handle:

- Creating and managing fully distributed chat rooms with no
  single points of control or failure
- Eventually-consistent cryptographically secure synchronisation of room
  state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
  end-to-end encryption
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
  Facebook accounts to authenticate, identify and discover users on Matrix.
- Placing 1:1 VoIP and Video calls

These APIs are intended to be implemented on a wide range of servers, services
and clients, letting developers build messaging and VoIP functionality on top
of the entirely open Matrix ecosystem rather than using closed or proprietary
solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.

Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
team, written in Python 3/Twisted.

In Matrix, every user runs one or more Matrix clients, which connect through to
a Matrix homeserver. The homeserver stores all their personal chat history and
user account information - much as a mail client connects through to an
IMAP/SMTP server. Just like email, you can either run your own Matrix
homeserver and control and own your own communications and history or use one
hosted by someone else (e.g. matrix.org) - there is no single point of control
or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
etc.

We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.

Thanks for using Matrix!

Support
=======

For support installing or managing Synapse, please join |room|_ (from a matrix.org
account if necessary) and ask questions there. We do not use GitHub issues for
support requests, only for bug reports and feature requests.

Synapse's documentation is `nicely rendered on GitHub Pages <https://matrix-org.github.io/synapse>`_,
with its source available in |docs|_.

.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org

.. |docs| replace:: ``docs``
.. _docs: docs

Synapse Installation
====================
The Synapse documentation describes `how to install Synapse <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_. We recommend using
`Docker images <https://matrix-org.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
<https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages>`_.

.. _federation:

* For details on how to install synapse, see
  `Installation Instructions <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_.
* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
Synapse has a variety of `config options
<https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html>`_
which can be used to customise its behaviour after installation.
There are additional details on how to `configure Synapse for federation here
<https://matrix-org.github.io/synapse/latest/federate.html>`_.

.. _reverse-proxy:

Using a reverse proxy with Synapse
----------------------------------

It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
For information on configuring one, see `the reverse proxy docs
<https://matrix-org.github.io/synapse/latest/reverse_proxy.html>`_.
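
As a rough sketch only (the reverse proxy docs linked above are the
authoritative reference), an nginx ``server`` block forwarding Matrix traffic
to Synapse's default listener might look like the following; the domain and
certificate paths are placeholders::

    server {
        listen 443 ssl;
        server_name matrix.example.com;

        ssl_certificate /path/to/fullchain.pem;
        ssl_certificate_key /path/to/privkey.pem;

        location ~ ^(/_matrix|/_synapse/client) {
            # Forward Matrix client and federation traffic to Synapse,
            # which listens on port 8008 by default.
            proxy_pass http://localhost:8008;
            proxy_set_header X-Forwarded-For $remote_addr;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header Host $host;
        }
    }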

Upgrading an existing Synapse
-----------------------------

The instructions for upgrading Synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of Synapse.

.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html


Connecting to Synapse from a client
===================================
Platform dependencies
---------------------

The easiest way to try out your new Synapse installation is by connecting to it
from a web client.
Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the
`deprecation policy <https://matrix-org.github.io/synapse/latest/deprecation_policy.html>`_
for more details.

Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.

An easy way to get started is to log in or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.

If all goes well you should at least be able to log in, create a room, and
start sending messages.

.. _`client-user-reg`:

Registering a new user from a client
------------------------------------

By default, registration of new users via Matrix clients is disabled. To enable
it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)

Once ``enable_registration`` is set to ``true``, it is possible to register a
user via a Matrix client.

Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
the form of::

    @localpart:my.domain.name

(pronounced "at localpart on my dot domain dot name").

As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.

Security note
=============
-------------

Matrix serves raw, user-supplied data in some APIs -- specifically the `content
repository endpoints`_.

@@ -187,30 +105,76 @@ Following this advice ensures that even if an XSS is found in Synapse, the
impact to other applications will be minimal.


Upgrading an existing Synapse
=============================
Testing a new installation
==========================

The instructions for upgrading synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of synapse.
The easiest way to try out your new Synapse installation is by connecting to it
from a web client.

.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.

.. _reverse-proxy:
An easy way to get started is to log in or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.

Using a reverse proxy with Synapse
==================================
If all goes well you should at least be able to log in, create a room, and
start sending messages.

It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
.. _`client-user-reg`:

For information on configuring one, see `<docs/reverse_proxy.md>`_.
Registering a new user from a client
------------------------------------

By default, registration of new users via Matrix clients is disabled. To enable
it:

1. In the
   `registration config section <https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration>`_
   set ``enable_registration: true`` in ``homeserver.yaml``.
2. Then **either**:

   a. set up a `CAPTCHA <https://matrix-org.github.io/synapse/latest/CAPTCHA_SETUP.html>`_, or
   b. set ``enable_registration_without_verification: true`` in ``homeserver.yaml``.

We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to
the public internet. Without it, anyone can freely register accounts on your homeserver.
This can be exploited by attackers to create spambots targeting the rest of the Matrix
federation.
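
As a brief illustrative sketch of the resulting ``homeserver.yaml`` (the
reCAPTCHA keys are placeholders you would obtain from the CAPTCHA provider):

.. code-block:: yaml

    enable_registration: true

    # Option (a): verify new registrations with a CAPTCHA.
    enable_registration_captcha: true
    recaptcha_public_key: "YOUR_SITE_KEY"
    recaptcha_private_key: "YOUR_SECRET_KEY"

    # Option (b), instead of the above (not recommended on the public
    # internet):
    # enable_registration_without_verification: true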

Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
the form of::

    @localpart:my.domain.name

(pronounced "at localpart on my dot domain dot name").

As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.

Troubleshooting and support
===========================

The `Admin FAQ <https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html>`_
includes tips on dealing with some common problems. For more details, see
`Synapse's wider documentation <https://matrix-org.github.io/synapse/latest/>`_.

For additional support installing or managing Synapse, please ask in the community
support room |room|_ (from a matrix.org account if necessary). We do not use GitHub
issues for support requests, only for bug reports and feature requests.

.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org

.. |docs| replace:: ``docs``
.. _docs: docs

Identity Servers
================
@@ -242,34 +206,15 @@ an email address with your account, or send an invite to another user via their
email address.


Password reset
==============

Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.md#reset-password>`_
or by directly editing the database as shown below.

First calculate the hash of the new password::

    $ ~/synapse/env/bin/hash_password
    Password:
    Confirm password:
    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Then update the ``users`` table in the database::

    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        WHERE name='@test:test.com';


Synapse Development
===================
Development
===========

We welcome contributions to Synapse from the community!
The best place to get started is our
`guide for contributors <https://matrix-org.github.io/synapse/latest/development/contributing_guide.html>`_.
This is part of our larger `documentation <https://matrix-org.github.io/synapse/latest>`_, which includes
information for synapse developers as well as synapse administrators.

information for Synapse developers as well as Synapse administrators.
Developers might be particularly interested in:

* `Synapse's database schema <https://matrix-org.github.io/synapse/latest/development/database_schema.html>`_,
@@ -280,187 +225,6 @@ Alongside all that, join our developer community on Matrix:
`#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!


Quick start
-----------

Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Platform-specific prerequisites <https://matrix-org.github.io/synapse/latest/setup/installation.html#platform-specific-prerequisites>`_.

To check out a synapse for development, clone the git repo into a working
directory of your choice::

    git clone https://github.com/matrix-org/synapse.git
    cd synapse

Synapse has a number of external dependencies. We maintain a fixed development
environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::

    pip install --user pipx
    pipx install poetry

as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
for other installation methods.) Then ask poetry to create a virtual environment
from the project and install Synapse's dependencies::

    poetry install --extras "all test"

This will run a process of downloading and installing all the needed
dependencies into a virtual env.

We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::

    poetry run ./demo/start.sh

(to stop, you can use ``poetry run ./demo/stop.sh``)

See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
for more information.

If you just want to start a single instance of the app and run it directly::

    # Create the homeserver.yaml config once
    poetry run synapse_homeserver \
      --server-name my.domain.name \
      --config-path homeserver.yaml \
      --generate-config \
      --report-stats=[yes|no]

    # Start the app
    poetry run synapse_homeserver --config-path homeserver.yaml


Running the unit tests
----------------------

After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::

    poetry run trial tests

This should end with a 'PASSED' result (note that exact numbers will
differ)::

    Ran 1337 tests in 716.064s

    PASSED (skips=15, successes=1322)

For more tips on running the unit tests, like running a specific test or
to see the logging output, see the `CONTRIBUTING doc <CONTRIBUTING.md#run-the-unit-tests>`_.


Running the Integration Tests
-----------------------------

Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.

Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `SyTest installation
instructions <https://github.com/matrix-org/sytest#installing>`_ for details.


Platform dependencies
=====================

Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the
`<docs/deprecation_policy.md>`_ document for more details.


Troubleshooting
===============

Need help? Join our community support room on Matrix:
`#synapse:matrix.org <https://matrix.to/#/#synapse:matrix.org>`_

Running out of File Handles
---------------------------

If Synapse runs out of file handles, it typically fails badly - live-locking
at 100% CPU, and/or failing to accept new TCP connections (blocking the
connecting client). Matrix currently can legitimately use a lot of file handles,
thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
servers. The first time a server talks in a room it will try to connect
simultaneously to all participating servers, which could exhaust the available
file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
to respond. (We need to improve the routing algorithm to use something better
than a full mesh, but as of March 2019 this hasn't happened yet.)

If you hit this failure mode, we recommend increasing the maximum number of
open file handles to be at least 4096 (assuming a default of 1024 or 256).
This is typically done by editing ``/etc/security/limits.conf``.
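
For example, if Synapse runs as the ``matrix-synapse`` user (a sketch; adjust
the user name and limits to your deployment), you might add::

    #<domain>       <type>  <item>   <value>
    matrix-synapse  soft    nofile   4096
    matrix-synapse  hard    nofile   4096

The new limits take effect on the next login or service restart.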

Separately, Synapse may leak file handles if inbound HTTP requests get stuck
during processing - e.g. blocked behind a lock or talking to a remote server etc.
This is best diagnosed by matching up the 'Received request' and 'Processed request'
log lines and looking for any 'Processed request' lines which take more than
a few seconds to execute. If you see this failure mode, please let us know at
#synapse:matrix.org so we can help debug it.

Help!! Synapse is slow and eats all my RAM/CPU!
-----------------------------------------------

First, ensure you are running the latest version of Synapse, using Python 3
with a PostgreSQL database.

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in the future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory-constrained environments, or increased if performance starts to
degrade.

However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due to
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
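
For example, on a Debian package install, which reads environment variables
from ``/etc/default/matrix-synapse``, you could set::

    SYNAPSE_CACHE_FACTOR=2.0

and then restart Synapse for the change to take effect.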

Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
improvement in overall memory use, and especially in terms of giving back
RAM to the OS. To use it, the library must simply be put in the
LD_PRELOAD environment variable when launching Synapse. On Debian, this
can be done by installing the ``libjemalloc1`` package and adding this
line to ``/etc/default/matrix-synapse``::

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

This can make a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

If you're encountering high CPU use by the Synapse process itself, you
may be affected by a bug with presence tracking that leads to a
massive excess of outgoing federation requests (see `discussion
<https://github.com/matrix-org/synapse/issues/3971>`_). If metrics
indicate that your server is also issuing far more outgoing federation
requests than can be accounted for by your users' activity, this is a
likely cause. The misbehavior can be worked around by setting
the following in the Synapse config file:

.. code-block:: yaml

   presence:
     enabled: false

People can't accept room invitations from me
--------------------------------------------

The typical failure mode here is that you send an invitation to someone
to join a room or direct chat, but when they go to accept it, they get an
error (typically along the lines of "Invalid signature"). They might see
something like the following in their logs::

    2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server <server> with key ed25519:a_EqML: Unable to verify signature for <server>

This is normally caused by a misconfiguration in your reverse-proxy. See
`<docs/reverse_proxy.md>`_ and double-check that your settings are correct.

.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
  :alt: (get support on #synapse:matrix.org)
  :target: https://matrix.to/#/#synapse:matrix.org

12 book.toml
@@ -34,6 +34,14 @@ additional-css = [
    "docs/website_files/table-of-contents.css",
    "docs/website_files/remove-nav-buttons.css",
    "docs/website_files/indent-section-headers.css",
    "docs/website_files/version-picker.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
additional-js = [
    "docs/website_files/table-of-contents.js",
    "docs/website_files/version-picker.js",
    "docs/website_files/version.js",
]
theme = "docs/website_files/theme"

[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"

(File diff suppressed because it is too large.)

2 debian/build_virtualenv (vendored)
@@ -36,7 +36,7 @@ TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0b1
pip install poetry==1.2.0
poetry export \
    --extras all \
    --extras test \

61 debian/changelog (vendored)
@@ -1,3 +1,64 @@
matrix-synapse-py3 (1.67.0) stable; urgency=medium

  * New Synapse release 1.67.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Sep 2022 09:19:56 +0100

matrix-synapse-py3 (1.67.0~rc1) stable; urgency=medium

  [ Erik Johnston ]
  * Use stable poetry 1.2.0 version, rather than a prerelease.

  [ Synapse Packaging team ]
  * New Synapse release 1.67.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Sep 2022 09:01:06 +0100

matrix-synapse-py3 (1.66.0) stable; urgency=medium

  * New Synapse release 1.66.0.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 31 Aug 2022 11:20:17 +0100

matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium

  * Update debhelper to compatibility level 12.
  * Drop the preinst script stopping synapse.
  * Allocate a group for the system user.
  * Change dpkg-statoverride to --force-statoverride-add.

 -- Jörg Behrmann <behrmann@physik.fu-berlin.de>  Tue, 23 Aug 2022 17:17:00 +0100

matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium

  * New Synapse release 1.66.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Aug 2022 12:25:19 +0100

matrix-synapse-py3 (1.66.0~rc1) stable; urgency=medium

  * New Synapse release 1.66.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 23 Aug 2022 09:48:55 +0100

matrix-synapse-py3 (1.65.0) stable; urgency=medium

  * New Synapse release 1.65.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Aug 2022 16:51:26 +0100

matrix-synapse-py3 (1.65.0~rc2) stable; urgency=medium

  * New Synapse release 1.65.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 11 Aug 2022 11:38:18 +0100

matrix-synapse-py3 (1.65.0~rc1) stable; urgency=medium

  * New Synapse release 1.65.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 09 Aug 2022 11:39:29 +0100

matrix-synapse-py3 (1.64.0) stable; urgency=medium

  * New Synapse release 1.64.0.

1 debian/compat (vendored)
@@ -1 +0,0 @@
10

2 debian/control (vendored)
@@ -4,7 +4,7 @@ Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
Build-Depends:
 debhelper (>= 10),
 debhelper-compat (= 12),
 dh-virtualenv (>= 1.1),
 libsystemd-dev,
 libpq-dev,

4 debian/matrix-synapse-py3.postinst (vendored)
@@ -40,12 +40,12 @@ EOF
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update

if ! getent passwd $USER >/dev/null; then
    adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
    adduser --quiet --system --group --no-create-home --home /var/lib/matrix-synapse $USER
fi

for DIR in /var/lib/matrix-synapse /var/log/matrix-synapse /etc/matrix-synapse; do
    if ! dpkg-statoverride --list --quiet $DIR >/dev/null; then
        dpkg-statoverride --force --quiet --update --add $USER nogroup 0755 $DIR
        dpkg-statoverride --force-statoverride-add --quiet --update --add $USER "$(id -gn $USER)" 0755 $DIR
    fi
done

31 debian/matrix-synapse-py3.preinst (vendored)
@@ -1,31 +0,0 @@
#!/bin/sh -e

# Attempt to undo some of the braindamage caused by
# https://github.com/matrix-org/package-synapse-debian/issues/18.
#
# Due to reasons [1], the old python2 matrix-synapse package will not stop the
# service when the package is uninstalled. Our maintainer scripts will do the
# right thing in terms of ensuring the service is enabled and unmasked, but
# then do a `systemctl start matrix-synapse`, which of course does nothing -
# leaving the old (py2) service running.
#
# There should normally be no reason for the service to be running during our
# preinst, so we assume that if it *is* running, it's due to that situation,
# and stop it.
#
# [1] dh_systemd_start doesn't do anything because it sees that there is an
#     init.d script with the same name, so leaves it to dh_installinit.
#
#     dh_installinit doesn't do anything because somebody gave it a --no-start
#     for unknown reasons.

if [ -x /bin/systemctl ]; then
    if /bin/systemctl --quiet is-active -- matrix-synapse; then
        echo >&2 "stopping existing matrix-synapse service"
        /bin/systemctl stop matrix-synapse || true
    fi
fi

#DEBHELPER#

exit 0

2 debian/matrix-synapse.default (vendored)
@@ -1,2 +0,0 @@
# Specify environment variables used when running Synapse
# SYNAPSE_CACHE_FACTOR=0.5 (default)

6 debian/matrix-synapse.service (vendored)
@@ -5,7 +5,6 @@ Description=Synapse Matrix homeserver
Type=notify
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=-/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
ExecReload=/bin/kill -HUP $MAINPID
@@ -13,5 +12,10 @@ Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse

# The environment file is not shipped by default anymore and the below directive
# is for backwards compatibility only. Please use your homeserver.yaml if
# possible.
EnvironmentFile=-/etc/default/matrix-synapse

[Install]
WantedBy=multi-user.target

12 debian/rules (vendored)
@@ -6,15 +6,17 @@
# assume we only have one package
PACKAGE_NAME:=`dh_listpackages`

override_dh_systemd_enable:
	dh_systemd_enable --name=matrix-synapse

override_dh_installinit:
	dh_installinit --name=matrix-synapse
override_dh_installsystemd:
	dh_installsystemd --name=matrix-synapse

# we don't really want to strip the symbols from our object files.
override_dh_strip:

# many libraries pulled from PyPI have allocatable sections after
# non-allocatable ones on which dwz errors out. For those without the issue the
# gains are only marginal
override_dh_dwz:

# dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files
# (executables and shared libs) in the package, and looks for the shared
# libraries that they depend on. It then adds a dependency on the package that

@@ -40,22 +40,14 @@ FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
RUN \
   --mount=type=cache,target=/var/cache/apt,sharing=locked \
   --mount=type=cache,target=/var/lib/apt,sharing=locked \
  apt-get update -qq && apt-get install -yqq git \
  apt-get update -qq && apt-get install -yqq \
    build-essential cargo git libffi-dev libssl-dev \
    && rm -rf /var/lib/apt/lists/*

# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
# We use a specific commit from poetry's master branch instead of our usual 1.1.14,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
# without it, we generate a requirements.txt with incorrect environment markers,
# which causes necessary packages to be omitted when we `pip install`.
#
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
  pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
  pip install --user "poetry==1.2.0"

WORKDIR /synapse

@@ -68,7 +60,18 @@ COPY pyproject.toml poetry.lock /synapse/
# reason, such as when a git repository is used directly as a dependency.
ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION

RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}
# If specified, we won't use the Poetry lockfile.
# Instead, we'll just install what a regular `pip install` would from PyPI.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE

# Export the dependencies, but only if we're actually going to use the Poetry lockfile.
# Otherwise, just create an empty requirements file so that the Dockerfile can
# proceed.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
    /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
  else \
    touch /synapse/requirements.txt; \
  fi

###
### Stage 1: builder
@@ -108,8 +111,17 @@ COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst /synapse/

# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE

# Install the synapse package itself.
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
    pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
  else \
    pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
  fi

###
### Stage 2: runtime

@@ -1,39 +1,62 @@
# syntax=docker/dockerfile:1
# Inherit from the official Synapse docker image

ARG SYNAPSE_VERSION=latest

# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.

FROM debian:bullseye-slim AS deps_base
RUN \
   --mount=type=cache,target=/var/cache/apt,sharing=locked \
   --mount=type=cache,target=/var/lib/apt,sharing=locked \
  apt-get update -qq && \
  DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
    redis-server nginx-light

# Similarly, a base to copy the redis server from.
#
# The redis docker image has fewer dynamic libraries than the debian package,
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc).
FROM redis:6-bullseye AS redis_base

# now build the final image, based on the regular Synapse docker image
FROM matrixdotorg/synapse:$SYNAPSE_VERSION

# Install deps
RUN \
   --mount=type=cache,target=/var/cache/apt,sharing=locked \
   --mount=type=cache,target=/var/lib/apt,sharing=locked \
  apt-get update -qq && \
  DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
    redis-server nginx-light
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install supervisor~=4.2
RUN mkdir -p /etc/supervisor/conf.d

# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install supervisor~=4.2
# Copy over redis and nginx
COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin

# Disable the default nginx sites
RUN rm /etc/nginx/sites-enabled/default
COPY --from=deps_base /usr/sbin/nginx /usr/sbin
COPY --from=deps_base /usr/share/nginx /usr/share/nginx
COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
COPY --from=deps_base /etc/nginx /etc/nginx
RUN rm /etc/nginx/sites-enabled/default
RUN mkdir /var/log/nginx /var/lib/nginx
RUN chown www-data /var/log/nginx /var/lib/nginx

# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/

# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/
# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/

# Expose nginx listener port
EXPOSE 8080/tcp
# Expose nginx listener port
EXPOSE 8080/tcp

# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]

# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
    CMD /bin/sh /healthcheck.sh
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
    CMD /bin/sh /healthcheck.sh

@@ -191,7 +191,7 @@ If you need to build the image from a Synapse checkout, use the following `docker
build` command from the repo's root:

```
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
DOCKER_BUILDKIT=1 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```

You can choose to build a different docker image by changing the value of the `-f` flag to

@@ -19,7 +19,7 @@ username=www-data
autorestart=true

[program:redis]
command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0

@@ -69,6 +69,7 @@
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
  - [Reporting Homeserver Usage Statistics](usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
- [Monthly Active Users](usage/administration/monthly_active_users.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)

@@ -5,9 +5,9 @@ non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
(`registration_shared_secret` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.
([`registration_shared_secret`](../configuration/config_documentation.md#registration_shared_secret)
in the homeserver configuration), and a one-time nonce. If the registration
shared secret is not configured, this API is not enabled.

To fetch the nonce, you need to request one from the API:

@@ -46,7 +46,24 @@ As an example:
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type,
each separated by NULs. For an example of generation in Python:
each separated by NULs.

Here is an easy way to generate the HMAC digest if you have Bash and OpenSSL:

```bash
# Update these values and then paste this code block into a bash terminal
nonce='thisisanonce'
username='pepper_roni'
password='pizza'
admin='admin'
secret='shared_secret'

printf '%s\0%s\0%s\0%s' "$nonce" "$username" "$password" "$admin" |
  openssl sha1 -hmac "$secret" |
  awk '{print $2}'
```

For an example of generation in Python:

```python
import hmac, hashlib
@@ -70,4 +87,4 @@ def generate_mac(nonce, user, password, admin=False, user_type=None):
        mac.update(user_type.encode('utf8'))

    return mac.hexdigest()
```

@@ -302,6 +302,8 @@ The following fields are possible in the JSON response body:
* `state_events` - Total number of state_events of a room. Complexity of the room.
* `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space.
  If the room does not define a type, the value will be `null`.
* `forgotten` - Whether all local users have
  [forgotten](https://spec.matrix.org/latest/client-server-api/#leaving-rooms) the room.

The API is:

@@ -330,10 +332,13 @@ A response body like the following is returned:
  "guest_access": null,
  "history_visibility": "shared",
  "state_events": 93534,
  "room_type": "m.space"
  "room_type": "m.space",
  "forgotten": false
}
```

_Changed in Synapse 1.66:_ Added the `forgotten` key to the response body.

# Room Members API

The Room Members admin API allows server admins to get a list of all members of a room.

@@ -753,6 +753,7 @@ A response body like the following is returned:
      "device_id": "QBUAZIFURK",
      "display_name": "android",
      "last_seen_ip": "1.2.3.4",
      "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
      "last_seen_ts": 1474491775024,
      "user_id": "<user_id>"
    },
@@ -760,6 +761,7 @@ A response body like the following is returned:
      "device_id": "AUIECTSRND",
      "display_name": "ios",
      "last_seen_ip": "1.2.3.5",
      "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
      "last_seen_ts": 1474491775025,
      "user_id": "<user_id>"
    }
@@ -786,6 +788,8 @@ The following fields are returned in the JSON response body:
  Absent if no name has been set.
- `last_seen_ip` - The IP address where this device was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- `last_seen_user_agent` - The user agent of the device when it was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
  device was last seen. (May be a few minutes out of date, for efficiency reasons).
- `user_id` - Owner of device.
@@ -837,6 +841,7 @@ A response body like the following is returned:
  "device_id": "<device_id>",
  "display_name": "android",
  "last_seen_ip": "1.2.3.4",
  "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
  "last_seen_ts": 1474491775024,
  "user_id": "<user_id>"
}
@@ -858,6 +863,8 @@ The following fields are returned in the JSON response body:
  Absent if no name has been set.
- `last_seen_ip` - The IP address where this device was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- `last_seen_user_agent` - The user agent of the device when it was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
  device was last seen. (May be a few minutes out of date, for efficiency reasons).
- `user_id` - Owner of device.

@@ -34,13 +34,45 @@ the process of indexing it).
## Chain Cover Index

Synapse computes auth chain differences by pre-computing a "chain cover" index
for the auth chain in a room, allowing efficient reachability queries like "is
event A in the auth chain of event B". This is done by assigning every event a
*chain ID* and *sequence number* (e.g. `(5,3)`), and having a map of *links*
between chains (e.g. `(5,3) -> (2,4)`) such that A is reachable by B (i.e. `A`
is in the auth chain of `B`) if and only if either:
for the auth chain in a room, allowing us to efficiently make reachability queries
like "is event `A` in the auth chain of event `B`?". We could do this with an index
that tracks all pairs `(A, B)` such that `A` is in the auth chain of `B`. However, this
would be prohibitively large, scaling poorly as the room accumulates more state
events.

1. A and B have the same chain ID and `A`'s sequence number is less than `B`'s
Instead, we break down the graph into *chains*. A chain is a subset of a DAG
with the following property: for any pair of events `E` and `F` in the chain,
the chain contains a path `E -> F` or a path `F -> E`. This forces a chain to be
linear (without forks), e.g. `E -> F -> G -> ... -> H`. Each event in the chain
is given a *sequence number* local to that chain. The oldest event `E` in the
chain has sequence number 1. If `E` has a child `F` in the chain, then `F` has
sequence number 2. If `E` has a grandchild `G` in the chain, then `G` has
sequence number 3; and so on.

Synapse ensures that each persisted event belongs to exactly one chain, and
tracks how the chains are connected to one another. This allows us to
efficiently answer reachability queries. Doing so uses less storage than
tracking reachability on an event-by-event basis, particularly when we have
fewer and longer chains. See

> Jagadish, H. (1990). [A compression technique to materialize transitive closure](https://doi.org/10.1145/99935.99944).
> *ACM Transactions on Database Systems (TODS)*, 15*(4)*, 558-598.

for the original idea or

> Y. Chen, Y. Chen, [An efficient algorithm for answering graph
> reachability queries](https://doi.org/10.1109/ICDE.2008.4497498),
> in: 2008 IEEE 24th International Conference on Data Engineering, April 2008,
> pp. 893–902. (PDF available via [Google Scholar](https://scholar.google.com/scholar?q=Y.%20Chen,%20Y.%20Chen,%20An%20efficient%20algorithm%20for%20answering%20graph%20reachability%20queries,%20in:%202008%20IEEE%2024th%20International%20Conference%20on%20Data%20Engineering,%20April%202008,%20pp.%20893902.).)

for a more modern take.

In practical terms, the chain cover assigns every event a
*chain ID* and *sequence number* (e.g. `(5,3)`), and maintains a map of *links*
between events in chains (e.g. `(5,3) -> (2,4)`) such that `A` is reachable by `B`
(i.e. `A` is in the auth chain of `B`) if and only if either:

1. `A` and `B` have the same chain ID and `A`'s sequence number is less than `B`'s
   sequence number; or
2. there is a link `L` between `B`'s chain ID and `A`'s chain ID such that
   `L.start_seq_no` <= `B.seq_no` and `A.seq_no` <= `L.end_seq_no`.
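
As a sketch (illustrative Python, not Synapse's actual implementation), the
two rules translate directly into a lookup over the links map:

```python
from typing import Dict, Set, Tuple

# An event's position is (chain_id, seq_no). links[(b_chain, a_chain)] holds
# (start_seq_no, end_seq_no) pairs: events in b_chain at or after start_seq_no
# can reach events in a_chain at or before end_seq_no.
Event = Tuple[int, int]
Links = Dict[Tuple[int, int], Set[Tuple[int, int]]]


def is_in_auth_chain(a: Event, b: Event, links: Links) -> bool:
    """Return True if event A is in the auth chain of event B."""
    a_chain, a_seq = a
    b_chain, b_seq = b
    if a_chain == b_chain:
        # Rule 1: same chain, so A must come strictly earlier in it.
        return a_seq < b_seq
    # Rule 2: some link L from B's chain to A's chain satisfies
    # L.start_seq_no <= B.seq_no and A.seq_no <= L.end_seq_no.
    return any(
        start <= b_seq and a_seq <= end
        for start, end in links.get((b_chain, a_chain), ())
    )
```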

@@ -49,8 +81,9 @@ There are actually two potential implementations, one where we store links from
each chain to every other reachable chain (the transitive closure of the links
graph), and one where we remove redundant links (the transitive reduction of the
links graph) e.g. if we have chains `C3 -> C2 -> C1` then the link `C3 -> C1`
would not be stored. Synapse uses the former implementations so that it doesn't
need to recurse to test reachability between chains.
would not be stored. Synapse uses the former implementation so that it doesn't
need to recurse to test reachability between chains. This trades off extra storage
in order to save CPU cycles and DB queries.

### Example

@@ -62,6 +62,8 @@ pipx install poetry
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.

Synapse requires Poetry version 1.2.0 or later.

Next, open a terminal and install dependencies as follows:

```sh

@@ -191,3 +191,27 @@ There are three separate aspects to this:
  flavour will be accepted by SQLite 3.22, but will give a column whose
  default value is the **string** `"FALSE"` - which, when cast back to a boolean
  in Python, evaluates to `True`.


## `event_id` global uniqueness

In room versions `1` and `2` it's possible to end up with two events with the
same `event_id` (in the same or different rooms). After room version `3`, that
can only happen with a hash collision, which we basically hope will never
happen.

There are several places in Synapse and even Matrix APIs like [`GET
/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
where we assume that event IDs are globally unique.

But hash collisions are still possible, and by treating event IDs as room
scoped, we can reduce the possibility of a hash collision. When scoping
`event_id` in the database schema, it should also be accompanied by `room_id`
(`PRIMARY KEY (room_id, event_id)`) and lookups should be done through the pair
`(room_id, event_id)`.
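
As an illustrative sketch (a hypothetical table, not Synapse's actual schema),
that scoping looks like:

```sql
CREATE TABLE my_event_table (
    room_id  TEXT NOT NULL,
    event_id TEXT NOT NULL,
    -- Scope the event ID to the room, per the advice above.
    PRIMARY KEY (room_id, event_id)
);

-- Lookups should supply both halves of the key:
SELECT room_id, event_id
  FROM my_event_table
 WHERE room_id = '!room:example.com'
   AND event_id = '$some_event_id';
```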

There has been a lot of debate on this in places like
https://github.com/matrix-org/matrix-spec-proposals/issues/2779 and
[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
has no resolution yet (as of 2022-09-01).

@@ -243,14 +243,11 @@ doesn't require poetry. (It's what we use in CI too). However, you could try

## Check the version of poetry with `poetry --version`.

At the time of writing, the 1.2 series is beta only. We have seen some examples
where the lockfiles generated by 1.2 prereleases aren't interpreted correctly
by poetry 1.1.x. For now, use poetry 1.1.14, which includes a critical
[change](https://github.com/python-poetry/poetry/pull/5973) needed to remain
[compatible with PyPI](https://github.com/pypi/warehouse/pull/11775).
The minimum version of poetry supported by Synapse is 1.2.

It can also be useful to check the version of `poetry-core` in use. If you've
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep poetry-core`.
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep
poetry-core`.

## Clear caches: `poetry cache clear --all pypi`.

@@ -8,7 +8,8 @@ and allow server and room admins to configure how long messages should
be kept in a homeserver's database before being purged from it.
**Please note that, as this feature isn't part of the Matrix
specification yet, this implementation is to be considered as
experimental.**
experimental. There are known bugs which may cause database corruption.
Proceed with caution.**

A message retention policy is mainly defined by its `max_lifetime`
parameter, which defines how long a message can be kept around after

@@ -7,7 +7,13 @@

1. Enable Synapse metrics:

   There are two methods of enabling metrics in Synapse.
   In `homeserver.yaml`, make sure `enable_metrics` is
   set to `True`.

1. Enable the `/_synapse/metrics` Synapse endpoint that Prometheus uses to
   collect data:

   There are two methods of enabling the metrics endpoint in Synapse.

   The first serves the metrics as a part of the usual web server and
   can be enabled by adding the \"metrics\" resource to the existing
@@ -41,9 +47,6 @@
       - '0.0.0.0'
   ```

   For both options, you will need to ensure that `enable_metrics` is
   set to `True`.

1. Restart Synapse.

1. Add a Prometheus target for Synapse.

@@ -263,7 +263,7 @@ class MyAuthProvider:
            return None

        if self.credentials.get(username) == login_dict.get("my_field"):
            return self.api.get_qualified_user_id(username)
            return (self.api.get_qualified_user_id(username), None)

    async def check_pass(
        self,
@@ -280,5 +280,5 @@ class MyAuthProvider:
            return None

        if self.credentials.get(username) == login_dict.get("password"):
            return self.api.get_qualified_user_id(username)
            return (self.api.get_qualified_user_id(username), None)
```

@@ -174,7 +174,9 @@ oidc_providers:

1. Create a regular web application for Synapse
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/client/oidc/callback`
3. Add a rule to add the `preferred_username` claim.
3. Add a rule with any name to add the `preferred_username` claim.
   (See https://auth0.com/docs/customize/rules/create-rules for more information on how to create rules.)

<details>
<summary>Code sample</summary>
@@ -506,9 +506,13 @@ email will be disabled.

### Registering a user

The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
One way to create a new user is to do so from a client like
[Element](https://element.io/). This requires registration to be enabled via
the
[`enable_registration`](../usage/configuration/config_documentation.md#enable_registration)
setting.

Alternatively, you can do so from the command line. This can be done as follows:
Alternatively, you can create new users from the command line. This can be done as follows:

1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
   installed via a prebuilt package, `register_new_matrix_user` should already be
@@ -520,7 +524,7 @@ Alternatively, you can do so from the command line. This can be done as follows:
   ```
2. Run the following command:
   ```sh
   register_new_matrix_user -c homeserver.yaml http://localhost:8008
   register_new_matrix_user -c homeserver.yaml
   ```

This will prompt you to add details for the new user, and will then connect to
@@ -533,12 +537,13 @@ Make admin [no]:
Success!
```

This process uses a setting `registration_shared_secret` in
`homeserver.yaml`, which is shared between Synapse itself and the
`register_new_matrix_user` script. It doesn't matter what it is (a random
value is generated by `--generate-config`), but it should be kept secret, as
anyone with knowledge of it can register users, including admin accounts,
on your server even if `enable_registration` is `false`.
This process uses a setting
[`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret),
which is shared between Synapse itself and the `register_new_matrix_user`
script. It doesn't matter what it is (a random value is generated by
`--generate-config`), but it should be kept secret, as anyone with knowledge of
it can register users, including admin accounts, on your server even if
`enable_registration` is `false`.

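As a sketch, the corresponding `homeserver.yaml` fragment for the command-line route might look like this; the secret value is a placeholder and should be replaced with your own long random string:

```yaml
# Shared between Synapse and the register_new_matrix_user script.
# Keep this value private.
registration_shared_secret: "CHANGE_ME_to_a_long_random_string"
```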
### Setting up a TURN server


@@ -22,7 +22,7 @@ choose their own username.
In the first case - where users are automatically allocated a Matrix ID - it is
the responsibility of the mapping provider to normalise the SSO attributes and
map them to a valid Matrix ID. The [specification for Matrix
IDs](https://matrix.org/docs/spec/appendices#user-identifiers) has some
IDs](https://spec.matrix.org/latest/appendices/#user-identifiers) has some
information about what is considered valid.

If the mapping provider does not assign a Matrix ID, then Synapse will
@@ -37,9 +37,10 @@ as Synapse). The Synapse config is then modified to point to the mapping provide
## OpenID Mapping Providers

The OpenID mapping provider can be customized by editing the
`oidc_config.user_mapping_provider.module` config option.
[`oidc_providers.user_mapping_provider.module`](usage/configuration/config_documentation.md#oidc_providers)
config option.

`oidc_config.user_mapping_provider.config` allows you to provide custom
`oidc_providers.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
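To make the renamed option concrete, here is a sketch of where `user_mapping_provider` sits under the newer `oidc_providers` form; the identity provider details and the module path are placeholders, not values from this changeset:

```yaml
oidc_providers:
  - idp_id: example
    idp_name: "Example IdP"
    issuer: "https://idp.example.com/"
    client_id: "synapse"
    client_secret: "<client secret>"
    user_mapping_provider:
      # Hypothetical module path; point this at your own provider class.
      module: my_package.MyMappingProvider
      config:
        # Passed to the provider's parse_config() on startup.
        example_option: "value"
```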
@@ -58,7 +59,7 @@ A custom mapping provider must specify the following methods:
- This method should have the `@staticmethod` decoration.
- Arguments:
  - `config` - A `dict` representing the parsed content of the
    `oidc_config.user_mapping_provider.config` homeserver config option.
    `oidc_providers.user_mapping_provider.config` homeserver config option.
    Runs on homeserver startup. Providers should extract and validate
    any option values they need here.
- Whatever is returned will be passed back to the user mapping provider module's
@@ -102,7 +103,7 @@ A custom mapping provider must specify the following methods:
  will be returned as part of the response during a successful login.

  Note that care should be taken to not overwrite any of the parameters
  usually returned as part of the [login response](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login).
  usually returned as part of the [login response](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login).

### Default OpenID Mapping Provider

@@ -113,7 +114,8 @@ specified in the config. It is located at
## SAML Mapping Providers

The SAML mapping provider can be customized by editing the
`saml2_config.user_mapping_provider.module` config option.
[`saml2_config.user_mapping_provider.module`](docs/usage/configuration/config_documentation.md#saml2_config)
config option.

`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for

@@ -5,6 +5,8 @@ worker_name: generic_worker1
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_main_http_uri: http://localhost:8008/

worker_listeners:
  - type: http
    port: 8083

@@ -9,7 +9,7 @@ in, allowing them to specify custom templates:

```yaml
templates:
  custom_templates_directory: /path/to/custom/templates/
  custom_template_directory: /path/to/custom/templates/
```

If this setting is not set, or the files named below are not found within the directory,

@@ -89,6 +89,71 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.67.0

## Direct TCP replication is no longer supported: migrate to Redis

Redis support was added in v1.13.0 with it becoming the recommended method in
v1.18.0. It replaced the old direct TCP connections (which were deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
CPU saving on the main process and is a prerequisite for upcoming
performance improvements.

To migrate to Redis, add the [`redis` config](./workers.md#shared-configuration),
and remove the TCP `replication` listener from the config of the master and
`worker_replication_port` from the worker config. Note that an HTTP listener with a
`replication` resource is still required.

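Sketching the migration described above, assuming a Redis instance on its default local port (the commented-out listener shows the kind of entry to delete from the main process):

```yaml
# Shared configuration: enable Redis-based replication.
redis:
  enabled: true

# Main process: remove any direct TCP replication listener, i.e. delete
# entries such as:
#   - port: 9092
#     type: replication
# An HTTP listener with the `replication` resource must remain.
```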
## Minimum version of Poetry is now v1.2.0

The minimum supported version of poetry is now 1.2. This should only affect
those installing from a source checkout.

## Rust requirement in the next release

From the next major release (v1.68.0) installing Synapse from a source checkout
will require a recent Rust compiler. Those using packages or
`pip install matrix-synapse` will not be affected.

The simplest way of installing Rust is via [rustup.rs](https://rustup.rs/).

## SQLite version requirement in the next release

From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
higher. Synapse v1.67.0 will be the last major release supporting SQLite
versions 3.22 to 3.26.

Those using docker images or Debian packages from Matrix.org will not be
affected. If you have installed from source, you should check the version of
SQLite used by Python with:

```shell
python -c "import sqlite3; print(sqlite3.sqlite_version)"
```

If this is too old, refer to your distribution for advice on upgrading.

# Upgrading to v1.66.0

## Delegation of email validation no longer supported

As of this version, Synapse no longer allows the tasks of verifying email address
ownership, and password reset confirmation, to be delegated to an identity server.
This removal was previously planned for Synapse 1.64.0, but was
[delayed](https://github.com/matrix-org/synapse/issues/13421) until now to give
homeserver administrators more notice of the change.

To continue to allow users to add email addresses to their homeserver accounts,
and perform password resets, make sure that Synapse is configured with a working
email server in the [`email` configuration
section](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
(including, at a minimum, a `notif_from` setting).

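As a sketch, a minimal working `email` section might look like the following; the SMTP host, port and sender address are placeholders for your own mail setup:

```yaml
email:
  smtp_host: mail.example.com
  smtp_port: 587
  notif_from: "Your %(app)s homeserver <noreply@example.com>"
```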
Specifying an `email` setting under `account_threepid_delegates` will now cause
an error at startup.

# Upgrading to v1.64.0

## Deprecation of the ability to delegate e-mail verification to identity servers
@@ -1181,7 +1246,7 @@ updated.
When setting up worker processes, we now recommend the use of a Redis
server for replication. **The old direct TCP connection method is
deprecated and will be removed in a future release.** See
[workers](workers.md) for more details.
the [worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) for more details.

# Upgrading to v1.14.0


@@ -5,8 +5,9 @@
Many of the API calls in the admin api will require an `access_token` for a
server admin. (Note that a server admin is distinct from a room admin.)

A user can be marked as a server admin by updating the database directly, e.g.:
An existing user can be marked as a server admin by updating the database directly.

Check your [database settings](config_documentation.md#database) in the configuration file, connect to the correct database using either `psql [database name]` (if using PostgreSQL) or `sqlite3 path/to/your/database.db` (if using SQLite) and elevate the user `@foo:bar.com` to administrator.
```sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
```

@@ -2,11 +2,11 @@

This API allows you to manage tokens which can be used to authenticate
registration requests, as proposed in
[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md).
[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md)
and stabilised in version 1.2 of the Matrix specification.
To use it, you will need to enable the `registration_requires_token` config
option, and authenticate by providing an `access_token` for a server admin:
see [Admin API](../../usage/administration/admin_api).
Note that this API is still experimental; not all clients may support it yet.
see [Admin API](../admin_api).


## Registration token objects

@@ -2,9 +2,9 @@

How do I become a server admin?
---
If your server already has an admin account you should use the user admin API to promote other accounts to become admins. See [User Admin API](../../admin_api/user_admin_api.md#Change-whether-a-user-is-a-server-administrator-or-not)
If your server already has an admin account you should use the [User Admin API](../../admin_api/user_admin_api.md#Change-whether-a-user-is-a-server-administrator-or-not) to promote other accounts to become admins.

If you don't have any admin accounts yet you won't be able to use the admin API so you'll have to edit the database manually. Manually editing the database is generally not recommended so once you have an admin account, use the admin APIs to make further changes.
If you don't have any admin accounts yet you won't be able to use the admin API, so you'll have to edit the database manually. Manually editing the database is generally not recommended, so once you have an admin account, use the admin APIs to make further changes.

```sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
@@ -32,9 +32,11 @@ What users are registered on my server?
SELECT NAME from users;
```

Manually resetting passwords:
Manually resetting passwords
---
See https://github.com/matrix-org/synapse/blob/master/README.rst#password-reset
Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the [admin API](../../admin_api/user_admin_api.md#reset-password).


I have a problem with my server. Can I just delete my database and start again?
---
@@ -101,3 +103,83 @@ LIMIT 10;

You can also use the [List Room API](../../admin_api/rooms.md#list-room-api)
and `order_by` `state_events`.


People can't accept room invitations from me
---

The typical failure mode here is that you send an invitation to someone
to join a room or direct chat, but when they go to accept it, they get an
error (typically along the lines of "Invalid signature"). They might see
something like the following in their logs:

    2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server <server> with key ed25519:a_EqML: Unable to verify signature for <server>

This is normally caused by a misconfiguration in your reverse-proxy. See [the reverse proxy docs](docs/reverse_proxy.md) and double-check that your settings are correct.


Help!! Synapse is slow and eats all my RAM/CPU!
-----------------------------------------------

First, ensure you are running the latest version of Synapse, using Python 3
with a [PostgreSQL database](../../postgres.md).

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in the future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory constrained environments, or increased if performance starts to
degrade.

However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due to
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.

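If you would rather set this in `homeserver.yaml` than in the environment, the equivalent knob is the global cache factor described in the configuration manual; a sketch:

```yaml
caches:
  # Equivalent to SYNAPSE_CACHE_FACTOR=2.0; raise on machines with
  # slow disks, lower in memory-constrained environments.
  global_factor: 2.0
```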
Using [libjemalloc](https://jemalloc.net) can also yield a significant
improvement in overall memory use, and especially in terms of giving back
RAM to the OS. To use it, the library must simply be put in the
LD_PRELOAD environment variable when launching Synapse. On Debian, this
can be done by installing the `libjemalloc1` package and adding this
line to `/etc/default/matrix-synapse`:

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

This made a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

If you're encountering high CPU use by the Synapse process itself, you
may be affected by a bug with presence tracking that leads to a
massive excess of outgoing federation requests (see [discussion](https://github.com/matrix-org/synapse/issues/3971)). If metrics
indicate that your server is also issuing far more outgoing federation
requests than can be accounted for by your users' activity, this is a
likely cause. The misbehavior can be worked around by disabling presence
in the Synapse config file: [see here](../configuration/config_documentation.md#presence).


Running out of File Handles
---------------------------

If Synapse runs out of file handles, it typically fails badly - live-locking
at 100% CPU, and/or failing to accept new TCP connections (blocking the
connecting client). Matrix currently can legitimately use a lot of file handles,
thanks to busy rooms like `#matrix:matrix.org` containing hundreds of participating
servers. The first time a server talks in a room it will try to connect
simultaneously to all participating servers, which could exhaust the available
file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
to respond. (We need to improve the routing algorithm used to be better than
full mesh, but as of March 2019 this hasn't happened yet).

If you hit this failure mode, we recommend increasing the maximum number of
open file handles to be at least 4096 (assuming a default of 1024 or 256).
This is typically done by editing ``/etc/security/limits.conf``.

Separately, Synapse may leak file handles if inbound HTTP requests get stuck
during processing - e.g. blocked behind a lock or talking to a remote server etc.
This is best diagnosed by matching up the 'Received request' and 'Processed request'
log lines and looking for any 'Processed request' lines which take more than
a few seconds to execute. Please let us know at [`#synapse:matrix.org`](https://matrix.to/#/#synapse-dev:matrix.org) if
you see this failure mode so we can help debug it.

docs/usage/administration/monthly_active_users.md (new file, 84 lines)
@@ -0,0 +1,84 @@
# Monthly Active Users

Synapse can be configured to record the number of monthly active users (also referred to as MAU) on a given homeserver.
For clarity's sake, MAU only tracks local users.

Please note that the metrics recorded by the [Homeserver Usage Stats](../../usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
are calculated differently. The `monthly_active_users` from the usage stats does not take into account any
of the rules below, and counts any users who have made a request to the homeserver in the last 30 days.

See the [configuration manual](../../usage/configuration/config_documentation.md#limit_usage_by_mau) for details on how to configure MAU.

## Calculating active users

Individual user activity is measured in active days. If a user performs an action, the exact time of that action is then recorded. When
calculating the MAU figure, any users with a recorded action in the last 30 days are considered part of the cohort. Days are measured
as a rolling window from the current system time to 30 days ago.

So for example, if Synapse were to calculate the active users on the 15th July at 13:25, it would include any activity from 15th June 13:25 onwards.

A user is **never** considered active if they are either:
- Part of the trial day cohort (described below)
- Owned by an application service.
  - Note: This **only** covers users that are part of an application service `namespaces.users` registration. The namespace
    must also be marked as `exclusive`.

Otherwise, any request to Synapse will mark the user as active. Please note that registration will not mark a user as active *unless*
they register with a 3pid that is included in the config field `mau_limits_reserved_threepids`.

The Prometheus metric for MAU is refreshed every 5 minutes.

Once an hour, Synapse checks to see if any users are inactive (i.e. have no recorded activity in the last 30 days). These users
are removed from the active users cohort. If they then become active, they are immediately restored to the cohort.

It is important to note that **deactivated** users are not immediately removed from the pool of active users, but as these users won't
perform actions they will eventually be removed from the cohort.

### Trial days

If the config option `mau_trial_days` is set, a user must have been active this many days **after** registration to be active. A user is in the
trial period if their registration timestamp (also known as the `creation_ts`) is less than `mau_trial_days` old.

As an example, if `mau_trial_days` is set to `3` and a user is active **after** 3 days (72 hours from registration time) then they will be counted as active.

The `mau_appservice_trial_days` config further extends this rule by applying different durations depending on the `appservice_id` of the user.
Users registered by an application service will be recorded with an `appservice_id` matching the `id` key in the registration file for that service.


## Limiting usage of the homeserver when the maximum MAU is reached

If both config options `limit_usage_by_mau` and `max_mau_value` are set, and the current MAU value exceeds the maximum value, the
homeserver will begin to block some actions.

Individual users matching **any** of the below criteria never have their actions blocked:
- Considered part of the cohort of MAU users.
- Considered part of the trial period.
- Registered as a `support` user.
- Application service users if `track_appservice_user_ips` is NOT set.

Please note that server admins are **not** exempt from blocking.

The following actions are blocked when the MAU limit is exceeded:
- Logging in
- Sending events
- Creating rooms
- Syncing

Registration is also blocked for all new signups *unless* the user is registering with a threepid included in the `mau_limits_reserved_threepids`
config value.

When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMIT_EXCEEDED`.

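Pulling the options above together, a sketch of an MAU-limited configuration; every value here is illustrative:

```yaml
limit_usage_by_mau: true
max_mau_value: 50
mau_trial_days: 2
mau_appservice_trial_days:
  # Keyed by the appservice_id from the service's registration file.
  my_appservice_id: 3
mau_limits_reserved_threepids:
  # Registrations with this 3pid count as active immediately and are
  # never blocked by the MAU limit.
  - medium: email
    address: admin@example.com
```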
## Metrics

Synapse records several different Prometheus metrics for MAU.

`synapse_admin_mau:current` records the current MAU figure for native (non-application-service) users.

`synapse_admin_mau:max` records the maximum MAU as dictated by the `max_mau_value` config value.

`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native`.

`synapse_admin_mau:registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
registered accounts on the homeserver.
@@ -72,49 +72,6 @@ apply if you want your config file to be read properly. A few helpful things to
In addition, each setting has an example of its usage, with the proper indentation
shown.

## Contents
[Modules](#modules)

[Server](#server)

[Homeserver Blocking](#homeserver-blocking)

[TLS](#tls)

[Federation](#federation)

[Caching](#caching)

[Database](#database)

[Logging](#logging)

[Ratelimiting](#ratelimiting)

[Media Store](#media-store)

[Captcha](#captcha)

[TURN](#turn)

[Registration](#registration)

[API Configuration](#api-configuration)

[Signing Keys](#signing-keys)

[Single Sign On Integration](#single-sign-on-integration)

[Push](#push)

[Rooms](#rooms)

[Opentracing](#opentracing)

[Workers](#workers)

[Background Updates](#background-updates)

## Modules

Server admins can expand Synapse's functionality with external modules.
@@ -474,8 +431,6 @@ Sub-options for each listener include:

* `metrics`: (see the docs [here](../../metrics-howto.md)),

* `replication`: (see the docs [here](../../workers.md)).

* `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path.

* `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is
@@ -486,7 +441,8 @@ Sub-options for each listener include:

* `names`: a list of names of HTTP resources. See below for a list of valid resource names.

* `compress`: set to true to enable HTTP compression for this resource.
* `compress`: set to true to enable gzip compression on HTTP bodies for this resource. This is currently only supported with the
  `client`, `consent`, `metrics` and `federation` resources.

* `additional_resources`: Only valid for an 'http' listener. A map of
  additional endpoints which should be loaded via dynamic modules.
@@ -637,6 +593,8 @@ server owner wants to limit to the number of monthly active users. When enabled
reached the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`.
Defaults to false. If this is enabled, a value for `max_mau_value` must also be set.

See [Monthly Active Users](../administration/monthly_active_users.md) for details on how to configure MAU.

Example configuration:
```yaml
limit_usage_by_mau: true
@@ -801,6 +759,10 @@ allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
How long to keep redacted events in unredacted form in the database. After
this period redacted events get replaced with their redacted form in the DB.

Synapse will check whether the retention period has concluded for redacted
events every 5 minutes. Thus, even if this option is set to `0`, Synapse may
still take up to 5 minutes to purge redacted events from the database.

Defaults to `7d`. Set to `null` to disable.

Example configuration:
@@ -887,7 +849,11 @@ which are older than the room's maximum retention period. Synapse will also
filter events received over federation so that events that should have been
purged are ignored and not stored again.

The message retention policies feature is disabled by default.
The message retention policies feature is disabled by default. Please be advised
that enabling this feature carries some risk. There are known bugs with the implementation
which can cause database corruption. Setting retention to delete older history
is less risky than deleting newer history but in general caution is advised when enabling this
experimental feature. You can read more about this feature [here](../../message_retention_policies.md).

This setting has the following sub-options:
* `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the
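For illustration, a sketch of a server-wide default policy using these sub-options (the durations are arbitrary examples):

```yaml
retention:
  enabled: true
  default_policy:
    min_lifetime: 1d
    max_lifetime: 1y
```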
@@ -1098,26 +1064,26 @@ allow_device_name_lookup_over_federation: true
---
## Caching ##

Options related to caching
Options related to caching.

---
### `event_cache_size`

The number of events to cache in memory. Not affected by
`caches.global_factor`. Defaults to 10K.
`caches.global_factor` and is not part of the `caches` section. Defaults to 10K.

Example configuration:
```yaml
event_cache_size: 15K
```
---
### `cache` and associated values
### `caches` and associated values

A cache 'factor' is a multiplier that can be applied to each of
Synapse's caches in order to increase or decrease the maximum
number of entries that can be stored.

Caching can be configured through the following sub-options:
`caches` can be configured through the following sub-options:

* `global_factor`: Controls the global cache factor, which is the default cache factor
  for all caches if a specific factor for that cache is not otherwise
@@ -1179,6 +1145,7 @@ Caching can be configured through the following sub-options:

Example configuration:
```yaml
event_cache_size: 15K
caches:
  global_factor: 1.0
  per_cache_factors:
@@ -1858,7 +1825,7 @@ Example configuration:
max_spider_size: 8M
```
---
### `url_preview_language`
### `url_preview_accept_language`

A list of values for the Accept-Language HTTP header used when
downloading webpages during URL preview generation. This allows
@@ -1906,8 +1873,8 @@ See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.
---
### `recaptcha_public_key`

This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is
enabled.
This homeserver's ReCAPTCHA public key. Must be specified if
[`enable_registration_captcha`](#enable_registration_captcha) is enabled.

Example configuration:
```yaml
@@ -1916,7 +1883,8 @@ recaptcha_public_key: "YOUR_PUBLIC_KEY"
---
### `recaptcha_private_key`

This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is
This homeserver's ReCAPTCHA private key. Must be specified if
[`enable_registration_captcha`](#enable_registration_captcha) is
enabled.

Example configuration:
@@ -1926,9 +1894,11 @@ recaptcha_private_key: "YOUR_PRIVATE_KEY"
---
### `enable_registration_captcha`

Set to true to enable ReCaptcha checks when registering, preventing signup
unless a captcha is answered. Requires a valid ReCaptcha public/private key.
Defaults to false.
Set to `true` to require users to complete a CAPTCHA test when registering an account.
Requires a valid ReCaptcha public/private key.
Defaults to `false`.

Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.

Example configuration:
```yaml
@@ -2004,9 +1974,21 @@ Registration can be rate-limited using the parameters in the [Ratelimiting](#rat
---
### `enable_registration`

Enable registration for new users. Defaults to false. It is highly recommended that if you enable registration,
you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration
without any verification, you must also set `enable_registration_without_verification` to true.
Enable registration for new users. Defaults to `false`.

It is highly recommended that if you enable registration, you set one or more
of the following options, to avoid abuse of your server by "bots":

* [`enable_registration_captcha`](#enable_registration_captcha)
* [`registrations_require_3pid`](#registrations_require_3pid)
* [`registration_requires_token`](#registration_requires_token)

(In order to enable registration without any verification, you must also set
[`enable_registration_without_verification`](#enable_registration_without_verification).)

Note that even if this setting is disabled, new accounts can still be created
via the admin API if
[`registration_shared_secret`](#registration_shared_secret) is set.

Example configuration:
```yaml
@@ -2014,88 +1996,21 @@ enable_registration: true
```
---
### `enable_registration_without_verification`

Enable registration without email or captcha verification. Note: this option is *not* recommended,
as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect
unless `enable_registration` is also enabled.
as registration without verification is a known vector for spam and abuse. Defaults to `false`. Has no effect
unless [`enable_registration`](#enable_registration) is also enabled.

Example configuration:
```yaml
enable_registration_without_verification: true
```
---
### `session_lifetime`

Time that a user's session remains valid for, after they log in.

Note that this is not currently compatible with guest logins.

Note also that this is calculated at login time: changes are not applied retrospectively to users who have already
logged in.

By default, this is infinite.

Example configuration:
```yaml
session_lifetime: 24h
```
----
### `refreshable_access_token_lifetime`

Time that an access token remains valid for, if the session is using refresh tokens.

For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md).

Note that this only applies to clients which advertise support for refresh tokens.

Note also that this is calculated at login time and refresh time: changes are not applied to
existing sessions until they are refreshed.

By default, this is 5 minutes.

Example configuration:
```yaml
refreshable_access_token_lifetime: 10m
```
---
### `refresh_token_lifetime`

Time that a refresh token remains valid for (provided that it is not
exchanged for another one first).
This option can be used to automatically log-out inactive sessions.
Please see the manual for more information.

Note also that this is calculated at login time and refresh time:
changes are not applied to existing sessions until they are refreshed.

By default, this is infinite.

Example configuration:
```yaml
refresh_token_lifetime: 24h
```
---
### `nonrefreshable_access_token_lifetime`

Time that an access token remains valid for, if the session is NOT
using refresh tokens.

Please note that not all clients support refresh tokens, so setting
this to a short value may be inconvenient for some users who will
then be logged out frequently.

Note also that this is calculated at login time: changes are not applied
retrospectively to existing sessions for users that have already logged in.

By default, this is infinite.

Example configuration:
```yaml
nonrefreshable_access_token_lifetime: 24h
```
---
### `registrations_require_3pid`

If this is set, the user must provide all of the specified types of 3PID when registering.
If this is set, users must provide all of the specified types of 3PID when registering an account.

Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.

Example configuration:
```yaml
@@ -2143,9 +2058,11 @@ enable_3pid_lookup: false

Require users to submit a token during registration.
Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md).
Note that `enable_registration` must be set to true.
Disabling this option will not delete any tokens previously generated.
Defaults to false. Set to true to enable.
Defaults to `false`. Set to `true` to enable.


Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.

Example configuration:
```yaml
@@ -2154,13 +2071,39 @@ registration_requires_token: true
---
### `registration_shared_secret`

If set, allows registration of standard or admin accounts by anyone who
has the shared secret, even if registration is otherwise disabled.
If set, allows registration of standard or admin accounts by anyone who has the
shared secret, even if [`enable_registration`](#enable_registration) is not
set.

This is primarily intended for use with the `register_new_matrix_user` script
(see [Registering a user](../../setup/installation.md#registering-a-user));
however, the interface is [documented](../admin_api/register_api.html).

See also [`registration_shared_secret_path`](#registration_shared_secret_path).

Example configuration:
```yaml
registration_shared_secret: <PRIVATE STRING>
```

---
### `registration_shared_secret_path`

An alternative to [`registration_shared_secret`](#registration_shared_secret):
allows the shared secret to be specified in an external file.

The file should be a plain text file, containing only the shared secret.

If this file does not exist, Synapse will create a new shared
secret on startup and store it in this file.

Example configuration:
```yaml
registration_shared_secret_path: /path/to/secrets/file
```

_Added in Synapse 1.67.0._

---
### `bcrypt_rounds`

@@ -2215,7 +2158,10 @@ their account.
by the Matrix Identity Service API
[specification](https://matrix.org/docs/spec/identity_service/latest).)

*Updated in Synapse 1.64.0*: The `email` option is deprecated.
*Deprecated in Synapse 1.64.0*: The `email` option is deprecated.

*Removed in Synapse 1.66.0*: The `email` option has been removed.
If present, Synapse will report a configuration error on startup.

Example configuration:
```yaml
@@ -2388,6 +2334,79 @@ Example configuration:
```yaml
inhibit_user_in_use_error: true
```
---
## User session management
---
### `session_lifetime`

Time that a user's session remains valid for, after they log in.

Note that this is not currently compatible with guest logins.

Note also that this is calculated at login time: changes are not applied retrospectively to users who have already
logged in.

By default, this is infinite.

Example configuration:
```yaml
session_lifetime: 24h
```
----
### `refreshable_access_token_lifetime`

Time that an access token remains valid for, if the session is using refresh tokens.

For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md).

Note that this only applies to clients which advertise support for refresh tokens.

Note also that this is calculated at login time and refresh time: changes are not applied to
existing sessions until they are refreshed.

By default, this is 5 minutes.

Example configuration:
```yaml
refreshable_access_token_lifetime: 10m
```
---
### `refresh_token_lifetime`

Time that a refresh token remains valid for (provided that it is not
exchanged for another one first).
This option can be used to automatically log-out inactive sessions.
Please see the manual for more information.

Note also that this is calculated at login time and refresh time:
changes are not applied to existing sessions until they are refreshed.

By default, this is infinite.

Example configuration:
```yaml
refresh_token_lifetime: 24h
```
---
### `nonrefreshable_access_token_lifetime`

Time that an access token remains valid for, if the session is NOT
using refresh tokens.

Please note that not all clients support refresh tokens, so setting
this to a short value may be inconvenient for some users who will
then be logged out frequently.

Note also that this is calculated at login time: changes are not applied
retrospectively to existing sessions for users that have already logged in.

By default, this is infinite.

Example configuration:
```yaml
nonrefreshable_access_token_lifetime: 24h
```

---
## Metrics

Config options related to metrics.
@@ -2465,7 +2484,7 @@ report_stats_endpoint: https://example.com/report-usage-stats/push
Config settings related to the client/server API

---
### `room_prejoin_state:`
### `room_prejoin_state`

Controls for the state that is shared with users who receive an invite
to a room. By default, the following state event types are shared with users who
@@ -2537,9 +2556,13 @@ track_appservice_user_ips: true
---
### `macaroon_secret_key`

A secret which is used to sign access tokens. If none is specified,
the `registration_shared_secret` is used, if one is given; otherwise,
a secret key is derived from the signing key.
A secret which is used to sign
- access token for guest users,
- short-term login token used during SSO logins (OIDC or SAML2) and
- token used for unsubscribing from email notifications.

If none is specified, the `registration_shared_secret` is used, if one is given;
otherwise, a secret key is derived from the signing key.

Example configuration:
```yaml
@@ -2563,7 +2586,10 @@ Config options relating to signing keys
---
### `signing_key_path`

Path to the signing key to sign messages with.
Path to the signing key to sign events and federation requests with.

*New in Synapse 1.67*: If this file does not exist, Synapse will create a new signing
key on startup and store it in this file.

Example configuration:
```yaml
@@ -2598,7 +2624,7 @@ Example configuration:
key_refresh_interval: 2d
```
---
### `trusted_key_servers:`
### `trusted_key_servers`

The trusted servers to download signing keys from.

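A sketch of a typical value, using matrix.org as the conventional key server:

```yaml
trusted_key_servers:
  - server_name: "matrix.org"
```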
@@ -2668,13 +2694,10 @@ key_server_signing_keys_path: "key_server_signing_keys.key"
The following settings can be used to make Synapse use a single sign-on
provider for authentication, instead of its internal password database.

You will probably also want to set the following options to false to
You will probably also want to set the following options to `false` to
disable the regular login/registration flows:
* `enable_registration`
* `password_config.enabled`

You will also want to investigate the settings under the "sso" configuration
section below.
* [`enable_registration`](#enable_registration)
* [`password_config.enabled`](#password_config)

---
### `saml2_config`
@@ -3381,7 +3404,7 @@ user_directory:
For detailed instructions on user consent configuration, see [here](../../consent_tracking.md).

Parts of this section are required if enabling the `consent` resource under
`listeners`, in particular `template_dir` and `version`. # TODO: link `listeners`
[`listeners`](#listeners), in particular `template_dir` and `version`.

* `template_dir`: gives the location of the templates for the HTML forms.
  This directory should contain one subdirectory per language (eg, `en`, `fr`),
@@ -3393,7 +3416,7 @@ Parts of this section are required if enabling the `consent` resource under
  parameter.

* `server_notice_content`: if enabled, will send a user a "Server Notice"
  asking them to consent to the privacy policy. The `server_notices` section ##TODO: link
  asking them to consent to the privacy policy. The [`server_notices` section](#server_notices)
  must also be configured for this to work. Notices will *not* be sent to
  guest users unless `send_server_notice_to_guests` is set to true.


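Tying those sub-options together, a sketch of a `user_consent` section; the template path and notice wording are placeholders:

```yaml
user_consent:
  template_dir: res/templates/privacy
  version: 1.0
  server_notice_content:
    msgtype: m.text
    body: >-
      Please read and accept our updated privacy policy at %(consent_uri)s.
  send_server_notice_to_guests: true
```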
@@ -24,6 +24,11 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting the
slightly so that they are more visually distinguishable from the section headers
(the bold titles). This is done through the `indent-section-headers.css` file.

In addition to these modifications, we have added a version picker to the documentation.
Users can switch between the documentation for different versions of Synapse.
This functionality was implemented through the `version-picker.js` and
`version-picker.css` files.

More information can be found in mdbook's official documentation for
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
and

@@ -131,6 +131,18 @@
            <i class="fa fa-search"></i>
        </button>
        {{/if}}
        <div class="version-picker">
            <div class="dropdown">
                <div class="select">
                    <span></span>
                    <i class="fa fa-chevron-down"></i>
                </div>
                <input type="hidden" name="version">
                <ul class="dropdown-menu">
                    <!-- Versions will be added dynamically in version-picker.js -->
                </ul>
            </div>
        </div>
    </div>

    <h1 class="menu-title">{{ book_title }}</h1>
@@ -309,4 +321,4 @@
{{/if}}

</body>
</html>
</html>

docs/website_files/version-picker.css (new file, 78 lines)
@@ -0,0 +1,78 @@
.version-picker {
    display: flex;
    align-items: center;
}

.version-picker .dropdown {
    width: 130px;
    max-height: 29px;
    margin-left: 10px;
    display: inline-block;
    border-radius: 4px;
    border: 1px solid var(--theme-popup-border);
    position: relative;
    font-size: 13px;
    color: var(--fg);
    height: 100%;
    text-align: left;
}
.version-picker .dropdown .select {
    cursor: pointer;
    display: block;
    padding: 5px 2px 5px 15px;
}
.version-picker .dropdown .select > i {
    font-size: 10px;
    color: var(--fg);
    cursor: pointer;
    float: right;
    line-height: 20px !important;
}
.version-picker .dropdown:hover {
    border: 1px solid var(--theme-popup-border);
}
.version-picker .dropdown:active {
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active:hover,
.version-picker .dropdown.active {
    border: 1px solid var(--theme-popup-border);
    border-radius: 2px 2px 0 0;
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active .select > i {
    transform: rotate(-180deg);
}
.version-picker .dropdown .dropdown-menu {
    position: absolute;
    background-color: var(--theme-popup-bg);
    width: 100%;
    left: -1px;
    right: 1px;
    margin-top: 1px;
    border: 1px solid var(--theme-popup-border);
    border-radius: 0 0 4px 4px;
    overflow: hidden;
    display: none;
    max-height: 300px;
    overflow-y: auto;
    z-index: 9;
}
.version-picker .dropdown .dropdown-menu li {
    font-size: 12px;
    padding: 6px 20px;
    cursor: pointer;
}
.version-picker .dropdown .dropdown-menu {
    padding: 0;
    list-style: none;
}
.version-picker .dropdown .dropdown-menu li:hover {
    background-color: var(--theme-hover);
}
.version-picker .dropdown .dropdown-menu li.active::before {
    display: inline-block;
    content: "✓";
    margin-inline-start: -14px;
    width: 14px;
}
docs/website_files/version-picker.js (new file, 127 lines)
@@ -0,0 +1,127 @@
const dropdown = document.querySelector('.version-picker .dropdown');
const dropdownMenu = dropdown.querySelector('.dropdown-menu');

fetchVersions(dropdown, dropdownMenu).then(() => {
    initializeVersionDropdown(dropdown, dropdownMenu);
});

/**
 * Initialize the dropdown functionality for version selection.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 */
function initializeVersionDropdown(dropdown, dropdownMenu) {
    // Toggle the dropdown menu on click
    dropdown.addEventListener('click', function () {
        this.setAttribute('tabindex', 1);
        this.classList.toggle('active');
        dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
    });

    // Remove the 'active' class and hide the dropdown menu on focusout
    dropdown.addEventListener('focusout', function () {
        this.classList.remove('active');
        dropdownMenu.style.display = 'none';
    });

    // Handle item selection within the dropdown menu
    const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
    dropdownMenuItems.forEach(function (item) {
        item.addEventListener('click', function () {
            dropdownMenuItems.forEach(function (item) {
                item.classList.remove('active');
            });
            this.classList.add('active');
            dropdown.querySelector('span').textContent = this.textContent;
            dropdown.querySelector('input').value = this.getAttribute('id');

            window.location.href = changeVersion(window.location.href, this.textContent);
        });
    });
};

/**
 * This function fetches the available versions from a GitHub repository
 * and inserts them into the version picker.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 * @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
 */
function fetchVersions(dropdown, dropdownMenu) {
    return new Promise((resolve, reject) => {
        window.addEventListener("load", () => {

            fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
                cache: "force-cache",
            }).then(res =>
                res.json()
            ).then(resObject => {
                const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
                const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
                const versions = tree.map(item => item.path).sort(sortVersions);

                // Create a list of <li> items for versions
                versions.forEach((version) => {
                    const li = document.createElement("li");
                    li.textContent = version;
                    li.id = version;

                    if (window.SYNAPSE_VERSION === version) {
                        li.classList.add('active');
                        dropdown.querySelector('span').textContent = version;
                        dropdown.querySelector('input').value = version;
                    }

                    dropdownMenu.appendChild(li);
                });

                resolve(versions);

            }).catch(ex => {
                console.error("Failed to fetch version data", ex);
                reject(ex);
            })
        });
    });
}

/**
 * Custom sorting function to sort an array of version strings.
 *
 * @param {string} a - The first version string to compare.
 * @param {string} b - The second version string to compare.
 * @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
 */
function sortVersions(a, b) {
    // Put 'develop' and 'latest' at the top
    if (a === 'develop' || a === 'latest') return -1;
    if (b === 'develop' || b === 'latest') return 1;

    const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
    const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];

    return versionB.localeCompare(versionA);
}

/**
 * Change the version in a URL path.
 *
 * @param {string} url - The original URL to be modified.
 * @param {string} newVersion - The new version to replace the existing version in the URL.
 * @returns {string} The updated URL with the new version.
 */
function changeVersion(url, newVersion) {
    const parsedURL = new URL(url);
    const pathSegments = parsedURL.pathname.split('/');

    // Modify the version
    pathSegments[2] = newVersion;

    // Reconstruct the URL
    parsedURL.pathname = pathSegments.join('/');

    return parsedURL.href;
}
docs/website_files/version.js (new file, 1 line)
@@ -0,0 +1 @@
window.SYNAPSE_VERSION = 'v1.67';
@@ -32,13 +32,8 @@ stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.

Redis support was added in v1.13.0 with it becoming the recommended method in
v1.18.0. It replaced the old direct TCP connections (which is deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
cpu saving on the main process and will be a prerequisite for upcoming
performance improvements.
All the workers and the main process connect to Redis, which relays replication
commands between processes.

If Redis support is enabled Synapse will use it as a shared cache, as well as a
pub/sub mechanism.
@@ -117,23 +112,26 @@ redis:
  enabled: true
```

See the sample config for the full documentation of each option.
See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option.

Under **no circumstances** should the replication listener be exposed to the
public internet; it has no authentication and is unencrypted.
public internet; replication traffic is:

* always unencrypted
* unauthenticated, unless `worker_replication_secret` is configured


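A sketch of the shared-configuration secret mentioned in the second bullet; the value is a placeholder:

```yaml
# Authenticates replication traffic between workers and the main process.
worker_replication_secret: "CHANGE_ME_long_random_string"
```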
### Worker configuration
|
||||
|
||||
In the config file for each worker, you must specify the type of worker
|
||||
application (`worker_app`), and you should specify a unique name for the worker
|
||||
(`worker_name`). The currently available worker applications are listed below.
|
||||
You must also specify the HTTP replication endpoint that it should talk to on
|
||||
the main synapse process. `worker_replication_host` should specify the host of
|
||||
the main synapse and `worker_replication_http_port` should point to the HTTP
|
||||
replication port. If the worker will handle HTTP requests then the
|
||||
`worker_listeners` option should be set with a `http` listener, in the same way
|
||||
as the `listeners` option in the shared config.
|
||||
In the config file for each worker, you must specify:
|
||||
* The type of worker (`worker_app`). The currently available worker applications are listed below.
|
||||
* A unique name for the worker (`worker_name`).
|
||||
* The HTTP replication endpoint that it should talk to on the main synapse process
|
||||
(`worker_replication_host` and `worker_replication_http_port`)
|
||||
* If handling HTTP requests, a `worker_listeners` option with an `http`
|
||||
listener, in the same way as the `listeners` option in the shared config.
|
||||
* If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
|
||||
the main process (`worker_main_http_uri`).
|
||||
|
||||
For example:
|
||||
|
||||
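
The YAML example itself falls outside this hunk. Purely to illustrate the options listed above, here is a sketch in Python of loading and sanity-checking such a config (the option names are taken from the list; the values and the check itself are invented):

```python
import yaml  # PyYAML, assumed available

REQUIRED_OPTIONS = {
    "worker_app",
    "worker_name",
    "worker_replication_host",
    "worker_replication_http_port",
}

config = yaml.safe_load(
    """
    worker_app: synapse.app.generic_worker
    worker_name: worker1
    worker_replication_host: 127.0.0.1
    worker_replication_http_port: 9093
    """
)

missing = REQUIRED_OPTIONS - config.keys()
assert not missing, f"missing worker options: {missing}"
```
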
@@ -217,10 +215,12 @@ information.
    ^/_matrix/client/(api/v1|r0|v3|unstable)/search$

    # Encryption requests
+    # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
    ^/_matrix/client/(r0|v3|unstable)/keys/query$
    ^/_matrix/client/(r0|v3|unstable)/keys/changes$
    ^/_matrix/client/(r0|v3|unstable)/keys/claim$
    ^/_matrix/client/(r0|v3|unstable)/room_keys/
+    ^/_matrix/client/(r0|v3|unstable)/keys/upload/

    # Registration/login requests
    ^/_matrix/client/(api/v1|r0|v3|unstable)/login$
@@ -325,7 +325,6 @@ effects of bursts of events from that bridge on events sent by normal users.

Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
-(This is only supported with Redis-based replication.)

To enable this, the worker must have a HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
@@ -581,52 +580,23 @@ handle it, and are online.
If `update_user_directory` is set to `false`, and this worker is not running,
the above endpoint may give outdated results.

-### `synapse.app.frontend_proxy`
-
-Proxies some frequently-requested client endpoints to add caching and remove
-load from the main synapse. It can handle REST endpoints matching the following
-regular expressions:
-
-    ^/_matrix/client/(r0|v3|unstable)/keys/upload
-
-If `use_presence` is False in the homeserver config, it can also handle REST
-endpoints matching the following regular expressions:
-
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status
-
-This "stub" presence handler will pass through `GET` request but make the
-`PUT` effectively a no-op.
-
-It will proxy any requests it cannot handle to the main synapse instance. It
-must therefore be configured with the location of the main instance, via
-the `worker_main_http_uri` setting in the `frontend_proxy` worker configuration
-file. For example:
-
-```yaml
-worker_main_http_uri: http://127.0.0.1:8008
-```
-
### Historical apps

-*Note:* Historically there used to be more apps, however they have been
-amalgamated into a single `synapse.app.generic_worker` app. The remaining apps
-are ones that do specific processing unrelated to requests, e.g. the `pusher`
-that handles sending out push notifications for new events. The intention is for
-all these to be folded into the `generic_worker` app and to use config to define
-which processes handle the various proccessing such as push notifications.
+The following used to be separate worker application types, but are now
+equivalent to `synapse.app.generic_worker`:
+
+* `synapse.app.client_reader`
+* `synapse.app.event_creator`
+* `synapse.app.federation_reader`
+* `synapse.app.frontend_proxy`
+* `synapse.app.synchrotron`


## Migration from old config

-There are two main independent changes that have been made: introducing Redis
-support and merging apps into `synapse.app.generic_worker`. Both these changes
-are backwards compatible and so no changes to the config are required, however
-server admins are encouraged to plan to migrate to Redis as the old style direct
-TCP replication config is deprecated.
-
-To migrate to Redis add the `redis` config as above, and optionally remove the
-TCP `replication` listener from master and `worker_replication_port` from worker
-config.
+A main change that has occurred is the merging of worker apps into
+`synapse.app.generic_worker`. This change is backwards compatible and so no
+changes to the config are required.

To migrate apps to use `synapse.app.generic_worker` simply update the
`worker_app` option in the worker configs, and where worker are started (e.g.
2
mypy.ini
@@ -1,6 +1,6 @@
[mypy]
namespace_packages = True
-plugins = mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
+plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
follow_imports = normal
check_untyped_defs = True
show_error_codes = True
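
For context, `pydantic.mypy` is pydantic's own mypy plugin: it synthesises typed `__init__` signatures for models, so bad constructor arguments are caught statically. A minimal sketch of the kind of error it surfaces (hypothetical model, illustrative only):

```python
from pydantic import BaseModel

class Foo(BaseModel):
    count: int

# Without the plugin, mypy treats the constructor as `**data: Any`.
# With `plugins = pydantic.mypy`, the call below is flagged as passing
# `str` where `int` is expected.
Foo(count="five")
```
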
202
poetry.lock
generated
@@ -7,10 +7,10 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"

[package.extras]
-dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"]
-docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
-tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"]
-tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"]
+dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "sphinx", "sphinx-notfound-page", "zope.interface"]
+docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
+tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "zope.interface"]
+tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six"]

[[package]]
name = "authlib"
@@ -39,7 +39,7 @@ attrs = ">=19.2.0"
six = "*"

[package.extras]
-visualize = ["graphviz (>0.5.1)", "Twisted (>=16.1.1)"]
+visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]

[[package]]
name = "bcrypt"
@@ -199,12 +199,12 @@ python-versions = ">=3.6"
cffi = ">=1.12"

[package.extras]
-docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"]
-docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"]
+docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx_rtd_theme"]
+docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"]
sdist = ["setuptools_rust (>=0.11.4)"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"]
+test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]

[[package]]
name = "defusedxml"
@@ -226,7 +226,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
wrapt = ">=1.10,<2"

[package.extras]
-dev = ["tox", "bump2version (<1)", "sphinx (<2)", "importlib-metadata (<3)", "importlib-resources (<4)", "configparser (<5)", "sphinxcontrib-websupport (<2)", "zipp (<2)", "PyTest (<5)", "PyTest-Cov (<2.6)", "pytest", "pytest-cov"]
+dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version (<1)", "configparser (<5)", "importlib-metadata (<3)", "importlib-resources (<4)", "sphinx (<2)", "sphinxcontrib-websupport (<2)", "tox", "zipp (<2)"]

[[package]]
name = "docutils"
@@ -245,7 +245,7 @@ optional = true
python-versions = ">=3.7"

[package.extras]
-dev = ["tox", "coverage", "lxml", "xmlschema (>=1.8.0)", "sphinx", "memory-profiler", "flake8", "mypy (==0.910)"]
+dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910)", "tox", "xmlschema (>=1.8.0)"]

[[package]]
name = "flake8"
@@ -274,7 +274,7 @@ attrs = ">=19.2.0"
flake8 = ">=3.0.0"

[package.extras]
-dev = ["coverage", "black", "hypothesis", "hypothesmith"]
+dev = ["black", "coverage", "hypothesis", "hypothesmith"]

[[package]]
name = "flake8-comprehensions"
@@ -367,8 +367,8 @@ typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
zipp = ">=0.5"

[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
-testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517", "pyfakefs", "pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]

[[package]]
name = "importlib-resources"
@@ -382,8 +382,8 @@ python-versions = ">=3.6"
zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}

[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]

[[package]]
name = "incremental"
@@ -405,9 +405,9 @@ optional = false
python-versions = ">=3.6,<4.0"

[package.extras]
-pipfile_deprecated_finder = ["pipreqs", "requirementslib"]
-requirements_deprecated_finder = ["pipreqs", "pip-api"]
colors = ["colorama (>=0.4.3,<0.5.0)"]
+pipfile_deprecated_finder = ["pipreqs", "requirementslib"]
+requirements_deprecated_finder = ["pip-api", "pipreqs"]

[[package]]
name = "jaeger-client"
@@ -424,7 +424,7 @@ thrift = "*"
tornado = ">=4.3"

[package.extras]
-tests = ["mock", "pycurl", "pytest", "pytest-cov", "coverage", "pytest-timeout", "pytest-tornado", "pytest-benchmark", "pytest-localserver", "flake8", "flake8-quotes", "flake8-typing-imports", "codecov", "tchannel (==2.1.0)", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "mypy"]
+tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-imports", "mock", "mypy", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "pycurl", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "pytest-localserver", "pytest-timeout", "pytest-tornado", "tchannel (==2.1.0)"]

[[package]]
name = "jeepney"
@@ -435,8 +435,8 @@ optional = false
python-versions = ">=3.6"

[package.extras]
-test = ["pytest", "pytest-trio", "pytest-asyncio", "testpath", "trio", "async-timeout"]
-trio = ["trio", "async-generator"]
+test = ["async-timeout", "pytest", "pytest-asyncio", "pytest-trio", "testpath", "trio"]
+trio = ["async_generator", "trio"]

[[package]]
name = "jinja2"
@@ -486,8 +486,8 @@ pywin32-ctypes = {version = "<0.1.0 || >0.1.0,<0.1.1 || >0.1.1", markers = "sys_
SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""}

[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "jaraco.tidelift (>=1.4)"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"]
+docs = ["jaraco.packaging (>=8.2)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]

[[package]]
name = "ldap3"
@@ -511,7 +511,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
[package.extras]
cssselect = ["cssselect (>=0.7)"]
html5 = ["html5lib"]
-htmlsoup = ["beautifulsoup4"]
+htmlsoup = ["BeautifulSoup4"]
source = ["Cython (>=0.29.7)"]

[[package]]
@@ -535,12 +535,12 @@ attrs = "*"
importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""}

[package.extras]
-dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"]
-test = ["tox", "twisted", "aiounittest"]
+dev = ["aiounittest", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "mypy (==0.910)", "tox", "twine (==4.0.1)", "twisted"]
+test = ["aiounittest", "tox", "twisted"]

[[package]]
name = "matrix-synapse-ldap3"
-version = "0.2.1"
+version = "0.2.2"
description = "An LDAP3 auth provider for Synapse"
category = "main"
optional = true
@@ -552,7 +552,7 @@ service-identity = "*"
Twisted = ">=15.1.0"

[package.extras]
-dev = ["matrix-synapse", "tox", "ldaptor", "mypy (==0.910)", "types-setuptools", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)"]
+dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "matrix-synapse", "mypy (==0.910)", "tox", "types-setuptools"]

[[package]]
name = "mccabe"
@@ -611,7 +611,7 @@ mypy = "0.950"
"zope.schema" = "*"

[package.extras]
-test = ["pytest (>=4.6)", "pytest-cov", "lxml"]
+test = ["lxml", "pytest (>=4.6)", "pytest-cov"]

[[package]]
name = "netaddr"
@@ -630,7 +630,7 @@ optional = true
python-versions = "*"

[package.extras]
-tests = ["doubles", "flake8", "flake8-quotes", "mock", "pytest", "pytest-cov", "pytest-mock", "sphinx", "sphinx-rtd-theme", "six (>=1.10.0,<2.0)", "gevent", "tornado"]
+tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pytest", "pytest-cov", "pytest-mock", "six (>=1.10.0,<2.0)", "sphinx_rtd_theme", "tornado"]

[[package]]
name = "packaging"
@@ -778,6 +778,21 @@ category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"

+[[package]]
+name = "pydantic"
+version = "1.9.1"
+description = "Data validation and settings management using python type hints"
+category = "main"
+optional = false
+python-versions = ">=3.6.1"
+
+[package.dependencies]
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+dotenv = ["python-dotenv (>=0.10.4)"]
+email = ["email-validator (>=1.0.3)"]
+
[[package]]
name = "pyflakes"
version = "2.4.0"
@@ -821,9 +836,9 @@ python-versions = ">=3.6"

[package.extras]
crypto = ["cryptography (>=3.3.1)"]
-dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"]
+dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"]
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
-tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"]
+tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]

[[package]]
name = "pymacaroons"
@@ -857,8 +872,8 @@ python-versions = ">=3.6"
cffi = ">=1.4.1"

[package.extras]
-docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"]
-tests = ["pytest (>=3.2.1,!=3.3.0)", "hypothesis (>=3.27.0)"]
+docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"]
+tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]

[[package]]
name = "pyopenssl"
@@ -910,11 +925,12 @@ pyOpenSSL = "*"
python-dateutil = "*"
pytz = "*"
requests = ">=1.0.0"
+setuptools = "*"
six = "*"
xmlschema = ">=1.2.1"

[package.extras]
-s2repoze = ["paste", "zope.interface", "repoze.who"]
+s2repoze = ["paste", "repoze.who", "zope.interface"]

[[package]]
name = "python-dateutil"
@@ -1039,11 +1055,11 @@ celery = ["celery (>=3)"]
chalice = ["chalice (>=1.16.0)"]
django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
-flask = ["flask (>=0.11)", "blinker (>=1.1)"]
+flask = ["blinker (>=1.1)", "flask (>=0.11)"]
httpx = ["httpx (>=0.16.0)"]
-pure_eval = ["pure-eval", "executing", "asttokens"]
+pure_eval = ["asttokens", "executing", "pure-eval"]
pyspark = ["pyspark (>=2.4.4)"]
-quart = ["quart (>=0.16.1)", "blinker (>=1.1)"]
+quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
rq = ["rq (>=0.6)"]
sanic = ["sanic (>=0.8)"]
sqlalchemy = ["sqlalchemy (>=1.2)"]
@@ -1065,11 +1081,24 @@ pyasn1-modules = "*"
six = "*"

[package.extras]
-dev = ["coverage[toml] (>=5.0.2)", "pytest", "sphinx", "furo", "idna", "pyopenssl"]
-docs = ["sphinx", "furo"]
+dev = ["coverage[toml] (>=5.0.2)", "furo", "idna", "pyOpenSSL", "pytest", "sphinx"]
+docs = ["furo", "sphinx"]
idna = ["idna"]
tests = ["coverage[toml] (>=5.0.2)", "pytest"]

+[[package]]
+name = "setuptools"
+version = "65.3.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+
[[package]]
name = "signedjson"
version = "1.1.4"
@@ -1184,6 +1213,7 @@ click = "*"
click-default-group = "*"
incremental = "*"
jinja2 = "*"
+setuptools = "*"
tomli = {version = "*", markers = "python_version >= \"3.6\""}

[package.extras]
@@ -1221,7 +1251,7 @@ requests = ">=2.1.0"
Twisted = {version = ">=18.7.0", extras = ["tls"]}

[package.extras]
-dev = ["pep8", "pyflakes", "httpbin (==0.5.0)"]
+dev = ["httpbin (==0.5.0)", "pep8", "pyflakes"]
docs = ["sphinx (>=1.4.8)"]

[[package]]
@@ -1266,20 +1296,20 @@ typing-extensions = ">=3.6.5"
"zope.interface" = ">=4.4.2"

[package.extras]
-all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
-conch = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)"]
-conch_nacl = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pynacl"]
+all_non_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
+conch_nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
contextvars = ["contextvars (>=2.4,<3)"]
-dev = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "python-subunit (>=1.4,<2.0)", "pydoctor (>=21.9.0,<21.10.0)"]
-dev_release = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pydoctor (>=21.9.0,<21.10.0)"]
+dev = ["coverage (>=6b1,<7)", "pydoctor (>=21.9.0,<21.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)"]
+dev_release = ["pydoctor (>=21.9.0,<21.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
-macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
-mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pynacl", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"]
-osx_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
+macos_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=21.9.0,<21.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
+osx_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
-test = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)"]
-tls = ["pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)"]
-windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
+test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)"]
+tls = ["idna (>=2.4)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)"]
+windows_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]

[[package]]
name = "twisted-iocpsupport"
@@ -1457,7 +1487,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"

[package.extras]
brotli = ["brotlipy (>=0.6.0)"]
-secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]

[[package]]
@@ -1489,8 +1519,8 @@ elementpath = ">=2.5.0,<3.0.0"

[package.extras]
codegen = ["elementpath (>=2.5.0,<3.0.0)", "jinja2"]
-dev = ["tox", "coverage", "lxml", "elementpath (>=2.5.0,<3.0.0)", "memory-profiler", "sphinx", "sphinx-rtd-theme", "jinja2", "flake8", "mypy", "lxml-stubs"]
-docs = ["elementpath (>=2.5.0,<3.0.0)", "sphinx", "sphinx-rtd-theme", "jinja2"]
+dev = ["Sphinx", "coverage", "elementpath (>=2.5.0,<3.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"]
+docs = ["Sphinx", "elementpath (>=2.5.0,<3.0.0)", "jinja2", "sphinx-rtd-theme"]

[[package]]
name = "zipp"
@@ -1501,8 +1531,8 @@ optional = false
python-versions = ">=3.7"

[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]

[[package]]
name = "zope.event"
@@ -1512,8 +1542,11 @@ category = "dev"
optional = false
python-versions = "*"

+[package.dependencies]
+setuptools = "*"
+
[package.extras]
-docs = ["sphinx"]
+docs = ["Sphinx"]
test = ["zope.testrunner"]

[[package]]
@@ -1524,8 +1557,11 @@ category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"

+[package.dependencies]
+setuptools = "*"
+
[package.extras]
-docs = ["sphinx", "repoze.sphinx.autointerface"]
+docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]

@@ -1538,11 +1574,12 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"

[package.dependencies]
+setuptools = "*"
"zope.event" = "*"
"zope.interface" = ">=5.0.0"

[package.extras]
-docs = ["sphinx", "repoze.sphinx.autointerface"]
+docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]

[extras]
@@ -1563,7 +1600,7 @@ url_preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
-content-hash = "c24bbcee7e86dbbe7cdbf49f91a25b310bf21095452641e7440129f59b077f78"
+content-hash = "7de518bf27967b3547eab8574342cfb67f87d6b47b4145c13de11112141dbf2d"

[metadata.files]
attrs = [
@@ -2055,8 +2092,8 @@ matrix-common = [
    {file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"},
]
matrix-synapse-ldap3 = [
-    {file = "matrix-synapse-ldap3-0.2.1.tar.gz", hash = "sha256:bfb4390f4a262ffb0d6f057ff3aeb1e46d4e52ff420a064d795fb4f555f00285"},
-    {file = "matrix_synapse_ldap3-0.2.1-py3-none-any.whl", hash = "sha256:1b3310a60f1d06466f35905a269b6df95747fd1305f2b7fe638f373963b2aa2c"},
+    {file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = "sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"},
+    {file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"},
]
mccabe = [
    {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
@@ -2260,6 +2297,43 @@ pycparser = [
    {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
    {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
]
+pydantic = [
+    {file = "pydantic-1.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8098a724c2784bf03e8070993f6d46aa2eeca031f8d8a048dff277703e6e193"},
+    {file = "pydantic-1.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c320c64dd876e45254bdd350f0179da737463eea41c43bacbee9d8c9d1021f11"},
+    {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18f3e912f9ad1bdec27fb06b8198a2ccc32f201e24174cec1b3424dda605a310"},
+    {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11951b404e08b01b151222a1cb1a9f0a860a8153ce8334149ab9199cd198131"},
+    {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8bc541a405423ce0e51c19f637050acdbdf8feca34150e0d17f675e72d119580"},
+    {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e565a785233c2d03724c4dc55464559639b1ba9ecf091288dd47ad9c629433bd"},
+    {file = "pydantic-1.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a4a88dcd6ff8fd47c18b3a3709a89adb39a6373f4482e04c1b765045c7e282fd"},
+    {file = "pydantic-1.9.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:447d5521575f18e18240906beadc58551e97ec98142266e521c34968c76c8761"},
+    {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:985ceb5d0a86fcaa61e45781e567a59baa0da292d5ed2e490d612d0de5796918"},
+    {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059b6c1795170809103a1538255883e1983e5b831faea6558ef873d4955b4a74"},
+    {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d12f96b5b64bec3f43c8e82b4aab7599d0157f11c798c9f9c528a72b9e0b339a"},
+    {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ae72f8098acb368d877b210ebe02ba12585e77bd0db78ac04a1ee9b9f5dd2166"},
+    {file = "pydantic-1.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:79b485767c13788ee314669008d01f9ef3bc05db9ea3298f6a50d3ef596a154b"},
+    {file = "pydantic-1.9.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:494f7c8537f0c02b740c229af4cb47c0d39840b829ecdcfc93d91dcbb0779892"},
+    {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0f047e11febe5c3198ed346b507e1d010330d56ad615a7e0a89fae604065a0e"},
+    {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:969dd06110cb780da01336b281f53e2e7eb3a482831df441fb65dd30403f4608"},
+    {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:177071dfc0df6248fd22b43036f936cfe2508077a72af0933d0c1fa269b18537"},
+    {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9bcf8b6e011be08fb729d110f3e22e654a50f8a826b0575c7196616780683380"},
+    {file = "pydantic-1.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a955260d47f03df08acf45689bd163ed9df82c0e0124beb4251b1290fa7ae728"},
+    {file = "pydantic-1.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ce157d979f742a915b75f792dbd6aa63b8eccaf46a1005ba03aa8a986bde34a"},
+    {file = "pydantic-1.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0bf07cab5b279859c253d26a9194a8906e6f4a210063b84b433cf90a569de0c1"},
+    {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d93d4e95eacd313d2c765ebe40d49ca9dd2ed90e5b37d0d421c597af830c195"},
+    {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1542636a39c4892c4f4fa6270696902acb186a9aaeac6f6cf92ce6ae2e88564b"},
+    {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a9af62e9b5b9bc67b2a195ebc2c2662fdf498a822d62f902bf27cccb52dbbf49"},
+    {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fe4670cb32ea98ffbf5a1262f14c3e102cccd92b1869df3bb09538158ba90fe6"},
+    {file = "pydantic-1.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:9f659a5ee95c8baa2436d392267988fd0f43eb774e5eb8739252e5a7e9cf07e0"},
+    {file = "pydantic-1.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b83ba3825bc91dfa989d4eed76865e71aea3a6ca1388b59fc801ee04c4d8d0d6"},
+    {file = "pydantic-1.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1dd8fecbad028cd89d04a46688d2fcc14423e8a196d5b0a5c65105664901f810"},
+    {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02eefd7087268b711a3ff4db528e9916ac9aa18616da7bca69c1871d0b7a091f"},
+    {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7eb57ba90929bac0b6cc2af2373893d80ac559adda6933e562dcfb375029acee"},
+    {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4ce9ae9e91f46c344bec3b03d6ee9612802682c1551aaf627ad24045ce090761"},
+    {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:72ccb318bf0c9ab97fc04c10c37683d9eea952ed526707fabf9ac5ae59b701fd"},
+    {file = "pydantic-1.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:61b6760b08b7c395975d893e0b814a11cf011ebb24f7d869e7118f5a339a82e1"},
+    {file = "pydantic-1.9.1-py3-none-any.whl", hash = "sha256:4988c0f13c42bfa9ddd2fe2f569c9d54646ce84adc5de84228cfe83396f3bd58"},
+    {file = "pydantic-1.9.1.tar.gz", hash = "sha256:1ed987c3ff29fff7fd8c3ea3a3ea877ad310aae2ef9889a119e22d3f2db0691a"},
+]
pyflakes = [
    {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"},
    {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"},
@@ -2406,6 +2480,10 @@ service-identity = [
    {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
    {file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"},
]
+setuptools = [
+    {file = "setuptools-65.3.0-py3-none-any.whl", hash = "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82"},
+    {file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"},
+]
signedjson = [
    {file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
    {file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"},

@@ -54,7 +54,7 @@ skip_gitignore = true

[tool.poetry]
name = "matrix-synapse"
-version = "1.64.0"
+version = "1.67.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -158,6 +158,9 @@ packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
+# This is the most recent version of Pydantic available on common distros.
+pydantic = ">=1.7.4"


# Optional Dependencies

425
scripts-dev/check_pydantic_models.py
Executable file
@@ -0,0 +1,425 @@
#! /usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script which enforces that Synapse always uses strict types when defining a Pydantic
model.

Pydantic does not yet offer a strict mode, but it is planned for pydantic v2. See

    https://github.com/pydantic/pydantic/issues/1098
    https://pydantic-docs.helpmanual.io/blog/pydantic-v2/#strict-mode

until then, this script is a best effort to stop us from introducing type coercion bugs
(like the infamous stringy power levels fixed in room version 10).
"""
import argparse
import contextlib
import functools
import importlib
import logging
import os
import pkgutil
import sys
import textwrap
import traceback
import unittest.mock
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, List, Set, Type, TypeVar

from parameterized import parameterized
from pydantic import BaseModel as PydanticBaseModel, conbytes, confloat, conint, constr
from pydantic.typing import get_args
from typing_extensions import ParamSpec

logger = logging.getLogger(__name__)

CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: List[Callable] = [
    constr,
    conbytes,
    conint,
    confloat,
]

TYPES_THAT_PYDANTIC_WILL_COERCE_TO = [
    str,
    bytes,
    int,
    float,
    bool,
]


P = ParamSpec("P")
R = TypeVar("R")


class ModelCheckerException(Exception):
    """Dummy exception. Allows us to detect unwanted types during a module import."""


class MissingStrictInConstrainedTypeException(ModelCheckerException):
    factory_name: str

    def __init__(self, factory_name: str):
        self.factory_name = factory_name


class FieldHasUnwantedTypeException(ModelCheckerException):
    message: str

    def __init__(self, message: str):
        self.message = message


def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]:
    """We patch `constr` and friends with wrappers that enforce strict=True."""

    @functools.wraps(factory)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        # type-ignore: should be redundant once we can use https://github.com/python/mypy/pull/12668
        if "strict" not in kwargs:  # type: ignore[attr-defined]
            raise MissingStrictInConstrainedTypeException(factory.__name__)
        if not kwargs["strict"]:  # type: ignore[index]
            raise MissingStrictInConstrainedTypeException(factory.__name__)
        return factory(*args, **kwargs)

    return wrapper


def field_type_unwanted(type_: Any) -> bool:
    """Very rough attempt to detect if a type is unwanted as a Pydantic annotation.

    At present, we exclude types which will coerce, or any generic type involving types
    which will coerce."""
    logger.debug("Is %s unwanted?", type_)
    if type_ in TYPES_THAT_PYDANTIC_WILL_COERCE_TO:
        logger.debug("yes")
        return True
    logger.debug("Maybe. Subargs are %s", get_args(type_))
    rv = any(field_type_unwanted(t) for t in get_args(type_))
    logger.debug("Conclusion: %s %s unwanted", type_, "is" if rv else "is not")
    return rv
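
# Illustration (not part of the patch): `get_args` peels one layer off a
# generic alias, which is what lets the recursion above walk nested
# annotations:
#
#     get_args(Dict[str, List[int]]) == (str, List[int])
#     get_args(List[int]) == (int,)
#     get_args(int) == ()   # plain classes have no args, so the recursion stops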

class PatchedBaseModel(PydanticBaseModel):
    """A patched version of BaseModel that inspects fields after models are defined.

    We complain loudly if we see an unwanted type.

    Beware: ModelField.type_ is presumably private; this is likely to be very brittle.
    """

    @classmethod
    def __init_subclass__(cls: Type[PydanticBaseModel], **kwargs: object):
        for field in cls.__fields__.values():
            # Note that field.type_ and field.outer_type are computed based on the
            # annotation type, see pydantic.fields.ModelField._type_analysis
            if field_type_unwanted(field.outer_type_):
                # TODO: this only reports the first bad field. Can we find all bad ones
                # and report them all?
                raise FieldHasUnwantedTypeException(
                    f"{cls.__module__}.{cls.__qualname__} has field '{field.name}' "
                    f"with unwanted type `{field.outer_type_}`"
                )


@contextmanager
def monkeypatch_pydantic() -> Generator[None, None, None]:
    """Patch pydantic with our snooping versions of BaseModel and the con* functions.

    If the snooping functions see something they don't like, they'll raise a
    ModelCheckerException instance.
    """
    with contextlib.ExitStack() as patches:
        # Most Synapse code ought to import the patched objects directly from
        # `pydantic`. But we also patch their containing modules `pydantic.main` and
        # `pydantic.types` for completeness.
        patch_basemodel1 = unittest.mock.patch(
            "pydantic.BaseModel", new=PatchedBaseModel
        )
        patch_basemodel2 = unittest.mock.patch(
            "pydantic.main.BaseModel", new=PatchedBaseModel
        )
        patches.enter_context(patch_basemodel1)
        patches.enter_context(patch_basemodel2)
        for factory in CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG:
            wrapper: Callable = make_wrapper(factory)
            patch1 = unittest.mock.patch(f"pydantic.{factory.__name__}", new=wrapper)
            patch2 = unittest.mock.patch(
                f"pydantic.types.{factory.__name__}", new=wrapper
            )
            patches.enter_context(patch1)
            patches.enter_context(patch2)
        yield


def format_model_checker_exception(e: ModelCheckerException) -> str:
    """Work out which line of code caused e. Format the line in a human-friendly way."""
    # TODO. FieldHasUnwantedTypeException gives better error messages. Can we ditch the
    # patches of constr() etc, and instead inspect fields to look for ConstrainedStr
    # with strict=False? There is some difficulty with the inheritance hierarchy
    # because StrictStr < ConstrainedStr < str.
    if isinstance(e, FieldHasUnwantedTypeException):
        return e.message
    elif isinstance(e, MissingStrictInConstrainedTypeException):
        frame_summary = traceback.extract_tb(e.__traceback__)[-2]
        return (
            f"Missing `strict=True` from {e.factory_name}() call \n"
            + traceback.format_list([frame_summary])[0].lstrip()
        )
    else:
        raise ValueError(f"Unknown exception {e}") from e


def lint() -> int:
    """Try to import all of Synapse and see if we spot any Pydantic type coercions.

    Print any problems, then return a status code suitable for sys.exit."""
    failures = do_lint()
    if failures:
        print(f"Found {len(failures)} problem(s)")
    for failure in sorted(failures):
        print(failure)
    return os.EX_DATAERR if failures else os.EX_OK


def do_lint() -> Set[str]:
    """Try to import all of Synapse and see if we spot any Pydantic type coercions."""
    failures = set()

    with monkeypatch_pydantic():
        logger.debug("Importing synapse")
        try:
            # TODO: make "synapse" an argument so we can target this script at
            # a subpackage
            module = importlib.import_module("synapse")
        except ModelCheckerException as e:
            logger.warning("Bad annotation found when importing synapse")
            failures.add(format_model_checker_exception(e))
            return failures

        try:
            logger.debug("Fetching subpackages")
            module_infos = list(
                pkgutil.walk_packages(module.__path__, f"{module.__name__}.")
            )
        except ModelCheckerException as e:
            logger.warning("Bad annotation found when looking for modules to import")
            failures.add(format_model_checker_exception(e))
            return failures

        for module_info in module_infos:
            logger.debug("Importing %s", module_info.name)
            try:
                importlib.import_module(module_info.name)
            except ModelCheckerException as e:
                logger.warning(
                    f"Bad annotation found when importing {module_info.name}"
                )
                failures.add(format_model_checker_exception(e))

    return failures


def run_test_snippet(source: str) -> None:
    """Exec a snippet of source code in an isolated environment."""
    # To emulate `source` being called at the top level of the module,
    # the globals and locals we provide apparently have to be the same mapping.
    #
    # > Remember that at the module level, globals and locals are the same dictionary.
    # > If exec gets two separate objects as globals and locals, the code will be
    # > executed as if it were embedded in a class definition.
    globals_: Dict[str, object]
    locals_: Dict[str, object]
    globals_ = locals_ = {}
    exec(textwrap.dedent(source), globals_, locals_)
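
# Illustration (not part of the patch): why the two mappings must be the same
# object. With distinct dicts, exec() behaves like a class body, so a
# comprehension inside the snippet cannot see names bound earlier in it:
#
#     exec("x = 1\nprint([x for _ in range(2)])", {}, {})  # NameError: x
#     exec("x = 1\nprint([x for _ in range(2)])", {})      # prints [1, 1]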

class TestConstrainedTypesPatch(unittest.TestCase):
    def test_expression_without_strict_raises(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                from pydantic import constr
                constr()
                """
            )

    def test_called_as_module_attribute_raises(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                import pydantic
                pydantic.constr()
                """
            )

    def test_wildcard_import_raises(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                from pydantic import *
                constr()
                """
            )

    def test_alternative_import_raises(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                from pydantic.types import constr
                constr()
                """
            )

    def test_alternative_import_attribute_raises(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                import pydantic.types
                pydantic.types.constr()
                """
            )

    def test_kwarg_but_no_strict_raises(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                from pydantic import constr
                constr(min_length=10)
                """
            )

    def test_kwarg_strict_False_raises(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                from pydantic import constr
                constr(strict=False)
                """
            )

    def test_kwarg_strict_True_doesnt_raise(self) -> None:
        with monkeypatch_pydantic():
            run_test_snippet(
                """
                from pydantic import constr
                constr(strict=True)
                """
            )

    def test_annotation_without_strict_raises(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                from pydantic import constr
                x: constr()
                """
            )

    def test_field_annotation_without_strict_raises(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                from pydantic import BaseModel, conint
                class C:
                    x: conint()
                """
            )


class TestFieldTypeInspection(unittest.TestCase):
    @parameterized.expand(
        [
            ("str",),
            ("bytes"),
            ("int",),
            ("float",),
            ("bool"),
            ("Optional[str]",),
            ("Union[None, str]",),
            ("List[str]",),
            ("List[List[str]]",),
            ("Dict[StrictStr, str]",),
            ("Dict[str, StrictStr]",),
            ("TypedDict('D', x=int)",),
        ]
    )
    def test_field_holding_unwanted_type_raises(self, annotation: str) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                f"""
                from typing import *
                from pydantic import *
                class C(BaseModel):
                    f: {annotation}
                """
            )

    @parameterized.expand(
        [
            ("StrictStr",),
            ("StrictBytes"),
            ("StrictInt",),
            ("StrictFloat",),
            ("StrictBool"),
            ("constr(strict=True, min_length=10)",),
            ("Optional[StrictStr]",),
            ("Union[None, StrictStr]",),
            ("List[StrictStr]",),
            ("List[List[StrictStr]]",),
            ("Dict[StrictStr, StrictStr]",),
            ("TypedDict('D', x=StrictInt)",),
        ]
    )
    def test_field_holding_accepted_type_doesnt_raise(self, annotation: str) -> None:
        with monkeypatch_pydantic():
            run_test_snippet(
                f"""
                from typing import *
                from pydantic import *
                class C(BaseModel):
                    f: {annotation}
                """
            )

    def test_field_holding_str_raises_with_alternative_import(self) -> None:
        with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
            run_test_snippet(
                """
                from pydantic.main import BaseModel
                class C(BaseModel):
                    f: str
                """
            )


parser = argparse.ArgumentParser()
parser.add_argument("mode", choices=["lint", "test"], default="lint", nargs="?")
parser.add_argument("-v", "--verbose", action="store_true")


if __name__ == "__main__":
    args = parser.parse_args(sys.argv[1:])
    logging.basicConfig(
        format="%(asctime)s %(name)s:%(lineno)d %(levelname)s %(message)s",
        level=logging.DEBUG if args.verbose else logging.INFO,
    )
    # suppress logs we don't care about
    logging.getLogger("xmlschema").setLevel(logging.WARNING)
    if args.mode == "lint":
        sys.exit(lint())
    elif args.mode == "test":
        unittest.main(argv=sys.argv[:1])
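
To see the class of bug this linter exists to prevent, here is a minimal sketch of pydantic v1's silent coercion (the model names are invented for illustration):

```python
from pydantic import BaseModel, StrictInt, ValidationError

class LaxPowerLevel(BaseModel):
    level: int  # pydantic v1 coerces "50" -> 50 without complaint

class StrictPowerLevel(BaseModel):
    level: StrictInt  # a stringy "50" is rejected

print(LaxPowerLevel(level="50").level)  # prints 50: the silent coercion banned above

try:
    StrictPowerLevel(level="50")
except ValidationError as e:
    print(e)  # "value is not a valid integer"
```
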
@@ -101,6 +101,7 @@ if [ -z "$skip_docker_build" ]; then
  echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
  docker build -t matrixdotorg/synapse \
    --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
+    --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
    -f "docker/Dockerfile" .
  echo_if_github "::endgroup::"

@@ -106,4 +106,5 @@ isort "${files[@]}"
python3 -m black "${files[@]}"
./scripts-dev/config-lint.sh
flake8 "${files[@]}"
+./scripts-dev/check_pydantic_models.py lint
mypy

@@ -18,10 +18,12 @@
|
||||
"""
|
||||
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import urllib.request
|
||||
from os import path
|
||||
from tempfile import TemporaryDirectory
|
||||
@@ -32,6 +34,7 @@ import click
|
||||
import commonmark
|
||||
import git
|
||||
from click.exceptions import ClickException
|
||||
from git import GitCommandError, Repo
|
||||
from github import Github
|
||||
from packaging import version
|
||||
|
||||
@@ -55,9 +58,12 @@ def run_until_successful(
 def cli() -> None:
     """An interactive script to walk through the parts of creating a release.

-    Requires the dev dependencies be installed, which can be done via:
+    Requirements:
+      - The dev dependencies be installed, which can be done via:

-        pip install -e .[dev]
+            pip install -e .[dev]
+
+      - A checkout of the sytest repository at ../sytest

     Then to use:
@@ -67,16 +73,21 @@ def cli() -> None:

         ./scripts-dev/release.py tag

-        # ... wait for assets to build ...
+        # wait for assets to build, either manually or with:
+        ./scripts-dev/release.py wait-for-actions

         ./scripts-dev/release.py publish

         ./scripts-dev/release.py upload

+        ./scripts-dev/release.py merge-back
+
         # Optional: generate some nice links for the announcement
         ./scripts-dev/release.py announce

+    Alternatively, `./scripts-dev/release.py full` will do all the above
+    as well as guiding you through the manual steps.
+
     If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
     `tag`/`publish` command, then a new draft release will be created/published.
     """

@@ -84,15 +95,21 @@ def cli() -> None:

 @cli.command()
 def prepare() -> None:
+    _prepare()
+
+
+def _prepare() -> None:
     """Do the initial stages of creating a release, including creating release
     branch, updating changelog and pushing to GitHub.
     """

     # Make sure we're in a git repo.
-    repo = get_repo_and_check_clean_checkout()
+    synapse_repo = get_repo_and_check_clean_checkout()
+    sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")

-    click.secho("Updating git repo...")
-    repo.remote().fetch()
+    click.secho("Updating Synapse and Sytest git repos...")
+    synapse_repo.remote().fetch()
+    sytest_repo.remote().fetch()

     # Get the current version and AST from root Synapse module.
     current_version = get_package_version()

@@ -166,12 +183,12 @@ def prepare() -> None:
     assert not parsed_new_version.is_postrelease

     release_branch_name = get_release_branch_name(parsed_new_version)
-    release_branch = find_ref(repo, release_branch_name)
+    release_branch = find_ref(synapse_repo, release_branch_name)
     if release_branch:
         if release_branch.is_remote():
             # If the release branch only exists on the remote we check it out
             # locally.
-            repo.git.checkout(release_branch_name)
+            synapse_repo.git.checkout(release_branch_name)
     else:
         # If a branch doesn't exist we create one. We ask which one branch it
         # should be based off, defaulting to sensible values depending on the

@@ -187,25 +204,34 @@ def prepare() -> None:
             "Which branch should the release be based on?", default=default
         )

-        base_branch = find_ref(repo, branch_name)
-        if not base_branch:
-            print(f"Could not find base branch {branch_name}!")
-            click.get_current_context().abort()
+        for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
+            base_branch = find_ref(repo, branch_name)
+            if not base_branch:
+                print(f"Could not find base branch {branch_name} for {repo_name}!")
+                click.get_current_context().abort()

-        # Check out the base branch and ensure it's up to date
-        repo.head.set_reference(base_branch, "check out the base branch")
-        repo.head.reset(index=True, working_tree=True)
-        if not base_branch.is_remote():
-            update_branch(repo)
+            # Check out the base branch and ensure it's up to date
+            repo.head.set_reference(
+                base_branch, f"check out the base branch for {repo_name}"
+            )
+            repo.head.reset(index=True, working_tree=True)
+            if not base_branch.is_remote():
+                update_branch(repo)

-        # Create the new release branch
-        # Type ignore will no longer be needed after GitPython 3.1.28.
-        # See https://github.com/gitpython-developers/GitPython/pull/1419
-        repo.create_head(release_branch_name, commit=base_branch)  # type: ignore[arg-type]
+            # Create the new release branch
+            # Type ignore will no longer be needed after GitPython 3.1.28.
+            # See https://github.com/gitpython-developers/GitPython/pull/1419
+            repo.create_head(release_branch_name, commit=base_branch)  # type: ignore[arg-type]
+
+        # Special-case SyTest: we don't actually prepare any files so we may
+        # as well push it now (and only when we create a release branch;
+        # not on subsequent RCs or full releases).
+        if click.confirm("Push new SyTest branch?", default=True):
+            sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)

     # Switch to the release branch and ensure it's up to date.
-    repo.git.checkout(release_branch_name)
-    update_branch(repo)
+    synapse_repo.git.checkout(release_branch_name)
+    update_branch(synapse_repo)

     # Update the version specified in pyproject.toml.
     subprocess.check_output(["poetry", "version", new_version])

@@ -230,15 +256,15 @@ def prepare() -> None:
     run_until_successful('dch -M -r -D stable ""', shell=True)

     # Show the user the changes and ask if they want to edit the change log.
-    repo.git.add("-u")
+    synapse_repo.git.add("-u")
     subprocess.run("git diff --cached", shell=True)

     if click.confirm("Edit changelog?", default=False):
         click.edit(filename="CHANGES.md")

     # Commit the changes.
-    repo.git.add("-u")
-    repo.git.commit("-m", new_version)
+    synapse_repo.git.add("-u")
+    synapse_repo.git.commit("-m", new_version)

     # We give the option to bail here in case the user wants to make sure things
     # are OK before pushing.

@@ -246,23 +272,31 @@ def prepare() -> None:
        print("")
        print("Run when ready to push:")
        print("")
-       print(f"\tgit push -u {repo.remote().name} {repo.active_branch.name}")
+       print(
+           f"\tgit push -u {synapse_repo.remote().name} {synapse_repo.active_branch.name}"
+       )
        print("")
        sys.exit(0)

     # Otherwise, push and open the changelog in the browser.
-    repo.git.push("-u", repo.remote().name, repo.active_branch.name)
+    synapse_repo.git.push(
+        "-u", synapse_repo.remote().name, synapse_repo.active_branch.name
+    )

     print("Opening the changelog in your browser...")
     print("Please ask others to give it a check.")
     click.launch(
-        f"https://github.com/matrix-org/synapse/blob/{repo.active_branch.name}/CHANGES.md"
+        f"https://github.com/matrix-org/synapse/blob/{synapse_repo.active_branch.name}/CHANGES.md"
     )


 @cli.command()
 @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"])
 def tag(gh_token: Optional[str]) -> None:
+    _tag(gh_token)
+
+
+def _tag(gh_token: Optional[str]) -> None:
     """Tags the release and generates a draft GitHub release"""

     # Make sure we're in a git repo.

@@ -353,6 +387,10 @@ def tag(gh_token: Optional[str]) -> None:
 @cli.command()
 @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
 def publish(gh_token: str) -> None:
+    _publish(gh_token)
+
+
+def _publish(gh_token: str) -> None:
     """Publish release on GitHub."""

     # Make sure we're in a git repo.

@@ -390,6 +428,10 @@ def publish(gh_token: str) -> None:
 @cli.command()
 def upload() -> None:
+    _upload()
+
+
+def _upload() -> None:
     """Upload release to pypi."""

     current_version = get_package_version()

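The `tag`/`publish`/`upload` commands above all follow the same shape: the click-decorated command is a thin shim over a plain `_tag`/`_publish`/`_upload` function. A decorated command becomes a `click.Command` object rather than an ordinary callable, so keeping the logic in an undecorated function lets other code (such as the `full` command later in this diff) reuse it directly. A toy sketch of the pattern:

    import click

    @click.group()
    def cli() -> None:
        """Toy multi-step tool."""

    @cli.command()
    def step() -> None:
        _step()

    def _step() -> None:
        click.echo("doing the step")

    @cli.command()
    def everything() -> None:
        # Reuse the plain function; calling the decorated `step` here would
        # require going through click's invocation machinery instead.
        _step()

    if __name__ == "__main__":
        cli()
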
@@ -423,8 +465,152 @@ def upload() -> None:
     )


+def _merge_into(repo: Repo, source: str, target: str) -> None:
+    """
+    Merges branch `source` into branch `target`.
+    Pulls both before merging and pushes the result.
+    """
+
+    # Update our branches and switch to the target branch
+    for branch in [source, target]:
+        click.echo(f"Switching to {branch} and pulling...")
+        repo.heads[branch].checkout()
+        # Pull so we're up to date
+        repo.remote().pull()
+
+    assert repo.active_branch.name == target
+
+    try:
+        # TODO This seemed easier than using GitPython directly
+        click.echo(f"Merging {source}...")
+        repo.git.merge(source)
+    except GitCommandError as exc:
+        # If a merge conflict occurs, give some context and try to
+        # make it easy to abort if necessary.
+        click.echo(exc)
+        if not click.confirm(
+            f"Likely merge conflict whilst merging ({source} → {target}). "
+            f"Have you resolved it?"
+        ):
+            repo.git.merge("--abort")
+            return
+
+    # Push result.
+    click.echo("Pushing...")
+    repo.remote().push()
+
+
+@cli.command()
+@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False)
+def wait_for_actions(gh_token: Optional[str]) -> None:
+    _wait_for_actions(gh_token)
+
+
+def _wait_for_actions(gh_token: Optional[str]) -> None:
+    # Find out the version and tag name.
+    current_version = get_package_version()
+    tag_name = f"v{current_version}"
+
+    # Authentication is optional on this endpoint,
+    # but use a token if we have one to reduce the chance of being rate-limited.
+    url = f"https://api.github.com/repos/matrix-org/synapse/actions/runs?branch={tag_name}"
+    headers = {"Accept": "application/vnd.github+json"}
+    if gh_token is not None:
+        headers["authorization"] = f"token {gh_token}"
+    req = urllib.request.Request(url, headers=headers)
+
+    time.sleep(10 * 60)
+    while True:
+        time.sleep(5 * 60)
+        response = urllib.request.urlopen(req)
+        resp = json.loads(response.read())
+
+        if len(resp["workflow_runs"]) == 0:
+            continue
+
+        if all(
+            workflow["status"] != "in_progress" for workflow in resp["workflow_runs"]
+        ):
+            success = (
+                workflow["status"] == "completed" for workflow in resp["workflow_runs"]
+            )
+            if success:
+                _notify("Workflows successful. You can now continue the release.")
+            else:
+                _notify("Workflows failed.")
+                click.confirm("Continue anyway?", abort=True)
+
+            break
+
+
+def _notify(message: str) -> None:
+    # Send a bell character. Most terminals will play a sound or show a notification
+    # for this.
+    click.echo(f"\a{message}")
+
+    # Try and run notify-send, but don't raise an Exception if this fails
+    # (This is best-effort)
+    # TODO Support other platforms?
+    subprocess.run(
+        [
+            "notify-send",
+            "--app-name",
+            "Synapse Release Script",
+            "--expire-time",
+            "3600000",
+            message,
+        ]
+    )
+
+
+@cli.command()
+def merge_back() -> None:
+    _merge_back()
+
+
+def _merge_back() -> None:
+    """Merge the release branch back into the appropriate branches.
+    All branches will be automatically pulled from the remote and the results
+    will be pushed to the remote."""
+
+    synapse_repo = get_repo_and_check_clean_checkout()
+    branch_name = synapse_repo.active_branch.name
+
+    if not branch_name.startswith("release-v"):
+        raise RuntimeError("Not on a release branch. This does not seem sensible.")
+
+    # Pull so we're up to date
+    synapse_repo.remote().pull()
+
+    current_version = get_package_version()
+
+    if current_version.is_prerelease:
+        # Release candidate
+        if click.confirm(f"Merge {branch_name} → develop?", default=True):
+            _merge_into(synapse_repo, branch_name, "develop")
+    else:
+        # Full release
+        sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
+
+        if click.confirm(f"Merge {branch_name} → master?", default=True):
+            _merge_into(synapse_repo, branch_name, "master")
+
+        if click.confirm("Merge master → develop?", default=True):
+            _merge_into(synapse_repo, "master", "develop")
+
+        if click.confirm(f"On SyTest, merge {branch_name} → master?", default=True):
+            _merge_into(sytest_repo, branch_name, "master")
+
+        if click.confirm("On SyTest, merge master → develop?", default=True):
+            _merge_into(sytest_repo, "master", "develop")
+
+
 @cli.command()
 def announce() -> None:
+    _announce()
+
+
+def _announce() -> None:
     """Generate markdown to announce the release."""

     current_version = get_package_version()

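One caveat in `_wait_for_actions` as committed: `success` is assigned a bare generator expression, and a generator object is always truthy, so the success branch runs even when some workflow run failed. Presumably `all(...)` was intended. A corrected sketch of that check, with a hypothetical sample of the API response shape:

    # Hypothetical sample of the GitHub Actions API response shape used above.
    resp = {"workflow_runs": [{"status": "completed"}, {"status": "failure"}]}

    # As written above, `success = (w["status"] == "completed" for w in ...)`
    # binds a generator object, which is truthy regardless of its contents.
    success = all(
        workflow["status"] == "completed" for workflow in resp["workflow_runs"]
    )
    print(success)  # False: one run failed, so we should not report success
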
@@ -454,10 +640,56 @@ Announce the release in
 - #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic
 - #synapse:matrix.org (Synapse Admins), bumping the version in the topic
 - #synapse-dev:matrix.org
-- #synapse-package-maintainers:matrix.org"""
+- #synapse-package-maintainers:matrix.org
+
+Ask the designated people to do the blog and tweets."""
     )


+@cli.command()
+@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
+def full(gh_token: str) -> None:
+    click.echo("1. If this is a security release, read the security wiki page.")
+    click.echo("2. Check for any release blockers before proceeding.")
+    click.echo("    https://github.com/matrix-org/synapse/labels/X-Release-Blocker")
+
+    click.confirm("Ready?", abort=True)
+
+    click.echo("\n*** prepare ***")
+    _prepare()
+
+    click.echo("Deploy to matrix.org and ensure that it hasn't fallen over.")
+    click.echo("Remember to silence the alerts to prevent alert spam.")
+    click.confirm("Deployed?", abort=True)
+
+    click.echo("\n*** tag ***")
+    _tag(gh_token)
+
+    click.echo("\n*** wait for actions ***")
+    _wait_for_actions(gh_token)
+
+    click.echo("\n*** publish ***")
+    _publish(gh_token)
+
+    click.echo("\n*** upload ***")
+    _upload()
+
+    click.echo("\n*** merge back ***")
+    _merge_back()
+
+    click.echo("\nUpdate the Debian repository")
+    click.confirm("Started updating Debian repository?", abort=True)
+
+    click.echo("\nWait for all release methods to be ready.")
+    # Docker should be ready because it was done by the workflows earlier
+    # PyPI should be ready because we just ran upload().
+    # TODO Automatically poll until the Debs have made it to packages.matrix.org
+    click.confirm("Debs ready?", abort=True)
+
+    click.echo("\n*** announce ***")
+    _announce()
+
+
 def get_package_version() -> version.Version:
     version_string = subprocess.check_output(["poetry", "version", "--short"]).decode(
         "utf-8"

@@ -469,14 +701,18 @@ def get_release_branch_name(version_number: version.Version) -> str:
     return f"release-v{version_number.major}.{version_number.minor}"


-def get_repo_and_check_clean_checkout() -> git.Repo:
+def get_repo_and_check_clean_checkout(
+    path: str = ".", name: str = "synapse"
+) -> git.Repo:
     """Get the project repo and check it's not got any uncommitted changes."""
     try:
-        repo = git.Repo()
+        repo = git.Repo(path=path)
     except git.InvalidGitRepositoryError:
-        raise click.ClickException("Not in Synapse repo.")
+        raise click.ClickException(
+            f"{path} is not a git repository (expecting a {name} repository)."
+        )
     if repo.is_dirty():
-        raise click.ClickException("Uncommitted changes exist.")
+        raise click.ClickException(f"Uncommitted changes exist in {path}.")
     return repo

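For reference, the two GitPython calls doing the work here are `git.Repo(path=...)`, which raises `InvalidGitRepositoryError` when the path is not a repository, and `Repo.is_dirty()`, which reports staged or unstaged changes. A minimal standalone sketch of the same guard:

    import git

    def require_clean_checkout(path: str = ".") -> git.Repo:
        # Raises git.InvalidGitRepositoryError if `path` is not a git repository.
        repo = git.Repo(path=path)
        # is_dirty() covers staged and unstaged changes (untracked files are
        # ignored unless untracked_files=True is passed).
        if repo.is_dirty():
            raise RuntimeError(f"Uncommitted changes exist in {path}.")
        return repo
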
@@ -1,6 +1,6 @@
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2018 New Vector
-# Copyright 2021 The Matrix.org Foundation C.I.C.
+# Copyright 2021-22 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -20,11 +20,22 @@ import hashlib
 import hmac
 import logging
 import sys
-from typing import Callable, Optional
+from typing import Any, Callable, Dict, Optional

 import requests
 import yaml

+_CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\
+Conflicting options 'registration_shared_secret' and 'registration_shared_secret_path'
+are both defined in config file.
+"""
+
+_NO_SHARED_SECRET_OPTS_ERROR = """\
+No 'registration_shared_secret' or 'registration_shared_secret_path' defined in config.
+"""
+
+_DEFAULT_SERVER_URL = "http://localhost:8008"
+

 def request_registration(
     user: str,

@@ -203,31 +214,104 @@ def main() -> None:

     parser.add_argument(
         "server_url",
-        default="https://localhost:8448",
         nargs="?",
-        help="URL to use to talk to the homeserver. Defaults to "
-        " 'https://localhost:8448'.",
+        help="URL to use to talk to the homeserver. By default, tries to find a "
+        "suitable URL from the configuration file. Otherwise, defaults to "
+        f"'{_DEFAULT_SERVER_URL}'.",
     )

     args = parser.parse_args()

     if "config" in args and args.config:
         config = yaml.safe_load(args.config)
-        secret = config.get("registration_shared_secret", None)
-        if not secret:
-            print("No 'registration_shared_secret' defined in config.")
-            sys.exit(1)
-    else:
+
+    if args.shared_secret:
         secret = args.shared_secret
+    else:
+        # argparse should check that we have either config or shared secret
+        assert config
+
+        secret = config.get("registration_shared_secret")
+        secret_file = config.get("registration_shared_secret_path")
+        if secret_file:
+            if secret:
+                print(_CONFLICTING_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
+                sys.exit(1)
+            secret = _read_file(secret_file, "registration_shared_secret_path").strip()
+        if not secret:
+            print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
+            sys.exit(1)
+
+    if args.server_url:
+        server_url = args.server_url
+    elif config:
+        server_url = _find_client_listener(config)
+        if not server_url:
+            server_url = _DEFAULT_SERVER_URL
+            print(
+                "Unable to find a suitable HTTP listener in the configuration file. "
+                f"Trying {server_url} as a last resort.",
+                file=sys.stderr,
+            )
+    else:
+        server_url = _DEFAULT_SERVER_URL
+        print(
+            f"No server url or configuration file given. Defaulting to {server_url}.",
+            file=sys.stderr,
+        )

     admin = None
     if args.admin or args.no_admin:
         admin = args.admin

     register_new_user(
-        args.user, args.password, args.server_url, secret, admin, args.user_type
+        args.user, args.password, server_url, secret, admin, args.user_type
     )


+def _read_file(file_path: Any, config_path: str) -> str:
+    """Check the given file exists, and read it into a string
+
+    If it does not, exit with an error indicating the problem
+
+    Args:
+        file_path: the file to be read
+        config_path: where in the configuration file_path came from, so that a useful
+           error can be emitted if it does not exist.
+    Returns:
+        content of the file.
+    """
+    if not isinstance(file_path, str):
+        print(f"{config_path} setting is not a string", file=sys.stderr)
+        sys.exit(1)
+
+    try:
+        with open(file_path) as file_stream:
+            return file_stream.read()
+    except OSError as e:
+        print(f"Error accessing file {file_path}: {e}", file=sys.stderr)
+        sys.exit(1)
+
+
+def _find_client_listener(config: Dict[str, Any]) -> Optional[str]:
+    # try to find a listener in the config. Returns a host:port pair
+    for listener in config.get("listeners", []):
+        if listener.get("type") != "http" or listener.get("tls", False):
+            continue
+
+        if not any(
+            name == "client"
+            for resource in listener.get("resources", [])
+            for name in resource.get("names", [])
+        ):
+            continue
+
+        # TODO: consider bind_addresses
+        return f"http://localhost:{listener['port']}"
+
+    # no suitable listeners?
+    return None
+
+
 if __name__ == "__main__":
     main()

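To make the listener search concrete, here is a hedged example of the config shape `_find_client_listener` expects: it skips TLS and non-HTTP listeners and returns the first plain-HTTP listener exposing the `client` resource. The port numbers below are illustrative only:

    config = {
        "listeners": [
            # Skipped: TLS is enabled on this listener.
            {
                "port": 8448,
                "type": "http",
                "tls": True,
                "resources": [{"names": ["client", "federation"]}],
            },
            # Matched: plain HTTP and serves the "client" resource.
            {
                "port": 8008,
                "type": "http",
                "tls": False,
                "resources": [{"names": ["client"]}],
            },
        ]
    }
    # _find_client_listener(config) would return "http://localhost:8008".
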
@@ -26,13 +26,18 @@ from synapse.api.errors import (
     Codes,
     InvalidClientTokenError,
     MissingClientTokenError,
+    UnstableSpecAuthError,
 )
 from synapse.appservice import ApplicationService
 from synapse.http import get_request_user_agent
 from synapse.http.site import SynapseRequest
-from synapse.logging.opentracing import active_span, force_tracing, start_active_span
-from synapse.storage.databases.main.registration import TokenLookupResult
-from synapse.types import Requester, UserID, create_requester
+from synapse.logging.opentracing import (
+    active_span,
+    force_tracing,
+    start_active_span,
+    trace,
+)
+from synapse.types import Requester, create_requester

 if TYPE_CHECKING:
     from synapse.server import HomeServer

@@ -64,14 +69,14 @@ class Auth:
     async def check_user_in_room(
         self,
         room_id: str,
-        user_id: str,
+        requester: Requester,
         allow_departed_users: bool = False,
     ) -> Tuple[str, Optional[str]]:
         """Check if the user is in the room, or was at some point.
         Args:
             room_id: The room to check.

-            user_id: The user to check.
+            requester: The user making the request, according to the access token.

             current_state: Optional map of the current state of the room.
                 If provided then that map is used to check whether they are a

@@ -88,6 +93,7 @@ class Auth:
             membership event ID of the user.
         """

+        user_id = requester.user.to_string()
         (
             membership,
             member_event_id,

@@ -106,8 +112,11 @@ class Auth:
                 forgot = await self.store.did_forget(user_id, room_id)
                 if not forgot:
                     return membership, member_event_id

-        raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
+        raise UnstableSpecAuthError(
+            403,
+            "User %s not in room %s" % (user_id, room_id),
+            errcode=Codes.NOT_JOINED,
+        )

     async def get_user_by_req(
         self,

@@ -173,96 +182,69 @@ class Auth:

             access_token = self.get_access_token_from_request(request)

-            (
-                user_id,
-                device_id,
-                app_service,
-            ) = await self._get_appservice_user_id_and_device_id(request)
-            if user_id and app_service:
-                if ip_addr and self._track_appservice_user_ips:
-                    await self.store.insert_client_ip(
-                        user_id=user_id,
-                        access_token=access_token,
-                        ip=ip_addr,
-                        user_agent=user_agent,
-                        device_id="dummy-device"
-                        if device_id is None
-                        else device_id,  # stubbed
-                    )
-
-                requester = create_requester(
-                    user_id, app_service=app_service, device_id=device_id
+            # First check if it could be a request from an appservice
+            requester = await self._get_appservice_user(request)
+            if not requester:
+                # If not, it should be from a regular user
+                requester = await self.get_user_by_access_token(
+                    access_token, allow_expired=allow_expired
                 )

-                request.requester = user_id
-                return requester
+                # Deny the request if the user account has expired.
+                # This check is only done for regular users, not appservice ones.
+                if not allow_expired:
+                    if await self._account_validity_handler.is_user_expired(
+                        requester.user.to_string()
+                    ):
+                        # Raise the error if either an account validity module has determined
+                        # the account has expired, or the legacy account validity
+                        # implementation is enabled and determined the account has expired
+                        raise AuthError(
+                            403,
+                            "User account has expired",
+                            errcode=Codes.EXPIRED_ACCOUNT,
+                        )

-            user_info = await self.get_user_by_access_token(
-                access_token, allow_expired=allow_expired
-            )
-            token_id = user_info.token_id
-            is_guest = user_info.is_guest
-            shadow_banned = user_info.shadow_banned
-
-            # Deny the request if the user account has expired.
-            if not allow_expired:
-                if await self._account_validity_handler.is_user_expired(
-                    user_info.user_id
-                ):
-                    # Raise the error if either an account validity module has determined
-                    # the account has expired, or the legacy account validity
-                    # implementation is enabled and determined the account has expired
-                    raise AuthError(
-                        403,
-                        "User account has expired",
-                        errcode=Codes.EXPIRED_ACCOUNT,
-                    )
-
-            device_id = user_info.device_id
-
-            if access_token and ip_addr:
+            if ip_addr and (
+                not requester.app_service or self._track_appservice_user_ips
+            ):
+                # XXX(quenting): I'm 95% confident that we could skip setting the
+                # device_id to "dummy-device" for appservices, and that the only impact
+                # would be some rows which whould not deduplicate in the 'user_ips'
+                # table during the transition
+                recorded_device_id = (
+                    "dummy-device"
+                    if requester.device_id is None and requester.app_service is not None
+                    else requester.device_id
+                )
                 await self.store.insert_client_ip(
-                    user_id=user_info.token_owner,
+                    user_id=requester.authenticated_entity,
                     access_token=access_token,
                     ip=ip_addr,
                     user_agent=user_agent,
-                    device_id=device_id,
+                    device_id=recorded_device_id,
                 )

                 # Track also the puppeted user client IP if enabled and the user is puppeting
                 if (
-                    user_info.user_id != user_info.token_owner
+                    requester.user.to_string() != requester.authenticated_entity
                     and self._track_puppeted_user_ips
                 ):
                     await self.store.insert_client_ip(
-                        user_id=user_info.user_id,
+                        user_id=requester.user.to_string(),
                         access_token=access_token,
                         ip=ip_addr,
                         user_agent=user_agent,
-                        device_id=device_id,
+                        device_id=requester.device_id,
                     )

-            if is_guest and not allow_guest:
+            if requester.is_guest and not allow_guest:
                 raise AuthError(
                     403,
                     "Guest access not allowed",
                     errcode=Codes.GUEST_ACCESS_FORBIDDEN,
                 )

-            # Mark the token as used. This is used to invalidate old refresh
-            # tokens after some time.
-            if not user_info.token_used and token_id is not None:
-                await self.store.mark_access_token_as_used(token_id)
-
-            requester = create_requester(
-                user_info.user_id,
-                token_id,
-                is_guest,
-                shadow_banned,
-                device_id,
-                app_service=app_service,
-                authenticated_entity=user_info.token_owner,
-            )
-
             request.requester = requester
             return requester
         except KeyError:

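The shape of this refactor is worth spelling out: the appservice lookup used to return a loosely-coupled 3-tuple whose invariants lived only in a docstring, and now returns a single requester object that makes them structural. A generic sketch of the pattern, using a plain dataclass rather than Synapse's actual `Requester` type:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass(frozen=True)
    class SimpleRequester:
        user_id: str
        device_id: Optional[str] = None
        app_service: Optional[str] = None  # stand-in for an ApplicationService

    def lookup(ok: bool) -> Optional[SimpleRequester]:
        # Either every related field comes back together, or None does:
        # callers can no longer observe a user ID without its app service.
        if not ok:
            return None
        return SimpleRequester("@bot:example.org", "DEVICE1", "bridge")
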
@@ -299,9 +281,7 @@ class Auth:
                 403, "Application service has not registered this user (%s)" % user_id
             )

-    async def _get_appservice_user_id_and_device_id(
-        self, request: Request
-    ) -> Tuple[Optional[str], Optional[str], Optional[ApplicationService]]:
+    async def _get_appservice_user(self, request: Request) -> Optional[Requester]:
         """
         Given a request, reads the request parameters to determine:
         - whether it's an application service that's making this request

@@ -316,15 +296,13 @@ class Auth:
           Must use `org.matrix.msc3202.device_id` in place of `device_id` for now.

         Returns:
-            3-tuple of
-              (user ID?, device ID?, application service?)
+            the application service `Requester` of that request

         Postconditions:
-        - If an application service is returned, so is a user ID
-        - A user ID is never returned without an application service
-        - A device ID is never returned without a user ID or an application service
-        - The returned application service, if present, is permitted to control the
-          returned user ID.
+        - The `app_service` field in the returned `Requester` is set
+        - The `user_id` field in the returned `Requester` is either the application
+          service sender or the controlled user set by the `user_id` URI parameter
+        - The returned application service is permitted to control the returned user ID.
         - The returned device ID, if present, has been checked to be a valid device ID
           for the returned user ID.
         """

@@ -334,12 +312,12 @@ class Auth:
             self.get_access_token_from_request(request)
         )
         if app_service is None:
-            return None, None, None
+            return None

         if app_service.ip_range_whitelist:
             ip_address = IPAddress(request.getClientAddress().host)
             if ip_address not in app_service.ip_range_whitelist:
-                return None, None, None
+                return None

         # This will always be set by the time Twisted calls us.
         assert request.args is not None

@@ -373,13 +351,15 @@ class Auth:
                 Codes.EXCLUSIVE,
             )

-        return effective_user_id, effective_device_id, app_service
+        return create_requester(
+            effective_user_id, app_service=app_service, device_id=effective_device_id
+        )

     async def get_user_by_access_token(
         self,
         token: str,
         allow_expired: bool = False,
-    ) -> TokenLookupResult:
+    ) -> Requester:
         """Validate access token and get user_id from it

         Args:

@@ -396,9 +376,9 @@ class Auth:

         # First look in the database to see if the access token is present
         # as an opaque token.
-        r = await self.store.get_user_by_access_token(token)
-        if r:
-            valid_until_ms = r.valid_until_ms
+        user_info = await self.store.get_user_by_access_token(token)
+        if user_info:
+            valid_until_ms = user_info.valid_until_ms
             if (
                 not allow_expired
                 and valid_until_ms is not None

@@ -410,7 +390,20 @@ class Auth:
                     msg="Access token has expired", soft_logout=True
                 )

-            return r
+            # Mark the token as used. This is used to invalidate old refresh
+            # tokens after some time.
+            await self.store.mark_access_token_as_used(user_info.token_id)
+
+            requester = create_requester(
+                user_id=user_info.user_id,
+                access_token_id=user_info.token_id,
+                is_guest=user_info.is_guest,
+                shadow_banned=user_info.shadow_banned,
+                device_id=user_info.device_id,
+                authenticated_entity=user_info.token_owner,
+            )
+
+            return requester

         # If the token isn't found in the database, then it could still be a
         # macaroon for a guest, so we check that here.

@@ -436,11 +429,12 @@ class Auth:
                     "Guest access token used for regular user"
                 )

-            return TokenLookupResult(
+            return create_requester(
                 user_id=user_id,
                 is_guest=True,
                 # all guests get the same device id
                 device_id=GUEST_DEVICE_ID,
+                authenticated_entity=user_id,
             )
         except (
             pymacaroons.exceptions.MacaroonException,

@@ -463,32 +457,33 @@ class Auth:
         request.requester = create_requester(service.sender, app_service=service)
         return service

-    async def is_server_admin(self, user: UserID) -> bool:
+    async def is_server_admin(self, requester: Requester) -> bool:
         """Check if the given user is a local server admin.

         Args:
-            user: user to check
+            requester: The user making the request, according to the access token.

         Returns:
             True if the user is an admin
         """
-        return await self.store.is_server_admin(user)
+        return await self.store.is_server_admin(requester.user)

-    async def check_can_change_room_list(self, room_id: str, user: UserID) -> bool:
+    async def check_can_change_room_list(
+        self, room_id: str, requester: Requester
+    ) -> bool:
         """Determine whether the user is allowed to edit the room's entry in the
         published room list.

         Args:
-            room_id
-            user
+            room_id: The room to check.
+            requester: The user making the request, according to the access token.
         """

-        is_admin = await self.is_server_admin(user)
+        is_admin = await self.is_server_admin(requester)
         if is_admin:
             return True

-        user_id = user.to_string()
-        await self.check_user_in_room(room_id, user_id)
+        await self.check_user_in_room(room_id, requester)

         # We currently require the user is a "moderator" in the room. We do this
         # by checking if they would (theoretically) be able to change the

@@ -507,7 +502,9 @@ class Auth:
         send_level = event_auth.get_send_level(
             EventTypes.CanonicalAlias, "", power_level_event
         )
-        user_level = event_auth.get_user_power_level(user_id, auth_events)
+        user_level = event_auth.get_user_power_level(
+            requester.user.to_string(), auth_events
+        )

         return user_level >= send_level

@@ -563,17 +560,18 @@ class Auth:

         return query_params[0].decode("ascii")

+    @trace
     async def check_user_in_room_or_world_readable(
-        self, room_id: str, user_id: str, allow_departed_users: bool = False
+        self, room_id: str, requester: Requester, allow_departed_users: bool = False
     ) -> Tuple[str, Optional[str]]:
         """Checks that the user is or was in the room or the room is world
         readable. If it isn't then an exception is raised.

         Args:
-            room_id: room to check
-            user_id: user to check
-            allow_departed_users: if True, accept users that were previously
-                members but have now departed
+            room_id: The room to check.
+            requester: The user making the request, according to the access token.
+            allow_departed_users: If True, accept users that were previously
+                members but have now departed.

         Returns:
             Resolves to the current membership of the user in the room and the

@@ -588,7 +586,7 @@ class Auth:
             # * The user is a guest user, and has joined the room
             # else it will throw.
             return await self.check_user_in_room(
-                room_id, user_id, allow_departed_users=allow_departed_users
+                room_id, requester, allow_departed_users=allow_departed_users
             )
         except AuthError:
             visibility = await self._storage_controllers.state.get_current_state_event(

@@ -600,8 +598,9 @@ class Auth:
                 == HistoryVisibility.WORLD_READABLE
             ):
                 return Membership.JOIN, None
-            raise AuthError(
+            raise UnstableSpecAuthError(
                 403,
                 "User %s not in room %s, and room previews are disabled"
-                % (user_id, room_id),
+                % (requester.user, room_id),
+                errcode=Codes.NOT_JOINED,
             )

@@ -216,11 +216,11 @@ class EventContentFields:
     MSC2716_HISTORICAL: Final = "org.matrix.msc2716.historical"
     # For "insertion" events to indicate what the next batch ID should be in
     # order to connect to it
-    MSC2716_NEXT_BATCH_ID: Final = "org.matrix.msc2716.next_batch_id"
+    MSC2716_NEXT_BATCH_ID: Final = "next_batch_id"
     # Used on "batch" events to indicate which insertion event it connects to
-    MSC2716_BATCH_ID: Final = "org.matrix.msc2716.batch_id"
+    MSC2716_BATCH_ID: Final = "batch_id"
     # For "marker" events
-    MSC2716_MARKER_INSERTION: Final = "org.matrix.msc2716.marker.insertion"
+    MSC2716_INSERTION_EVENT_REFERENCE: Final = "insertion_event_reference"

     # The authorising user for joining a restricted room.
     AUTHORISING_USER: Final = "join_authorised_via_users_server"
@@ -257,7 +257,7 @@ class GuestAccess:

 class ReceiptTypes:
     READ: Final = "m.read"
-    READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
+    READ_PRIVATE: Final = "m.read.private"
     FULLY_READ: Final = "m.fully_read"


@@ -268,4 +268,4 @@ class PublicRoomsFilterFields:
     """

     GENERIC_SEARCH_TERM: Final = "generic_search_term"
-    ROOM_TYPES: Final = "org.matrix.msc3827.room_types"
+    ROOM_TYPES: Final = "room_types"

@@ -26,6 +26,7 @@ from twisted.web import http
 from synapse.util import json_decoder

 if typing.TYPE_CHECKING:
+    from synapse.config.homeserver import HomeServerConfig
     from synapse.types import JsonDict

 logger = logging.getLogger(__name__)
@@ -80,6 +81,12 @@ class Codes(str, Enum):
     INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
     USER_DEACTIVATED = "M_USER_DEACTIVATED"

+    # Part of MSC3848
+    # https://github.com/matrix-org/matrix-spec-proposals/pull/3848
+    ALREADY_JOINED = "ORG.MATRIX.MSC3848.ALREADY_JOINED"
+    NOT_JOINED = "ORG.MATRIX.MSC3848.NOT_JOINED"
+    INSUFFICIENT_POWER = "ORG.MATRIX.MSC3848.INSUFFICIENT_POWER"
+
     # The account has been suspended on the server.
     # By opposition to `USER_DEACTIVATED`, this is a reversible measure
     # that can possibly be appealed and reverted.

@@ -167,7 +174,7 @@ class SynapseError(CodeMessageException):
         else:
             self._additional_fields = dict(additional_fields)

-    def error_dict(self) -> "JsonDict":
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         return cs_error(self.msg, self.errcode, **self._additional_fields)


@@ -213,7 +220,7 @@ class ConsentNotGivenError(SynapseError):
         )
         self._consent_uri = consent_uri

-    def error_dict(self) -> "JsonDict":
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)

@@ -307,6 +314,37 @@ class AuthError(SynapseError):
         super().__init__(code, msg, errcode, additional_fields)


+class UnstableSpecAuthError(AuthError):
+    """An error raised when a new error code is being proposed to replace a previous one.
+    This error will return a "org.matrix.unstable.errcode" property with the new error code,
+    with the previous error code still being defined in the "errcode" property.
+
+    This error will include `org.matrix.msc3848.unstable.errcode` in the C-S error body.
+    """
+
+    def __init__(
+        self,
+        code: int,
+        msg: str,
+        errcode: str,
+        previous_errcode: str = Codes.FORBIDDEN,
+        additional_fields: Optional[dict] = None,
+    ):
+        self.previous_errcode = previous_errcode
+        super().__init__(code, msg, errcode, additional_fields)
+
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
+        fields = {}
+        if config is not None and config.experimental.msc3848_enabled:
+            fields["org.matrix.msc3848.unstable.errcode"] = self.errcode
+        return cs_error(
+            self.msg,
+            self.previous_errcode,
+            **fields,
+            **self._additional_fields,
+        )
+
+
 class InvalidClientCredentialsError(SynapseError):
     """An error raised when there was a problem with the authorisation credentials
     in a client request.

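Concretely, when the `msc3848_enabled` experimental flag is on, a client hitting the `check_user_in_room` failure earlier in this diff would receive a body carrying both the stable and the proposed code, along the lines of (illustrative values):

    error_body = {
        "errcode": "M_FORBIDDEN",  # previous_errcode, kept for compatibility
        "error": "User @alice:example.org not in room !room:example.org",
        "org.matrix.msc3848.unstable.errcode": "ORG.MATRIX.MSC3848.NOT_JOINED",
    }
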
@@ -338,8 +376,8 @@ class InvalidClientTokenError(InvalidClientCredentialsError):
         super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
         self._soft_logout = soft_logout

-    def error_dict(self) -> "JsonDict":
-        d = super().error_dict()
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
+        d = super().error_dict(config)
         d["soft_logout"] = self._soft_logout
         return d

@@ -362,7 +400,7 @@ class ResourceLimitError(SynapseError):
         self.limit_type = limit_type
         super().__init__(code, msg, errcode=errcode)

-    def error_dict(self) -> "JsonDict":
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         return cs_error(
             self.msg,
             self.errcode,
@@ -397,7 +435,7 @@ class InvalidCaptchaError(SynapseError):
         super().__init__(code, msg, errcode)
         self.error_url = error_url

-    def error_dict(self) -> "JsonDict":
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         return cs_error(self.msg, self.errcode, error_url=self.error_url)


@@ -414,7 +452,7 @@ class LimitExceededError(SynapseError):
         super().__init__(code, msg, errcode)
         self.retry_after_ms = retry_after_ms

-    def error_dict(self) -> "JsonDict":
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)


@@ -429,7 +467,7 @@ class RoomKeysVersionError(SynapseError):
         super().__init__(403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION)
         self.current_version = current_version

-    def error_dict(self) -> "JsonDict":
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         return cs_error(self.msg, self.errcode, current_version=self.current_version)


@@ -469,7 +507,7 @@ class IncompatibleRoomVersionError(SynapseError):

         self._room_version = room_version

-    def error_dict(self) -> "JsonDict":
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         return cs_error(self.msg, self.errcode, room_version=self._room_version)


@@ -515,7 +553,7 @@ class UnredactedContentDeletedError(SynapseError):
         )
         self.content_keep_ms = content_keep_ms

-    def error_dict(self) -> "JsonDict":
+    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         extra = {}
         if self.content_keep_ms is not None:
             extra = {"fi.mau.msc2815.content_keep_ms": self.content_keep_ms}

@@ -140,13 +140,13 @@ USER_FILTER_SCHEMA = {


 @FormatChecker.cls_checks("matrix_room_id")
-def matrix_room_id_validator(room_id_str: str) -> RoomID:
-    return RoomID.from_string(room_id_str)
+def matrix_room_id_validator(room_id_str: str) -> bool:
+    return RoomID.is_valid(room_id_str)


 @FormatChecker.cls_checks("matrix_user_id")
-def matrix_user_id_validator(user_id_str: str) -> UserID:
-    return UserID.from_string(user_id_str)
+def matrix_user_id_validator(user_id_str: str) -> bool:
+    return UserID.is_valid(user_id_str)


 class Filtering:

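The reason for this change: jsonschema format checkers signal validity through their return value (plus any exception types registered via `raises`), so the old versions, which returned a `RoomID`/`UserID` object and relied on `from_string` raising, conflated parsing with validation. A small hedged sketch of the boolean contract with a custom checker:

    from jsonschema import Draft7Validator, FormatChecker

    checker = FormatChecker()

    @checker.checks("ascii_only")
    def ascii_only(value: object) -> bool:
        # Format checkers should return True for valid values, False otherwise.
        return isinstance(value, str) and value.isascii()

    validator = Draft7Validator(
        {"type": "string", "format": "ascii_only"},
        format_checker=checker,
    )
    print(len(list(validator.iter_errors("café"))))  # 1: fails the format check
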
@@ -17,7 +17,7 @@ from collections import OrderedDict
 from typing import Hashable, Optional, Tuple

 from synapse.api.errors import LimitExceededError
-from synapse.config.ratelimiting import RateLimitConfig
+from synapse.config.ratelimiting import RatelimitSettings
 from synapse.storage.databases.main import DataStore
 from synapse.types import Requester
 from synapse.util import Clock
@@ -314,8 +314,8 @@ class RequestRatelimiter:
         self,
         store: DataStore,
         clock: Clock,
-        rc_message: RateLimitConfig,
-        rc_admin_redaction: Optional[RateLimitConfig],
+        rc_message: RatelimitSettings,
+        rc_admin_redaction: Optional[RatelimitSettings],
     ):
         self.store = store
         self.clock = clock

@@ -269,24 +269,6 @@ class RoomVersions:
         msc3787_knock_restricted_join_rule=False,
         msc3667_int_only_power_levels=False,
     )
-    MSC2716v3 = RoomVersion(
-        "org.matrix.msc2716v3",
-        RoomDisposition.UNSTABLE,
-        EventFormatVersions.V3,
-        StateResolutionVersions.V2,
-        enforce_key_validity=True,
-        special_case_aliases_auth=False,
-        strict_canonicaljson=True,
-        limit_notifications_power_levels=True,
-        msc2176_redaction_rules=False,
-        msc3083_join_rules=False,
-        msc3375_redaction_rules=False,
-        msc2403_knocking=True,
-        msc2716_historical=True,
-        msc2716_redactions=True,
-        msc3787_knock_restricted_join_rule=False,
-        msc3667_int_only_power_levels=False,
-    )
     MSC3787 = RoomVersion(
         "org.matrix.msc3787",
         RoomDisposition.UNSTABLE,
@@ -323,6 +305,24 @@ class RoomVersions:
         msc3787_knock_restricted_join_rule=True,
         msc3667_int_only_power_levels=True,
     )
+    MSC2716v4 = RoomVersion(
+        "org.matrix.msc2716v4",
+        RoomDisposition.UNSTABLE,
+        EventFormatVersions.V3,
+        StateResolutionVersions.V2,
+        enforce_key_validity=True,
+        special_case_aliases_auth=False,
+        strict_canonicaljson=True,
+        limit_notifications_power_levels=True,
+        msc2176_redaction_rules=False,
+        msc3083_join_rules=False,
+        msc3375_redaction_rules=False,
+        msc2403_knocking=True,
+        msc2716_historical=True,
+        msc2716_redactions=True,
+        msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
+    )


 KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
@@ -338,9 +338,9 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V7,
         RoomVersions.V8,
         RoomVersions.V9,
-        RoomVersions.MSC2716v3,
         RoomVersions.MSC3787,
         RoomVersions.V10,
+        RoomVersions.MSC2716v4,
     )
 }

@@ -266,15 +266,48 @@ def register_start(
     reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))


-def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
+def listen_metrics(
+    bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool
+) -> None:
     """
     Start Prometheus metrics server.
     """
-    from synapse.metrics import RegistryProxy, start_http_server
+    from prometheus_client import start_http_server as start_http_server_prometheus
+
+    from synapse.metrics import (
+        RegistryProxy,
+        start_http_server as start_http_server_legacy,
+    )

     for host in bind_addresses:
         logger.info("Starting metrics listener on %s:%d", host, port)
-        start_http_server(port, addr=host, registry=RegistryProxy)
+        if enable_legacy_metric_names:
+            start_http_server_legacy(port, addr=host, registry=RegistryProxy)
+        else:
+            _set_prometheus_client_use_created_metrics(False)
+            start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
+
+
+def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:
+    """
+    Sets whether prometheus_client should expose `_created`-suffixed metrics for
+    all gauges, histograms and summaries.
+    There is no programmatic way to disable this without poking at internals;
+    the proper way is to use an environment variable which prometheus_client
+    loads at import time.
+
+    The motivation for disabling these `_created` metrics is that they're
+    a waste of space as they're not useful but they take up space in Prometheus.
+    """
+
+    import prometheus_client.metrics
+
+    if hasattr(prometheus_client.metrics, "_use_created"):
+        prometheus_client.metrics._use_created = new_value
+    else:
+        logger.error(
+            "Can't disable `_created` metrics in prometheus_client (brittle hack broken?)"
+        )


 def listen_manhole(

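For context, newer `prometheus_client` releases expose this switch via an environment variable read at import time, which is the "proper way" the docstring alludes to; the attribute poke above is the only runtime option. A sketch of the env-var route (the variable name `PROMETHEUS_DISABLE_CREATED_SERIES` is an assumption about the installed client version; verify against yours):

    import os

    # Must be set before prometheus_client is first imported, since the flag is
    # read at import time (per the docstring above).
    os.environ["PROMETHEUS_DISABLE_CREATED_SERIES"] = "True"

    import prometheus_client  # noqa: E402
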
@@ -478,9 +511,10 @@ async def start(hs: "HomeServer") -> None:
     setup_sentry(hs)
     setup_sdnotify(hs)

-    # If background tasks are running on the main process, start collecting the
-    # phone home stats.
+    # If background tasks are running on the main process or this is the worker in
+    # charge of them, start collecting the phone home stats and shared usage metrics.
     if hs.config.worker.run_background_tasks:
+        await hs.get_common_usage_metrics_manager().setup()
         start_phone_stats_home(hs)

     # We now freeze all allocated objects in the hopes that (almost)

@@ -412,7 +412,11 @@ class GenericWorkerServer(HomeServer):
                         "enable_metrics is not True!"
                     )
                 else:
-                    _base.listen_metrics(listener.bind_addresses, listener.port)
+                    _base.listen_metrics(
+                        listener.bind_addresses,
+                        listener.port,
+                        enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
+                    )
             else:
                 logger.warning("Unsupported listener type: %s", listener.type)

@@ -441,6 +445,13 @@ def start(config_options: List[str]) -> None:
         "synapse.app.user_dir",
     )

+    if config.experimental.faster_joins_enabled:
+        raise ConfigError(
+            "You have enabled the experimental `faster_joins` config option, but it is "
+            "not compatible with worker deployments yet. Please disable `faster_joins` "
+            "or run Synapse as a single process deployment instead."
+        )
+
     synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
     synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

@@ -44,7 +44,6 @@ from synapse.app._base import (
     register_start,
 )
 from synapse.config._base import ConfigError, format_config_error
-from synapse.config.emailconfig import ThreepidBehaviour
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import ListenerConfig
 from synapse.federation.transport.server import TransportLayerServer
@@ -58,7 +57,6 @@ from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
 from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
-from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
 from synapse.rest import ClientRestResource
 from synapse.rest.admin import AdminRestResource
 from synapse.rest.health import HealthResource
@@ -202,7 +200,7 @@ class SynapseHomeServer(HomeServer):
                 }
             )

-            if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+            if self.config.email.can_verify_email:
                 from synapse.rest.synapse.client.password_reset import (
                     PasswordResetSubmitTokenResource,
                 )
@@ -220,7 +218,10 @@ class SynapseHomeServer(HomeServer):
             resources.update({"/_matrix/consent": consent_resource})

         if name == "federation":
-            resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
+            federation_resource: Resource = TransportLayerServer(self)
+            if compress:
+                federation_resource = gz_wrap(federation_resource)
+            resources.update({FEDERATION_PREFIX: federation_resource})

         if name == "openid":
             resources.update(
@@ -288,16 +289,6 @@ class SynapseHomeServer(HomeServer):
                 manhole_settings=self.config.server.manhole_settings,
                 manhole_globals={"hs": self},
             )
-        elif listener.type == "replication":
-            services = listen_tcp(
-                listener.bind_addresses,
-                listener.port,
-                ReplicationStreamProtocolFactory(self),
-            )
-            for s in services:
-                self.get_reactor().addSystemEventTrigger(
-                    "before", "shutdown", s.stopListening
-                )
         elif listener.type == "metrics":
             if not self.config.metrics.enable_metrics:
                 logger.warning(
@@ -305,7 +296,11 @@ class SynapseHomeServer(HomeServer):
                     "enable_metrics is not True!"
                 )
             else:
-                _base.listen_metrics(listener.bind_addresses, listener.port)
+                _base.listen_metrics(
+                    listener.bind_addresses,
+                    listener.port,
+                    enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
+                )
         else:
             # this shouldn't happen, as the listener type should have been checked
             # during parsing

@@ -51,6 +51,16 @@ async def phone_stats_home(
     stats: JsonDict,
     stats_process: List[Tuple[int, "resource.struct_rusage"]] = _stats_process,
 ) -> None:
+    """Collect usage statistics and send them to the configured endpoint.
+
+    Args:
+        hs: the HomeServer object to use for gathering usage data.
+        stats: the dict in which to store the statistics sent to the configured
+            endpoint. Mostly used in tests to figure out the data that is supposed to
+            be sent.
+        stats_process: statistics about resource usage of the process.
+    """
+
     logger.info("Gathering stats for reporting")
     now = int(hs.get_clock().time())
     # Ensure the homeserver has started.
@@ -83,6 +93,7 @@ async def phone_stats_home(
     #

     store = hs.get_datastores().main
+    common_metrics = await hs.get_common_usage_metrics_manager().get_metrics()

     stats["homeserver"] = hs.config.server.server_name
     stats["server_context"] = hs.config.server.server_context
@@ -104,7 +115,7 @@ async def phone_stats_home(
     room_count = await store.get_room_count()
     stats["total_room_count"] = room_count

-    stats["daily_active_users"] = await store.count_daily_users()
+    stats["daily_active_users"] = common_metrics.daily_active_users
     stats["monthly_active_users"] = await store.count_monthly_users()
     daily_active_e2ee_rooms = await store.count_daily_active_e2ee_rooms()
     stats["daily_active_e2ee_rooms"] = daily_active_e2ee_rooms

@@ -20,6 +20,7 @@ import logging
|
||||
import os
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from enum import Enum, auto
|
||||
from hashlib import sha256
|
||||
from textwrap import dedent
|
||||
from typing import (
|
||||
@@ -603,18 +604,44 @@ class RootConfig:
|
||||
" may specify directories containing *.yaml files.",
|
||||
)
|
||||
|
||||
generate_group = parser.add_argument_group("Config generation")
|
||||
generate_group.add_argument(
|
||||
"--generate-config",
|
||||
action="store_true",
|
||||
help="Generate a config file, then exit.",
|
||||
# we nest the mutually-exclusive group inside another group so that the help
|
||||
# text shows them in their own group.
|
||||
generate_mode_group = parser.add_argument_group(
|
||||
"Config generation mode",
|
||||
)
|
||||
generate_group.add_argument(
|
||||
generate_mode_exclusive = generate_mode_group.add_mutually_exclusive_group()
|
||||
generate_mode_exclusive.add_argument(
|
||||
# hidden option to make the type and default work
|
||||
"--generate-mode",
|
||||
help=argparse.SUPPRESS,
|
||||
type=_ConfigGenerateMode,
|
||||
default=_ConfigGenerateMode.GENERATE_MISSING_AND_RUN,
|
||||
)
|
||||
generate_mode_exclusive.add_argument(
|
||||
"--generate-config",
|
||||
help="Generate a config file, then exit.",
|
||||
action="store_const",
|
||||
const=_ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT,
|
||||
dest="generate_mode",
|
||||
)
|
||||
generate_mode_exclusive.add_argument(
|
||||
"--generate-missing-configs",
|
||||
"--generate-keys",
|
||||
action="store_true",
|
||||
help="Generate any missing additional config files, then exit.",
|
||||
action="store_const",
|
||||
const=_ConfigGenerateMode.GENERATE_MISSING_AND_EXIT,
|
||||
dest="generate_mode",
|
||||
)
|
||||
generate_mode_exclusive.add_argument(
|
||||
"--generate-missing-and-run",
|
||||
help="Generate any missing additional config files, then run. This is the "
|
||||
"default behaviour.",
|
||||
action="store_const",
|
||||
const=_ConfigGenerateMode.GENERATE_MISSING_AND_RUN,
|
||||
dest="generate_mode",
|
||||
)
|
||||
|
||||
generate_group = parser.add_argument_group("Details for --generate-config")
|
||||
generate_group.add_argument(
|
||||
"-H", "--server-name", help="The server name to generate a config file for."
|
||||
)
|
||||
@@ -670,11 +697,12 @@ class RootConfig:
|
||||
config_dir_path = os.path.abspath(config_dir_path)
|
||||
data_dir_path = os.getcwd()
|
||||
|
||||
generate_missing_configs = config_args.generate_missing_configs
|
||||
|
||||
obj = cls(config_files)
|
||||
|
||||
if config_args.generate_config:
|
||||
if (
|
||||
config_args.generate_mode
|
||||
== _ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT
|
||||
):
|
||||
if config_args.report_stats is None:
|
||||
parser.error(
|
||||
"Please specify either --report-stats=yes or --report-stats=no\n\n"
|
||||
@@ -732,11 +760,14 @@ class RootConfig:
|
||||
)
|
||||
% (config_path,)
|
||||
)
|
||||
generate_missing_configs = True
|
||||
|
||||
config_dict = read_config_files(config_files)
|
||||
if generate_missing_configs:
|
||||
obj.generate_missing_files(config_dict, config_dir_path)
|
||||
obj.generate_missing_files(config_dict, config_dir_path)
|
||||
|
||||
if config_args.generate_mode in (
|
||||
_ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT,
|
||||
_ConfigGenerateMode.GENERATE_MISSING_AND_EXIT,
|
||||
):
|
||||
return None
|
||||
|
||||
obj.parse_config_dict(
|
||||
@@ -965,6 +996,12 @@ def read_file(file_path: Any, config_path: Iterable[str]) -> str:
|
||||
raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e
|
||||
|
||||
|
||||
class _ConfigGenerateMode(Enum):
|
||||
GENERATE_MISSING_AND_RUN = auto()
|
||||
GENERATE_MISSING_AND_EXIT = auto()
|
||||
GENERATE_EVERYTHING_AND_EXIT = auto()
|
||||
|
||||
|
||||
__all__ = [
|
||||
"Config",
|
||||
"RootConfig",
|
||||
|
||||
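The hunk above replaces two independent `store_true` flags with a single enum-valued destination. A minimal standalone sketch of that argparse pattern follows; `GenerateMode` is an illustrative stand-in for `_ConfigGenerateMode`, and the hidden option exists only so the shared `dest` gets a typed default:

```python
import argparse
from enum import Enum, auto


class GenerateMode(Enum):
    GENERATE_MISSING_AND_RUN = auto()
    GENERATE_MISSING_AND_EXIT = auto()
    GENERATE_EVERYTHING_AND_EXIT = auto()


parser = argparse.ArgumentParser()
group = parser.add_argument_group("Config generation mode")
exclusive = group.add_mutually_exclusive_group()
# Hidden option: gives the shared dest its type and default value.
exclusive.add_argument(
    "--generate-mode",
    help=argparse.SUPPRESS,
    type=GenerateMode,
    default=GenerateMode.GENERATE_MISSING_AND_RUN,
)
# Each visible flag stores a distinct enum constant into the same dest, and
# the mutually-exclusive group stops users combining them.
exclusive.add_argument(
    "--generate-config",
    action="store_const",
    const=GenerateMode.GENERATE_EVERYTHING_AND_EXIT,
    dest="generate_mode",
    help="Generate a config file, then exit.",
)
exclusive.add_argument(
    "--generate-missing-configs",
    action="store_const",
    const=GenerateMode.GENERATE_MISSING_AND_EXIT,
    dest="generate_mode",
    help="Generate any missing additional config files, then exit.",
)

args = parser.parse_args(["--generate-config"])
assert args.generate_mode is GenerateMode.GENERATE_EVERYTHING_AND_EXIT
```

Downstream code can then branch on one enum value instead of testing several booleans, which is what the `config_args.generate_mode in (...)` checks above do.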
@@ -23,7 +23,7 @@ LEGACY_TEMPLATE_DIR_WARNING = """
This server's configuration file is using the deprecated 'template_dir' setting in the
'account_validity' section. Support for this setting has been deprecated and will be
removed in a future version of Synapse. Server admins should instead use the new
'custom_templates_directory' setting documented here:
'custom_template_directory' setting documented here:
https://matrix-org.github.io/synapse/latest/templates.html
---------------------------------------------------------------------------------------"""


@@ -18,7 +18,6 @@
import email.utils
import logging
import os
from enum import Enum
from typing import Any

import attr
@@ -53,7 +52,7 @@ LEGACY_TEMPLATE_DIR_WARNING = """
This server's configuration file is using the deprecated 'template_dir' setting in the
'email' section. Support for this setting has been deprecated and will be removed in a
future version of Synapse. Server admins should instead use the new
'custom_templates_directory' setting documented here:
'custom_template_directory' setting documented here:
https://matrix-org.github.io/synapse/latest/templates.html
---------------------------------------------------------------------------------------"""

@@ -136,40 +135,22 @@ class EmailConfig(Config):

self.email_enable_notifs = email_config.get("enable_notifs", False)

self.threepid_behaviour_email = (
# Have Synapse handle the email sending if account_threepid_delegates.email
# is not defined
# msisdn is currently always remote while Synapse does not support any method of
# sending SMS messages
ThreepidBehaviour.REMOTE
if self.root.registration.account_threepid_delegate_email
else ThreepidBehaviour.LOCAL
)

if config.get("trust_identity_server_for_password_resets"):
raise ConfigError(
'The config option "trust_identity_server_for_password_resets" has been removed.'
"Please consult the configuration manual at docs/usage/configuration/config_documentation.md for "
"details and update your config file."
'The config option "trust_identity_server_for_password_resets" '
"is no longer supported. Please remove it from the config file."
)

self.local_threepid_handling_disabled_due_to_email_config = False
if (
self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
and email_config == {}
):
# We cannot warn the user this has happened here
# Instead do so when a user attempts to reset their password
self.local_threepid_handling_disabled_due_to_email_config = True

self.threepid_behaviour_email = ThreepidBehaviour.OFF
# If we have email config settings, assume that we can verify ownership of
# email addresses.
self.can_verify_email = email_config != {}

# Get lifetime of a validation token in milliseconds
self.email_validation_token_lifetime = self.parse_duration(
email_config.get("validation_token_lifetime", "1h")
)

if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
if self.can_verify_email:
missing = []
if not self.email_notif_from:
missing.append("email.notif_from")
@@ -360,18 +341,3 @@ class EmailConfig(Config):
"Config option email.invite_client_location must be a http or https URL",
path=("email", "invite_client_location"),
)


class ThreepidBehaviour(Enum):
"""
Enum to define the behaviour of Synapse with regards to when it contacts an identity
server for 3pid registration and password resets

REMOTE = use an external server to send tokens
LOCAL = send tokens ourselves
OFF = disable registration via 3pid and password resets
"""

REMOTE = "remote"
LOCAL = "local"
OFF = "off"

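With identity-server delegation removed, the three-state `ThreepidBehaviour` enum collapses into a single boolean. A minimal sketch of the simplified check, under the assumption (stated in the diff's comment) that any `email` settings at all imply a working SMTP setup; the function name is illustrative:

```python
def can_verify_email(email_config: dict) -> bool:
    # If we have email config settings, assume that we can verify
    # ownership of email addresses ourselves.
    return email_config != {}


assert can_verify_email({}) is False
assert can_verify_email({"smtp_host": "localhost"}) is True
```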
@@ -32,9 +32,6 @@ class ExperimentalConfig(Config):
# MSC2716 (importing historical messages)
self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)

# MSC2285 (private read receipts)
self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)

# MSC3244 (room version capabilities)
self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)

@@ -74,6 +71,9 @@ class ExperimentalConfig(Config):
self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)

# MSC2654: Unread counts
#
# Note that enabling this will result in an incorrect unread count for
# previously calculated push actions.
self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)

# MSC2815 (allow room moderators to view redacted event content)
@@ -88,5 +88,8 @@ class ExperimentalConfig(Config):
# MSC3715: dir param on /relations.
self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)

# MSC3827: Filtering of /publicRooms by room type
self.msc3827_enabled: bool = experimental.get("msc3827_enabled", False)
# MSC3848: Introduce errcodes for specific event sending failures
self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)

# MSC3852: Expose last seen user agent field on /_matrix/client/v3/devices.
self.msc3852_enabled: bool = experimental.get("msc3852_enabled", False)

@@ -42,6 +42,35 @@ class MetricsConfig(Config):

def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False)

"""
### `enable_legacy_metrics` (experimental)

**Experimental: this option may be removed or have its behaviour
changed at any time, with no notice.**

Set to `true` to publish both legacy and non-legacy Prometheus metric names,
or to `false` to only publish non-legacy Prometheus metric names.
Defaults to `true`. Has no effect if `enable_metrics` is `false`.

Legacy metric names include:
- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.

These legacy metric names are unconventional and not compliant with OpenMetrics standards.
They are included for backwards compatibility.

Example configuration:
```yaml
enable_legacy_metrics: false
```

See https://github.com/matrix-org/synapse/issues/11106 for context.

*Since v1.67.0.*
"""
self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)

self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get(
"report_stats_endpoint", "https://matrix.org/report-usage-stats/push"

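As a hedged illustration of the naming rules the option description refers to, using the stock `prometheus_client` library rather than Synapse's own metrics code: recent versions of the client expose counters with the OpenMetrics-compliant `_total` suffix automatically, and compliant names avoid colons.

```python
from prometheus_client import Counter, generate_latest

# A compliant counter name: no colons, and prometheus_client appends
# `_total` to the exposed sample name for counters.
sent_edus = Counter("synapse_federation_client_sent_edus", "EDUs sent")
sent_edus.inc()

# The exposition text includes `synapse_federation_client_sent_edus_total`.
print(generate_latest().decode())
```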
@@ -21,7 +21,7 @@ from synapse.types import JsonDict
from ._base import Config


class RateLimitConfig:
class RatelimitSettings:
def __init__(
self,
config: Dict[str, float],
@@ -34,7 +34,7 @@ class RateLimitConfig:


@attr.s(auto_attribs=True)
class FederationRateLimitConfig:
class FederationRatelimitSettings:
window_size: int = 1000
sleep_limit: int = 10
sleep_delay: int = 500
@@ -50,11 +50,11 @@ class RatelimitConfig(Config):
# Load the new-style messages config if it exists. Otherwise fall back
# to the old method.
if "rc_message" in config:
self.rc_message = RateLimitConfig(
self.rc_message = RatelimitSettings(
config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0}
)
else:
self.rc_message = RateLimitConfig(
self.rc_message = RatelimitSettings(
{
"per_second": config.get("rc_messages_per_second", 0.2),
"burst_count": config.get("rc_message_burst_count", 10.0),
@@ -64,9 +64,9 @@ class RatelimitConfig(Config):
# Load the new-style federation config, if it exists. Otherwise, fall
# back to the old method.
if "rc_federation" in config:
self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
self.rc_federation = FederationRatelimitSettings(**config["rc_federation"])
else:
self.rc_federation = FederationRateLimitConfig(
self.rc_federation = FederationRatelimitSettings(
**{
k: v
for k, v in {
@@ -80,17 +80,17 @@ class RatelimitConfig(Config):
}
)

self.rc_registration = RateLimitConfig(config.get("rc_registration", {}))
self.rc_registration = RatelimitSettings(config.get("rc_registration", {}))

self.rc_registration_token_validity = RateLimitConfig(
self.rc_registration_token_validity = RatelimitSettings(
config.get("rc_registration_token_validity", {}),
defaults={"per_second": 0.1, "burst_count": 5},
)

rc_login_config = config.get("rc_login", {})
self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {}))
self.rc_login_account = RateLimitConfig(rc_login_config.get("account", {}))
self.rc_login_failed_attempts = RateLimitConfig(
self.rc_login_address = RatelimitSettings(rc_login_config.get("address", {}))
self.rc_login_account = RatelimitSettings(rc_login_config.get("account", {}))
self.rc_login_failed_attempts = RatelimitSettings(
rc_login_config.get("failed_attempts", {})
)

@@ -101,20 +101,20 @@ class RatelimitConfig(Config):
rc_admin_redaction = config.get("rc_admin_redaction")
self.rc_admin_redaction = None
if rc_admin_redaction:
self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)
self.rc_admin_redaction = RatelimitSettings(rc_admin_redaction)

self.rc_joins_local = RateLimitConfig(
self.rc_joins_local = RatelimitSettings(
config.get("rc_joins", {}).get("local", {}),
defaults={"per_second": 0.1, "burst_count": 10},
)
self.rc_joins_remote = RateLimitConfig(
self.rc_joins_remote = RatelimitSettings(
config.get("rc_joins", {}).get("remote", {}),
defaults={"per_second": 0.01, "burst_count": 10},
)

# Track the rate of joins to a given room. If there are too many, temporarily
# prevent local joins and remote joins via this server.
self.rc_joins_per_room = RateLimitConfig(
self.rc_joins_per_room = RatelimitSettings(
config.get("rc_joins_per_room", {}),
defaults={"per_second": 1, "burst_count": 10},
)
@@ -124,31 +124,31 @@ class RatelimitConfig(Config):
# * For requests received over federation this is keyed by the origin.
#
# Note that this isn't exposed in the configuration as it is obscure.
self.rc_key_requests = RateLimitConfig(
self.rc_key_requests = RatelimitSettings(
config.get("rc_key_requests", {}),
defaults={"per_second": 20, "burst_count": 100},
)

self.rc_3pid_validation = RateLimitConfig(
self.rc_3pid_validation = RatelimitSettings(
config.get("rc_3pid_validation") or {},
defaults={"per_second": 0.003, "burst_count": 5},
)

self.rc_invites_per_room = RateLimitConfig(
self.rc_invites_per_room = RatelimitSettings(
config.get("rc_invites", {}).get("per_room", {}),
defaults={"per_second": 0.3, "burst_count": 10},
)
self.rc_invites_per_user = RateLimitConfig(
self.rc_invites_per_user = RatelimitSettings(
config.get("rc_invites", {}).get("per_user", {}),
defaults={"per_second": 0.003, "burst_count": 5},
)

self.rc_invites_per_issuer = RateLimitConfig(
self.rc_invites_per_issuer = RatelimitSettings(
config.get("rc_invites", {}).get("per_issuer", {}),
defaults={"per_second": 0.3, "burst_count": 10},
)

self.rc_third_party_invite = RateLimitConfig(
self.rc_third_party_invite = RatelimitSettings(
config.get("rc_third_party_invite", {}),
defaults={
"per_second": self.rc_message.per_second,

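The hunks above are a mechanical rename (`RateLimitConfig` becomes `RatelimitSettings`), but the call sites show the shape of the settings holder. A minimal sketch of that shape, with simplified field handling relative to the real class:

```python
from typing import Dict, Optional


class RatelimitSettings:
    """Reads per_second/burst_count from a config dict with per-limiter defaults."""

    def __init__(
        self,
        config: Dict[str, float],
        defaults: Optional[Dict[str, float]] = None,
    ):
        defaults = defaults or {"per_second": 0.17, "burst_count": 3.0}
        self.per_second = config.get("per_second", defaults["per_second"])
        self.burst_count = config.get("burst_count", defaults["burst_count"])


# Usage mirrors the diff: empty config section falls back to the defaults.
rc = RatelimitSettings({}, defaults={"per_second": 0.1, "burst_count": 5})
assert (rc.per_second, rc.burst_count) == (0.1, 5)
```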
@@ -13,23 +13,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from typing import Any, Optional
from typing import Any, Dict, Optional

from synapse.api.constants import RoomCreationPreset
from synapse.config._base import Config, ConfigError
from synapse.config._base import Config, ConfigError, read_file
from synapse.types import JsonDict, RoomAlias, UserID
from synapse.util.stringutils import random_string_with_symbols, strtobool

logger = logging.getLogger(__name__)

LEGACY_EMAIL_DELEGATE_WARNING = """\
Delegation of email verification to an identity server is now deprecated. To
NO_EMAIL_DELEGATE_ERROR = """\
Delegation of email verification to an identity server is no longer supported. To
continue to allow users to add email addresses to their accounts, and use them for
password resets, configure Synapse with an SMTP server via the `email` setting, and
remove `account_threepid_delegates.email`.
"""

This will be an error in a future version.
CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\
You have configured both `registration_shared_secret` and
`registration_shared_secret_path`. These are mutually incompatible.
"""


@@ -58,15 +58,22 @@ class RegistrationConfig(Config):
self.enable_registration_token_3pid_bypass = config.get(
"enable_registration_token_3pid_bypass", False
)

# read the shared secret, either inline or from an external file
self.registration_shared_secret = config.get("registration_shared_secret")
registration_shared_secret_path = config.get("registration_shared_secret_path")
if registration_shared_secret_path:
if self.registration_shared_secret:
raise ConfigError(CONFLICTING_SHARED_SECRET_OPTS_ERROR)
self.registration_shared_secret = read_file(
registration_shared_secret_path, ("registration_shared_secret_path",)
).strip()

self.bcrypt_rounds = config.get("bcrypt_rounds", 12)

account_threepid_delegates = config.get("account_threepid_delegates") or {}
if "email" in account_threepid_delegates:
logger.warning(LEGACY_EMAIL_DELEGATE_WARNING)

self.account_threepid_delegate_email = account_threepid_delegates.get("email")
raise ConfigError(NO_EMAIL_DELEGATE_ERROR)
self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
self.default_identity_server = config.get("default_identity_server")
self.allow_guest_access = config.get("allow_guest_access", False)
@@ -225,6 +232,21 @@ class RegistrationConfig(Config):
else:
return ""

def generate_files(self, config: Dict[str, Any], config_dir_path: str) -> None:
# if 'registration_shared_secret_path' is specified, and the target file
# does not exist, generate it.
registration_shared_secret_path = config.get("registration_shared_secret_path")
if registration_shared_secret_path and not self.path_exists(
registration_shared_secret_path
):
print(
"Generating registration shared secret file "
+ registration_shared_secret_path
)
secret = random_string_with_symbols(50)
with open(registration_shared_secret_path, "w") as f:
f.write(f"{secret}\n")

@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> None:
reg_group = parser.add_argument_group("registration")

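A self-contained sketch of the inline-vs-file pattern the hunk introduces: exactly one of the two options may be set, and the file contents win after stripping the trailing newline that the generator writes. `ConfigError` here is a local stand-in for Synapse's exception type.

```python
from typing import Optional


class ConfigError(Exception):
    pass


def read_shared_secret(config: dict) -> Optional[str]:
    secret = config.get("registration_shared_secret")
    path = config.get("registration_shared_secret_path")
    if path:
        if secret:
            # Setting both is ambiguous, so refuse to start.
            raise ConfigError(
                "registration_shared_secret and registration_shared_secret_path "
                "are mutually incompatible"
            )
        with open(path) as f:
            # Generated files end with a newline; strip it.
            secret = f.read().strip()
    return secret
```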
@@ -36,6 +36,12 @@ from ._util import validate_config

logger = logging.Logger(__name__)

DIRECT_TCP_ERROR = """
Using direct TCP replication for workers is no longer supported.

Please see https://matrix-org.github.io/synapse/latest/upgrade.html#direct-tcp-replication-is-no-longer-supported-migrate-to-redis
"""

# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
# on IPv6 when '::' is set.
@@ -165,7 +171,6 @@ KNOWN_LISTENER_TYPES = {
"http",
"metrics",
"manhole",
"replication",
}

KNOWN_RESOURCES = {
@@ -515,7 +520,9 @@ class ServerConfig(Config):
):
raise ConfigError("allowed_avatar_mimetypes must be a list")

self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
self.listeners = [
parse_listener_def(i, x) for i, x in enumerate(config.get("listeners", []))
]

# no_tls is not really supported any more, but let's grandfather it in
# here.
@@ -880,9 +887,12 @@ def read_gc_thresholds(
)


def parse_listener_def(listener: Any) -> ListenerConfig:
def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
"""parse a listener config from the config file"""
listener_type = listener["type"]
# Raise a helpful error if direct TCP replication is still configured.
if listener_type == "replication":
raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))

port = listener.get("port")
if not isinstance(port, int):

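The reason the parser now takes an index becomes clear in the error path: when validation fails, the `ConfigError` can point at the exact list entry in the YAML, e.g. `("listeners", "1", "type")`. A runnable sketch of that pattern, with illustrative names:

```python
from typing import Any, List, Tuple


class ConfigError(Exception):
    def __init__(self, msg: str, path: Tuple[str, ...] = ()):
        super().__init__(msg)
        self.path = path


def parse_listeners(listeners: List[Any]) -> List[dict]:
    parsed = []
    for i, listener in enumerate(listeners):
        if listener.get("type") == "replication":
            # The index lets the error name the offending YAML entry.
            raise ConfigError(
                "direct TCP replication is no longer supported",
                ("listeners", str(i), "type"),
            )
        parsed.append(listener)
    return parsed
```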
@@ -26,7 +26,7 @@ LEGACY_TEMPLATE_DIR_WARNING = """
This server's configuration file is using the deprecated 'template_dir' setting in the
'sso' section. Support for this setting has been deprecated and will be removed in a
future version of Synapse. Server admins should instead use the new
'custom_templates_directory' setting documented here:
'custom_template_directory' setting documented here:
https://matrix-org.github.io/synapse/latest/templates.html
---------------------------------------------------------------------------------------"""


@@ -27,7 +27,7 @@ from ._base import (
RoutableShardedWorkerHandlingConfig,
ShardedWorkerHandlingConfig,
)
from .server import ListenerConfig, parse_listener_def
from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def

_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
The send_federation config option must be disabled in the main
@@ -128,7 +128,8 @@ class WorkerConfig(Config):
self.worker_app = None

self.worker_listeners = [
parse_listener_def(x) for x in config.get("worker_listeners", [])
parse_listener_def(i, x)
for i, x in enumerate(config.get("worker_listeners", []))
]
self.worker_daemonize = bool(config.get("worker_daemonize"))
self.worker_pid_file = config.get("worker_pid_file")
@@ -142,7 +143,8 @@ class WorkerConfig(Config):
self.worker_replication_host = config.get("worker_replication_host", None)

# The port on the main synapse for TCP replication
self.worker_replication_port = config.get("worker_replication_port", None)
if "worker_replication_port" in config:
raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",))

# The port on the main synapse for HTTP replication endpoint
self.worker_replication_http_port = config.get("worker_replication_http_port")

@@ -28,6 +28,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase
from synapse.events.utils import prune_event, prune_event_dict
from synapse.logging.opentracing import trace
from synapse.types import JsonDict

logger = logging.getLogger(__name__)
@@ -35,6 +36,7 @@ logger = logging.getLogger(__name__)
Hasher = Callable[[bytes], "hashlib._Hash"]


@trace
def check_event_content_hash(
event: EventBase, hash_algorithm: Hasher = hashlib.sha256
) -> bool:

@@ -30,7 +30,13 @@ from synapse.api.constants import (
JoinRules,
Membership,
)
from synapse.api.errors import AuthError, EventSizeError, SynapseError
from synapse.api.errors import (
AuthError,
Codes,
EventSizeError,
SynapseError,
UnstableSpecAuthError,
)
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
@@ -291,7 +297,11 @@ def check_state_dependent_auth_rules(
invite_level = get_named_level(auth_dict, "invite", 0)

if user_level < invite_level:
raise AuthError(403, "You don't have permission to invite users")
raise UnstableSpecAuthError(
403,
"You don't have permission to invite users",
errcode=Codes.INSUFFICIENT_POWER,
)
else:
logger.debug("Allowing! %s", event)
return
@@ -474,7 +484,11 @@ def _is_membership_change_allowed(
return

if not caller_in_room: # caller isn't joined
raise AuthError(403, "%s not in room %s." % (event.user_id, event.room_id))
raise UnstableSpecAuthError(
403,
"%s not in room %s." % (event.user_id, event.room_id),
errcode=Codes.NOT_JOINED,
)

if Membership.INVITE == membership:
# TODO (erikj): We should probably handle this more intelligently
@@ -484,10 +498,18 @@ def _is_membership_change_allowed(
if target_banned:
raise AuthError(403, "%s is banned from the room" % (target_user_id,))
elif target_in_room: # the target is already in the room.
raise AuthError(403, "%s is already in the room." % target_user_id)
raise UnstableSpecAuthError(
403,
"%s is already in the room." % target_user_id,
errcode=Codes.ALREADY_JOINED,
)
else:
if user_level < invite_level:
raise AuthError(403, "You don't have permission to invite users")
raise UnstableSpecAuthError(
403,
"You don't have permission to invite users",
errcode=Codes.INSUFFICIENT_POWER,
)
elif Membership.JOIN == membership:
# Joins are valid iff caller == target and:
# * They are not banned.
@@ -549,15 +571,27 @@ def _is_membership_change_allowed(
elif Membership.LEAVE == membership:
# TODO (erikj): Implement kicks.
if target_banned and user_level < ban_level:
raise AuthError(403, "You cannot unban user %s." % (target_user_id,))
raise UnstableSpecAuthError(
403,
"You cannot unban user %s." % (target_user_id,),
errcode=Codes.INSUFFICIENT_POWER,
)
elif target_user_id != event.user_id:
kick_level = get_named_level(auth_events, "kick", 50)

if user_level < kick_level or user_level <= target_level:
raise AuthError(403, "You cannot kick user %s." % target_user_id)
raise UnstableSpecAuthError(
403,
"You cannot kick user %s." % target_user_id,
errcode=Codes.INSUFFICIENT_POWER,
)
elif Membership.BAN == membership:
if user_level < ban_level or user_level <= target_level:
raise AuthError(403, "You don't have permission to ban")
raise UnstableSpecAuthError(
403,
"You don't have permission to ban",
errcode=Codes.INSUFFICIENT_POWER,
)
elif room_version.msc2403_knocking and Membership.KNOCK == membership:
if join_rule != JoinRules.KNOCK and (
not room_version.msc3787_knock_restricted_join_rule
@@ -567,7 +601,11 @@ def _is_membership_change_allowed(
elif target_user_id != event.user_id:
raise AuthError(403, "You cannot knock for other users")
elif target_in_room:
raise AuthError(403, "You cannot knock on a room you are already in")
raise UnstableSpecAuthError(
403,
"You cannot knock on a room you are already in",
errcode=Codes.ALREADY_JOINED,
)
elif caller_invited:
raise AuthError(403, "You are already invited to this room")
elif target_banned:
@@ -638,10 +676,11 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b
user_level = get_user_power_level(event.user_id, auth_events)

if user_level < send_level:
raise AuthError(
raise UnstableSpecAuthError(
403,
"You don't have permission to post that to the room. "
+ "user_level (%d) < send_level (%d)" % (user_level, send_level),
errcode=Codes.INSUFFICIENT_POWER,
)

# Check state_key
@@ -716,9 +755,10 @@ def check_historical(
historical_level = get_named_level(auth_events, "historical", 100)

if user_level < historical_level:
raise AuthError(
raise UnstableSpecAuthError(
403,
'You don\'t have permission to send send historical related events ("insertion", "batch", and "marker")',
errcode=Codes.INSUFFICIENT_POWER,
)



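These hunks all follow one pattern (per MSC3848): replace a bare 403 `AuthError` with an error type that carries a specific errcode, so clients can distinguish "not enough power" from "not joined" and so on. A hedged, self-contained sketch of that pattern; the class layout and the errcode string are illustrative, and the real `UnstableSpecAuthError` additionally gates the unstable errcode on the `msc3848_enabled` flag:

```python
class AuthError(Exception):
    def __init__(self, code: int, msg: str, errcode: str = "M_FORBIDDEN"):
        super().__init__(msg)
        self.code = code
        self.errcode = errcode


class UnstableSpecAuthError(AuthError):
    """An AuthError tagged with a draft errcode from an unstable MSC."""


try:
    raise UnstableSpecAuthError(
        403,
        "You don't have permission to invite users",
        errcode="ORG.MATRIX.MSC3848.INSUFFICIENT_POWER",  # illustrative value
    )
except AuthError as e:
    assert e.code == 403 and "INSUFFICIENT_POWER" in e.errcode
```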
@@ -11,11 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
from typing import TYPE_CHECKING, List, Optional, Tuple

import attr
from frozendict import frozendict
from typing_extensions import Literal

from synapse.appservice import ApplicationService
from synapse.events import EventBase
@@ -33,7 +32,7 @@ class EventContext:
Holds information relevant to persisting an event

Attributes:
rejected: A rejection reason if the event was rejected, else False
rejected: A rejection reason if the event was rejected, else None

_state_group: The ID of the state group for this event. Note that state events
are persisted with a state group which includes the new event, so this is
@@ -85,7 +84,7 @@ class EventContext:
"""

_storage: "StorageControllers"
rejected: Union[Literal[False], str] = False
rejected: Optional[str] = None
_state_group: Optional[int] = None
state_group_before_event: Optional[int] = None
_state_delta_due_to_event: Optional[StateMap[str]] = None

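A minimal sketch of the type change: `rejected` moves from the awkward `Union[Literal[False], str]` to `Optional[str]`, so "not rejected" is `None` and any string is the rejection reason. The field set is trimmed for illustration.

```python
from typing import Optional

import attr


@attr.s(slots=True, auto_attribs=True)
class EventContext:
    rejected: Optional[str] = None
    state_group: Optional[int] = None


ctx = EventContext()
assert ctx.rejected is None           # not rejected
ctx = EventContext(rejected="spam")   # rejected, with a reason
assert ctx.rejected == "spam"
```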
@@ -32,6 +32,7 @@ from typing_extensions import Literal

import synapse
from synapse.api.errors import Codes
from synapse.logging.opentracing import trace
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
@@ -378,6 +379,7 @@ class SpamChecker:
if check_media_file_for_spam is not None:
self._check_media_file_for_spam_callbacks.append(check_media_file_for_spam)

@trace
async def check_event_for_spam(
self, event: "synapse.events.EventBase"
) -> Union[Tuple[Codes, JsonDict], str]:

@@ -161,7 +161,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_BATCH:
add_fields(EventContentFields.MSC2716_BATCH_ID)
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
add_fields(EventContentFields.MSC2716_MARKER_INSERTION)
add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE)

allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}


@@ -23,6 +23,7 @@ from synapse.crypto.keyring import Keyring
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event, validate_canonicaljson
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.opentracing import log_kv, trace
from synapse.types import JsonDict, get_domain_from_id

if TYPE_CHECKING:
@@ -55,6 +56,7 @@ class FederationBase:
self._clock = hs.get_clock()
self._storage_controllers = hs.get_storage_controllers()

@trace
async def _check_sigs_and_hash(
self, room_version: RoomVersion, pdu: EventBase
) -> EventBase:
@@ -97,17 +99,36 @@ class FederationBase:
"Event %s seems to have been redacted; using our redacted copy",
pdu.event_id,
)
log_kv(
{
"message": "Event seems to have been redacted; using our redacted copy",
"event_id": pdu.event_id,
}
)
else:
logger.warning(
"Event %s content has been tampered, redacting",
pdu.event_id,
)
log_kv(
{
"message": "Event content has been tampered, redacting",
"event_id": pdu.event_id,
}
)
return redacted_event

spam_check = await self.spam_checker.check_event_for_spam(pdu)

if spam_check != self.spam_checker.NOT_SPAM:
logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
log_kv(
{
"message": "Event contains spam, redacting (to save disk space) "
"as well as soft-failing (to stop using the event in prev_events)",
"event_id": pdu.event_id,
}
)
# we redact (to save disk space) as well as soft-failing (to stop
# using the event in prev_events).
redacted_event = prune_event(pdu)
@@ -117,6 +138,7 @@ class FederationBase:
return pdu


@trace
async def _check_sigs_on_pdu(
keyring: Keyring, room_version: RoomVersion, pdu: EventBase
) -> None:

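A sketch of the instrumentation idiom added throughout this hunk: alongside the stdlib log line, emit a structured key/value record into the active tracing span. `log_kv` below is a stand-in for `synapse.logging.opentracing.log_kv`, with a logging fallback so the sketch stays runnable without Synapse.

```python
import logging

logger = logging.getLogger(__name__)


def log_kv(kv: dict) -> None:
    # In Synapse this attaches `kv` to the current opentracing span;
    # a plain debug log keeps the sketch self-contained.
    logger.debug("span log: %s", kv)


def on_redacted(event_id: str) -> None:
    # Human-readable log line for operators...
    logger.warning(
        "Event %s seems to have been redacted; using our redacted copy", event_id
    )
    # ...plus a structured record that travels with the trace.
    log_kv(
        {
            "message": "Event seems to have been redacted; using our redacted copy",
            "event_id": event_id,
        }
    )
```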
@@ -61,6 +61,7 @@ from synapse.federation.federation_base import (
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.types import QueryParams
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace
from synapse.types import JsonDict, UserID, get_domain_from_id
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
@@ -233,6 +234,8 @@ class FederationClient(FederationBase):
destination, content, timeout
)

@trace
@tag_args
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Collection[str]
) -> Optional[List[EventBase]]:
@@ -335,6 +338,8 @@ class FederationClient(FederationBase):

return None

@trace
@tag_args
async def get_pdu(
self,
destinations: Iterable[str],
@@ -403,9 +408,9 @@ class FederationClient(FederationBase):
# Prime the cache
self._get_pdu_cache[event.event_id] = event

# FIXME: We should add a `break` here to avoid calling every
# destination after we already found a PDU (will follow-up
# in a separate PR)
# Now that we have an event, we can break out of this
# loop and stop asking other destinations.
break

except SynapseError as e:
logger.info(
@@ -446,6 +451,8 @@ class FederationClient(FederationBase):

return event_copy

@trace
@tag_args
async def get_room_state_ids(
self, destination: str, room_id: str, event_id: str
) -> Tuple[List[str], List[str]]:
@@ -465,6 +472,23 @@ class FederationClient(FederationBase):
state_event_ids = result["pdu_ids"]
auth_event_ids = result.get("auth_chain_ids", [])

set_tag(
SynapseTags.RESULT_PREFIX + "state_event_ids",
str(state_event_ids),
)
set_tag(
SynapseTags.RESULT_PREFIX + "state_event_ids.length",
str(len(state_event_ids)),
)
set_tag(
SynapseTags.RESULT_PREFIX + "auth_event_ids",
str(auth_event_ids),
)
set_tag(
SynapseTags.RESULT_PREFIX + "auth_event_ids.length",
str(len(auth_event_ids)),
)

if not isinstance(state_event_ids, list) or not isinstance(
auth_event_ids, list
):
@@ -472,6 +496,8 @@ class FederationClient(FederationBase):

return state_event_ids, auth_event_ids

@trace
@tag_args
async def get_room_state(
self,
destination: str,
@@ -531,6 +557,7 @@ class FederationClient(FederationBase):

return valid_state_events, valid_auth_events

@trace
async def _check_sigs_and_hash_and_fetch(
self,
origin: str,
@@ -560,11 +587,15 @@ class FederationClient(FederationBase):
Returns:
A list of PDUs that have valid signatures and hashes.
"""
set_tag(
SynapseTags.RESULT_PREFIX + "pdus.length",
str(len(pdus)),
)

# We limit how many PDUs we check at once, as if we try to do hundreds
# of thousands of PDUs at once we see large memory spikes.

valid_pdus = []
valid_pdus: List[EventBase] = []

async def _execute(pdu: EventBase) -> None:
valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
@@ -580,6 +611,8 @@ class FederationClient(FederationBase):

return valid_pdus

@trace
@tag_args
async def _check_sigs_and_hash_and_fetch_one(
self,
pdu: EventBase,
@@ -612,16 +645,27 @@ class FederationClient(FederationBase):
except InvalidEventSignatureError as e:
logger.warning(
"Signature on retrieved event %s was invalid (%s). "
"Checking local store/orgin server",
"Checking local store/origin server",
pdu.event_id,
e,
)
log_kv(
{
"message": "Signature on retrieved event was invalid. "
"Checking local store/origin server",
"event_id": pdu.event_id,
"InvalidEventSignatureError": e,
}
)

# Check local db.
res = await self.store.get_event(
pdu.event_id, allow_rejected=True, allow_none=True
)

# If the PDU fails its signature check and we don't have it in our
# database, we then request it from sender's server (if that is not the
# same as `origin`).
pdu_origin = get_domain_from_id(pdu.sender)
if not res and pdu_origin != origin:
try:
@@ -725,6 +769,12 @@ class FederationClient(FederationBase):
if failover_errcodes is None:
failover_errcodes = ()

if not destinations:
# Give a bit of a clearer message if no servers were specified at all.
raise SynapseError(
502, f"Failed to {description} via any server: No servers specified."
)

for destination in destinations:
if destination == self.server_name:
continue
@@ -774,7 +824,7 @@ class FederationClient(FederationBase):
"Failed to %s via %s", description, destination, exc_info=True
)

raise SynapseError(502, "Failed to %s via any server" % (description,))
raise SynapseError(502, f"Failed to {description} via any server")

async def make_membership_event(
self,

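The `set_tag` additions around `get_room_state_ids` follow a consistent idiom: record both the value and its length under a result-namespaced tag, since list-valued tags can be large and are often truncated by tracing backends. A runnable sketch with stand-ins for `set_tag` and `SynapseTags.RESULT_PREFIX`:

```python
from typing import Any, Dict, List

RESULT_PREFIX = "RESULT."     # stand-in for SynapseTags.RESULT_PREFIX
_tags: Dict[str, str] = {}    # stands in for the active span's tags


def set_tag(key: str, value: Any) -> None:
    _tags[key] = str(value)


def tag_result_list(name: str, values: List[str]) -> None:
    # Record the full value and, separately, its length so the size
    # survives even if the backend truncates the value tag.
    set_tag(RESULT_PREFIX + name, str(values))
    set_tag(RESULT_PREFIX + name + ".length", str(len(values)))


tag_result_list("state_event_ids", ["$a", "$b"])
assert _tags["RESULT.state_event_ids.length"] == "2"
```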
@@ -61,7 +61,12 @@ from synapse.logging.context import (
nested_logging_context,
run_in_background,
)
from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.opentracing import (
log_kv,
start_active_span_from_edu,
tag_args,
trace,
)
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
@@ -469,7 +474,7 @@ class FederationServer(FederationBase):
)
for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id
pdu_results[event_id] = e.error_dict()
pdu_results[event_id] = e.error_dict(self.hs.config)
return

for pdu in pdus_by_room[room_id]:
@@ -547,6 +552,8 @@ class FederationServer(FederationBase):

return 200, resp

@trace
@tag_args
async def on_state_ids_request(
self, origin: str, room_id: str, event_id: str
) -> Tuple[int, JsonDict]:
@@ -569,6 +576,8 @@ class FederationServer(FederationBase):

return 200, resp

@trace
@tag_args
async def _on_state_ids_request_compute(
self, room_id: str, event_id: str
) -> JsonDict:
@@ -754,6 +763,17 @@ class FederationServer(FederationBase):
The partial knock event.
"""
origin_host, _ = parse_server_name(origin)

if await self.store.is_partial_state_room(room_id):
# Before we do anything: check if the room is partial-stated.
# Note that at the time this check was added, `on_make_knock_request` would
# block due to https://github.com/matrix-org/synapse/issues/12997.
raise SynapseError(
404,
"Unable to handle /make_knock right now; this server is not fully joined.",
errcode=Codes.NOT_FOUND,
)

await self.check_server_matches_acl(origin_host, room_id)

room_version = await self.store.get_room_version(room_id)
@@ -843,8 +863,25 @@ class FederationServer(FederationBase):
Codes.BAD_JSON,
)

# Note that get_room_version throws if the room does not exist here.
room_version = await self.store.get_room_version(room_id)

if await self.store.is_partial_state_room(room_id):
# If our server is still only partially joined, we can't give a complete
# response to /send_join, /send_knock or /send_leave.
# This is because we will not be able to provide the server list (for partial
# joins) or the full state (for full joins).
# Return a 404 as we would if we weren't in the room at all.
logger.info(
f"Rejecting /send_{membership_type} to %s because it's a partial state room",
room_id,
)
raise SynapseError(
404,
f"Unable to handle /send_{membership_type} right now; this server is not fully joined.",
errcode=Codes.NOT_FOUND,
)

if membership_type == Membership.KNOCK and not room_version.msc2403_knocking:
raise SynapseError(
403,

@@ -441,6 +441,19 @@ class FederationSender(AbstractFederationSender):
destinations = await self._external_cache.get(
"get_joined_hosts", str(sg)
)
if destinations is None:
# Add logging to help track down #13444
logger.info(
"Unexpectedly did not have cached destinations for %s / %s",
sg,
event.event_id,
)
else:
# Add logging to help track down #13444
logger.info(
"Unexpectedly did not have cached prev group for %s",
event.event_id,
)

if destinations is None:
try:

@@ -21,7 +21,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tupl

from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_V1_PREFIX
from synapse.http.server import HttpServer, ServletCallback, is_method_cancellable
from synapse.http.server import HttpServer, ServletCallback
from synapse.http.servlet import parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.logging.context import run_in_background
@@ -34,6 +34,7 @@ from synapse.logging.opentracing import (
whitelisted_homeserver,
)
from synapse.types import JsonDict
from synapse.util.cancellation import is_function_cancellable
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.stringutils import parse_and_validate_server_name

@@ -375,7 +376,7 @@ class BaseFederationServlet:
if code is None:
continue

if is_method_cancellable(code):
if is_function_cancellable(code):
# The wrapper added by `self._wrap` will inherit the cancellable flag,
# but the wrapper itself does not support cancellation yet.
# Once resolved, the cancellation tests in

@@ -280,7 +280,7 @@ class AuthHandler:
that it isn't stolen by re-authenticating them.

Args:
requester: The user, as given by the access token
requester: The user making the request, according to the access token.

request: The request sent by the client.

@@ -565,7 +565,7 @@ class AuthHandler:
except LoginError as e:
# this step failed. Merge the error dict into the response
# so that the client can have another go.
errordict = e.error_dict()
errordict = e.error_dict(self.hs.config)

creds = await self.store.get_completed_ui_auth_stages(session.session_id)
for f in flows:
@@ -1435,20 +1435,25 @@ class AuthHandler:
access_token: access token to be deleted

"""
user_info = await self.auth.get_user_by_access_token(access_token)
token = await self.store.get_user_by_access_token(access_token)
if not token:
# At this point, the token should already have been fetched once by
# the caller, so this should not happen, unless of a race condition
# between two delete requests
raise SynapseError(HTTPStatus.UNAUTHORIZED, "Unrecognised access token")
await self.store.delete_access_token(access_token)

# see if any modules want to know about this
await self.password_auth_provider.on_logged_out(
user_id=user_info.user_id,
device_id=user_info.device_id,
user_id=token.user_id,
device_id=token.device_id,
access_token=access_token,
)

# delete pushers associated with this access token
if user_info.token_id is not None:
if token.token_id is not None:
await self.hs.get_pusherpool().remove_pushers_by_access_token(
user_info.user_id, (user_info.token_id,)
token.user_id, (token.token_id,)
)

async def delete_access_tokens_for_user(

@@ -74,6 +74,7 @@ class DeviceWorkerHandler:
self._state_storage = hs.get_storage_controllers().state
self._auth_handler = hs.get_auth_handler()
self.server_name = hs.hostname
self._msc3852_enabled = hs.config.experimental.msc3852_enabled

@trace
async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
@@ -309,6 +310,7 @@ class DeviceHandler(DeviceWorkerHandler):
super().__init__(hs)

self.federation_sender = hs.get_federation_sender()
self._storage_controllers = hs.get_storage_controllers()

self.device_list_updater = DeviceListUpdater(hs, self)

@@ -693,8 +695,11 @@ class DeviceHandler(DeviceWorkerHandler):

# Ignore any users that aren't ours
if self.hs.is_mine_id(user_id):
joined_user_ids = await self.store.get_users_in_room(room_id)
hosts = {get_domain_from_id(u) for u in joined_user_ids}
hosts = set(
await self._storage_controllers.state.get_current_hosts_in_room(
room_id
)
)
hosts.discard(self.server_name)

# Check if we've already sent this update to some hosts
@@ -747,7 +752,13 @@ def _update_device_from_client_ips(
device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]
) -> None:
ip = client_ips.get((device["user_id"], device["device_id"]), {})
device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")})
device.update(
{
"last_seen_user_agent": ip.get("user_agent"),
"last_seen_ts": ip.get("last_seen"),
"last_seen_ip": ip.get("ip"),
}
)


class DeviceListUpdater:

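A runnable sketch of `_update_device_from_client_ips` after the change above: the last-seen user agent is merged into the device dict alongside the timestamp and IP (in Synapse the new field is only exposed over the API when `msc3852_enabled` is set).

```python
from typing import Any, Dict, Mapping, Tuple


def update_device_from_client_ips(
    device: Dict[str, Any],
    client_ips: Mapping[Tuple[str, str], Mapping[str, Any]],
) -> None:
    # Look up the most recent client connection for this (user, device) pair.
    ip = client_ips.get((device["user_id"], device["device_id"]), {})
    device.update(
        {
            "last_seen_user_agent": ip.get("user_agent"),
            "last_seen_ts": ip.get("last_seen"),
            "last_seen_ip": ip.get("ip"),
        }
    )


device = {"user_id": "@a:example.org", "device_id": "DEV"}
update_device_from_client_ips(
    device, {("@a:example.org", "DEV"): {"ip": "10.0.0.1", "user_agent": "curl"}}
)
assert device["last_seen_user_agent"] == "curl"
```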
@@ -30,7 +30,7 @@ from synapse.api.errors import (
from synapse.appservice import ApplicationService
from synapse.module_api import NOT_SPAM
from synapse.storage.databases.main.directory import RoomAliasMapping
from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id
from synapse.types import JsonDict, Requester, RoomAlias

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -83,8 +83,9 @@ class DirectoryHandler:
# TODO(erikj): Add transactions.
# TODO(erikj): Check if there is a current association.
if not servers:
users = await self.store.get_users_in_room(room_id)
servers = {get_domain_from_id(u) for u in users}
servers = await self._storage_controllers.state.get_current_hosts_in_room(
room_id
)

if not servers:
raise SynapseError(400, "Failed to get server list")
@@ -133,7 +134,7 @@ class DirectoryHandler:
else:
# Server admins are not subject to the same constraints as normal
# users when creating an alias (e.g. being in the room).
is_admin = await self.auth.is_server_admin(requester.user)
is_admin = await self.auth.is_server_admin(requester)

if (self.require_membership and check_membership) and not is_admin:
rooms_for_user = await self.store.get_rooms_for_user(user_id)
@@ -197,7 +198,7 @@ class DirectoryHandler:
user_id = requester.user.to_string()

try:
can_delete = await self._user_can_delete_alias(room_alias, user_id)
can_delete = await self._user_can_delete_alias(room_alias, requester)
except StoreError as e:
if e.code == 404:
raise NotFoundError("Unknown room alias")
@@ -287,8 +288,9 @@ class DirectoryHandler:
Codes.NOT_FOUND,
)

users = await self.store.get_users_in_room(room_id)
extra_servers = {get_domain_from_id(u) for u in users}
extra_servers = await self._storage_controllers.state.get_current_hosts_in_room(
room_id
)
servers_set = set(extra_servers) | set(servers)

# If this server is in the list of servers, return it first.
@@ -400,7 +402,9 @@ class DirectoryHandler:
# either no interested services, or no service with an exclusive lock
return True

async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str) -> bool:
async def _user_can_delete_alias(
self, alias: RoomAlias, requester: Requester
) -> bool:
"""Determine whether a user can delete an alias.

One of the following must be true:
@@ -413,7 +417,7 @@ class DirectoryHandler:
"""
creator = await self.store.get_room_alias_creator(alias.to_string())

if creator == user_id:
if creator == requester.user.to_string():
return True

# Resolve the alias to the corresponding room.
@@ -422,9 +426,7 @@ class DirectoryHandler:
if not room_id:
return False

return await self.auth.check_can_change_room_list(
room_id, UserID.from_string(user_id)
)
return await self.auth.check_can_change_room_list(room_id, requester)

async def edit_published_room_list(
self, requester: Requester, room_id: str, visibility: str
@@ -463,7 +465,7 @@ class DirectoryHandler:
raise SynapseError(400, "Unknown room")

can_change_room_list = await self.auth.check_can_change_room_list(
room_id, requester.user
room_id, requester
)
if not can_change_room_list:
raise AuthError(
@@ -528,10 +530,8 @@ class DirectoryHandler:
Get a list of the aliases that currently point to this room on this server
"""
# allow access to server admins and current members of the room
is_admin = await self.auth.is_server_admin(requester.user)
is_admin = await self.auth.is_server_admin(requester)
if not is_admin:
await self.auth.check_user_in_room_or_world_readable(
room_id, requester.user.to_string()
)
await self.auth.check_user_in_room_or_world_readable(room_id, requester)

return await self.store.get_aliases_for_room(room_id)

@@ -129,12 +129,9 @@ class EventAuthHandler:
else:
users = {}

# Find the user with the highest power level.
users_in_room = await self._store.get_users_in_room(room_id)
# Only interested in local users.
local_users_in_room = [
u for u in users_in_room if get_domain_from_id(u) == self._server_name
]
# Find the user with the highest power level (only interested in local
# users).
local_users_in_room = await self._store.get_local_users_in_room(room_id)
chosen_user = max(
local_users_in_room,
key=lambda user: users.get(user, users_default_level),

@@ -151,7 +151,7 @@ class EventHandler:
"""Retrieve a single specified event.

Args:
user: The user requesting the event
user: The local user requesting the event
room_id: The expected room id. We'll return None if the
event's room does not match.
event_id: The event ID to obtain.
@@ -173,8 +173,11 @@ class EventHandler:
if not event:
return None

users = await self.store.get_users_in_room(event.room_id)
is_peeking = user.to_string() not in users
is_user_in_room = await self.store.check_local_user_in_room(
user_id=user.to_string(), room_id=event.room_id
)
# The user is peeking if they aren't in the room already
is_peeking = not is_user_in_room

filtered = await filter_events_for_client(
self._storage_controllers, user.to_string(), [event], is_peeking=is_peeking

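A sketch of the peeking check after the change above: instead of fetching the full member list and testing membership in Python, ask the store directly whether the local user is joined. `FakeStore` is a stand-in exposing the method name from the diff.

```python
import asyncio


class FakeStore:
    def __init__(self, joined):
        self._joined = set(joined)

    async def check_local_user_in_room(self, user_id: str, room_id: str) -> bool:
        return (user_id, room_id) in self._joined


async def is_peeking(store: FakeStore, user_id: str, room_id: str) -> bool:
    # The user is peeking if they aren't in the room already.
    return not await store.check_local_user_in_room(
        user_id=user_id, room_id=room_id
    )


store = FakeStore({("@a:example.org", "!room:example.org")})
assert asyncio.run(is_peeking(store, "@b:example.org", "!room:example.org")) is True
```

The win is that the store can answer the narrower question from an index rather than materialising every member of a possibly huge room.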
@@ -32,6 +32,7 @@ from typing import (
)

import attr
from prometheus_client import Histogram
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
@@ -59,6 +60,7 @@ from synapse.events.validator import EventValidator
from synapse.federation.federation_client import InvalidResponseError
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.module_api import NOT_SPAM
from synapse.replication.http.federation import (
@@ -68,7 +70,7 @@ from synapse.replication.http.federation import (
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server
@@ -78,36 +80,28 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)


def get_domains_from_state(state: StateMap[EventBase]) -> List[Tuple[str, int]]:
"""Get joined domains from state

Args:
state: State map from type/state key to event.

Returns:
Returns a list of servers with the lowest depth of their joins.
Sorted by lowest depth first.
"""
joined_users = [
(state_key, int(event.depth))
for (e_type, state_key), event in state.items()
if e_type == EventTypes.Member and event.membership == Membership.JOIN
]

joined_domains: Dict[str, int] = {}
for u, d in joined_users:
try:
dom = get_domain_from_id(u)
old_d = joined_domains.get(dom)
if old_d:
joined_domains[dom] = min(d, old_d)
else:
joined_domains[dom] = d
except Exception:
pass

return sorted(joined_domains.items(), key=lambda d: d[1])
# Added to debug performance and track progress on optimizations
backfill_processing_before_timer = Histogram(
"synapse_federation_backfill_processing_before_time_seconds",
"sec",
[],
buckets=(
0.1,
0.5,
1.0,
2.5,
5.0,
7.5,
10.0,
15.0,
20.0,
30.0,
40.0,
60.0,
80.0,
"+Inf",
),
)

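A hedged sketch of the timing pattern this Histogram supports, using stock `prometheus_client`: take a start timestamp before waiting on the linearizer, then observe the elapsed seconds at the end. The metric name and bucket list are shortened for illustration.

```python
import time

from prometheus_client import Histogram

backfill_processing_before_timer = Histogram(
    "demo_backfill_processing_before_time_seconds",
    "sec",
    [],
    buckets=(0.1, 0.5, 1.0, 5.0, 10.0, 30.0, 60.0, "+Inf"),
)


def timed_section() -> None:
    processing_start_time = time.monotonic()
    # ... queue on the linearizer and do the backfill work here ...
    processing_end_time = time.monotonic()
    # Histograms take seconds; Synapse divides its msec clock by 1000.
    backfill_processing_before_timer.observe(
        processing_end_time - processing_start_time
    )


timed_section()
```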
class _BackfillPointType(Enum):
|
||||
@@ -137,6 +131,7 @@ class FederationHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.hs = hs
|
||||
|
||||
self.clock = hs.get_clock()
|
||||
self.store = hs.get_datastores().main
|
||||
self._storage_controllers = hs.get_storage_controllers()
|
||||
self._state_storage_controller = self._storage_controllers.state
|
||||
@@ -180,6 +175,7 @@ class FederationHandler:
|
||||
"resume_sync_partial_state_room", self._resume_sync_partial_state_room
|
||||
)
|
||||
|
||||
@trace
|
||||
async def maybe_backfill(
|
||||
self, room_id: str, current_depth: int, limit: int
|
||||
) -> bool:
|
||||
@@ -195,12 +191,39 @@
             return. This is used as part of the heuristic to decide if we
             should back paginate.
         """
+        # Starting the processing time here so we can include the room backfill
+        # linearizer lock queue in the timing
+        processing_start_time = self.clock.time_msec()
+
         async with self._room_backfill.queue(room_id):
-            return await self._maybe_backfill_inner(room_id, current_depth, limit)
+            return await self._maybe_backfill_inner(
+                room_id,
+                current_depth,
+                limit,
+                processing_start_time=processing_start_time,
+            )

     async def _maybe_backfill_inner(
-        self, room_id: str, current_depth: int, limit: int
+        self,
+        room_id: str,
+        current_depth: int,
+        limit: int,
+        *,
+        processing_start_time: int,
     ) -> bool:
+        """
+        Checks whether the `current_depth` is at or approaching any backfill
+        points in the room and if so, will backfill. We only care about
+        checking backfill points that happened before the `current_depth`
+        (meaning less than or equal to the `current_depth`).
+
+        Args:
+            room_id: The room to backfill in.
+            current_depth: The depth to check at for any upcoming backfill points.
+            limit: The max number of events to request from the remote federated server.
+            processing_start_time: The time when `maybe_backfill` started
+                processing. Only used for timing.
+        """
         backwards_extremities = [
             _BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY)
             for event_id, depth in await self.store.get_oldest_event_ids_with_depth_in_room(
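The reason `processing_start_time` is sampled before entering the linearizer is that time spent queuing for the per-room lock then shows up in the metric, rather than being silently excluded. The same idea in a self-contained sketch, with a plain `asyncio.Lock` standing in for Synapse's per-key Linearizer:

```python
import asyncio
import time

lock = asyncio.Lock()  # stand-in for the per-room linearizer

async def timed_work(room_id: str) -> float:
    # Sample the clock *before* waiting for the lock, so queueing time is
    # included in the measurement.
    start = time.monotonic()
    async with lock:
        await asyncio.sleep(0)  # the serialized work would go here
    return time.monotonic() - start
```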
@@ -368,23 +391,29 @@
         logger.debug(
             "_maybe_backfill_inner: extremities_to_request %s", extremities_to_request
         )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "extremities_to_request",
+            str(extremities_to_request),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "extremities_to_request.length",
+            str(len(extremities_to_request)),
+        )

         # Now we need to decide which hosts to hit first.

-        # First we try hosts that are already in the room
+        # First we try hosts that are already in the room.
         # TODO: HEURISTIC ALERT.
+        likely_domains = (
+            await self._storage_controllers.state.get_current_hosts_in_room(room_id)
+        )

-        curr_state = await self._storage_controllers.state.get_current_state(room_id)
-
-        curr_domains = get_domains_from_state(curr_state)
-
-        likely_domains = [
-            domain for domain, depth in curr_domains if domain != self.server_name
-        ]
-
-        async def try_backfill(domains: List[str]) -> bool:
+        async def try_backfill(domains: Collection[str]) -> bool:
             # TODO: Should we try multiple of these at a time?
             for dom in domains:
+                # We don't want to ask our own server for information we don't have
+                if dom == self.server_name:
+                    continue
+
                 try:
                     await self._federation_event_handler.backfill(
                         dom, room_id, limit=100, extremities=extremities_to_request
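A recurring instrumentation idiom in these hunks is recording both a stringified collection and its length as separate tracing tags, so traces can be filtered on size without parsing the value. A hedged sketch of the same idiom (the helper and prefix are invented for illustration, not Synapse APIs):

```python
from typing import Callable, Sequence

def tag_collection(
    set_tag: Callable[[str, str], None], prefix: str, name: str, values: Sequence
) -> None:
    # Record the values themselves (useful when inspecting a single trace)...
    set_tag(prefix + name, str(list(values)))
    # ...and the size separately (useful for aggregate queries).
    set_tag(prefix + name + ".length", str(len(values)))
```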
@@ -423,6 +452,11 @@

             return False

+        processing_end_time = self.clock.time_msec()
+        backfill_processing_before_timer.observe(
+            (processing_end_time - processing_start_time) / 1000
+        )
+
         success = await try_backfill(likely_domains)
         if success:
             return True
@@ -546,9 +580,9 @@ class FederationHandler:
             )

             if ret.partial_state:
-                # TODO(faster_joins): roll this back if we don't manage to start the
-                # background resync (eg process_remote_join fails)
-                # https://github.com/matrix-org/synapse/issues/12998
+                # Mark the room as having partial state.
+                # The background process is responsible for unmarking this flag,
+                # even if the join fails.
                 await self.store.store_partial_state_room(room_id, ret.servers_in_room)

             try:
@@ -574,17 +608,21 @@ class FederationHandler:
                     room_id,
                 )
                 raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)

-            if ret.partial_state:
-                # Kick off the process of asynchronously fetching the state for this
-                # room.
-                run_as_background_process(
-                    desc="sync_partial_state_room",
-                    func=self._sync_partial_state_room,
-                    initial_destination=origin,
-                    other_destinations=ret.servers_in_room,
-                    room_id=room_id,
-                )
+        finally:
+            # Always kick off the background process that asynchronously fetches
+            # state for the room.
+            # If the join failed, the background process is responsible for
+            # cleaning up — including unmarking the room as a partial state room.
+            if ret.partial_state:
+                # Kick off the process of asynchronously fetching the state for this
+                # room.
+                run_as_background_process(
+                    desc="sync_partial_state_room",
+                    func=self._sync_partial_state_room,
+                    initial_destination=origin,
+                    other_destinations=ret.servers_in_room,
+                    room_id=room_id,
+                )

         # We wait here until this instance has seen the events come down
         # replication (if we're using replication) as the below uses caches.
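Moving the kick-off into a `finally:` block is the standard way to guarantee that cleanup work is scheduled on both the success and failure paths. A minimal sketch of the shape, with placeholder functions rather than Synapse APIs:

```python
import asyncio

async def prepare_join(room_id: str) -> bool:
    return True  # placeholder: decide whether a background resync is needed

async def do_join(room_id: str) -> None:
    ...  # placeholder for the join itself; may raise

async def resync_state(room_id: str) -> None:
    ...  # placeholder for the background resync / cleanup

async def join_room(room_id: str) -> None:
    needs_resync = await prepare_join(room_id)
    try:
        await do_join(room_id)
    finally:
        # Runs whether do_join returned or raised: the background task owns
        # the cleanup, including unmarking any partial-state flag.
        if needs_resync:
            asyncio.create_task(resync_state(room_id))
```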
@@ -748,6 +786,23 @@ class FederationHandler:
         # (and return a 404 otherwise)
         room_version = await self.store.get_room_version(room_id)

+        if await self.store.is_partial_state_room(room_id):
+            # If our server is still only partially joined, we can't give a complete
+            # response to /make_join, so return a 404 as we would if we weren't in the
+            # room at all.
+            # The main reason we can't respond properly is that we need to know about
+            # the auth events for the join event that we would return.
+            # We also should not bother entertaining the /make_join since we cannot
+            # handle the /send_join.
+            logger.info(
+                "Rejecting /make_join to %s because it's a partial state room", room_id
+            )
+            raise SynapseError(
+                404,
+                "Unable to handle /make_join right now; this server is not fully joined.",
+                errcode=Codes.NOT_FOUND,
+            )
+
         # now check that we are *still* in the room
         is_in_room = await self._event_auth_handler.check_host_in_room(
             room_id, self.server_name
@@ -1058,6 +1113,8 @@ class FederationHandler:

         return event

+    @trace
+    @tag_args
     async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]:
         """Returns the state at the event. i.e. not including said event."""
         event = await self.store.get_event(event_id, check_room_id=room_id)
@@ -1539,15 +1596,16 @@ class FederationHandler:

         # Make an infinite iterator of destinations to try. Once we find a working
         # destination, we'll stick with it until it flakes.
+        destinations: Collection[str]
         if initial_destination is not None:
             # Move `initial_destination` to the front of the list.
             destinations = list(other_destinations)
             if initial_destination in destinations:
                 destinations.remove(initial_destination)
             destinations = [initial_destination] + destinations
-            destination_iter = itertools.cycle(destinations)
         else:
-            destination_iter = itertools.cycle(other_destinations)
+            destinations = other_destinations

+        destination_iter = itertools.cycle(destinations)

         # `destination` is the current remote homeserver we're pulling from.
         destination = next(destination_iter)
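The pattern here — move a preferred destination to the front, then cycle forever, sticking with one server until it fails — is worth seeing in isolation:

```python
import itertools
from typing import Iterator, List, Optional

def make_destination_iter(
    preferred: Optional[str], others: List[str]
) -> Iterator[str]:
    """Yield destinations round-robin, starting with `preferred` if given."""
    destinations = list(others)
    if preferred is not None:
        if preferred in destinations:
            destinations.remove(preferred)
        destinations = [preferred] + destinations
    return itertools.cycle(destinations)

it = make_destination_iter("a.example", ["b.example", "a.example", "c.example"])
assert [next(it) for _ in range(4)] == ["a.example", "b.example", "c.example", "a.example"]
```

The hunks that follow move on from `FederationHandler` (synapse/handlers/federation.py) to `FederationEventHandler` (synapse/handlers/federation_event.py).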
@@ -29,7 +29,7 @@ from typing import (
     Tuple,
 )

-from prometheus_client import Counter
+from prometheus_client import Counter, Histogram

 from synapse import event_auth
 from synapse.api.constants import (

@@ -59,6 +59,13 @@ from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.federation.federation_client import InvalidResponseError
 from synapse.logging.context import nested_logging_context
+from synapse.logging.opentracing import (
+    SynapseTags,
+    set_tag,
+    start_active_span,
+    tag_args,
+    trace,
+)
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
 from synapse.replication.http.federation import (
@@ -91,6 +98,36 @@ soft_failed_event_counter = Counter(
     "Events received over federation that we marked as soft_failed",
 )

+# Added to debug performance and track progress on optimizations
+backfill_processing_after_timer = Histogram(
+    "synapse_federation_backfill_processing_after_time_seconds",
+    "sec",
+    [],
+    buckets=(
+        0.1,
+        0.25,
+        0.5,
+        1.0,
+        2.5,
+        5.0,
+        7.5,
+        10.0,
+        15.0,
+        20.0,
+        25.0,
+        30.0,
+        40.0,
+        50.0,
+        60.0,
+        80.0,
+        100.0,
+        120.0,
+        150.0,
+        180.0,
+        "+Inf",
+    ),
+)
+

 class FederationEventHandler:
     """Handles events that originated from federation.
@@ -278,7 +315,8 @@ class FederationEventHandler:
             )

         try:
-            await self._process_received_pdu(origin, pdu, state_ids=None)
+            context = await self._state_handler.compute_event_context(pdu)
+            await self._process_received_pdu(origin, pdu, context)
         except PartialStateConflictError:
             # The room was un-partial stated while we were processing the PDU.
             # Try once more, with full state this time.

@@ -286,7 +324,8 @@
                 "Room %s was un-partial stated while processing the PDU, trying again.",
                 room_id,
             )
-            await self._process_received_pdu(origin, pdu, state_ids=None)
+            context = await self._state_handler.compute_event_context(pdu)
+            await self._process_received_pdu(origin, pdu, context)

     async def on_send_membership_event(
         self, origin: str, event: EventBase
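The shape of this change is a classic retry-exactly-once pattern: compute a context, attempt the work, and if a specific conflict exception signals that the world changed underneath us, recompute and try one more time. A generic sketch with placeholder exception and helpers (not Synapse's own types):

```python
class StateConflictError(Exception):
    """Raised when shared state changed between compute and persist."""

async def compute_context(event):
    ...  # placeholder

async def persist(event, ctx) -> None:
    ...  # placeholder; raises StateConflictError on a mid-flight change

async def process_once_with_retry(event) -> None:
    try:
        ctx = await compute_context(event)
        await persist(event, ctx)
    except StateConflictError:
        # The precondition changed under us; recompute once and retry.
        # A second conflict propagates rather than looping forever.
        ctx = await compute_context(event)
        await persist(event, ctx)
```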
@@ -316,6 +355,7 @@ class FederationEventHandler:
             The event and context of the event after inserting it into the room graph.

         Raises:
+            RuntimeError if any prev_events are missing
             SynapseError if the event is not accepted into the room
             PartialStateConflictError if the room was un-partial stated in between
                 computing the state at the event and persisting it. The caller should
@@ -376,7 +416,7 @@ class FederationEventHandler:
         # need to.
         await self._event_creation_handler.cache_joined_hosts_for_event(event, context)

-        await self._check_for_soft_fail(event, None, origin=origin)
+        await self._check_for_soft_fail(event, context=context, origin=origin)
         await self._run_push_actions_and_persist_event(event, context)

         return event, context
@@ -406,6 +446,7 @@ class FederationEventHandler:
                 prev_member_event,
             )

+    @trace
     async def process_remote_join(
         self,
         origin: str,
@@ -534,32 +575,36 @@ class FederationEventHandler:
         #
         # This is the same operation as we do when we receive a regular event
         # over federation.
-        state_ids = await self._resolve_state_at_missing_prevs(destination, event)
-
-        # build a new state group for it if need be
-        context = await self._state_handler.compute_event_context(
-            event,
-            state_ids_before_event=state_ids,
+        context = await self._compute_event_context_with_maybe_missing_prevs(
+            destination, event
         )
         if context.partial_state:
             # this can happen if some or all of the event's prev_events still have
-            # partial state - ie, an event has an earlier stream_ordering than one
-            # or more of its prev_events, so we de-partial-state it before its
-            # prev_events.
+            # partial state. We were careful to only pick events from the db without
+            # partial-state prev events, so that implies that a prev event has
+            # been persisted (with partial state) since we did the query.
             #
-            # TODO(faster_joins): we probably need to be more intelligent, and
-            # exclude partial-state prev_events from consideration
-            # https://github.com/matrix-org/synapse/issues/13001
+            # So, let's just ignore `event` for now; when we re-run the db query
+            # we should instead get its partial-state prev event, which we will
+            # de-partial-state, and then come back to event.
             logger.warning(
-                "%s still has partial state: can't de-partial-state it yet",
+                "%s still has prev_events with partial state: can't de-partial-state it yet",
                 event.event_id,
             )
             return

         # since the state at this event has changed, we should now re-evaluate
         # whether it should have been rejected. We must already have all of the
         # auth events (from last time we went round this path), so there is no
         # need to pass the origin.
         await self._check_event_auth(None, event, context)

         await self._store.update_state_for_partial_state_event(event, context)
         self._state_storage_controller.notify_event_un_partial_stated(
             event.event_id
         )

+    @trace
     async def backfill(
         self, dest: str, room_id: str, limit: int, extremities: Collection[str]
     ) -> None:
@@ -589,21 +634,23 @@ class FederationEventHandler:
         if not events:
             return

-        # if there are any events in the wrong room, the remote server is buggy and
-        # should not be trusted.
-        for ev in events:
-            if ev.room_id != room_id:
-                raise InvalidResponseError(
-                    f"Remote server {dest} returned event {ev.event_id} which is in "
-                    f"room {ev.room_id}, when we were backfilling in {room_id}"
-                )
-
-        await self._process_pulled_events(
-            dest,
-            events,
-            backfilled=True,
-        )
+        with backfill_processing_after_timer.time():
+            # if there are any events in the wrong room, the remote server is buggy and
+            # should not be trusted.
+            for ev in events:
+                if ev.room_id != room_id:
+                    raise InvalidResponseError(
+                        f"Remote server {dest} returned event {ev.event_id} which is in "
+                        f"room {ev.room_id}, when we were backfilling in {room_id}"
+                    )
+
+            await self._process_pulled_events(
+                dest,
+                events,
+                backfilled=True,
+            )

     @trace
     async def _get_missing_events_for_pdu(
         self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
     ) -> None:
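`Histogram.time()` as used above is the context-manager form: it observes the wall-clock duration of the whole block on exit, including any `await` points inside it. A minimal sketch with an invented metric:

```python
import asyncio

from prometheus_client import Histogram

work_timer = Histogram("example_work_seconds", "sec")

async def do_work() -> None:
    with work_timer.time():
        await asyncio.sleep(0.1)  # awaited work is included in the sample
```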
@@ -704,8 +751,9 @@ class FederationEventHandler:
         logger.info("Got %d prev_events", len(missing_events))
         await self._process_pulled_events(origin, missing_events, backfilled=False)

+    @trace
     async def _process_pulled_events(
-        self, origin: str, events: Iterable[EventBase], backfilled: bool
+        self, origin: str, events: Collection[EventBase], backfilled: bool
     ) -> None:
         """Process a batch of events we have pulled from a remote server
@@ -720,6 +768,15 @@ class FederationEventHandler:
             backfilled: True if this is part of a historical batch of events (inhibits
                 notification to clients, and validation of device keys.)
         """
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids",
+            str([event.event_id for event in events]),
+        )
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+            str(len(events)),
+        )
+        set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled))
         logger.debug(
             "processing pulled backfilled=%s events=%s",
             backfilled,

@@ -742,6 +799,8 @@ class FederationEventHandler:
             with nested_logging_context(ev.event_id):
                 await self._process_pulled_event(origin, ev, backfilled=backfilled)

+    @trace
+    @tag_args
     async def _process_pulled_event(
         self, origin: str, event: EventBase, backfilled: bool
     ) -> None:
@@ -806,29 +865,56 @@ class FederationEventHandler:
             return

         try:
-            state_ids = await self._resolve_state_at_missing_prevs(origin, event)
-            # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does
-            # not return partial state
-            # https://github.com/matrix-org/synapse/issues/13002
-
-            await self._process_received_pdu(
-                origin, event, state_ids=state_ids, backfilled=backfilled
-            )
+            try:
+                context = await self._compute_event_context_with_maybe_missing_prevs(
+                    origin, event
+                )
+                await self._process_received_pdu(
+                    origin,
+                    event,
+                    context,
+                    backfilled=backfilled,
+                )
+            except PartialStateConflictError:
+                # The room was un-partial stated while we were processing the event.
+                # Try once more, with full state this time.
+                context = await self._compute_event_context_with_maybe_missing_prevs(
+                    origin, event
+                )
+
+                # We ought to have full state now, barring some unlikely race where we left and
+                # rejoined the room in the background.
+                if context.partial_state:
+                    raise AssertionError(
+                        f"Event {event.event_id} still has a partial resolved state "
+                        f"after room {event.room_id} was un-partial stated"
+                    )
+
+                await self._process_received_pdu(
+                    origin,
+                    event,
+                    context,
+                    backfilled=backfilled,
+                )
         except FederationError as e:
             if e.code == 403:
                 logger.warning("Pulled event %s failed history check.", event_id)
             else:
                 raise

-    async def _resolve_state_at_missing_prevs(
+    @trace
+    async def _compute_event_context_with_maybe_missing_prevs(
         self, dest: str, event: EventBase
-    ) -> Optional[StateMap[str]]:
-        """Calculate the state at an event with missing prev_events.
+    ) -> EventContext:
+        """Build an EventContext structure for a non-outlier event whose prev_events may
+        be missing.

-        This is used when we have pulled a batch of events from a remote server, and
-        still don't have all the prev_events.
+        This is used when we have pulled a batch of events from a remote server, and may
+        not have all the prev_events.

-        If we already have all the prev_events for `event`, this method does nothing.
+        To build an EventContext, we need to calculate the state before the event. If we
+        already have all the prev_events for `event`, we can simply use the state after
+        the prev_events to calculate the state before `event`.

         Otherwise, the missing prevs become new backwards extremities, and we fall back
         to asking the remote server for the state after each missing `prev_event`,
@@ -849,8 +935,7 @@ class FederationEventHandler:
             event: an event to check for missing prevs.

         Returns:
-            if we already had all the prev events, `None`. Otherwise, returns
-            the event ids of the state at `event`.
+            The event context.

         Raises:
             FederationError if we fail to get the state from the remote server after any

@@ -864,7 +949,7 @@ class FederationEventHandler:
         missing_prevs = prevs - seen

         if not missing_prevs:
-            return None
+            return await self._state_handler.compute_event_context(event)

         logger.info(
             "Event %s is missing prev_events %s: calculating state for a "
@@ -876,9 +961,15 @@ class FederationEventHandler:
         # resolve them to find the correct state at the current event.

         try:
+            # Determine whether we may be about to retrieve partial state
+            # Events may be un-partial stated right after we compute the partial state
+            # flag, but that's okay, as long as the flag errs on the conservative side.
+            partial_state_flags = await self._store.get_partial_state_events(seen)
+            partial_state = any(partial_state_flags.values())
+
             # Get the state of the events we know about
             ours = await self._state_storage_controller.get_state_groups_ids(
-                room_id, seen
+                room_id, seen, await_full_state=False
             )

             # state_maps is a list of mappings from (type, state_key) to event_id
@@ -924,8 +1015,12 @@ class FederationEventHandler:
                     "We can't get valid state history.",
                     affected=event_id,
                 )
-        return state_map
+        return await self._state_handler.compute_event_context(
+            event, state_ids_before_event=state_map, partial_state=partial_state
+        )

+    @trace
+    @tag_args
     async def _get_state_ids_after_missing_prev_event(
         self,
         destination: str,
@@ -946,6 +1041,14 @@ class FederationEventHandler:
             InvalidResponseError: if the remote homeserver's response contains fields
                 of the wrong type.
         """

+        # It would be better if we could query the difference from our known
+        # state to the given `event_id` so the sending server doesn't have to
+        # send as much and we don't have to process as many events. For example
+        # in a room like #matrix:matrix.org, we get 200k events (77k state_events, 122k
+        # auth_events) from this call.
+        #
+        # Tracked by https://github.com/matrix-org/synapse/issues/13618
         (
             state_event_ids,
             auth_event_ids,
@@ -965,10 +1068,10 @@ class FederationEventHandler:
         logger.debug("Fetching %i events from cache/store", len(desired_events))
         have_events = await self._store.have_seen_events(room_id, desired_events)

-        missing_desired_events = desired_events - have_events
+        missing_desired_event_ids = desired_events - have_events
         logger.debug(
             "We are missing %i events (got %i)",
-            len(missing_desired_events),
+            len(missing_desired_event_ids),
             len(have_events),
         )
@@ -980,13 +1083,30 @@ class FederationEventHandler:
         # already have a bunch of the state events. It would be nice if the
         # federation api gave us a way of finding out which we actually need.

-        missing_auth_events = set(auth_event_ids) - have_events
-        missing_auth_events.difference_update(
-            await self._store.have_seen_events(room_id, missing_auth_events)
+        missing_auth_event_ids = set(auth_event_ids) - have_events
+        missing_auth_event_ids.difference_update(
+            await self._store.have_seen_events(room_id, missing_auth_event_ids)
         )
-        logger.debug("We are also missing %i auth events", len(missing_auth_events))
+        logger.debug("We are also missing %i auth events", len(missing_auth_event_ids))

-        missing_events = missing_desired_events | missing_auth_events
+        missing_event_ids = missing_desired_event_ids | missing_auth_event_ids
+
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "missing_auth_event_ids",
+            str(missing_auth_event_ids),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "missing_auth_event_ids.length",
+            str(len(missing_auth_event_ids)),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "missing_desired_event_ids",
+            str(missing_desired_event_ids),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "missing_desired_event_ids.length",
+            str(len(missing_desired_event_ids)),
+        )

         # Making an individual request for each of 1000s of events has a lot of
         # overhead. On the other hand, we don't really want to fetch all of the events
@@ -997,13 +1117,13 @@ class FederationEventHandler:
         #
         # TODO: might it be better to have an API which lets us do an aggregate event
         # request
-        if (len(missing_events) * 10) >= len(auth_event_ids) + len(state_event_ids):
+        if (len(missing_event_ids) * 10) >= len(auth_event_ids) + len(state_event_ids):
             logger.debug("Requesting complete state from remote")
             await self._get_state_and_persist(destination, room_id, event_id)
         else:
-            logger.debug("Fetching %i events from remote", len(missing_events))
+            logger.debug("Fetching %i events from remote", len(missing_event_ids))
             await self._get_events_and_persist(
-                destination=destination, room_id=room_id, event_ids=missing_events
+                destination=destination, room_id=room_id, event_ids=missing_event_ids
             )

         # We now need to fill out the state map, which involves fetching the
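The `* 10` comparison is a cheap way of saying "if we are missing at least a tenth of the events, one bulk state request beats many per-event requests". A worked example of the threshold:

```python
# Suppose the remote reported 800 state events and 200 auth events.
auth_event_ids = ["$a"] * 200
state_event_ids = ["$s"] * 800
total = len(auth_event_ids) + len(state_event_ids)  # 1000

# Missing 100 of them: 100 * 10 == 1000 >= 1000, so fetch complete state.
# Missing 99: 990 < 1000, so fetch the 99 events individually.
for missing in (100, 99):
    fetch_all = (missing * 10) >= total
    print(missing, "->", "complete state" if fetch_all else "individual events")
```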
@@ -1060,6 +1180,14 @@ class FederationEventHandler:
                 event_id,
                 failed_to_fetch,
             )
+            set_tag(
+                SynapseTags.RESULT_PREFIX + "failed_to_fetch",
+                str(failed_to_fetch),
+            )
+            set_tag(
+                SynapseTags.RESULT_PREFIX + "failed_to_fetch.length",
+                str(len(failed_to_fetch)),
+            )

         if remote_event.is_state() and remote_event.rejected_reason is None:
             state_map[
@@ -1068,6 +1196,8 @@ class FederationEventHandler:

         return state_map

+    @trace
+    @tag_args
     async def _get_state_and_persist(
         self, destination: str, room_id: str, event_id: str
     ) -> None:
@@ -1089,11 +1219,12 @@ class FederationEventHandler:
                 destination=destination, room_id=room_id, event_ids=(event_id,)
             )

+    @trace
     async def _process_received_pdu(
         self,
         origin: str,
         event: EventBase,
-        state_ids: Optional[StateMap[str]],
+        context: EventContext,
         backfilled: bool = False,
     ) -> None:
         """Called when we have a new non-outlier event.
@@ -1115,24 +1246,18 @@ class FederationEventHandler:

             event: event to be persisted

-            state_ids: Normally None, but if we are handling a gap in the graph
-                (ie, we are missing one or more prev_events), the resolved state at the
-                event. Must not be partial state.
+            context: The `EventContext` to persist the event with.

             backfilled: True if this is part of a historical batch of events (inhibits
                 notification to clients, and validation of device keys.)

             PartialStateConflictError: if the room was un-partial stated in between
-                computing the state at the event and persisting it. The caller should retry
-                exactly once in this case. Will never be raised if `state_ids` is provided.
+                computing the state at the event and persisting it. The caller should
+                recompute `context` and retry exactly once when this happens.
         """
         logger.debug("Processing event: %s", event)
         assert not event.internal_metadata.outlier

-        context = await self._state_handler.compute_event_context(
-            event,
-            state_ids_before_event=state_ids,
-        )
         try:
             await self._check_event_auth(origin, event, context)
         except AuthError as e:
@@ -1144,7 +1269,7 @@ class FederationEventHandler:
         # For new (non-backfilled and non-outlier) events we check if the event
         # passes auth based on the current state. If it doesn't then we
         # "soft-fail" the event.
-        await self._check_for_soft_fail(event, state_ids, origin=origin)
+        await self._check_for_soft_fail(event, context=context, origin=origin)

         await self._run_push_actions_and_persist_event(event, context, backfilled)
@@ -1245,6 +1370,7 @@ class FederationEventHandler:
         except Exception:
             logger.exception("Failed to resync device for %s", sender)

+    @trace
     async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> None:
         """Handles backfilling the insertion event when we receive a marker
         event that points to one.
@@ -1276,7 +1402,7 @@ class FederationEventHandler:
         logger.debug("_handle_marker_event: received %s", marker_event)

         insertion_event_id = marker_event.content.get(
-            EventContentFields.MSC2716_MARKER_INSERTION
+            EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE
         )

         if insertion_event_id is None:
@@ -1376,6 +1502,8 @@ class FederationEventHandler:

         return event_from_response

+    @trace
+    @tag_args
     async def _get_events_and_persist(
         self, destination: str, room_id: str, event_ids: Collection[str]
     ) -> None:
@@ -1421,6 +1549,7 @@ class FederationEventHandler:
         logger.info("Fetched %i events of %i requested", len(events), len(event_ids))
         await self._auth_and_persist_outliers(room_id, events)

+    @trace
     async def _auth_and_persist_outliers(
         self, room_id: str, events: Iterable[EventBase]
     ) -> None:
@@ -1439,6 +1568,16 @@ class FederationEventHandler:
         """
         event_map = {event.event_id: event for event in events}

+        event_ids = event_map.keys()
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids",
+            str(event_ids),
+        )
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+            str(len(event_ids)),
+        )
+
         # filter out any events we have already seen. This might happen because
         # the events were eagerly pushed to us (eg, during a room join), or because
         # another thread has raced against us since we decided to request the event.
@@ -1555,14 +1694,17 @@ class FederationEventHandler:
                 backfilled=True,
             )

+    @trace
     async def _check_event_auth(
-        self, origin: str, event: EventBase, context: EventContext
+        self, origin: Optional[str], event: EventBase, context: EventContext
     ) -> None:
         """
         Checks whether an event should be rejected (for failing auth checks).

         Args:
-            origin: The host the event originates from.
+            origin: The host the event originates from. This is used to fetch
+                any missing auth events. It can be set to None, but only if we are
+                sure that we already have all the auth events.
             event: The event itself.
             context:
                 The event context.
@@ -1591,6 +1733,14 @@ class FederationEventHandler:
         claimed_auth_events = await self._load_or_fetch_auth_events_for_event(
             origin, event
         )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "claimed_auth_events",
+            str([ev.event_id for ev in claimed_auth_events]),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "claimed_auth_events.length",
+            str(len(claimed_auth_events)),
+        )

         # ... and check that the event passes auth at those auth events.
         # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu:
@@ -1688,6 +1838,7 @@ class FederationEventHandler:
             )
             context.rejected = RejectedReason.AUTH_ERROR

+    @trace
     async def _maybe_kick_guest_users(self, event: EventBase) -> None:
         if event.type != EventTypes.GuestAccess:
             return
@@ -1705,7 +1856,7 @@ class FederationEventHandler:
     async def _check_for_soft_fail(
         self,
         event: EventBase,
-        state_ids: Optional[StateMap[str]],
+        context: EventContext,
         origin: str,
     ) -> None:
         """Checks if we should soft fail the event; if so, marks the event as

@@ -1716,7 +1867,7 @@ class FederationEventHandler:

         Args:
             event
-            state_ids: The state at the event if we don't have all the event's prev events
+            context: The `EventContext` which we are about to persist the event with.
             origin: The host the event originates from.
         """
         if await self._store.is_partial_state_room(event.room_id):
@@ -1742,11 +1893,15 @@ class FederationEventHandler:
         auth_types = auth_types_for_event(room_version_obj, event)

         # Calculate the "current state".
-        if state_ids is not None:
-            # If we're explicitly given the state then we won't have all the
-            # prev events, and so we have a gap in the graph. In this case
-            # we want to be a little careful as we might have been down for
-            # a while and have an incorrect view of the current state,
+        seen_event_ids = await self._store.have_events_in_timeline(prev_event_ids)
+        has_missing_prevs = bool(prev_event_ids - seen_event_ids)
+        if has_missing_prevs:
+            # We don't have all the prev_events of this event, which means we have a
+            # gap in the graph, and the new event is going to become a new backwards
+            # extremity.
+            #
+            # In this case we want to be a little careful as we might have been
+            # down for a while and have an incorrect view of the current state,
             # however we still want to do checks as gaps are easy to
             # maliciously manufacture.
             #

@@ -1759,6 +1914,7 @@ class FederationEventHandler:
                 event.room_id, extrem_ids
             )
             state_sets: List[StateMap[str]] = list(state_sets_d.values())
+            state_ids = await context.get_prev_state_ids()
             state_sets.append(state_ids)
             current_state_ids = (
                 await self._state_resolution_handler.resolve_events_with_store(
@@ -1808,7 +1964,7 @@ class FederationEventHandler:
             event.internal_metadata.soft_failed = True

     async def _load_or_fetch_auth_events_for_event(
-        self, destination: str, event: EventBase
+        self, destination: Optional[str], event: EventBase
     ) -> Collection[EventBase]:
         """Fetch this event's auth_events, from database or remote

@@ -1824,12 +1980,19 @@ class FederationEventHandler:
         Args:
             destination: where to send the /event_auth request. Typically the server
                 that sent us `event` in the first place.
+
+                If this is None, no attempt is made to load any missing auth events:
+                rather, an AssertionError is raised if there are any missing events.
+
             event: the event whose auth_events we want

         Returns:
             all of the events listed in `event.auth_events_ids`, after deduplication

         Raises:
+            AssertionError if some auth events were missing and no `destination` was
+                supplied.
+
             AuthError if we were unable to fetch the auth_events for any reason.
         """
         event_auth_event_ids = set(event.auth_event_ids())
@@ -1841,6 +2004,13 @@ class FederationEventHandler:
         )
         if not missing_auth_event_ids:
             return event_auth_events.values()
+        if destination is None:
+            # this shouldn't happen: destination must be set unless we know we have already
+            # persisted the auth events.
+            raise AssertionError(
+                "_load_or_fetch_auth_events_for_event() called with no destination for "
+                "an event with missing auth_events"
+            )

         logger.info(
             "Event %s refers to unknown auth events %s: fetching auth chain",
@@ -1876,6 +2046,8 @@ class FederationEventHandler:
         # instead we raise an AuthError, which will make the caller ignore it.
         raise AuthError(code=HTTPStatus.FORBIDDEN, msg="Auth events could not be found")

+    @trace
+    @tag_args
     async def _get_remote_auth_chain_for_event(
         self, destination: str, room_id: str, event_id: str
     ) -> None:

@@ -1904,6 +2076,7 @@ class FederationEventHandler:

         await self._auth_and_persist_outliers(room_id, remote_auth_events)

+    @trace
     async def _run_push_actions_and_persist_event(
         self, event: EventBase, context: EventContext, backfilled: bool = False
     ) -> None:
@@ -2012,8 +2185,17 @@ class FederationEventHandler:
             self._message_handler.maybe_schedule_expiry(event)

         if not backfilled:  # Never notify for backfilled events
-            for event in events:
-                await self._notify_persisted_event(event, max_stream_token)
+            with start_active_span("notify_persisted_events"):
+                set_tag(
+                    SynapseTags.RESULT_PREFIX + "event_ids",
+                    str([ev.event_id for ev in events]),
+                )
+                set_tag(
+                    SynapseTags.RESULT_PREFIX + "event_ids.length",
+                    str(len(events)),
+                )
+                for event in events:
+                    await self._notify_persisted_event(event, max_stream_token)

         return max_stream_token.stream
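`start_active_span`/`set_tag` here come from `synapse.logging.opentracing`, but the shape is that of any OpenTracing-style API: a context manager opens a child span, tags are attached to it, and the span closes when the block exits. A hedged sketch using the plain `opentracing` package (which installs a no-op tracer by default, so this runs standalone):

```python
import opentracing

def notify_all(events) -> None:
    with opentracing.tracer.start_active_span("notify_persisted_events") as scope:
        scope.span.set_tag("event_ids.length", len(events))
        for event in events:
            ...  # notify clients about `event`
```

The remaining hunks move on to `IdentityHandler` (synapse/handlers/identity.py) and `InitialSyncHandler` (synapse/handlers/initial_sync.py).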
@@ -26,7 +26,6 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.api.ratelimiting import Ratelimiter
-from synapse.config.emailconfig import ThreepidBehaviour
 from synapse.http import RequestTimedOutError
 from synapse.http.client import SimpleHttpClient
 from synapse.http.site import SynapseRequest
@@ -416,48 +415,6 @@ class IdentityHandler:

         return session_id

-    async def request_email_token(
-        self,
-        id_server: str,
-        email: str,
-        client_secret: str,
-        send_attempt: int,
-        next_link: Optional[str] = None,
-    ) -> JsonDict:
-        """
-        Request an external server send an email on our behalf for the purposes of threepid
-        validation.
-
-        Args:
-            id_server: The identity server to proxy to
-            email: The email to send the message to
-            client_secret: The unique client_secret sends by the user
-            send_attempt: Which attempt this is
-            next_link: A link to redirect the user to once they submit the token
-
-        Returns:
-            The json response body from the server
-        """
-        params = {
-            "email": email,
-            "client_secret": client_secret,
-            "send_attempt": send_attempt,
-        }
-        if next_link:
-            params["next_link"] = next_link
-
-        try:
-            data = await self.http_client.post_json_get_json(
-                id_server + "/_matrix/identity/api/v1/validate/email/requestToken",
-                params,
-            )
-            return data
-        except HttpResponseException as e:
-            logger.info("Proxied requestToken failed: %r", e)
-            raise e.to_synapse_error()
-        except RequestTimedOutError:
-            raise SynapseError(500, "Timed out contacting identity server")
-
     async def requestMsisdnToken(
         self,
         id_server: str,
@@ -531,18 +488,7 @@ class IdentityHandler:
         validation_session = None

         # Try to validate as email
-        if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
-            # Remote emails will only be used if a valid identity server is provided.
-            assert (
-                self.hs.config.registration.account_threepid_delegate_email is not None
-            )
-
-            # Ask our delegated email identity server
-            validation_session = await self.threepid_from_creds(
-                self.hs.config.registration.account_threepid_delegate_email,
-                threepid_creds,
-            )
-        elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+        if self.hs.config.email.can_verify_email:
             # Get a validated session matching these details
             validation_session = await self.store.get_threepid_validation_session(
                 "email", client_secret, sid=sid, validated=True
@@ -592,11 +538,7 @@ class IdentityHandler:
         raise SynapseError(400, "Error contacting the identity server")

     async def lookup_3pid(
-        self,
-        id_server: str,
-        medium: str,
-        address: str,
-        id_access_token: Optional[str] = None,
+        self, id_server: str, medium: str, address: str, id_access_token: str
     ) -> Optional[str]:
         """Looks up a 3pid in the passed identity server.
@@ -611,60 +553,15 @@ class IdentityHandler:
         Returns:
             the matrix ID of the 3pid, or None if it is not recognized.
         """
-        if id_access_token is not None:
-            try:
-                results = await self._lookup_3pid_v2(
-                    id_server, id_access_token, medium, address
-                )
-                return results
-
-            except Exception as e:
-                # Catch HttpResponseExcept for a non-200 response code
-                # Check if this identity server does not know about v2 lookups
-                if isinstance(e, HttpResponseException) and e.code == 404:
-                    # This is an old identity server that does not yet support v2 lookups
-                    logger.warning(
-                        "Attempted v2 lookup on v1 identity server %s. Falling "
-                        "back to v1",
-                        id_server,
-                    )
-                else:
-                    logger.warning("Error when looking up hashing details: %s", e)
-                    return None
-
-        return await self._lookup_3pid_v1(id_server, medium, address)
-
-    async def _lookup_3pid_v1(
-        self, id_server: str, medium: str, address: str
-    ) -> Optional[str]:
-        """Looks up a 3pid in the passed identity server using v1 lookup.
-
-        Args:
-            id_server: The server name (including port, if required)
-                of the identity server to use.
-            medium: The type of the third party identifier (e.g. "email").
-            address: The third party identifier (e.g. "foo@example.com").
-
-        Returns:
-            the matrix ID of the 3pid, or None if it is not recognized.
-        """
-        try:
-            data = await self.blacklisting_http_client.get_json(
-                "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server),
-                {"medium": medium, "address": address},
-            )
-
-            if "mxid" in data:
-                # note: we used to verify the identity server's signature here, but no longer
-                # require or validate it. See the following for context:
-                # https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950
-                return data["mxid"]
-        except RequestTimedOutError:
-            raise SynapseError(500, "Timed out contacting identity server")
-        except OSError as e:
-            logger.warning("Error from v1 identity server lookup: %s" % (e,))
-
-        return None
+        try:
+            results = await self._lookup_3pid_v2(
+                id_server, id_access_token, medium, address
+            )
+            return results
+        except Exception as e:
+            logger.warning("Error when looking up hashing details: %s", e)
+            return None

     async def _lookup_3pid_v2(
         self, id_server: str, id_access_token: str, medium: str, address: str
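Dropping the v1 fallback means every lookup now goes through the authenticated v2 identity-service API, whose endpoints require an access token sent as a standard bearer token. A rough standalone sketch against the spec's `/hash_details` endpoint, using `aiohttp` (this is illustrative, not Synapse's own client code):

```python
import aiohttp

async def v2_hash_details(id_server: str, id_access_token: str) -> dict:
    # v2 identity endpoints require authentication via a bearer token.
    headers = {"Authorization": f"Bearer {id_access_token}"}
    url = f"https://{id_server}/_matrix/identity/v2/hash_details"
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers) as resp:
            resp.raise_for_status()
            return await resp.json()
```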
@@ -793,7 +690,7 @@ class IdentityHandler:
         room_type: Optional[str],
         inviter_display_name: str,
         inviter_avatar_url: str,
-        id_access_token: Optional[str] = None,
+        id_access_token: str,
     ) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]:
         """
         Asks an identity server for a third party invite.

@@ -814,7 +711,7 @@ class IdentityHandler:
             inviter_display_name: The current display name of the
                 inviter.
             inviter_avatar_url: The URL of the inviter's avatar.
-            id_access_token (str|None): The access token to authenticate to the identity
+            id_access_token (str): The access token to authenticate to the identity
                 server with

         Returns:
@@ -846,71 +743,24 @@ class IdentityHandler:
         invite_config["org.matrix.web_client_location"] = self._web_client_location

         # Add the identity service access token to the JSON body and use the v2
-        # Identity Service endpoints if id_access_token is present
+        # Identity Service endpoints
         data = None
-        base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server)

-        if id_access_token:
-            key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
-                id_server_scheme,
-                id_server,
-            )
+        key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
+            id_server_scheme,
+            id_server,
+        )

-            # Attempt a v2 lookup
-            url = base_url + "/v2/store-invite"
-            try:
-                data = await self.blacklisting_http_client.post_json_get_json(
-                    url,
-                    invite_config,
-                    {"Authorization": create_id_access_token_header(id_access_token)},
-                )
-            except RequestTimedOutError:
-                raise SynapseError(500, "Timed out contacting identity server")
-            except HttpResponseException as e:
-                if e.code != 404:
-                    logger.info("Failed to POST %s with JSON: %s", url, e)
-                    raise e
-
-        if data is None:
-            key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
-                id_server_scheme,
-                id_server,
-            )
-            url = base_url + "/api/v1/store-invite"
-
-            try:
-                data = await self.blacklisting_http_client.post_json_get_json(
-                    url, invite_config
-                )
-            except RequestTimedOutError:
-                raise SynapseError(500, "Timed out contacting identity server")
-            except HttpResponseException as e:
-                logger.warning(
-                    "Error trying to call /store-invite on %s%s: %s",
-                    id_server_scheme,
-                    id_server,
-                    e,
-                )
-
-            if data is None:
-                # Some identity servers may only support application/x-www-form-urlencoded
-                # types. This is especially true with old instances of Sydent, see
-                # https://github.com/matrix-org/sydent/pull/170
-                try:
-                    data = await self.blacklisting_http_client.post_urlencoded_get_json(
-                        url, invite_config
-                    )
-                except HttpResponseException as e:
-                    logger.warning(
-                        "Error calling /store-invite on %s%s with fallback "
-                        "encoding: %s",
-                        id_server_scheme,
-                        id_server,
-                        e,
-                    )
-                    raise e
+        url = "%s%s/_matrix/identity/v2/store-invite" % (id_server_scheme, id_server)
+        try:
+            data = await self.blacklisting_http_client.post_json_get_json(
+                url,
+                invite_config,
+                {"Authorization": create_id_access_token_header(id_access_token)},
+            )
+        except RequestTimedOutError:
+            raise SynapseError(500, "Timed out contacting identity server")

         # TODO: Check for success
         token = data["token"]
         public_keys = data.get("public_keys", [])
         if "public_key" in data:
@@ -143,8 +143,8 @@ class InitialSyncHandler:
             joined_rooms,
             to_key=int(now_token.receipt_key),
         )
-        if self.hs.config.experimental.msc2285_enabled:
-            receipt = ReceiptEventSource.filter_out_private_receipts(receipt, user_id)
+
+        receipt = ReceiptEventSource.filter_out_private_receipts(receipt, user_id)

         tags_by_room = await self.store.get_tags_for_user(user_id)
@@ -309,18 +309,18 @@ class InitialSyncHandler:
         if blocked:
             raise SynapseError(403, "This room has been blocked on this server")

-        user_id = requester.user.to_string()
-
         (
             membership,
             member_event_id,
         ) = await self.auth.check_user_in_room_or_world_readable(
             room_id,
-            user_id,
+            requester,
             allow_departed_users=True,
         )
         is_peeking = member_event_id is None

+        user_id = requester.user.to_string()
+
         if membership == Membership.JOIN:
             result = await self._room_initial_sync_joined(
                 user_id, room_id, pagin_config, membership, is_peeking
@@ -456,11 +456,8 @@ class InitialSyncHandler:
             )
             if not receipts:
                 return []
-            if self.hs.config.experimental.msc2285_enabled:
-                receipts = ReceiptEventSource.filter_out_private_receipts(
-                    receipts, user_id
-                )
-            return receipts
+
+            return ReceiptEventSource.filter_out_private_receipts(receipts, user_id)

         presence, receipts, (messages, token) = await make_deferred_yieldable(
             gather_results(