Compare commits

1 commit: d57b306023 "Version picker added for v1.46 docs" by Action Bot, 2023-12-11 14:52:40 +00:00

817 changed files with 32821 additions and 75111 deletions


@@ -1,4 +0,0 @@
---
title: CI run against latest deps is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}

.ci/patch_for_twisted_trunk.sh (new executable file, 8 lines)

@@ -0,0 +1,8 @@
#!/bin/sh
# replaces the dependency on Twisted in `python_dependencies` with trunk.
set -e
cd "$(dirname "$0")"/..
sed -i -e 's#"Twisted.*"#"Twisted @ git+https://github.com/twisted/twisted"#' synapse/python_dependencies.py


@@ -2,24 +2,29 @@
# Test for the export-data admin command against sqlite and postgres
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
cd `dirname $0`/../..
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Run the export-data command on the sqlite test database
poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
@@ -32,14 +37,14 @@ else
fi
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# Run the export-data command on postgres database
poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
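
The check referred to by that comment is cut off by the hunk boundary above; a minimal sketch of that kind of test (directory names assumed from the --output-directory arguments, not taken from the script):

if [ -d "/tmp/export_data/rooms" ]; then
    echo "Rooms directory found, export succeeded"
else
    echo "Rooms directory not found, export failed"
    exit 1
fi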


@@ -1,81 +1,16 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
# Prevent tzdata from asking for user input
export DEBIAN_FRONTEND=noninteractive
# this script is run by GitHub Actions in a plain `bionic` container; it installs the
# minimal requirements for tox and hands over to the py3-old tox environment.
set -ex
apt-get update
apt-get install -y \
python3 python3-dev python3-pip python3-venv pipx \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
export LANG="C.UTF-8"
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
# TODO: in the future, we could use an implementation of
# https://github.com/python-poetry/poetry/issues/3527
# https://github.com/pypa/pip/issues/8085
# to select the lowest possible versions, rather than resorting to this sed script.
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Pin pyOpenSSL to 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Omit systemd: we're not logging to journal here.
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
# `python = "^3.7"` to `python = "==3.7", which would mean we fail because olddeps
# runs on 3.8 (#12343).
sed -i \
-e "s/[~>]=/==/g" \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
pyproject.toml
# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install --user toml
REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
data = toml.loads(f.read())
del data['tool']['poetry']['dev-dependencies']
with open('pyproject.toml', 'w') as f:
toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pipx install poetry==1.1.12
~/.local/bin/poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"
~/.local/bin/poetry install -E "all test"
~/.local/bin/poetry run trial --jobs=2 tests
exec tox -e py3-old,combine
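
For example (a hedged sketch with invented dependency lines, not the real pyproject.toml), the bound-rewriting part of that sed turns lower and tilde bounds into exact pins:

printf 'attrs = ">=19.2.0"\njsonschema = "~=3.0"\n' | sed -e "s/[~>]=/==/g"
# prints:
#   attrs = "==19.2.0"
#   jsonschema = "==3.0"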


@@ -1,37 +1,41 @@
#!/usr/bin/env bash
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - sets up synapse and deps
# - runs the port script on a prepopulated test sqlite db
# - also runs it against a new sqlite db
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
cd `dirname $0`/../..
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2 coverage coverage-enable-subprocess
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
#####
@@ -42,12 +46,12 @@ echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py \
.ci/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml


@@ -3,9 +3,11 @@
# things to include
!docker
!scripts
!synapse
!MANIFEST.in
!README.rst
!pyproject.toml
!poetry.lock
!setup.py
!synctl
**/__pycache__

.flake8 (deleted, 11 lines)

@@ -1,11 +0,0 @@
# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
# See https://github.com/PyCQA/flake8/issues/234
[flake8]
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
# for error codes. The ones we ignore are:
# W503: line break before binary operator
# W504: line break after binary operator
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
ignore=W503,W504,E203,E731,E501


@@ -6,6 +6,3 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1
# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7
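
For context (not part of the diff): a revs file like this only takes effect once git is pointed at it, for example:

git config blame.ignoreRevsFile .git-blame-ignore-revs
# or per invocation (file path chosen for illustration):
git blame --ignore-revs-file .git-blame-ignore-revs synapse/handlers/message.py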


@@ -1,14 +1,12 @@
### Pull Request Checklist
<!-- Please read https://matrix-org.github.io/synapse/latest/development/contributing_guide.html before submitting your pull request -->
<!-- Please read CONTRIBUTING.md before submitting your pull request -->
* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should:
* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog). The entry should:
- Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
* [ ] Pull request includes a [sign off](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#sign-off)
* [ ] [Code style](https://matrix-org.github.io/synapse/latest/code_style.html) is correct
(run the [linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#sign-off)
* [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#code-style))


@@ -5,7 +5,7 @@ name: Build docker images
on:
push:
tags: ["v*"]
branches: [ master, main, develop ]
branches: [ master, main ]
workflow_dispatch:
permissions:
@@ -36,22 +36,37 @@ jobs:
- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
run: |
case "${GITHUB_REF}" in
refs/heads/master|refs/heads/main)
tag=latest
;;
refs/tags/*)
tag=${GITHUB_REF#refs/tags/}
;;
*)
tag=${GITHUB_SHA}
;;
esac
echo "::set-output name=tag::$tag"
# for release builds, we want to get the amd64 image out asap, so first
# we do an amd64-only build, before following up with a multiarch build.
- name: Build and push amd64
uses: docker/build-push-action@v2
if: "${{ startsWith(github.ref, 'refs/tags/v') }}"
with:
images: matrixdotorg/synapse
flavor: |
latest=false
tags: |
type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
type=pep440,pattern={{raw}}
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
file: "docker/Dockerfile"
platforms: linux/amd64
- name: Build and push all platforms
uses: docker/build-push-action@v2
with:
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64


@@ -22,7 +22,7 @@ jobs:
- name: Setup mdbook
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
with:
mdbook-version: '0.4.17'
mdbook-version: '0.4.9'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.


@@ -1,159 +0,0 @@
# People who are freshly `pip install`ing from PyPI will pull in the latest versions of
# dependencies which match the broad requirements. Since most CI runs are against
# the locked poetry environment, run specifically against the latest dependencies to
# know if there's an upcoming breaking change.
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
name: Latest dependencies
on:
schedule:
- cron: 0 7 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.2.0b1"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
# Upgrade all runtime dependencies only. This is intended to mimic a fresh
# `pip install matrix-synapse[all]` as closely as possible.
- run: poetry update --no-dev
- run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- database: "sqlite"
- database: "postgres"
postgres-version: "14"
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install .[all,test]
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: python -m twisted.trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:testing
volumes:
- ${{ github.workspace }}:/src
strategy:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: focal
postgres: postgres
workers: workers
redis: redis
env:
POSTGRES: ${{ matrix.postgres && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- uses: actions/checkout@v2
- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
run: rm /src/poetry.lock
working-directory: /src
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
# TODO: run complement (as with twisted trunk, see #12473).
# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
needs:
# TODO: should mypy be included here? It feels more brittle than the other two.
- mypy
- trial
- sytest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/latest_deps_build_failed_issue_template.md
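
The scenario this workflow approximates is a user installing against unpinned dependencies; a hedged sketch of that scenario (not commands taken from the workflow):

python3 -m venv fresh-env
. fresh-env/bin/activate
# resolves to the newest versions allowed by the broad bounds, unlike the locked poetry environment
pip install "matrix-synapse[all]"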


@@ -7,7 +7,7 @@ on:
# of things breaking (but only build one set of debs)
pull_request:
push:
branches: ["develop", "release-*"]
branches: ["develop"]
# we do the full build on tags.
tags: ["v*"]
@@ -31,7 +31,7 @@ jobs:
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
dists='["debian:sid"]'
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
dists=$(scripts-dev/build_debian_packages --show-dists-json)
fi
echo "::set-output name=distros::$dists"
# map the step outputs to job outputs
@@ -74,7 +74,7 @@ jobs:
# see https://github.com/docker/build-push-action/issues/252
# for the cache magic here
run: |
./src/scripts-dev/build_debian_packages.py \
./src/scripts-dev/build_debian_packages \
--docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
--docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
--docker-build-arg=--progress=plain \
@@ -91,7 +91,17 @@ jobs:
build-sdist:
name: "Build pypi distribution files"
uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install wheel
- run: |
python setup.py sdist bdist_wheel
- uses: actions/upload-artifact@v2
with:
name: python-dist
path: dist/*
# if it's a tag, create a release and attach the artifacts to it
attach-assets:
@@ -112,8 +122,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
Sdist/*
Wheel/*
python-dist/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true


@@ -10,19 +10,22 @@ concurrency:
cancel-in-progress: true
jobs:
check-sampleconfig:
lint:
runs-on: ubuntu-latest
strategy:
matrix:
toxenv:
- "check-sampleconfig"
- "check_codestyle"
- "check_isort"
- "mypy"
- "packaging"
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install .
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
typechecking-extras: "all"
- run: pip install tox
- run: tox -e ${{ matrix.toxenv }}
lint-crlf:
runs-on: ubuntu-latest
@@ -40,15 +43,29 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v2
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
- run: pip install tox
- run: scripts-dev/check-newsfragment
env:
PULL_REQUEST_NUMBER: ${{ github.event.number }}
lint-sdist:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install wheel
- run: python setup.py sdist bdist_wheel
- uses: actions/upload-artifact@v2
with:
name: Python Distributions
path: dist/*
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig]
needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
runs-on: ubuntu-latest
steps:
- run: "true"
@@ -59,25 +76,25 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10"]
python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
database: ["sqlite"]
extras: ["all"]
toxenv: ["py"]
include:
# Newest Python without optional deps
- python-version: "3.10"
extras: ""
toxenv: "py-noextras"
# Oldest Python with PostgreSQL
- python-version: "3.7"
- python-version: "3.6"
database: "postgres"
postgres-version: "10"
extras: "all"
postgres-version: "9.6"
toxenv: "py"
# Newest Python with newest PostgreSQL
- python-version: "3.10"
database: "postgres"
postgres-version: "14"
extras: "all"
toxenv: "py"
steps:
- uses: actions/checkout@v2
@@ -89,16 +106,17 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- run: pip install tox
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=2 tests
- run: tox -e ${{ matrix.toxenv }}
env:
TRIAL_FLAGS: "--jobs=2"
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
@@ -117,19 +135,18 @@ jobs:
|| true
trial-olddeps:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Test with old deps
uses: docker://ubuntu:focal # For old python and sqlite
# Note: focal seems to be using 3.8, but the oldest is 3.7?
# See https://github.com/matrix-org/synapse/issues/12343
uses: docker://ubuntu:bionic # For old python and sqlite
with:
workdir: /github/workspace
entrypoint: .ci/scripts/test_old_deps.sh
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -145,24 +162,23 @@ jobs:
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
# Note: sqlite only; no postgres. Completely untested since poetry move.
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.7"]
extras: ["all"]
python-version: ["pypy-3.6"]
steps:
- uses: actions/checkout@v2
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- run: pip install tox
- run: tox -e py
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -197,15 +213,15 @@ jobs:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: bionic
- sytest-tag: focal
- sytest-tag: bionic
postgres: postgres
- sytest-tag: testing
postgres: postgres
- sytest-tag: focal
- sytest-tag: bionic
postgres: multi-postgres
workers: workers
@@ -261,10 +277,9 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
python-version: "3.9"
- run: .ci/scripts/test_export_data_command.sh
portdb:
@@ -276,8 +291,8 @@ jobs:
strategy:
matrix:
include:
- python-version: "3.7"
postgres-version: "10"
- python-version: "3.6"
postgres-version: "9.6"
- python-version: "3.10"
postgres-version: "14"
@@ -299,39 +314,33 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
complement:
if: ${{ !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
container:
# https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile
image: matrixdotorg/complement:latest
env:
CI: true
ports:
- 8448:8448
volumes:
- /var/run/docker.sock:/var/run/docker.sock
steps:
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
- name: "Install Complement Dependencies"
run: |
sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
# doesn't exist, fallback to master.
- name: Checkout complement
shell: bash
run: |
@@ -344,8 +353,8 @@ jobs:
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
# 3. Use the default complement branch ("master").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
@@ -354,32 +363,55 @@ jobs:
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
- run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
# Build initial Synapse image
- run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
working-directory: synapse
# Build a ready-to-run Synapse image based on the initial image above.
# This new image includes a config file, keys for signing and TLS, and
# other settings to make it suitable for testing under Complement.
- run: docker build -t complement-synapse -f Synapse.Dockerfile .
working-directory: complement/dockerfiles
# Run Complement
- run: go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests/...
env:
COMPLEMENT_BASE_IMAGE: complement-synapse:latest
working-directory: complement
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
needs:
- check-sampleconfig
- lint
- lint-crlf
- lint-newsfile
- lint-sdist
- trial
- trial-olddeps
- sytest
- export-data
- portdb
- complement
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
with:
needs: ${{ toJSON(needs) }}
- name: Set build result
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
# the `jq` incantation dumps out a series of "<job> <result>" lines.
# we set it to an intermediate variable to avoid a pipe, which makes it
# hard to set $rc.
run: |
rc=0
results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
while read job result ; do
# The newsfile lint may be skipped on non PR builds
if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then
continue
fi
# The newsfile lint may be skipped on non PR builds
skippable:
lint-newsfile
if [ "$result" != "success" ]; then
echo "::set-failed ::Job $job returned $result"
rc=1
fi
done <<< $results
exit $rc
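
To make the `jq` incantation concrete, here is a self-contained example with a made-up needs context (job names and results are invented):

NEEDS_CONTEXT='{"lint":{"result":"success"},"lint-newsfile":{"result":"skipped"},"trial":{"result":"failure"}}'
jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< "$NEEDS_CONTEXT"
# prints:
#   lint success
#   lint-newsfile skipped
#   trial failure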


@@ -6,27 +6,16 @@ on:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
extras: "all"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
- uses: actions/setup-python@v2
- run: .ci/patch_for_twisted_trunk.sh
- run: pip install tox
- run: tox -e mypy
trial:
runs-on: ubuntu-latest
@@ -34,15 +23,14 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: "3.x"
extras: "all test"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- run: poetry run trial --jobs 2 tests
python-version: 3.6
- run: .ci/patch_for_twisted_trunk.sh
- run: pip install tox
- run: tox -e py
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, always include them.
@@ -67,23 +55,11 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
# We symlink it before running poetry so that poetry actually
# ends up installing to `/venv`.
run: |
ln -s -T /venv /src/.venv
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
run: .ci/patch_for_twisted_trunk.sh
working-directory: /src
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
env:
# Use offline mode to avoid reinstalling the pinned version of
# twisted.
OFFLINE: 1
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
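
One quick sanity check after a patch like this (shown for illustration, not part of the workflow) is to confirm which Twisted actually got installed:

poetry run python -c "import twisted; print(twisted.__version__)"
# compare against the version pinned in poetry.lock to confirm the override took effect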

.gitignore (vendored, 10 lines changed)

@@ -15,9 +15,6 @@ _trial_temp*/
.DS_Store
__pycache__/
# We do want the poetry lockfile.
!poetry.lock
# stuff that is likely to exist when you run a server locally
/*.db
/*.log
@@ -33,9 +30,6 @@ __pycache__/
/media_store/
/uploads
# For direnv users
/.envrc
# IDEs
/.idea/
/.ropeproject/
@@ -56,7 +50,3 @@ __pycache__/
# docs
book/
# complement
/complement-*
/master.tar.gz

CHANGES.md (9734 lines changed; diff suppressed because it is too large)

MANIFEST.in (new file, 56 lines)

@@ -0,0 +1,56 @@
include synctl
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh
include synapse/py.typed
recursive-include synapse/storage *.sql
recursive-include synapse/storage *.sql.postgres
recursive-include synapse/storage *.sql.sqlite
recursive-include synapse/storage *.py
recursive-include synapse/storage *.txt
recursive-include synapse/storage *.md
recursive-include docs *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include tests *.pem
recursive-include tests *.p8
recursive-include tests *.crt
recursive-include tests *.key
recursive-include synapse/res *
recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js
exclude .codecov.yml
exclude .coveragerc
exclude .dockerignore
exclude .editorconfig
exclude Dockerfile
exclude mypy.ini
exclude sytest-blacklist
exclude test_postgresql.sh
include book.toml
include pyproject.toml
recursive-include changelog.d *
prune .circleci
prune .github
prune .ci
prune contrib
prune debian
prune demo/etc
prune docker
prune snap
prune stubs
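
Since MANIFEST.in only affects source distributions, a quick way to sanity-check these rules (a sketch using the setup.py workflow this diff reintroduces) is to build an sdist and list its contents:

python setup.py sdist
tar tzf dist/*.tar.gz | head -n 20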


@@ -55,7 +55,7 @@ solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.
Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
team, written in Python 3/Twisted.
In Matrix, every user runs one or more Matrix clients, which connect through to
@@ -246,7 +246,7 @@ Password reset
==============
Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.md#reset-password>`_
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.rst#reset-password>`_
or by directly editing the database as shown below.
First calculate the hash of the new password::
@@ -293,42 +293,36 @@ directory of your choice::
git clone https://github.com/matrix-org/synapse.git
cd synapse
Synapse has a number of external dependencies. We maintain a fixed development
environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::
Synapse has a number of external dependencies that are easiest
to install using pip and a virtualenv::
pip install --user pipx
pipx install poetry
as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
for other installation methods.) Then ask poetry to create a virtual environment
from the project and install Synapse's dependencies::
poetry install --extras "all test"
python3 -m venv ./env
source ./env/bin/activate
pip install -e ".[all,dev]"
This will run a process of downloading and installing all the needed
dependencies into a virtual env.
dependencies into a virtual env. If any dependencies fail to install,
try installing the failing modules individually::
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::
pip install -e "module-name"
poetry run ./demo/start.sh
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
(to stop, you can use ``poetry run ./demo/stop.sh``)
./demo/start.sh
See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
for more information.
(to stop, you can use `./demo/stop.sh`)
If you just want to start a single instance of the app and run it directly::
# Create the homeserver.yaml config once
poetry run synapse_homeserver \
python -m synapse.app.homeserver \
--server-name my.domain.name \
--config-path homeserver.yaml \
--generate-config \
--report-stats=[yes|no]
# Start the app
poetry run synapse_homeserver --config-path homeserver.yaml
python -m synapse.app.homeserver --config-path homeserver.yaml
Running the unit tests
@@ -337,7 +331,7 @@ Running the unit tests
After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::
poetry run trial tests
trial tests
This should end with a 'PASSED' result (note that exact numbers will
differ)::


@@ -34,6 +34,14 @@ additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
"docs/website_files/version-picker.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
additional-js = [
"docs/website_files/table-of-contents.js",
"docs/website_files/version-picker.js",
"docs/website_files/version.js",
]
theme = "docs/website_files/theme"
[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"


@@ -14,7 +14,6 @@ services:
# failure
restart: unless-stopped
# See the readme for a full documentation of the environment settings
# NOTE: You must edit homeserver.yaml to use postgres, it defaults to sqlite
environment:
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
volumes:


@@ -66,18 +66,6 @@
],
"title": "Dashboards",
"type": "dashboards"
},
{
"asDropdown": false,
"icon": "external link",
"includeVars": false,
"keepTime": false,
"tags": [],
"targetBlank": true,
"title": "Synapse Documentation",
"tooltip": "Open Documentation",
"type": "link",
"url": "https://matrix-org.github.io/synapse/latest/"
}
],
"panels": [
@@ -10901,4 +10889,4 @@
"title": "Synapse",
"uid": "000000012",
"version": 100
}
}


@@ -193,15 +193,12 @@ class TrivialXmppClient:
time.sleep(7)
print("SSRC spammer started")
while self.running:
ssrcMsg = (
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
% {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
)
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
res = self.sendIq(ssrcMsg)
print("reply from ssrc announce: ", res)
time.sleep(10)


@@ -92,6 +92,22 @@ new PromConsole.Graph({
})
</script>
<h3>Pending calls per tick</h3>
<div id="reactor_pending_calls"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_pending_calls"),
expr: "rate(python_twisted_reactor_pending_calls_sum[30s]) / rate(python_twisted_reactor_pending_calls_count[30s])",
name: "[[job]]-[[index]]",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yTitle: "Pending Calls"
})
</script>
<h1>Storage</h1>
<h3>Queries</h3>


@@ -84,9 +84,7 @@ AUTH="Authorization: Bearer $TOKEN"
###################################################################################################
# finally start pruning the room:
###################################################################################################
# this will really delete local events, so the messages in the room really
# disappear unless they are restored by remote federation. This is because
# we pass {"delete_local_events":true} to the curl invocation below.
POSTDATA='{"delete_local_events":"true"}' # this will really delete local events, so the messages in the room really disappear unless they are restored by remote federation
for ROOM in "${ROOMS_ARRAY[@]}"; do
echo "########################################### $(date) ################# "
@@ -106,7 +104,7 @@ for ROOM in "${ROOMS_ARRAY[@]}"; do
SLEEP=2
set -x
# call purge
OUT=$(curl --header "$AUTH" -s -d '{"delete_local_events":true}' POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
OUT=$(curl --header "$AUTH" -s -d $POSTDATA POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
PURGE_ID=$(echo "$OUT" |grep purge_id|cut -d'"' -f4 )
if [ "$PURGE_ID" == "" ]; then
# probably the history purge is already in progress for $ROOM
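
For illustration, a hedged sketch of how the returned purge_id could be polled afterwards (the status endpoint path and response shape are assumptions about the Synapse admin API, not part of this script):

# reuse $AUTH, $API_URL and $PURGE_ID from above
while true; do
    OUT=$(curl --header "$AUTH" -s "$API_URL/admin/purge_history_status/$PURGE_ID")
    echo "$OUT"
    echo "$OUT" | grep -q '"active"' || break
    sleep "$SLEEP"
done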


@@ -15,7 +15,7 @@ export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs
# python won't look in the right directory. At least this way, the error will
# be a *bit* more obvious.
#
SNAKE=$(readlink -e /usr/bin/python3)
SNAKE=`readlink -e /usr/bin/python3`
# try to set the CFLAGS so any compiled C extensions are compiled with the most
# generic as possible x64 instructions, so that compiling it on a new Intel chip
@@ -24,29 +24,15 @@ SNAKE=$(readlink -e /usr/bin/python3)
# TODO: add similar things for non-amd64, or figure out a more generic way to
# do this.
case $(dpkg-architecture -q DEB_HOST_ARCH) in
case `dpkg-architecture -q DEB_HOST_ARCH` in
amd64)
export CFLAGS=-march=x86-64
;;
esac
# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0b1
poetry export \
--extras all \
--extras test \
--extras systemd \
-o exported_requirements.txt
deactivate
rm -rf "$TEMP_VENV"
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
# than the 2/3 compatible `virtualenv`.
# Use --no-deps to only install pinned versions in exported_requirements.txt,
# and to avoid https://github.com/pypa/pip/issues/9644
dh_virtualenv \
--install-suffix "matrix-synapse" \
--builtin-venv \
@@ -54,12 +40,9 @@ dh_virtualenv \
--upgrade-pip \
--preinstall="lxml" \
--preinstall="mock" \
--preinstall="wheel" \
--extra-pip-arg="--no-deps" \
--extra-pip-arg="--no-cache-dir" \
--extra-pip-arg="--compile" \
--extras="all,systemd,test" \
--requirements="exported_requirements.txt"
--extras="all,systemd,test"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
@@ -73,8 +56,8 @@ case "$DEB_BUILD_OPTIONS" in
*)
# Copy tests to a temporary directory so that we can put them on the
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
tmpdir=$(mktemp -d)
trap 'rm -r $tmpdir' EXIT
tmpdir=`mktemp -d`
trap "rm -r $tmpdir" EXIT
cp -r tests "$tmpdir"
@@ -115,7 +98,7 @@ esac
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
# add a dependency on the right version of python to substvars.
PYPKG=$(basename "$SNAKE")
PYPKG=`basename $SNAKE`
echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars

debian/changelog (vendored, 278 lines changed)

@@ -1,281 +1,3 @@
matrix-synapse-py3 (1.60.0) stable; urgency=medium
* New Synapse release 1.60.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 31 May 2022 13:41:22 +0100
matrix-synapse-py3 (1.60.0~rc2) stable; urgency=medium
* New Synapse release 1.60.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 27 May 2022 11:04:55 +0100
matrix-synapse-py3 (1.60.0~rc1) stable; urgency=medium
* New Synapse release 1.60.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 24 May 2022 12:05:01 +0100
matrix-synapse-py3 (1.59.1) stable; urgency=medium
* New Synapse release 1.59.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 18 May 2022 11:41:46 +0100
matrix-synapse-py3 (1.59.0) stable; urgency=medium
* New Synapse release 1.59.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 17 May 2022 10:26:50 +0100
matrix-synapse-py3 (1.59.0~rc2) stable; urgency=medium
* New Synapse release 1.59.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 16 May 2022 12:52:15 +0100
matrix-synapse-py3 (1.59.0~rc1) stable; urgency=medium
* Adjust how the `exported-requirements.txt` file is generated as part of
the process of building these packages. This affects the package
maintainers only; end-users are unaffected.
* New Synapse release 1.59.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 10 May 2022 10:45:08 +0100
matrix-synapse-py3 (1.58.1) stable; urgency=medium
* Include python dependencies from the `systemd` and `cache_memory` extras package groups, which
were incorrectly omitted from the 1.58.0 package.
* New Synapse release 1.58.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 05 May 2022 14:58:23 +0100
matrix-synapse-py3 (1.58.0) stable; urgency=medium
* New Synapse release 1.58.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 May 2022 10:52:58 +0100
matrix-synapse-py3 (1.58.0~rc2) stable; urgency=medium
* New Synapse release 1.58.0rc2.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Apr 2022 17:14:56 +0100
matrix-synapse-py3 (1.58.0~rc1) stable; urgency=medium
* Use poetry to manage the bundled virtualenv included with this package.
* New Synapse release 1.58.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Apr 2022 11:15:20 +0100
matrix-synapse-py3 (1.57.1) stable; urgency=medium
* New synapse release 1.57.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Apr 2022 15:27:21 +0100
matrix-synapse-py3 (1.57.0) stable; urgency=medium
* New synapse release 1.57.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Apr 2022 10:58:42 +0100
matrix-synapse-py3 (1.57.0~rc1) stable; urgency=medium
* New synapse release 1.57.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Apr 2022 13:36:25 +0100
matrix-synapse-py3 (1.56.0) stable; urgency=medium
* New synapse release 1.56.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 05 Apr 2022 12:38:39 +0100
matrix-synapse-py3 (1.56.0~rc1) stable; urgency=medium
* New synapse release 1.56.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Mar 2022 10:40:50 +0100
matrix-synapse-py3 (1.55.2) stable; urgency=medium
* New synapse release 1.55.2.
-- Synapse Packaging team <packages@matrix.org> Thu, 24 Mar 2022 19:07:11 +0000
matrix-synapse-py3 (1.55.1) stable; urgency=medium
* New synapse release 1.55.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 24 Mar 2022 17:44:23 +0000
matrix-synapse-py3 (1.55.0) stable; urgency=medium
* New synapse release 1.55.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Mar 2022 13:59:26 +0000
matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium
* New synapse release 1.55.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Mar 2022 10:59:31 +0000
matrix-synapse-py3 (1.54.0) stable; urgency=medium
* New synapse release 1.54.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Mar 2022 10:54:52 +0000
matrix-synapse-py3 (1.54.0~rc1) stable; urgency=medium
* New synapse release 1.54.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 02 Mar 2022 10:43:22 +0000
matrix-synapse-py3 (1.53.0) stable; urgency=medium
* New synapse release 1.53.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Feb 2022 11:32:06 +0000
matrix-synapse-py3 (1.53.0~rc1) stable; urgency=medium
* New synapse release 1.53.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Feb 2022 10:40:50 +0000
matrix-synapse-py3 (1.52.0) stable; urgency=medium
* New synapse release 1.52.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Feb 2022 11:34:54 +0000
matrix-synapse-py3 (1.52.0~rc1) stable; urgency=medium
* New synapse release 1.52.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Feb 2022 11:04:09 +0000
matrix-synapse-py3 (1.51.0) stable; urgency=medium
* New synapse release 1.51.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Jan 2022 11:28:51 +0000
matrix-synapse-py3 (1.51.0~rc2) stable; urgency=medium
* New synapse release 1.51.0~rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 24 Jan 2022 12:25:00 +0000
matrix-synapse-py3 (1.51.0~rc1) stable; urgency=medium
* New synapse release 1.51.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Fri, 21 Jan 2022 10:46:02 +0000
matrix-synapse-py3 (1.50.2) stable; urgency=medium
* New synapse release 1.50.2.
-- Synapse Packaging team <packages@matrix.org> Mon, 24 Jan 2022 13:37:11 +0000
matrix-synapse-py3 (1.50.1) stable; urgency=medium
* New synapse release 1.50.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Jan 2022 16:06:26 +0000
matrix-synapse-py3 (1.50.0) stable; urgency=medium
* New synapse release 1.50.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Jan 2022 10:40:38 +0000
matrix-synapse-py3 (1.50.0~rc2) stable; urgency=medium
* New synapse release 1.50.0~rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 14 Jan 2022 11:18:06 +0000
matrix-synapse-py3 (1.50.0~rc1) stable; urgency=medium
* New synapse release 1.50.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 05 Jan 2022 12:36:17 +0000
matrix-synapse-py3 (1.49.2) stable; urgency=medium
* New synapse release 1.49.2.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Dec 2021 17:31:03 +0000
matrix-synapse-py3 (1.49.1) stable; urgency=medium
* New synapse release 1.49.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Dec 2021 11:07:30 +0000
matrix-synapse-py3 (1.49.0) stable; urgency=medium
* New synapse release 1.49.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Dec 2021 12:39:46 +0000
matrix-synapse-py3 (1.49.0~rc1) stable; urgency=medium
* New synapse release 1.49.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Dec 2021 13:52:21 +0000
matrix-synapse-py3 (1.48.0) stable; urgency=medium
* New synapse release 1.48.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 30 Nov 2021 11:24:15 +0000
matrix-synapse-py3 (1.48.0~rc1) stable; urgency=medium
* New synapse release 1.48.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Thu, 25 Nov 2021 15:56:03 +0000
matrix-synapse-py3 (1.47.1) stable; urgency=medium
* New synapse release 1.47.1.
-- Synapse Packaging team <packages@matrix.org> Fri, 19 Nov 2021 13:44:32 +0000
matrix-synapse-py3 (1.47.0) stable; urgency=medium
* New synapse release 1.47.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 17 Nov 2021 13:09:43 +0000
matrix-synapse-py3 (1.47.0~rc3) stable; urgency=medium
* New synapse release 1.47.0~rc3.
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Nov 2021 14:32:47 +0000
matrix-synapse-py3 (1.47.0~rc2) stable; urgency=medium
[ Dan Callahan ]
* Update scripts to pass Shellcheck lints.
* Remove unused Vagrant scripts from debian/ directory.
* Allow building Debian packages for any architecture, not just amd64.
* Preinstall the "wheel" package when building virtualenvs.
* Do not error if /etc/default/matrix-synapse is missing.
[ Synapse Packaging team ]
* New synapse release 1.47.0~rc2.
-- Synapse Packaging team <packages@matrix.org> Wed, 10 Nov 2021 09:41:01 +0000
matrix-synapse-py3 (1.46.0) stable; urgency=medium
[ Richard van der Hoff ]

debian/clean (vendored, 1 line changed)

@@ -1 +0,0 @@
exported_requirements.txt

debian/control (vendored, 2 lines changed)

@@ -19,7 +19,7 @@ Standards-Version: 3.9.8
Homepage: https://github.com/matrix-org/synapse
Package: matrix-synapse-py3
Architecture: any
Architecture: amd64
Provides: matrix-synapse
Conflicts:
matrix-synapse (<< 0.34.0.1-0matrix2),


@@ -2,7 +2,6 @@
set -e
# shellcheck disable=SC1091
. /usr/share/debconf/confmodule
# try to update the debconf db according to whatever is in the config files


@@ -1,6 +1,5 @@
#!/bin/sh -e
# shellcheck disable=SC1091
. /usr/share/debconf/confmodule
CONFIGFILE_SERVERNAME="/etc/matrix-synapse/conf.d/server_name.yaml"


@@ -5,7 +5,7 @@ Description=Synapse Matrix homeserver
Type=notify
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=-/etc/default/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
ExecReload=/bin/kill -HUP $MAINPID

debian/test/.gitignore (new vendored file, 2 lines)

@@ -0,0 +1,2 @@
.vagrant
*.log

debian/test/provision.sh (new vendored file, 23 lines)

@@ -0,0 +1,23 @@
#!/bin/bash
#
# provisioning script for vagrant boxes for testing the matrix-synapse debs.
#
# Will install the most recent matrix-synapse-py3 deb for this platform from
# the /debs directory.
set -e
apt-get update
apt-get install -y lsb-release
deb=`ls /debs/matrix-synapse-py3_*+$(lsb_release -cs)*.deb | sort | tail -n1`
debconf-set-selections <<EOF
matrix-synapse matrix-synapse/report-stats boolean false
matrix-synapse matrix-synapse/server-name string localhost:18448
EOF
dpkg -i "$deb"
sed -i -e '/port: 8...$/{s/8448/18448/; s/8008/18008/}' -e '$aregistration_shared_secret: secret' /etc/matrix-synapse/homeserver.yaml
systemctl restart matrix-synapse
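
To see what the port-rewriting sed does (a hedged illustration with invented input lines; the appended registration_shared_secret line is omitted):

printf '    port: 8448\n    port: 8008\n' | sed -e '/port: 8...$/{s/8448/18448/; s/8008/18008/}'
# prints:
#     port: 18448
#     port: 18008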

debian/test/stretch/Vagrantfile (new vendored file, 13 lines)

@@ -0,0 +1,13 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
ver = `cd ../../..; dpkg-parsechangelog -S Version`.strip()
Vagrant.configure("2") do |config|
config.vm.box = "debian/stretch64"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder "../../../../debs", "/debs", type: "nfs"
config.vm.provision "shell", path: "../provision.sh"
end
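
A hedged note on how these Vagrant boxes are typically driven (commands assumed, not part of the diff):

cd debian/test/stretch
vagrant up    # provisions the box via ../provision.sh, installing the newest matching deb from ../../../../debs
vagrant ssh   # then inspect the running matrix-synapse service inside the box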

debian/test/xenial/Vagrantfile (new vendored file, 10 lines)

@@ -0,0 +1,10 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/xenial64"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder "../../../../debs", "/debs"
config.vm.provision "shell", path: "../provision.sh"
end

demo/.gitignore (vendored, 11 lines changed)

@@ -1,4 +1,7 @@
# Ignore all the temporary files from the demo servers.
8080/
8081/
8082/
*.db
*.log
*.log.*
*.pid
/media_store.*
/etc

demo/README (new file, 26 lines)

@@ -0,0 +1,26 @@
DO NOT USE THESE DEMO SERVERS IN PRODUCTION
Requires you to have done:
python setup.py develop
The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
To enable the servers to communicate, untrusted ssl certs are used. In order to do this, the servers do not check the certs
and are configured in a highly insecure way. Do not use these configuration files in production.
stop.sh will stop the synapse servers and the webclient.
clean.sh will delete the databases and log files.
To start a completely new set of servers, run:
./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db}
Also note that when joining a public room on a different HS via "#foo:bar.net", you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name.


@@ -4,19 +4,16 @@ set -e
DIR="$( cd "$( dirname "$0" )" && pwd )"
# Ensure that the servers are stopped.
$DIR/stop.sh
PID_FILE="$DIR/servers.pid"
if [ -f "$PID_FILE" ]; then
if [ -f $PID_FILE ]; then
echo "servers.pid exists!"
exit 1
fi
for port in 8080 8081 8082; do
rm -rf "${DIR:?}/$port"
rm -rf "$DIR/media_store.$port"
rm -rf $DIR/$port
rm -rf $DIR/media_store.$port
done
rm -rf "${DIR:?}/etc"
rm -rf $DIR/etc


@@ -4,104 +4,98 @@ DIR="$( cd "$( dirname "$0" )" && pwd )"
CWD=$(pwd)
cd "$DIR/.." || exit
cd "$DIR/.."
PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH
mkdir -p demo/etc
export PYTHONPATH=$(readlink -f $(pwd))
echo "$PYTHONPATH"
echo $PYTHONPATH
# Create servers which listen on HTTP at 808x and HTTPS at 848x.
for port in 8080 8081 8082; do
echo "Starting server on port $port... "
https_port=$((port + 400))
mkdir -p demo/$port
pushd demo/$port || exit
pushd demo/$port
# Generate the configuration for the homeserver at localhost:848x, note that
# the homeserver name needs to match the HTTPS listening port for federation
# to work properly.
#rm $DIR/etc/$port.config
python3 -m synapse.app.homeserver \
--generate-config \
--server-name "localhost:$https_port" \
--config-path "$port.config" \
-H "localhost:$https_port" \
--config-path "$DIR/etc/$port.config" \
--report-stats no
if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then
# Generate TLS keys.
openssl req -x509 -newkey rsa:4096 \
-keyout "localhost:$port.tls.key" \
-out "localhost:$port.tls.crt" \
-days 365 -nodes -subj "/O=matrix"
if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then
printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
# Add customisations to the configuration.
{
printf '\n\n# Customisation made by demo/start.sh\n\n'
echo "public_baseurl: http://localhost:$port/"
echo 'enable_registration: true'
echo 'enable_registration_without_verification: true'
echo ''
echo "public_baseurl: http://localhost:$port/" >> $DIR/etc/$port.config
# Warning, this heredoc depends on the interaction of tabs and spaces.
# Please don't accidentally bork me with your fancy settings.
listeners=$(cat <<-PORTLISTENERS
# Configure server to listen on both $https_port and $port
# This overrides some of the default settings above
listeners:
- port: $https_port
type: http
tls: true
resources:
- names: [client, federation]
echo 'enable_registration: true' >> $DIR/etc/$port.config
- port: $port
tls: false
bind_addresses: ['::1', '127.0.0.1']
type: http
x_forwarded: true
resources:
- names: [client, federation]
compress: false
PORTLISTENERS
)
# Warning, this heredoc depends on the interaction of tabs and spaces. Please don't
# accidentally bork me with your fancy settings.
listeners=$(cat <<-PORTLISTENERS
# Configure server to listen on both $https_port and $port
# This overrides some of the default settings above
listeners:
- port: $https_port
type: http
tls: true
resources:
- names: [client, federation]
echo "${listeners}"
- port: $port
tls: false
bind_addresses: ['::1', '127.0.0.1']
type: http
x_forwarded: true
resources:
- names: [client, federation]
compress: false
PORTLISTENERS
)
echo "${listeners}" >> $DIR/etc/$port.config
# Disable TLS for the servers
printf '\n\n# Disable TLS for the servers.'
echo '# DO NOT USE IN PRODUCTION'
echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true'
echo 'federation_verify_certificates: false'
# Disable tls for the servers
printf '\n\n# Disable tls on the servers.' >> $DIR/etc/$port.config
echo '# DO NOT USE IN PRODUCTION' >> $DIR/etc/$port.config
echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true' >> $DIR/etc/$port.config
echo 'federation_verify_certificates: false' >> $DIR/etc/$port.config
# Set paths for the TLS certificates.
echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
# Set tls paths
echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\"" >> $DIR/etc/$port.config
echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\"" >> $DIR/etc/$port.config
# Ignore keys from the trusted keys server
echo '# Ignore keys from the trusted keys server'
echo 'trusted_key_servers:'
echo ' - server_name: "matrix.org"'
echo ' accept_keys_insecurely: true'
echo ''
# Generate tls keys
openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix"
# Allow the servers to communicate over localhost.
allow_list=$(cat <<-ALLOW_LIST
# Allow the servers to communicate over localhost.
ip_range_whitelist:
- '127.0.0.1/8'
- '::1/128'
ALLOW_LIST
)
# Ignore keys from the trusted keys server
echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config
echo 'trusted_key_servers:' >> $DIR/etc/$port.config
echo ' - server_name: "matrix.org"' >> $DIR/etc/$port.config
echo ' accept_keys_insecurely: true' >> $DIR/etc/$port.config
echo "${allow_list}"
} >> "$port.config"
# Reduce the blacklist
blacklist=$(cat <<-BLACK
# Set the blacklist so that it doesn't include 127.0.0.1, ::1
federation_ip_range_blacklist:
- '10.0.0.0/8'
- '172.16.0.0/12'
- '192.168.0.0/16'
- '100.64.0.0/10'
- '169.254.0.0/16'
- 'fe80::/64'
- 'fc00::/7'
BLACK
)
echo "${blacklist}" >> $DIR/etc/$port.config
fi
# Check script parameters
if [ $# -eq 1 ]; then
if [ "$1" = "--no-rate-limit" ]; then
if [ $1 = "--no-rate-limit" ]; then
# Disable any rate limiting
ratelimiting=$(cat <<-RC
@@ -143,21 +137,22 @@ for port in 8080 8081 8082; do
burst_count: 1000
RC
)
echo "${ratelimiting}" >> "$port.config"
echo "${ratelimiting}" >> $DIR/etc/$port.config
fi
fi
# Always disable reporting of stats if the option is not there.
if ! grep -F "report_stats" -q "$port.config" ; then
echo "report_stats: false" >> "$port.config"
if ! grep -F "full_twisted_stacktraces" -q $DIR/etc/$port.config; then
echo "full_twisted_stacktraces: true" >> $DIR/etc/$port.config
fi
if ! grep -F "report_stats" -q $DIR/etc/$port.config ; then
echo "report_stats: false" >> $DIR/etc/$port.config
fi
# Run the homeserver in the background.
python3 -m synapse.app.homeserver \
--config-path "$port.config" \
--config-path "$DIR/etc/$port.config" \
-D \
popd || exit
popd
done
cd "$CWD" || exit
cd "$CWD"
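As the parameter check above shows, the script accepts a single optional flag to relax the rate limits for local testing:

```sh
# Start the three demo servers with rate limiting effectively disabled.
./demo/start.sh --no-rate-limit
```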

View File

@@ -8,7 +8,7 @@ for pid_file in $FILES; do
pid=$(cat "$pid_file")
if [[ $pid ]]; then
echo "Killing $pid_file with $pid"
kill "$pid"
kill $pid
fi
done

View File

@@ -1,79 +1,25 @@
# syntax=docker/dockerfile:1
# Dockerfile to build the matrixdotorg/synapse docker images.
#
# Note that it uses features which are only available in BuildKit - see
# https://docs.docker.com/go/buildkit/ for more information.
#
# To build the image, run `docker build` command from the root of the
# synapse repository:
#
# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile .
# docker build -f docker/Dockerfile .
#
# There is an optional PYTHON_VERSION build argument which sets the
# version of python to build against: for example:
#
# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.10 .
# docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.6 .
#
# Irritatingly, there is no blessed guide on how to distribute an application with its
# poetry-managed environment in a docker image. We have opted for
# `poetry export | pip install -r /dev/stdin`, but there are known bugs in
# `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
# In case we get bitten by those bugs in the future, the recommendations here might
# be useful:
# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
ARG PYTHON_VERSION=3.9
ARG PYTHON_VERSION=3.8
###
### Stage 0: generate requirements.txt
###
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
# Here we use it to set up a cache for apt (and below for pip), to improve
# rebuild speeds on slow connections.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y git \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
# We use a specific commit from poetry's master branch instead of our usual 1.1.12,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
# without it, we generate a requirements.txt with incorrect environment markers,
# which causes necessary packages to be omitted when we `pip install`.
#
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
WORKDIR /synapse
# Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock /synapse/
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
###
### Stage 1: builder
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
# install the OS build deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y \
RUN apt-get update && apt-get install -y \
build-essential \
libffi-dev \
libjpeg-dev \
@@ -87,25 +33,30 @@ RUN \
zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
# Copy just what we need to pip install
COPY scripts /synapse/scripts/
COPY MANIFEST.in README.rst setup.py synctl /synapse/
COPY synapse/__init__.py /synapse/synapse/__init__.py
COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# the whole synapse project so that this layer in the Docker cache can be
# used while you develop on the source
#
# This is aiming at installing the `[tool.poetry.depdendencies]` from pyproject.toml.
COPY --from=requirements /synapse/requirements.txt /synapse/
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt
# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
RUN pip install --prefix="/install" --no-warn-script-location \
/synapse[all]
# Copy over the rest of the synapse source code.
# Copy over the rest of the project
COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst /synapse/
# Install the synapse package itself.
# Install the synapse package itself and all of its children packages.
#
# This is aiming at installing only the `packages=find_packages(...)` from `setup.py`
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
###
### Stage 2: runtime
### Stage 1: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim
@@ -115,10 +66,7 @@ LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/syna
LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
LABEL org.opencontainers.image.licenses='Apache-2.0'
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y \
RUN apt-get update && apt-get install -y \
curl \
gosu \
libjpeg62-turbo \
@@ -134,6 +82,8 @@ COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf
VOLUME ["/data"]
EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]
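As the header comments note, this Dockerfile relies on BuildKit features, so builds need to be invoked with BuildKit enabled (the tag name here is arbitrary):

```sh
DOCKER_BUILDKIT=1 docker build -t matrixdotorg/synapse -f docker/Dockerfile .

# Optionally pin the Python version used for the build:
DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.10 .
```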

View File

@@ -16,7 +16,7 @@ ARG distro=""
### Stage 0: build a dh-virtualenv
###
# This is only really needed on focal, since other distributions we
# This is only really needed on bionic and focal, since other distributions we
# care about have a recent version of dh-virtualenv by default. Unfortunately,
# it looks like focal is going to be with us for a while.
#
@@ -36,8 +36,9 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
wget
# fetch and unpack the package
# TODO: Upgrade to 1.2.2 once bionic is dropped (1.2.2 requires debhelper 12; bionic has only 11)
RUN mkdir /dh-virtualenv
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/refs/tags/1.2.2.tar.gz
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
# install its build deps. We do another apt-cache-update here, because we might
@@ -85,12 +86,12 @@ RUN apt-get update -qq -o Acquire::Languages=none \
libpq-dev \
xmlsec1
COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /
# install dhvirtualenv. Update the apt cache again first, in case we got a
# cached cache from docker the first time.
RUN apt-get update -qq -o Acquire::Languages=none \
&& apt-get install -yq /dh-virtualenv_1.2.2-1_all.deb
&& apt-get install -yq /dh-virtualenv_1.2~dev-1_all.deb
WORKDIR /synapse/source
ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]
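A minimal sketch of invoking this builder image directly, assuming the repository root as the build context and a supported codename passed via the `distro` build argument:

```sh
docker build -f docker/Dockerfile-dhvirtualenv \
    --build-arg distro=ubuntu:focal \
    -t synapse-deb-builder:focal .
```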

30
docker/Dockerfile-pgtests Normal file
View File

@@ -0,0 +1,30 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:bionic
# The Sytest image doesn't come with python, so install that
RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
# We need tox to run the tests in run_pg_tests.sh
RUN python3 -m pip install tox
# Initialise the db
RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
# Add a user with our UID and GID so that files get created on the host owned
# by us, not root.
ARG UID
ARG GID
RUN groupadd --gid $GID user
RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
# Ensure we can start postgres by sudo-ing as the postgres user.
RUN apt-get update && apt-get -qq install -y sudo
RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
ADD run_pg_tests.sh /run_pg_tests.sh
# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
# so that we can `docker run` this container and pass arguments to pg_tests.sh
ENTRYPOINT ["/run_pg_tests.sh"]
USER user

View File

@@ -2,36 +2,22 @@
FROM matrixdotorg/synapse
# Install deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
redis-server nginx-light
RUN apt-get update
RUN apt-get install -y supervisor redis nginx
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install supervisor~=4.2
# Disable the default nginx sites
# Remove the default nginx sites
RUN rm /etc/nginx/sites-enabled/default
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/
# Expose nginx listener port
EXPOSE 8080/tcp
# Volume for user-editable config files, logs etc.
VOLUME ["/data"]
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD /bin/sh /healthcheck.sh
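Once a container built from this image is running, the aggregated healthcheck produced by `configure_workers_and_start.py` can be observed with standard Docker tooling (the container name `synapse` is an assumption):

```sh
docker inspect --format '{{.State.Health.Status}}' synapse
```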

View File

@@ -10,10 +10,10 @@ Note that running Synapse's unit tests from within the docker image is not suppo
## Testing with SQLite and single-process Synapse
> Note that `scripts-dev/complement.sh` is a script that will automatically build
> Note that `scripts-dev/complement.sh` is a script that will automatically build
> and run an SQLite-based, single-process of Synapse against Complement.
The instructions below will set up Complement testing for a single-process,
The instructions below will set up Complement testing for a single-process,
SQLite-based Synapse deployment.
Start by building the base Synapse docker image. If you wish to run tests with the latest
@@ -26,22 +26,23 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
This will build an image with the tag `matrixdotorg/synapse`.
Next, build the Synapse image for Complement.
Next, build the Synapse image for Complement. You will need a local checkout
of Complement. Change to the root of your Complement checkout and run:
```sh
docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement
docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
```
This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
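A sketch of what that invocation typically looks like, run from the root of a Complement checkout (the test selection and flags are assumptions; defer to Complement's documentation for the full set of options):

```sh
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v ./tests/...
```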
## Testing with PostgreSQL and single or multi-process Synapse
The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
database backend.
As with the single-process image, build the base Synapse docker image. If you wish to run
@@ -54,7 +55,7 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
This will build an image with the tag `matrixdotorg/synapse`.
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Again, from the root of the repository:
```sh
@@ -63,20 +64,21 @@ docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
This will build an image with the tag `matrixdotorg/synapse-workers`.
It's worth noting at this point that this image is fully functional, and
can be used for testing against locally. See instructions for using the container
It's worth noting at this point that this image is fully functional, and
can be used for testing against locally. See instructions for using the container
under
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
below.
Finally, build the Synapse image for Complement, which is based on
`matrixdotorg/synapse-workers`.
`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
the root of your Complement checkout and run:
```sh
docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement
docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
```
This will build an image with the tag `complement-synapse-workers`, which can be handed to
This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
@@ -89,10 +91,10 @@ bundling all necessary components together for a workerised homeserver instance.
This includes any desired Synapse worker processes, a nginx to route traffic accordingly,
a redis for worker communication and a supervisord instance to start up and monitor all
processes. You will need to provide your own postgres container to connect to, and TLS
processes. You will need to provide your own postgres container to connect to, and TLS
is not handled by the container.
Once you've built the image using the above instructions, you can run it. Be sure
Once you've built the image using the above instructions, you can run it. Be sure
you've set up a volume according to the [usual Synapse docker instructions](README.md).
Then run something along the lines of:
@@ -110,7 +112,7 @@ docker run -d --name synapse \
matrixdotorg/synapse-workers
```
...substituting `POSTGRES*` variables for those that match a postgres host you have
...substituting `POSTGRES*` variables for those that match a postgres host you have
available (usually a running postgres docker container).
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
@@ -128,11 +130,11 @@ Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no
(leaving only the main process). The container is configured to use redis-based worker
mode.
Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
after the timestamp.
Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:
00, according to the container's clock. Logging for the main process must still be
00, according to the container's clock. Logging for the main process must still be
configured by modifying the homeserver's log config in your Synapse data volume.

View File

@@ -65,12 +65,7 @@ The following environment variables are supported in `generate` mode:
* `SYNAPSE_DATA_DIR`: where the generated config will put persistent data
such as the database and media store. Defaults to `/data`.
* `UID`, `GID`: the user id and group id to use for creating the data
directories. If unset, and no user is set via `docker run --user`, defaults
to `991`, `991`.
## Postgres
By default the config will use SQLite. See the [docs on using Postgres](https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md) for more info on how to use Postgres. Until this section is improved [this issue](https://github.com/matrix-org/synapse/issues/8304) may provide useful information.
directories. Defaults to `991`, `991`.
## Running synapse
@@ -102,9 +97,7 @@ The following environment variables are supported in `run` mode:
`<SYNAPSE_CONFIG_DIR>/homeserver.yaml`.
* `SYNAPSE_WORKER`: module to execute, used when running synapse with workers.
Defaults to `synapse.app.homeserver`, which is suitable for non-worker mode.
* `UID`, `GID`: the user and group id to run Synapse as. If unset, and no user
is set via `docker run --user`, defaults to `991`, `991`. Note that this user
must have permission to read the config files, and write to the data directories.
* `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`.
* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`.
For more complex setups (e.g. for workers) you can also pass your args directly to synapse using `run` mode. For example like this:
@@ -193,7 +186,7 @@ point to another Dockerfile.
## Disabling the healthcheck
If you are using a non-standard port or tls inside docker you can disable the healthcheck
whilst running the above `docker run` commands.
whilst running the above `docker run` commands.
```
--no-healthcheck
@@ -219,7 +212,7 @@ If you wish to point the healthcheck at a different port with docker command, ad
## Setting the healthcheck in docker-compose file
You can add the following to set a custom healthcheck in a docker compose file.
You will need docker-compose version >2.1 for this to work.
You will need docker-compose version >2.1 for this to work.
```
healthcheck:
@@ -233,5 +226,5 @@ healthcheck:
## Using jemalloc
Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
You can read about jemalloc by reading the Synapse
[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).

View File

@@ -5,7 +5,7 @@
set -ex
# Get the codename from distro env
DIST=$(cut -d ':' -f2 <<< "${distro:?}")
DIST=`cut -d ':' -f2 <<< $distro`
# we get a read-only copy of the source: make a writeable copy
cp -aT /synapse/source /synapse/build
@@ -17,7 +17,7 @@ cd /synapse/build
# Section to determine which "component" it should go into (see
# https://manpages.debian.org/stretch/reprepro/reprepro.1.en.html#GUESSING)
DEB_VERSION=$(dpkg-parsechangelog -SVersion)
DEB_VERSION=`dpkg-parsechangelog -SVersion`
case $DEB_VERSION in
*~rc*|*~a*|*~b*|*~c*)
sed -ie '/^Section:/c\Section: prerelease' debian/control

View File

@@ -1,22 +0,0 @@
# A dockerfile which builds an image suitable for testing Synapse under
# complement.
ARG SYNAPSE_VERSION=latest
FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
ENV SERVER_NAME=localhost
COPY conf/* /conf/
# generate a signing key
RUN generate_signing_key -o /conf/server.signing.key
WORKDIR /data
EXPOSE 8008 8448
ENTRYPOINT ["/conf/start.sh"]
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD curl -fSs http://localhost:8008/health || exit 1

View File

@@ -1 +0,0 @@
Stuff for building the docker image used for testing under complement.

View File

@@ -1,40 +0,0 @@
# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse
# by including a built-in postgres instance, as well as setting up the homeserver so
# that it is ready for testing via Complement.
#
# Instructions for building this image from those it depends on is detailed in this guide:
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
FROM matrixdotorg/synapse-workers
# Install postgresql
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
\"ALTER USER postgres PASSWORD 'somesecret'; \
CREATE DATABASE synapse \
ENCODING 'UTF8' \
LC_COLLATE='C' \
LC_CTYPE='C' \
template=template0;\" | psql" && pg_ctlcluster 13 main stop
# Modify the shared homeserver config with postgres support, certificate setup
# and the disabling of rate-limiting
COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
WORKDIR /data
COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
# Copy the entrypoint
COPY conf-workers/start-complement-synapse-workers.sh /
# Expose nginx's listener ports
EXPOSE 8008 8448
ENTRYPOINT ["/start-complement-synapse-workers.sh"]
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh

View File

@@ -1,16 +0,0 @@
[program:postgres]
command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground
# Lower priority number = starts first
priority=1
autorestart=unexpected
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
# Use 'Fast Shutdown' mode which aborts current transactions and closes connections quickly.
# (Default (TERM) is 'Smart Shutdown' which stops accepting new connections but
# lets existing connections close gracefully.)
stopsignal=INT

View File

@@ -1,61 +0,0 @@
#!/bin/bash
#
# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
set -e
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
echo "$d $@"
}
# Set the server name of the homeserver
export SYNAPSE_SERVER_NAME=${SERVER_NAME}
# No need to report stats here
export SYNAPSE_REPORT_STATS=no
# Set postgres authentication details which will be placed in the homeserver config file
export POSTGRES_PASSWORD=somesecret
export POSTGRES_USER=postgres
export POSTGRES_HOST=localhost
# Specify the workers to test with
export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
user_dir, \
media_repository, \
federation_inbound, \
federation_reader, \
federation_sender, \
synchrotron, \
appservice, \
pusher"
# Add Complement's appservice registration directory, if there is one
# (It can be absent when there are no application services in this test!)
if [ -d /complement/appservice ]; then
export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
fi
# Generate a TLS key, then generate a certificate by having Complement's CA sign it
# Note that both the key and certificate are in PEM format (not DER).
openssl genrsa -out /conf/server.tls.key 2048
openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}"
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt
export SYNAPSE_TLS_CERT=/conf/server.tls.crt
export SYNAPSE_TLS_KEY=/conf/server.tls.key
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py

View File

@@ -1,90 +0,0 @@
## Server ##
report_stats: False
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
## Registration ##
# Needed by Complement to register admin users
# DO NOT USE in a production configuration! This should be a random secret.
registration_shared_secret: complement
## Federation ##
# trust certs signed by Complement's CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
federation_ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
rc_3pid_validation:
per_second: 1000
burst_count: 1000
rc_invites:
per_room:
per_second: 1000
burst_count: 1000
per_user:
per_second: 1000
burst_count: 1000
federation_rr_transactions_per_room_per_second: 9999
## Experimental Features ##
experimental_features:
# Enable history backfilling support
msc2716_enabled: true
# Enable spaces support
spaces_enabled: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"

View File

@@ -1,129 +0,0 @@
## Server ##
server_name: SERVER_NAME
log_config: /conf/log_config.yaml
report_stats: False
signing_key_path: /conf/server.signing.key
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
## Listeners ##
tls_certificate_path: /conf/server.tls.crt
tls_private_key_path: /conf/server.tls.key
bcrypt_rounds: 4
registration_shared_secret: complement
listeners:
- port: 8448
bind_addresses: ['::']
type: http
tls: true
resources:
- names: [federation]
- port: 8008
bind_addresses: ['::']
type: http
resources:
- names: [client]
## Database ##
database:
name: "sqlite3"
args:
# We avoid /data, as it is a volume and is not transferred when the container is committed,
# which is a fundamental necessity in complement.
database: "/conf/homeserver.db"
## Federation ##
# trust certs signed by the complement CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
rc_3pid_validation:
per_second: 1000
burst_count: 1000
rc_invites:
per_room:
per_second: 1000
burst_count: 1000
per_user:
per_second: 1000
burst_count: 1000
federation_rr_transactions_per_room_per_second: 9999
## API Configuration ##
# A list of application service config files to use
#
app_service_config_files:
AS_REGISTRATION_FILES
## Experimental Features ##
experimental_features:
# Enable spaces support
spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
msc3706_enabled: true
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"

View File

@@ -1,24 +0,0 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
# log to stdout, for easier use with 'docker logs'
stream: 'ext://sys.stdout'
root:
level: INFO
handlers: [console]
disable_existing_loggers: false

View File

@@ -1,30 +0,0 @@
#!/bin/sh
set -e
sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml
# Add the application service registration files to the homeserver.yaml config
for filename in /complement/appservice/*.yaml; do
[ -f "$filename" ] || break
as_id=$(basename "$filename" .yaml)
# Insert the path to the registration file and the AS_REGISTRATION_FILES marker after
# so we can add the next application service in the next iteration of this for loop
sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml
done
# Remove the AS_REGISTRATION_FILES entry
sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml
# generate an ssl key and cert for the server, signed by the complement CA
openssl genrsa -out /conf/server.tls.key 2048
openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}"
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt
exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@"

View File

@@ -1,6 +0,0 @@
#!/bin/sh
# This healthcheck script is designed to return OK when every
# host involved returns OK
{%- for healthcheck_url in healthcheck_urls %}
curl -fSs {{ healthcheck_url }} || exit 1
{%- endfor %}
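For illustration, with the main process and a single worker the rendered script would look roughly like this (the worker port is illustrative; actual ports are assigned by `configure_workers_and_start.py`):

```sh
#!/bin/sh
# This healthcheck script is designed to return OK when every
# host involved returns OK
curl -fSs http://localhost:8080/health || exit 1
curl -fSs http://localhost:18009/health || exit 1
```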

View File

@@ -9,22 +9,6 @@ server {
listen 8008;
listen [::]:8008;
{% if tls_cert_path is not none and tls_key_path is not none %}
listen 8448 ssl;
listen [::]:8448 ssl;
ssl_certificate {{ tls_cert_path }};
ssl_certificate_key {{ tls_key_path }};
# Some directives from cipherlist.eu (fka cipherli.st):
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
ssl_ecdh_curve secp384r1; # Requires nginx >= 1.1.0
ssl_session_cache shared:SSL:10m;
ssl_session_tickets off; # Requires nginx >= 1.5.9
{% endif %}
server_name localhost;
# Nginx by default only allows file uploads up to 1M in size

View File

@@ -6,13 +6,4 @@
redis:
enabled: true
{% if appservice_registrations is not none %}
## Application Services ##
# A list of application service config files to use.
app_service_config_files:
{%- for path in appservice_registrations %}
- "{{ path }}"
{%- endfor %}
{%- endif %}
{{ shared_worker_config }}
{{ shared_worker_config }}

View File

@@ -5,11 +5,8 @@
nodaemon=true
user=root
[include]
files = /etc/supervisor/conf.d/*.conf
[program:nginx]
command=/usr/local/bin/prefix-log /usr/sbin/nginx -g "daemon off;"
command=/usr/sbin/nginx -g "daemon off;"
priority=500
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
@@ -19,7 +16,7 @@ username=www-data
autorestart=true
[program:redis]
command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
@@ -29,7 +26,7 @@ username=redis
autorestart=true
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
priority=10
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory

View File

@@ -148,6 +148,14 @@ bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true
# The list of identity servers trusted to verify third party
# identifiers by this server.
#
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
trusted_third_party_id_servers:
- matrix.org
- vector.im
## Metrics ###

View File

@@ -2,7 +2,11 @@ version: 1
formatters:
precise:
{% if worker_name %}
format: '%(asctime)s - worker:{{ worker_name }} - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% else %}
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% endif %}
handlers:
{% if LOG_FILE_PATH %}

View File

@@ -21,11 +21,6 @@
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
# below. Leave empty for no workers, or set to '*' for all possible workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
# * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
# Nginx will be configured to serve TLS on port 8448.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -34,8 +29,6 @@
import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
import jinja2
import yaml
@@ -43,7 +36,7 @@ import yaml
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
WORKERS_CONFIG = {
"pusher": {
"app": "synapse.app.pusher",
"listener_resources": [],
@@ -55,7 +48,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.user_dir",
"listener_resources": ["client"],
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
"^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$"
],
"shared_extra_conf": {"update_user_directory": False},
"worker_extra_conf": "",
@@ -75,10 +68,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "enable_media_repo: true",
},
"appservice": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.appservice",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {"notify_appservices_from_worker": "appservice"},
"shared_extra_conf": {"notify_appservices": False},
"worker_extra_conf": "",
},
"federation_sender": {
@@ -92,10 +85,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": ["client"],
"endpoint_patterns": [
"^/_matrix/client/(v2_alpha|r0|v3)/sync$",
"^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$",
"^/_matrix/client/(api/v1|r0|v3)/initialSync$",
"^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$",
"^/_matrix/client/(v2_alpha|r0)/sync$",
"^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
"^/_matrix/client/(api/v1|r0)/initialSync$",
"^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -153,11 +146,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": ["client"],
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact",
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
"^/_matrix/client/(api/v1|r0|unstable)/join/",
"^/_matrix/client/(api/v1|r0|unstable)/profile/",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -165,7 +158,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"frontend_proxy": {
"app": "synapse.app.frontend_proxy",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|unstable)/keys/upload"],
"shared_extra_conf": {},
"worker_extra_conf": (
"worker_main_http_uri: http://127.0.0.1:%d"
@@ -177,7 +170,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
# Templates for sections that may be inserted multiple times in config files
SUPERVISORD_PROCESS_CONFIG_BLOCK = """
[program:synapse_{name}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {app} \
command=/usr/local/bin/python -m {app} \
--config-path="{config_path}" \
--config-path=/conf/workers/shared.yaml \
--config-path=/conf/workers/{name}.yaml
@@ -207,7 +200,7 @@ upstream {upstream_worker_type} {{
# Utility functions
def log(txt: str) -> None:
def log(txt: str):
"""Log something to the stdout.
Args:
@@ -216,7 +209,7 @@ def log(txt: str) -> None:
print(txt)
def error(txt: str) -> NoReturn:
def error(txt: str):
"""Log something and exit with an error code.
Args:
@@ -226,7 +219,7 @@ def error(txt: str) -> NoReturn:
sys.exit(2)
def convert(src: str, dst: str, **template_vars: object) -> None:
def convert(src: str, dst: str, **template_vars):
"""Generate a file from a template
Args:
@@ -296,7 +289,7 @@ def add_sharding_to_shared_config(
shared_config.setdefault("media_instance_running_background_jobs", worker_name)
def generate_base_homeserver_config() -> None:
def generate_base_homeserver_config():
"""Starts Synapse and generates a basic homeserver config, which will later be
modified for worker support.
@@ -308,15 +301,13 @@ def generate_base_homeserver_config() -> None:
subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
def generate_worker_files(
environ: Mapping[str, str], config_path: str, data_dir: str
) -> None:
def generate_worker_files(environ, config_path: str, data_dir: str):
"""Read the desired list of workers from environment variables and generate
shared homeserver, nginx and supervisord configs.
Args:
environ: os.environ instance.
config_path: The location of the generated Synapse main worker config file.
environ: _Environ[str]
config_path: Where to output the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
"""
@@ -329,8 +320,7 @@ def generate_worker_files(
# and adding a replication listener.
# First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result to the shared
# config file.
# another listener for replication. Later we'll write out the result.
listeners = [
{
"port": 9093,
@@ -349,7 +339,7 @@ def generate_worker_files(
# base shared worker jinja2 template.
#
# This config file will be passed to all workers, included Synapse's main process.
shared_config: Dict[str, Any] = {"listeners": listeners}
shared_config = {"listeners": listeners}
# The supervisord config. The contents of which will be inserted into the
# base supervisord jinja2 template.
@@ -365,7 +355,7 @@ def generate_worker_files(
# worker_type: {1234, 1235, ...}}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams: Dict[str, Set[int]] = {}
nginx_upstreams = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
# placed after the proxy_pass directive. The main benefit to representing this data as a
@@ -377,13 +367,13 @@ def generate_worker_files(
nginx_locations = {}
# Read the desired worker configuration from the environment
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES")
if worker_types_env is None:
worker_types = environ.get("SYNAPSE_WORKER_TYPES")
if worker_types is None:
# No workers, just the main process
worker_types = []
else:
# Split type names by comma
worker_types = worker_types_env.split(",")
worker_types = worker_types.split(",")
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -394,11 +384,7 @@ def generate_worker_files(
# A counter of worker_type -> int. Used for determining the name for a given
# worker type when generating its config file, as each worker's name is just
# worker_type + instance #
worker_type_counter: Dict[str, int] = {}
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
worker_type_counter = {}
# For each worker type specified by the user, create config values
for worker_type in worker_types:
@@ -418,14 +404,12 @@ def generate_worker_files(
# e.g. federation_reader1
worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
{"name": worker_name, "port": worker_port, "config_path": config_path}
)
# Update the shared config with any worker-type specific options
shared_config.update(worker_config["shared_extra_conf"])
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
if worker_type_total_count > 1:
@@ -454,7 +438,21 @@ def generate_worker_files(
# Write out the worker's logging config file
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
# Check whether we should write worker logs to disk, in addition to the console
extra_log_template_args = {}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
dir=data_dir, name=worker_name
)
# Render and write the file
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
convert(
"/conf/log.config",
log_config_filepath,
worker_name=worker_name,
**extra_log_template_args,
)
# Then a worker config file
convert(
@@ -476,7 +474,6 @@ def generate_worker_files(
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
@@ -490,27 +487,11 @@ def generate_worker_files(
# Finally, we'll write out the config files.
# log config for the master process
master_log_config = generate_worker_log_config(environ, "master", data_dir)
shared_config["log_config"] = master_log_config
# Find application service registrations
appservice_registrations = None
appservice_registration_dir = os.environ.get("SYNAPSE_AS_REGISTRATION_DIR")
if appservice_registration_dir:
# Scan for all YAML files that should be application service registrations.
appservice_registrations = [
str(reg_path.resolve())
for reg_path in Path(appservice_registration_dir).iterdir()
if reg_path.suffix.lower() in (".yaml", ".yml")
]
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
"/conf/workers/shared.yaml",
shared_worker_config=yaml.dump(shared_config),
appservice_registrations=appservice_registrations,
)
# Nginx config
@@ -519,57 +500,31 @@ def generate_worker_files(
"/etc/nginx/conf.d/matrix-synapse.conf",
worker_locations=nginx_location_config,
upstream_directives=nginx_upstream_config,
tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
)
# Supervisord config
os.makedirs("/etc/supervisor", exist_ok=True)
convert(
"/conf/supervisord.conf.j2",
"/etc/supervisor/supervisord.conf",
"/etc/supervisor/conf.d/supervisord.conf",
main_config_path=config_path,
worker_config=supervisord_config,
)
# healthcheck config
convert(
"/conf/healthcheck.sh.j2",
"/healthcheck.sh",
healthcheck_urls=healthcheck_urls,
)
# Ensure the logging directory exists
log_dir = data_dir + "/logs"
if not os.path.exists(log_dir):
os.mkdir(log_dir)
def generate_worker_log_config(
environ: Mapping[str, str], worker_name: str, data_dir: str
) -> str:
"""Generate a log.config file for the given worker.
def start_supervisord():
"""Starts up supervisord which then starts and monitors all other necessary processes
Returns: the path to the generated file
Raises: CalledProcessError if calling start.py return a non-zero exit code.
"""
# Check whether we should write worker logs to disk, in addition to the console
extra_log_template_args = {}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
dir=data_dir, name=worker_name
)
# Render and write the file
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
convert(
"/conf/log.config",
log_config_filepath,
worker_name=worker_name,
**extra_log_template_args,
)
return log_config_filepath
subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE)
def main(args: List[str], environ: MutableMapping[str, str]) -> None:
def main(args, environ):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
@@ -596,13 +551,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
log("Starting supervisord")
os.execl(
"/usr/local/bin/supervisord",
"supervisord",
"-c",
"/etc/supervisor/supervisord.conf",
)
start_supervisord()
if __name__ == "__main__":

View File

@@ -1,12 +0,0 @@
#!/bin/bash
#
# Prefixes all lines on stdout and stderr with the process name (as determined by
# the SUPERVISOR_PROCESS_NAME env var, which is automatically set by Supervisor).
#
# Usage:
# prefix-log command [args...]
#
exec 1> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&1)
exec 2> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&2)
exec "$@"
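For example, a line written by a supervised process would come out prefixed like this (a sketch; under supervisord the variable is set automatically):

```sh
SUPERVISOR_PROCESS_NAME=synapse_main /usr/local/bin/prefix-log echo "started"
# prints: synapse_main | started
```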

19
docker/run_pg_tests.sh Executable file
View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# This script runs the PostgreSQL tests inside a Docker container. It expects
# the relevant source files to be mounted into /src (done automatically by the
# caller script). It will set up the database, run it, and then use the tox
# configuration to run the tests.
set -e
# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres
# Start the database
sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start
# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=./.tox-pg-container -e py36-postgres "$@"
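A sketch of how this script is typically exercised via the accompanying `Dockerfile-pgtests`, with the source tree mounted at `/src` (the image tag, build context and test selection are assumptions):

```sh
docker build -f docker/Dockerfile-pgtests -t synapse-pg-tests \
    --build-arg UID=$(id -u) --build-arg GID=$(id -g) docker/

docker run --rm -v "$(pwd):/src" synapse-pg-tests tests.handlers
```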

View File

@@ -6,28 +6,27 @@ import os
import platform
import subprocess
import sys
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional
import jinja2
# Utility functions
def log(txt: str) -> None:
def log(txt):
print(txt, file=sys.stderr)
def error(txt: str) -> NoReturn:
def error(txt):
log(txt)
sys.exit(2)
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
def convert(src, dst, environ):
"""Generate a file from a template
Args:
src: path to input file
dst: path to file to write
environ: environment dictionary, for replacement mappings.
src (str): path to input file
dst (str): path to file to write
environ (dict): environment dictionary, for replacement mappings.
"""
with open(src) as infile:
template = infile.read()
@@ -36,30 +35,25 @@ def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
outfile.write(rendered)
def generate_config_from_template(
config_dir: str,
config_path: str,
os_environ: Mapping[str, str],
ownership: Optional[str],
) -> None:
def generate_config_from_template(config_dir, config_path, environ, ownership):
"""Generate a homeserver.yaml from environment variables
Args:
config_dir: where to put generated config files
config_path: where to put the main config file
os_environ: environment mapping
ownership: "<user>:<group>" string which will be used to set
config_dir (str): where to put generated config files
config_path (str): where to put the main config file
environ (dict): environment dictionary
ownership (str|None): "<user>:<group>" string which will be used to set
ownership of the generated configs. If None, ownership will not change.
"""
for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
if v not in os_environ:
if v not in environ:
error(
"Environment variable '%s' is mandatory when generating a config file."
% (v,)
)
# populate some params from data files (if they exist, else create new ones)
environ: Dict[str, Any] = dict(os_environ)
environ = environ.copy()
secrets = {
"registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
"macaroon": "SYNAPSE_MACAROON_SECRET_KEY",
@@ -114,7 +108,7 @@ def generate_config_from_template(
# Hopefully we already have a signing key, but generate one if not.
args = [
sys.executable,
"python",
"-m",
"synapse.app.homeserver",
"--config-path",
@@ -126,19 +120,18 @@ def generate_config_from_template(
]
if ownership is not None:
log(f"Setting ownership on /data to {ownership}")
subprocess.check_output(["chown", "-R", ownership, "/data"])
args = ["gosu", ownership] + args
subprocess.check_output(args)
def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
def run_generate_config(environ, ownership):
"""Run synapse with a --generate-config param to generate a template config file
Args:
environ: env vars from `os.enrivon`.
ownership: "userid:groupid" arg for chmod. If None, ownership will not change.
environ (dict): env var dict
ownership (str|None): "userid:groupid" arg for chmod. If None, ownership will not change.
Never returns.
"""
@@ -151,20 +144,14 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
log(f"Setting ownership on {data_dir} to {ownership}")
subprocess.check_output(["chown", ownership, data_dir])
# create a suitable log config from our template
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
if not os.path.exists(log_config_file):
log("Creating log config %s" % (log_config_file,))
convert("/conf/log.config", log_config_file, environ)
# generate the main config file, and a signing key.
args = [
sys.executable,
"python",
"-m",
"synapse.app.homeserver",
"--server-name",
@@ -181,23 +168,29 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
"--open-private-ports",
]
# log("running %s" % (args, ))
os.execv(sys.executable, args)
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
subprocess.check_output(["chown", ownership, data_dir])
args = ["gosu", ownership] + args
os.execv("/usr/sbin/gosu", args)
else:
os.execv("/usr/local/bin/python", args)
def main(args: List[str], environ: MutableMapping[str, str]) -> None:
def main(args, environ):
mode = args[1] if len(args) > 1 else "run"
# if we were given an explicit user to switch to, do so
ownership = None
if "UID" in environ:
desired_uid = int(environ["UID"])
desired_gid = int(environ.get("GID", "991"))
ownership = f"{desired_uid}:{desired_gid}"
elif os.getuid() == 0:
# otherwise, if we are running as root, use user 991
ownership = "991:991"
desired_uid = int(environ.get("UID", "991"))
desired_gid = int(environ.get("GID", "991"))
synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
if (desired_uid == os.getuid()) and (desired_gid == os.getgid()):
ownership = None
else:
ownership = "{}:{}".format(desired_uid, desired_gid)
if ownership is None:
log("Will not perform chmod/gosu as UserID already matches request")
# In generate mode, generate a configuration and missing keys, then exit
if mode == "generate":
@@ -260,12 +253,12 @@ running with 'migrate_config'. See the README for more details.
log("Starting synapse with args " + " ".join(args))
args = [sys.executable] + args
args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execve("/usr/sbin/gosu", args, environ)
else:
os.execve(sys.executable, args, environ)
os.execve("/usr/local/bin/python", args, environ)
if __name__ == "__main__":

View File

@@ -15,12 +15,12 @@ in `homeserver.yaml`, to the list of authorized domains. If you have not set
1. Agree to the terms of service and submit.
1. Copy your site key and secret key and add them to your `homeserver.yaml`
configuration file
```yaml
```
recaptcha_public_key: YOUR_SITE_KEY
recaptcha_private_key: YOUR_SECRET_KEY
```
1. Enable the CAPTCHA for new registrations
```yaml
```
enable_registration_captcha: true
```
1. Go to the settings page for the CAPTCHA you just created

View File

@@ -0,0 +1,335 @@
# MSC1711 Certificates FAQ
## Historical Note
This document was originally written to guide server admins through the upgrade
path towards Synapse 1.0. Specifically,
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md)
required that all servers present valid TLS certificates on their federation
API. Admins were encouraged to achieve compliance from version 0.99.0 (released
in February 2019) ahead of version 1.0 (released June 2019) enforcing the
certificate checks.
Much of what follows is now outdated since most admins will have already
upgraded, however it may be of use to those with old installs returning to the
project.
If you are setting up a server from scratch you almost certainly should look at
the [installation guide](setup/installation.md) instead.
## Introduction
The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
supports the r0.1 release of the server to server specification, but is
compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well
as post-r0.1 behaviour, in order to allow for a smooth upgrade across the
federation.
The most important thing to know is that Synapse 1.0.0 will require a valid TLS
certificate on federation endpoints. Self signed certificates will not be
sufficient.
Synapse 0.99.0 makes it easy to configure TLS certificates and will
interoperate with both >= 1.0.0 servers as well as existing servers yet to
upgrade.
**It is critical that all admins upgrade to 0.99.0 and configure a valid TLS
certificate.** Admins will have 1 month to do so, after which 1.0.0 will be
released and those servers without a valid certificate will no longer be able
to federate with >= 1.0.0 servers.
Full details on how to carry out this configuration change are given
[below](#configuring-certificates-for-compatibility-with-synapse-100). A
timeline and some frequently asked questions are also given below.
For more details and context on the release of the r0.1 Server/Server API and
imminent Matrix 1.0 release, you can also see our
[main talk from FOSDEM 2019](https://matrix.org/blog/2019/02/04/matrix-at-fosdem-2019/).
## Contents
* Timeline
* Configuring certificates for compatibility with Synapse 1.0
* FAQ
* Synapse 0.99.0 has just been released, what do I need to do right now?
* How do I upgrade?
* What will happen if I do not set up a valid federation certificate
immediately?
* What will happen if I do nothing at all?
* When do I need a SRV record or .well-known URI?
* Can I still use an SRV record?
* I have created a .well-known URI. Do I still need an SRV record?
* It used to work just fine, why are you breaking everything?
* Can I manage my own certificates rather than having Synapse renew
certificates itself?
* Do you still recommend against using a reverse proxy on the federation port?
* Do I still need to give my TLS certificates to Synapse if I am using a
reverse proxy?
* Do I need the same certificate for the client and federation port?
* How do I tell Synapse to reload my keys/certificates after I replace them?
## Timeline
**5th Feb 2019 - Synapse 0.99.0 is released.**
All server admins are encouraged to upgrade.
0.99.0:
- provides support for ACME to make setting up Let's Encrypt certs easy, as
well as .well-known support.
- does not enforce that a valid CA cert is present on the federation API, but
rather makes it easy to set one up.
- provides support for .well-known
Admins should upgrade and configure a valid CA cert. Homeservers that require a
.well-known entry (see below) should retain their SRV record and use it
alongside their .well-known record.
**10th June 2019 - Synapse 1.0.0 is released**
1.0.0 is scheduled for release on 10th June. In
accordance with the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html)
1.0.0 will enforce certificate validity. This means that any homeserver without a
valid certificate after this point will no longer be able to federate with
1.0.0 servers.
## Configuring certificates for compatibility with Synapse 1.0.0
### If you do not currently have an SRV record
In this case, your `server_name` points to the host where your Synapse is
running. There is no need to create a `.well-known` URI or an SRV record, but
you will need to give Synapse a valid, signed, certificate.
### If you do have an SRV record currently
If you are using an SRV record, your matrix domain (`server_name`) may not
point to the same host that your Synapse is running on (the 'target
domain'). (If it does, you can follow the recommendation above; otherwise, read
on.)
Let's assume that your `server_name` is `example.com`, and your Synapse is
hosted at a target domain of `customer.example.net`. Currently you should have
an SRV record which looks like:
```
_matrix._tcp.example.com. IN SRV 10 5 8000 customer.example.net.
```
In this situation, you have three choices for how to proceed:
#### Option 1: give Synapse a certificate for your matrix domain
Synapse 1.0 will expect your server to present a TLS certificate for your
`server_name` (`example.com` in the above example). You can achieve this by acquiring a
certificate for the `server_name` yourself (for example, using `certbot`), and giving it
and the key to Synapse via `tls_certificate_path` and `tls_private_key_path`.
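If you want a quick check that the certificate presented on your federation port
would pass validation, something like the following sketch can be run from any
machine. It assumes your federation listener is reachable at `example.com:8448`
and uses the local system trust store, which only approximates what other
homeservers will check:

```python
import socket
import ssl

server_name = "example.com"  # your matrix domain (server_name)
port = 8448                  # default federation port

# create_default_context() enables certificate verification and hostname
# checking, so the handshake below fails with an SSLError if the
# certificate is self-signed, expired, or issued for the wrong name.
context = ssl.create_default_context()
with socket.create_connection((server_name, port), timeout=10) as sock:
    with context.wrap_socket(sock, server_hostname=server_name) as tls:
        print("certificate accepted:", tls.getpeercert()["subject"])
```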
#### Option 2: run Synapse behind a reverse proxy
If you have an existing reverse proxy set up with correct TLS certificates for
your domain, you can simply route all traffic through the reverse proxy by
updating the SRV record appropriately (or removing it, if the proxy listens on
8448).
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
reverse proxy.
#### Option 3: add a .well-known file to delegate your matrix traffic
This will allow you to keep Synapse on a separate domain, without having to
give it a certificate for the matrix domain.
You can do this with a `.well-known` file as follows:
1. Keep the SRV record in place - it is needed for backwards compatibility
with Synapse 0.34 and earlier.
2. Give Synapse a certificate corresponding to the target domain
   (`customer.example.net` in the above example). You can do this by acquiring a
certificate for the target domain and giving it to Synapse via `tls_certificate_path`
and `tls_private_key_path`.
3. Restart Synapse to ensure the new certificate is loaded.
4. Arrange for a `.well-known` file at
`https://<server_name>/.well-known/matrix/server` with contents:
```json
{"m.server": "<target server name>"}
```
where the target server name is resolved as usual (i.e. SRV lookup, falling
back to talking to port 8448).
In the above example, where synapse is listening on port 8000,
`https://example.com/.well-known/matrix/server` should have `m.server` set to one of:
1. `customer.example.net` ─ with a SRV record on
   `_matrix._tcp.customer.example.net` pointing to port 8000, or:
2. `customer.example.net` ─ updating synapse to listen on the default port
8448, or:
3. `customer.example.net:8000` ─ ensuring that if there is a reverse proxy
on `customer.example.net:8000` it correctly handles HTTP requests with
Host header set to `customer.example.net:8000`.
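As a quick sanity check of the delegation described above, a small sketch like
the following (assuming the third-party `requests` package is installed)
fetches the `.well-known` document and prints the delegated `m.server` value:

```python
import requests

server_name = "example.com"  # your matrix domain

# Fetch the delegation document that other homeservers will look up.
resp = requests.get(
    f"https://{server_name}/.well-known/matrix/server", timeout=10
)
resp.raise_for_status()
# For the example above this should print one of the values listed,
# e.g. "customer.example.net" or "customer.example.net:8000".
print("m.server:", resp.json().get("m.server"))
```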
## FAQ
### Synapse 0.99.0 has just been released, what do I need to do right now?
Upgrade as soon as you can in preparation for Synapse 1.0.0, and update your
TLS certificates as [above](#configuring-certificates-for-compatibility-with-synapse-100).
### What will happen if I do not set up a valid federation certificate immediately?
Nothing initially, but once 1.0.0 is in the wild it will not be possible to
federate with 1.0.0 servers.
### What will happen if I do nothing at all?
If the admin takes no action at all, and remains on a Synapse < 0.99.0 then the
homeserver will be unable to federate with those who have implemented
.well-known. Then, as above, once the one-month upgrade window has expired the
homeserver will not be able to federate with any Synapse >= 1.0.0.
### When do I need a SRV record or .well-known URI?
If your homeserver listens on the default federation port (8448), and your
`server_name` points to the host that your homeserver runs on, you do not need an
SRV record or `.well-known/matrix/server` URI.
For instance, if you registered `example.com` and pointed its DNS A record at a
fresh Upcloud VPS or similar, you could install Synapse 0.99 on that host,
giving it a server_name of `example.com`, and it would automatically generate a
valid TLS certificate for you via Let's Encrypt and no SRV record or
`.well-known` URI would be needed.
This is the common case, although you can add an SRV record or
`.well-known/matrix/server` URI for completeness if you wish.
**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it.
In this case, you should see ["If you do have an SRV record
currently"](#if-you-do-have-an-srv-record-currently) above.
### Can I still use an SRV record?
Firstly, if you didn't need an SRV record before (because your server is
listening on port 8448 of your server_name), you certainly don't need one now:
the defaults are still the same.
If you previously had an SRV record, you can keep using it provided you are
able to give Synapse a TLS certificate corresponding to your server name. For
example, suppose you had the following SRV record, which directs matrix traffic
for example.com to matrix.example.com:443:
```
_matrix._tcp.example.com. IN SRV 10 5 443 matrix.example.com
```
In this case, Synapse must be given a certificate for example.com - or be
configured to acquire one from Let's Encrypt.
If you are unable to give Synapse a certificate for your server_name, you will
also need to use a .well-known URI instead. However, see also "I have created a
.well-known URI. Do I still need an SRV record?".
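To double-check what an existing SRV record currently points at, a short sketch
using the third-party `dnspython` package (version 2.x assumed; it is not part
of Synapse) looks like this:

```python
import dns.resolver  # third-party package: dnspython

# Look up the federation SRV record for the matrix domain.
answers = dns.resolver.resolve("_matrix._tcp.example.com", "SRV")
for record in answers:
    # For the example above this should print: 10 5 443 matrix.example.com.
    print(record.priority, record.weight, record.port, record.target)
```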
### I have created a .well-known URI. Do I still need an SRV record?
As of Synapse 0.99, Synapse will first check for the existence of a `.well-known`
URI and follow any delegation it suggests. It will only then check for the
existence of an SRV record.
That means that the SRV record will often be redundant. However, you should
remember that there may still be older versions of Synapse in the federation
which do not understand `.well-known` URIs, so if you removed your SRV record you
would no longer be able to federate with them.
It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
earlier will follow the SRV record (and not care about the invalid
certificate). Synapse 0.99 and later will follow the .well-known URI, with the
correct certificate chain.
### It used to work just fine, why are you breaking everything?
We have always wanted Matrix servers to be as easy to set up as possible, and
so back when we started federation in 2014 we didn't want admins to have to go
through the cumbersome process of buying a valid TLS certificate to run a
server. This was before Let's Encrypt came along and made getting a free and
valid TLS certificate straightforward. So instead, we adopted a system based on
[Perspectives](https://en.wikipedia.org/wiki/Convergence_(SSL)): an approach
where you check a set of "notary servers" (in practice, homeservers) to vouch
for the validity of a certificate rather than having it signed by a CA. As long
as enough different notaries agree on the certificate's validity, then it is
trusted.
However, in practice this has never worked properly. Most people only use the
default notary server (matrix.org), leading to inadvertent centralisation which
we want to eliminate. Meanwhile, we never implemented the full consensus
algorithm to query the servers participating in a room to determine consensus
on whether a given certificate is valid. This is fiddly to get right
(especially in face of sybil attacks), and we found ourselves questioning
whether it was worth the effort to finish the work and commit to maintaining a
secure certificate validation system as opposed to focusing on core Matrix
development.
Meanwhile, Let's Encrypt came along in 2016, and put the final nail in the
coffin of the Perspectives project (which was already pretty dead). So, the
Spec Core Team decided that a better approach would be to mandate valid TLS
certificates for federation alongside the rest of the Web. More details can be
found in
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).
This results in a breaking change, which is disruptive, but absolutely critical
for the security model. However, the existence of Let's Encrypt as a trivial
way to replace the old self-signed certificates with valid CA-signed ones helps
smooth things over massively, especially as Synapse can now automate Let's
Encrypt certificate generation if needed.
### Can I manage my own certificates rather than having Synapse renew certificates itself?
Yes, you are welcome to manage your certificates yourself. Synapse will only
attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS certificate present for
federation endpoints.
### Do you still recommend against using a reverse proxy on the federation port?
We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
reverse proxy.
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
Practically speaking, this is no longer necessary.
If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True`. In that case, the only reason Synapse needs the certificate is
to populate a legacy 'tls_fingerprints' field in the federation API. This is
ignored by Synapse 0.99.0 and later, and the only time pre-0.99 Synapses will
check it is when attempting to fetch the server keys - and generally this is
delegated via `matrix.org`, which is on 0.99.0.
However, there is a bug in Synapse 0.99.0
[4554](<https://github.com/matrix-org/synapse/issues/4554>) which prevents
Synapse from starting if you do not give it a TLS certificate. To work around
this, you can give it any TLS certificate at all. This will be fixed soon.
### Do I need the same certificate for the client and federation port?
No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.
### How do I tell Synapse to reload my keys/certificates after I replace them?
Synapse will reload the keys and certificates when it receives a SIGHUP - for
example `kill -HUP $(cat homeserver.pid)`. Alternatively, simply restart
Synapse, though this will result in downtime while it restarts.
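The same reload can be triggered from Python if that is more convenient,
assuming you know where the pid file lives (the path below is a placeholder;
use whatever `pid_file` is set to in your configuration):

```python
import os
import signal

# Read Synapse's pid from its pid file and send SIGHUP to ask it to
# reload its keys and certificates, as described above.
with open("homeserver.pid") as pid_file:
    pid = int(pid_file.read().strip())
os.kill(pid, signal.SIGHUP)
```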

View File

@@ -50,10 +50,8 @@ build the documentation with:
mdbook build
```
The rendered contents will be output to a new `book/` directory at the root of the repository. Please note that
index.html is not built by default; it is created by copying the file `welcome_and_overview.html` to `index.html`
during deployment. Thus, when running `mdbook serve` locally the book will initially show a 404 in place of the index
due to the above. Do not be alarmed!
The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can
browse the book by opening `book/index.html` in a web browser.
You can also have mdbook host the docs on a local webserver with hot-reload functionality via:

View File

@@ -13,24 +13,23 @@
# Upgrading
- [Upgrading between Synapse Versions](upgrade.md)
- [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)
# Usage
- [Federation](federate.md)
- [Configuration](usage/configuration/README.md)
- [Configuration Manual](usage/configuration/config_documentation.md)
- [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
- [Logging Sample Config File](usage/configuration/logging_sample_config.md)
- [Structured Logging](structured_logging.md)
- [Templates](templates.md)
- [User Authentication](usage/configuration/user_authentication/README.md)
- [Single-Sign On](usage/configuration/user_authentication/single_sign_on/README.md)
- [Single-Sign On]()
- [OpenID Connect](openid.md)
- [SAML](usage/configuration/user_authentication/single_sign_on/saml.md)
- [CAS](usage/configuration/user_authentication/single_sign_on/cas.md)
- [SAML]()
- [CAS]()
- [SSO Mapping Providers](sso_mapping_providers.md)
- [Password Auth Providers](password_auth_providers.md)
- [JSON Web Tokens](jwt.md)
- [Refresh Tokens](usage/configuration/user_authentication/refresh_tokens.md)
- [Registration Captcha](CAPTCHA_SETUP.md)
- [Application Services](application_services.md)
- [Server Notices](server_notices.md)
@@ -45,8 +44,6 @@
- [Presence router callbacks](modules/presence_router_callbacks.md)
- [Account validity callbacks](modules/account_validity_callbacks.md)
- [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
- [Background update controller callbacks](modules/background_update_controller_callbacks.md)
- [Account data callbacks](modules/account_data_callbacks.md)
- [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
- [Workers](workers.md)
- [Using `synctl` with Workers](synctl_workers.md)
@@ -54,7 +51,6 @@
- [Administration](usage/administration/README.md)
- [Admin API](usage/administration/admin_api/README.md)
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)
- [Delete Group](admin_api/delete_group.md)
- [Event Reports](admin_api/event_reports.md)
- [Media](admin_api/media_admin_api.md)
@@ -67,29 +63,20 @@
- [Statistics](admin_api/statistics.md)
- [Users](admin_api/user_admin_api.md)
- [Server Version](admin_api/version_api.md)
- [Federation](usage/administration/admin_api/federation.md)
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)
- [State Groups](usage/administration/state_groups.md)
- [Request log format](usage/administration/request_log.md)
- [Admin FAQ](usage/administration/admin_faq.md)
- [Scripts]()
# Development
- [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md)
- [Release Cycle](development/releases.md)
- [Git Usage](development/git.md)
- [Testing]()
- [Demo scripts](development/demo.md)
- [OpenTracing](opentracing.md)
- [Database Schemas](development/database_schema.md)
- [Experimental features](development/experimental_features.md)
- [Synapse Architecture]()
- [Cancellation](development/synapse_architecture/cancellation.md)
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [TCP Replication](tcp_replication.md)
@@ -106,4 +93,3 @@
# Other
- [Dependency Deprecation Policy](deprecation_policy.md)
- [Running Synapse on a Single-Board Computer](other/running_synapse_on_single_board_computers.md)

View File

@@ -4,9 +4,6 @@ This API allows a server administrator to manage the validity of an account. To
use it, you must enable the account validity feature (under
`account_validity`) in Synapse's configuration.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
## Renew account
This API extends the validity of an account by as much time as configured in the

View File

@@ -4,11 +4,11 @@ This API lets a server admin delete a local group. Doing so will kick all
users out of the group so that their clients will correctly handle the group
being deleted.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
POST /_synapse/admin/v1/delete_group/<group_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
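For illustration, calling this endpoint from Python might look like the sketch
below (it assumes the third-party `requests` package, a placeholder homeserver
URL, and a placeholder admin access token; the admin APIs accept the token in an
`Authorization` header like any other client-server request):

```python
import requests

ADMIN_TOKEN = "<admin access token>"     # placeholder
BASE_URL = "https://matrix.example.com"  # placeholder homeserver URL
group_id = "+group:example.com"          # the group to delete

# Deleting the group kicks all of its members, as described above.
resp = requests.post(
    f"{BASE_URL}/_synapse/admin/v1/delete_group/{group_id}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    timeout=30,
)
resp.raise_for_status()
```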

View File

@@ -2,13 +2,12 @@
This API returns information about reported events.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The api is:
```
GET /_synapse/admin/v1/event_reports?from=0&limit=10
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
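As with the other admin APIs, this can be scripted; the sketch below (assuming
the `requests` package, placeholder homeserver URL and admin token, and that the
report list is returned under an `event_reports` key) fetches the first ten
reports:

```python
import requests

ADMIN_TOKEN = "<admin access token>"     # placeholder
BASE_URL = "https://matrix.example.com"  # placeholder homeserver URL

resp = requests.get(
    f"{BASE_URL}/_synapse/admin/v1/event_reports",
    params={"from": 0, "limit": 10},
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    timeout=30,
)
resp.raise_for_status()
for report in resp.json().get("event_reports", []):
    print(report)
```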
It returns a JSON body like the following:
@@ -95,10 +94,12 @@ The api is:
```
GET /_synapse/admin/v1/event_reports/<report_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
It returns a JSON body like the following:
```json
```jsonc
{
"event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY",
"event_json": {
@@ -131,7 +132,7 @@ It returns a JSON body like the following:
},
"type": "m.room.message",
"unsigned": {
"age_ts": 1592291711430
"age_ts": 1592291711430,
}
},
"id": <report_id>,

View File

@@ -1,13 +1,24 @@
# Contents
- [Querying media](#querying-media)
* [List all media in a room](#list-all-media-in-a-room)
* [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
- [Quarantine media](#quarantine-media)
* [Quarantining media by ID](#quarantining-media-by-id)
* [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id)
* [Quarantining media in a room](#quarantining-media-in-a-room)
* [Quarantining all media of a user](#quarantining-all-media-of-a-user)
* [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
* [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined)
- [Delete local media](#delete-local-media)
* [Delete a specific local media](#delete-a-specific-local-media)
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
* [Delete media uploaded by a user](#delete-media-uploaded-by-a-user)
- [Purge Remote Media API](#purge-remote-media-api)
# Querying media
These APIs allow extracting media information from the homeserver.
Details about the format of the `media_id` and storage of the media in the file system
are documented under [media repository](../media_repository.md).
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
## List all media in a room
This API gets a list of known media in a room.
@@ -17,6 +28,8 @@ The API is:
```
GET /_synapse/admin/v1/room/<room_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
The API returns a JSON body like the following:
```json
@@ -289,7 +302,7 @@ POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
URL Parameters
* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in milliseconds.
All cached media that was last accessed before this timestamp will be removed.
Response:
@@ -304,5 +317,8 @@ The following fields are returned in the JSON response body:
* `deleted`: integer - The number of media items successfully deleted
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
If the user re-requests purged remote media, synapse will re-request the media
from the originating server.
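Because `before_ts` is a Unix timestamp in milliseconds it is easy to get wrong;
the sketch below (assuming `requests` plus placeholder homeserver URL and admin
token) purges remote media that has not been accessed for 30 days:

```python
import time

import requests

ADMIN_TOKEN = "<admin access token>"     # placeholder
BASE_URL = "https://matrix.example.com"  # placeholder homeserver URL

# Unix timestamp in milliseconds for "30 days ago".
before_ts = int((time.time() - 30 * 24 * 3600) * 1000)

resp = requests.post(
    f"{BASE_URL}/_synapse/admin/v1/purge_media_cache",
    params={"before_ts": before_ts},
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    timeout=300,
)
resp.raise_for_status()
print("deleted:", resp.json().get("deleted"))
```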

View File

@@ -10,15 +10,15 @@ paginate further back in the room from the point being purged from.
Note that Synapse requires at least one message in each room, so it will never
delete the last message in a room.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
deleted.)
@@ -27,7 +27,7 @@ Room state data (such as joins, leaves, topic) is always preserved.
To delete local message events as well, set `delete_local_events` in the body:
```json
```
{
"delete_local_events": true
}
@@ -57,6 +57,9 @@ It is possible to poll for updates on recent purges with a second API;
GET /_synapse/admin/v1/purge_history_status/<purge_id>
```
Again, you will need to authenticate by providing an `access_token` for a
server admin.
This API returns a JSON body like the following:
```json
@@ -67,8 +70,6 @@ This API returns a JSON body like the following:
The status will be one of `active`, `complete`, or `failed`.
If `status` is `failed` there will be a string `error` with the error message.
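Putting the purge and the status endpoint together, a polling loop might look
like the sketch below (assuming `requests`, placeholder homeserver URL and admin
token, and a `purge_id` obtained when the purge was started):

```python
import time

import requests

ADMIN_TOKEN = "<admin access token>"     # placeholder
BASE_URL = "https://matrix.example.com"  # placeholder homeserver URL
purge_id = "<purge id returned when the purge was started>"

# Poll the status endpoint until the purge finishes one way or the other.
while True:
    resp = requests.get(
        f"{BASE_URL}/_synapse/admin/v1/purge_history_status/{purge_id}",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        timeout=30,
    )
    resp.raise_for_status()
    status = resp.json()["status"]  # one of: active, complete, failed
    if status != "active":
        print("purge finished with status:", status)
        break
    time.sleep(5)
```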
## Reclaim disk space (Postgres)
To reclaim the disk space and return it to the operating system, you need to run

View File

@@ -5,9 +5,6 @@ to a room with a given `room_id_or_alias`. You can only modify the membership of
local users. The server administrator must be in the room and have permission to
invite users.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
## Parameters
The following parameters are available:
@@ -26,9 +23,12 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
Response:
```json
```
{
"room_id": "!636q39766251:server.com"
}

View File

@@ -1,12 +1,20 @@
# Contents
- [List Room API](#list-room-api)
- [Room Details API](#room-details-api)
- [Room Members API](#room-members-api)
- [Room State API](#room-state-api)
- [Delete Room API](#delete-room-api)
* [Undoing room shutdowns](#undoing-room-shutdowns)
- [Make Room Admin API](#make-room-admin-api)
- [Forward Extremities Admin API](#forward-extremities-admin-api)
- [Event Context API](#event-context-api)
# List Room API
The List Room admin API allows server admins to get a list of rooms on their
server. There are various parameters available that allow for filtering and
sorting the returned list. This API supports pagination.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
**Parameters**
The following query parameters are available:
@@ -30,14 +38,9 @@ The following query parameters are available:
- `history_visibility` - Rooms are ordered alphabetically by visibility of history of the room.
- `state_events` - Rooms are ordered by number of state events. Largest to smallest.
* `dir` - Direction of room order. Either `f` for forwards or `b` for backwards. Setting
this value to `b` will reverse the above sort order. Defaults to `f`.
* `search_term` - Filter rooms by their room name, canonical alias and room id.
Specifically, rooms are selected if the search term is contained in
- the room's name,
- the local part of the room's canonical alias, or
- the complete (local and server part) room's id (case sensitive).
Defaults to no filtering.
this value to `b` will reverse the above sort order. Defaults to `f`.
* `search_term` - Filter rooms by their room name. Search term can be contained in any
part of the room name. Defaults to no filtering.
**Response**
@@ -84,7 +87,7 @@ GET /_synapse/admin/v1/rooms
A response body like the following is returned:
```json
```jsonc
{
"rooms": [
{
@@ -167,7 +170,7 @@ GET /_synapse/admin/v1/rooms?order_by=size
A response body like the following is returned:
```json
```jsonc
{
"rooms": [
{
@@ -205,7 +208,7 @@ A response body like the following is returned:
}
],
"offset": 0,
"total_rooms": 150,
"total_rooms": 150
"next_token": 100
}
```
@@ -221,7 +224,7 @@ GET /_synapse/admin/v1/rooms?order_by=size&from=100
A response body like the following is returned:
```json
```jsonc
{
"rooms": [
{
@@ -375,86 +378,9 @@ A response body like the following is returned:
}
```
# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.
This API can be used to pre-emptively block a room, even if it's unknown to this
homeserver. Users will be prevented from joining a blocked room.
## Block or unblock a room
The API is:
```
PUT /_synapse/admin/v1/rooms/<room_id>/block
```
with a body of:
```json
{
"block": true
}
```
A response body like the following is returned:
```json
{
"block": true
}
```
**Parameters**
The following parameters should be set in the URL:
- `room_id` - The ID of the room.
The following JSON body parameters are available:
- `block` - If `true` the room will be blocked and if `false` the room will be unblocked.
**Response**
The following fields are possible in the JSON response body:
- `block` - A boolean. `true` if the room is blocked, otherwise `false`
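A minimal sketch of blocking a room from Python (assuming `requests` plus
placeholder homeserver URL and admin token):

```python
import requests

ADMIN_TOKEN = "<admin access token>"     # placeholder
BASE_URL = "https://matrix.example.com"  # placeholder homeserver URL
room_id = "!badroom:example.com"

# Block the room; send {"block": False} instead to unblock it.
resp = requests.put(
    f"{BASE_URL}/_synapse/admin/v1/rooms/{room_id}/block",
    json={"block": True},
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())  # e.g. {"block": true}
```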
## Get block status
The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/block
```
A response body like the following is returned:
```json
{
"block": true,
"user_id": "<user_id>"
}
```
**Parameters**
The following parameters should be set in the URL:
- `room_id` - The ID of the room.
**Response**
The following fields are possible in the JSON response body:
- `block` - A boolean. `true` if the room is blocked, otherwise `false`
- `user_id` - An optional string. If the room is blocked (`block` is `true`) shows
  the user who added the room to the blocking list. Otherwise it is not displayed.
# Delete Room API
The Delete Room admin API allows server admins to remove rooms from the server
The Delete Room admin API allows server admins to remove rooms from server
and block these rooms.
Shuts down a room. Moves all local users and room aliases automatically to a
@@ -465,30 +391,18 @@ The new room will be created with the user specified by the `new_room_user_id` p
as room administrator and will contain a message explaining what happened. Users invited
to the new room will have power level `-10` by default, and thus be unable to speak.
If `block` is `true`, users will be prevented from joining the old room.
This option can in [Version 1](#version-1-old-version) also be used to pre-emptively
block a room, even if it's unknown to this homeserver. In this case, the room will be
blocked, and no further action will be taken. If `block` is `false`, attempting to
delete an unknown room is invalid and will be rejected as a bad request.
If `block` is `True` it prevents new joins to the old room.
This API will remove all trace of the old room from your database after removing
all local users. If `purge` is `true` (the default), all traces of the old room will
be removed from your database after removing all local users. If you do not want
this to happen, set `purge` to `false`.
Depending on the amount of history being purged, a call to the API may take
Depending on the amount of history being purged a call to the API may take
several minutes or longer.
The local server will only have the power to move local user and room aliases to
the new room. Users on other servers will be unaffected.
## Version 1 (old version)
This version works synchronously. That means you only get the response once the server has
finished the action, which may take a long time. If you request the same action
a second time, and the server has not finished the first one, the second request will block.
This is fixed in version 2 of this API. The parameters are the same in both APIs.
This API will become deprecated in the future.
The API is:
```
@@ -507,6 +421,9 @@ with a body of:
}
```
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see [Admin API](../usage/administration/admin_api).
A response body like the following is returned:
```json
@@ -523,44 +440,6 @@ A response body like the following is returned:
}
```
The parameters and response values have the same format as
[version 2](#version-2-new-version) of the API.
## Version 2 (new version)
**Note**: This API is new, experimental and "subject to change".
This version works asynchronously, meaning you get the response from server immediately
while the server works on that task in background. You can then request the status of the action
to check if it has completed.
The API is:
```
DELETE /_synapse/admin/v2/rooms/<room_id>
```
with a body of:
```json
{
"new_room_user_id": "@someuser:example.com",
"room_name": "Content Violation Notification",
"message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service.",
"block": true,
"purge": true
}
```
The API starts the shut down and purge running, and returns immediately with a JSON body with
a purge id:
```json
{
"delete_id": "<opaque id>"
}
```
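For example, a script that starts the asynchronous deletion and then polls its
status until it completes could look like the sketch below (assuming `requests`
plus placeholder homeserver URL and admin token):

```python
import time

import requests

ADMIN_TOKEN = "<admin access token>"     # placeholder
BASE_URL = "https://matrix.example.com"  # placeholder homeserver URL
HEADERS = {"Authorization": f"Bearer {ADMIN_TOKEN}"}
room_id = "!badroom:example.com"

# Start the shutdown/purge; the call returns immediately with a delete_id.
resp = requests.delete(
    f"{BASE_URL}/_synapse/admin/v2/rooms/{room_id}",
    json={"block": True, "purge": True},
    headers=HEADERS,
    timeout=30,
)
resp.raise_for_status()
delete_id = resp.json()["delete_id"]

# Poll the status endpoint until the task completes or fails.
while True:
    status_resp = requests.get(
        f"{BASE_URL}/_synapse/admin/v2/rooms/delete_status/{delete_id}",
        headers=HEADERS,
        timeout=30,
    )
    status_resp.raise_for_status()
    status = status_resp.json()["status"]
    if status in ("complete", "failed"):
        print("deletion finished with status:", status)
        break
    time.sleep(5)
```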
**Parameters**
The following parameters should be set in the URL:
@@ -580,10 +459,8 @@ The following JSON body parameters are available:
`new_room_user_id` in the new room. Ideally this will clearly convey why the
original room was shut down. Defaults to `Sharing illegal content on this server
is not permitted and rooms in violation will be blocked.`
* `block` - Optional. If set to `true`, this room will be added to a blocking list,
preventing future attempts to join the room. Rooms can be blocked
even if they're not yet known to the homeserver (only with
[Version 1](#version-1-old-version) of the API). Defaults to `false`.
* `block` - Optional. If set to `true`, this room will be added to a blocking list, preventing
future attempts to join the room. Defaults to `false`.
* `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
Defaults to `true`.
* `force_purge` - Optional, and ignored unless `purge` is `true`. If set to `true`, it
@@ -593,124 +470,16 @@ The following JSON body parameters are available:
The JSON body must not be empty. The body must be at least `{}`.
## Status of deleting rooms
**Note**: This API is new, experimental and "subject to change".
It is possible to query the status of the background task for deleting rooms.
The status can be queried up to 24 hours after completion of the task,
or until Synapse is restarted (whichever happens first).
### Query by `room_id`
With this API you can get the status of all active deletion tasks, and all those completed in the last 24h,
for the given `room_id`.
The API is:
```
GET /_synapse/admin/v2/rooms/<room_id>/delete_status
```
A response body like the following is returned:
```json
{
"results": [
{
"delete_id": "delete_id1",
"status": "failed",
"error": "error message",
"shutdown_room": {
"kicked_users": [],
"failed_to_kick_users": [],
"local_aliases": [],
"new_room_id": null
}
}, {
"delete_id": "delete_id2",
"status": "purging",
"shutdown_room": {
"kicked_users": [
"@foobar:example.com"
],
"failed_to_kick_users": [],
"local_aliases": [
"#badroom:example.com",
"#evilsaloon:example.com"
],
"new_room_id": "!newroomid:example.com"
}
}
]
}
```
**Parameters**
The following parameters should be set in the URL:
* `room_id` - The ID of the room.
### Query by `delete_id`
With this API you can get the status of one specific task by `delete_id`.
The API is:
```
GET /_synapse/admin/v2/rooms/delete_status/<delete_id>
```
A response body like the following is returned:
```json
{
"status": "purging",
"shutdown_room": {
"kicked_users": [
"@foobar:example.com"
],
"failed_to_kick_users": [],
"local_aliases": [
"#badroom:example.com",
"#evilsaloon:example.com"
],
"new_room_id": "!newroomid:example.com"
}
}
```
**Parameters**
The following parameters should be set in the URL:
* `delete_id` - The ID for this delete.
### Response
**Response**
The following fields are returned in the JSON response body:
- `results` - An array of objects, each containing information about one task.
This field is omitted from the result when you query by `delete_id`.
Task objects contain the following fields:
- `delete_id` - The ID for this purge if you query by `room_id`.
- `status` - The status will be one of:
- `shutting_down` - The process is removing users from the room.
- `purging` - The process is purging the room and event data from database.
- `complete` - The process has completed successfully.
- `failed` - The process is aborted, an error has occurred.
- `error` - A string that shows an error message if `status` is `failed`.
Otherwise this field is hidden.
- `shutdown_room` - An object containing information about the result of shutting down the room.
  *Note:* The result is shown after the room members have been removed.
  The delete process may still be running; pay attention to the `status`.
- `kicked_users` - An array of users (`user_id`) that were kicked.
  - `failed_to_kick_users` - An array of users (`user_id`) that were not kicked.
- `local_aliases` - An array of strings representing the local aliases that were
migrated from the old room to the new.
- `new_room_id` - A string representing the room ID of the new room, or `null` if
no such room was created.
* `kicked_users` - An array of users (`user_id`) that were kicked.
* `failed_to_kick_users` - An array of users (`user_id`) that were not kicked.
* `local_aliases` - An array of strings representing the local aliases that were migrated from
the old room to the new.
* `new_room_id` - A string representing the room ID of the new room.
## Undoing room deletions
@@ -751,6 +520,16 @@ With all that being said, if you still want to try and recover the room:
4. If `new_room_user_id` was given, a 'Content Violation' room will have been
   created. Consider whether you want to delete that room.
## Deprecated endpoint
The previous deprecated API will be removed in a future release, it was:
```
POST /_synapse/admin/v1/rooms/<room_id>/delete
```
It behaves the same way as the current endpoint, except for the path and the method.
# Make Room Admin API
Grants another user the highest power available to a local user who is in the room.

View File

@@ -3,15 +3,15 @@
Returns information about all local media usage of users. The results can be
filtered by time and user.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
GET /_synapse/admin/v1/statistics/users/media
```
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
A response body like the following is returned:
```json

View File

@@ -1,8 +1,5 @@
# User Admin API
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
## Query User Account
This API returns information about a specific user account.
@@ -13,12 +10,14 @@ The api is:
GET /_synapse/admin/v2/users/<user_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
It returns a JSON body like the following:
```jsonc
```json
{
"name": "@user:example.com",
"displayname": "User", // can be null if not set
"displayname": "User",
"threepids": [
{
"medium": "email",
@@ -33,11 +32,11 @@ It returns a JSON body like the following:
"validated_at": 1586458409743
}
],
"avatar_url": "<avatar_url>", // can be null if not set
"is_guest": 0,
"avatar_url": "<avatar_url>",
"admin": 0,
"deactivated": 0,
"shadow_banned": 0,
"password_hash": "$2b$12$p9B4GkqYdRTPGD",
"creation_ts": 1560432506,
"appservice_id": null,
"consent_server_notice_sent": null,
@@ -104,6 +103,9 @@ with a body of:
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
Returns HTTP status code:
- `201` - When a new user object was created.
- `200` - When a user was modified.
@@ -126,8 +128,7 @@ Body parameters:
[Sample Configuration File](../usage/configuration/homeserver_sample_config.html)
section `sso` and `oidc_providers`.
- `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
in the homeserver configuration. Note that no error is raised if the provided
value is not in the homeserver configuration.
in homeserver configuration.
- `external_id` - string, user ID in the external identity provider.
- `avatar_url` - string, optional, must be a
[MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
@@ -154,6 +155,9 @@ By default, the response is ordered by ascending user ID.
GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -273,6 +277,9 @@ GET /_matrix/client/r0/admin/whois/<userId>
See also: [Client Server
API Whois](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
It returns a JSON body like the following:
```json
@@ -327,12 +334,15 @@ with a body of:
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
The erase parameter is optional and defaults to `false`.
An empty body may be passed for backwards compatibility.
The following actions are performed when deactivating a user:
- Try to unbind 3PIDs from the identity server
- Try to unpind 3PIDs from the identity server
- Remove all 3PIDs from the homeserver
- Delete all devices and E2EE keys
- Delete all access tokens
@@ -342,11 +352,6 @@ The following actions are performed when deactivating an user:
- Remove the user from the user directory
- Reject all pending invites
- Remove all account validity information related to the user
- Remove the arbitrary data store known as *account data*. For example, this includes:
- list of ignored users;
- push rules;
- secret storage keys; and
- cross-signing keys.
The following additional actions are performed during deactivation if `erase`
is set to `true`:
@@ -360,6 +365,7 @@ The following actions are **NOT** performed. The list may be incomplete.
- Remove mappings of SSO IDs
- [Delete media uploaded](#delete-media-uploaded-by-a-user) by user (included avatar images)
- Delete sent and received messages
- Delete E2E cross-signing keys
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
@@ -383,6 +389,9 @@ with a body of:
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
The parameter `new_password` is required.
The parameter `logout_devices` is optional and defaults to `true`.
@@ -395,6 +404,9 @@ The api is:
GET /_synapse/admin/v1/users/<user_id>/admin
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -422,6 +434,10 @@ with a body of:
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
## List room memberships of a user
Gets a list of all `room_id`s that a specific `user_id` is a member of.
@@ -432,6 +448,9 @@ The API is:
GET /_synapse/admin/v1/users/<user_id>/joined_rooms
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -461,90 +480,10 @@ The following fields are returned in the JSON response body:
- `joined_rooms` - An array of `room_id`.
- `total` - Number of rooms.
## Account Data
Gets information about account data for a specific `user_id`.
The API is:
```
GET /_synapse/admin/v1/users/<user_id>/accountdata
```
A response body like the following is returned:
```json
{
"account_data": {
"global": {
"m.secret_storage.key.LmIGHTg5W": {
"algorithm": "m.secret_storage.v1.aes-hmac-sha2",
"iv": "fwjNZatxg==",
"mac": "eWh9kNnLWZUNOgnc="
},
"im.vector.hide_profile": {
"hide_profile": true
},
"org.matrix.preview_urls": {
"disable": false
},
"im.vector.riot.breadcrumb_rooms": {
"rooms": [
"!LxcBDAsDUVAfJDEo:matrix.org",
"!MAhRxqasbItjOqxu:matrix.org"
]
},
"m.accepted_terms": {
"accepted": [
"https://example.org/somewhere/privacy-1.2-en.html",
"https://example.org/somewhere/terms-2.0-en.html"
]
},
"im.vector.setting.breadcrumbs": {
"recent_rooms": [
"!MAhRxqasbItqxuEt:matrix.org",
"!ZtSaPCawyWtxiImy:matrix.org"
]
}
},
"rooms": {
"!GUdfZSHUJibpiVqHYd:matrix.org": {
"m.fully_read": {
"event_id": "$156334540fYIhZ:matrix.org"
}
},
"!tOZwOOiqwCYQkLhV:matrix.org": {
"m.fully_read": {
"event_id": "$xjsIyp4_NaVl2yPvIZs_k1Jl8tsC_Sp23wjqXPno"
}
}
}
}
}
```
**Parameters**
The following parameters should be set in the URL:
- `user_id` - fully qualified: for example, `@user:server.com`.
**Response**
The following fields are returned in the JSON response body:
- `account_data` - A map containing the account data for the user
- `global` - A map containing the global account data for the user
- `rooms` - A map containing the account data per room for the user
## User media
### List media uploaded by a user
Gets a list of all local media that a specific `user_id` has created.
These are media that the user has uploaded themselves
([local media](../media_repository.md#local-media)), as well as
[URL preview images](../media_repository.md#url-previews) requested by the user if the
[feature is enabled](../development/url_previews.md).
By default, the response is ordered by descending creation date and ascending media ID.
The newest media is on top. You can change the order with parameters
`order_by` and `dir`.
@@ -555,6 +494,9 @@ The API is:
GET /_synapse/admin/v1/users/<user_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -641,9 +583,7 @@ The following fields are returned in the JSON response body:
Media objects contain the following fields:
- `created_ts` - integer - Timestamp when the content was uploaded in ms.
- `last_access_ts` - integer - Timestamp when the content was last accessed in ms.
- `media_id` - string - The id used to refer to the media. Details about the format
are documented under
[media repository](../media_repository.md).
- `media_id` - string - The id used to refer to the media.
- `media_length` - integer - Length of the media in bytes.
- `media_type` - string - The MIME-type of the media.
- `quarantined_by` - string - The user ID that initiated the quarantine request
@@ -671,6 +611,9 @@ The API is:
DELETE /_synapse/admin/v1/users/<user_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -743,6 +686,9 @@ The API is:
GET /_synapse/admin/v2/users/<user_id>/devices
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -804,10 +750,13 @@ POST /_synapse/admin/v2/users/<user_id>/delete_devices
"devices": [
"QBUAZIFURK",
"AUIECTSRND"
]
],
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
An empty JSON dict is returned.
**Parameters**
@@ -829,6 +778,9 @@ The API is:
GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -874,6 +826,9 @@ PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
An empty JSON dict is returned.
**Parameters**
@@ -900,6 +855,9 @@ DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
{}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
An empty JSON dict is returned.
**Parameters**
@@ -918,6 +876,9 @@ The API is:
GET /_synapse/admin/v1/users/<user_id>/pushers
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -987,7 +948,7 @@ The following fields are returned in the JSON response body:
See also the
[Client-Server API Spec on pushers](https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers).
## Controlling whether a user is shadow-banned
## Shadow-banning users
Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
A shadow-banned user receives successful responses to their client-server API requests,
@@ -1000,19 +961,16 @@ or broken behaviour for the client. A shadow-banned user will not receive any
notification and it is generally more appropriate to ban or kick abusive users.
A shadow-banned user will be unable to contact anyone on the server.
To shadow-ban a user the API is:
The API is:
```
POST /_synapse/admin/v1/users/<user_id>/shadow_ban
```
To un-shadow-ban a user the API is:
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
```
DELETE /_synapse/admin/v1/users/<user_id>/shadow_ban
```
An empty JSON dict is returned in both cases.
An empty JSON dict is returned.
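A short sketch of both calls from Python (assuming `requests` plus placeholder
homeserver URL and admin token):

```python
import requests

ADMIN_TOKEN = "<admin access token>"     # placeholder
BASE_URL = "https://matrix.example.com"  # placeholder homeserver URL
HEADERS = {"Authorization": f"Bearer {ADMIN_TOKEN}"}
user_id = "@abusive-user:example.com"    # placeholder user to moderate
url = f"{BASE_URL}/_synapse/admin/v1/users/{user_id}/shadow_ban"

# Shadow-ban the user...
requests.post(url, headers=HEADERS, timeout=30).raise_for_status()

# ...and later, lift the shadow-ban again.
requests.delete(url, headers=HEADERS, timeout=30).raise_for_status()
```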
**Parameters**
@@ -1034,6 +992,9 @@ The API is:
GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -1073,6 +1034,9 @@ The API is:
POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -1115,6 +1079,9 @@ The API is:
DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
An empty JSON dict is returned.
```json
@@ -1140,8 +1107,10 @@ This endpoint will work even if registration is disabled on the server, unlike
The API is:
```
GET /_synapse/admin/v1/username_available?username=$localpart
POST /_synapse/admin/v1/username_availabile?username=$localpart
```
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
The request and response format is the same as the [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)

View File

@@ -16,6 +16,6 @@ It returns a JSON body like the following:
```json
{
"server_version": "0.99.2rc1 (b=develop, abcdef123)",
"python_version": "3.7.8"
"python_version": "3.6.8"
}
```

View File

@@ -7,7 +7,7 @@
## Server to Server Stack
To use the server to server stack, homeservers should only need to
To use the server to server stack, home servers should only need to
interact with the Messaging layer.
The server to server side of things is designed into 4 distinct layers:
@@ -23,7 +23,7 @@ Server with a domain specific API.
1. **Messaging Layer**
This is what the rest of the homeserver hits to send messages, join rooms,
This is what the rest of the Home Server hits to send messages, join rooms,
etc. It also allows you to register callbacks for when it gets notified by
lower levels that e.g. a new message has been received.
@@ -45,7 +45,7 @@ Server with a domain specific API.
For incoming PDUs, it has to check the PDUs it references to see
if we have missed any. If we have, go and ask someone (another
homeserver) for it.
home server) for it.
3. **Transaction Layer**

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1 +0,0 @@
This directory contains changelogs for previous years.

View File

@@ -6,36 +6,54 @@ The Synapse codebase uses a number of code formatting tools in order to
quickly and automatically check for formatting (and sometimes logical)
errors in code.
The necessary tools are:
The necessary tools are detailed below.
- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
- [flake8](https://flake8.pycqa.org/en/latest/), which can spot common errors; and
- [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.
First install them with:
Install them with:
pip install -e ".[lint,mypy]"
```sh
pip install -e ".[lint,mypy]"
```
- **black**
The easiest way to run the lints is to invoke the linter script as follows.
The Synapse codebase uses [black](https://pypi.org/project/black/)
as an opinionated code formatter, ensuring all committed code is
properly formatted.
```sh
scripts-dev/lint.sh
```
Have `black` auto-format your code (it shouldn't change any
functionality) with:
black . --exclude="\.tox|build|env"
- **flake8**
`flake8` is a code checking tool. We require code to pass `flake8`
before being merged into the codebase.
Check all application and test code with:
flake8 synapse tests
- **isort**
`isort` ensures imports are nicely formatted, and can suggest and
auto-fix issues such as double-importing.
Auto-fix imports with:
isort -rc synapse tests
`-rc` means to recursively search the given directories.
It's worth noting that modern IDEs and text editors can run these tools
automatically on save. It may be worth looking into whether this
functionality is supported in your editor for a more convenient
development workflow. It is not, however, recommended to run `flake8` or `mypy`
on save as they take a while and can be very resource intensive.
development workflow. It is not, however, recommended to run `flake8` on
save as it takes a while and is very resource intensive.
## General rules
- **Naming**:
- Use `CamelCase` for class and type names
- Use underscores for `function_names` and `variable_names`.
- Use camel case for class and type names
- Use underscores for functions and variables.
- **Docstrings**: should follow the [google code
style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
See the
@@ -48,19 +66,15 @@ on save as they take a while and can be very resource intensive.
Example:
```python
from synapse.types import UserID
...
user_id = UserID(local, server)
```
from synapse.types import UserID
...
user_id = UserID(local, server)
is preferred over:
```python
from synapse import types
...
user_id = types.UserID(local, server)
```
from synapse import types
...
user_id = types.UserID(local, server)
(or any other variant).
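Tying the naming and docstring guidelines above together, here is a short illustrative snippet (not taken from the Synapse codebase) using `CamelCase` for the class name, underscores for functions and variables, and a Google-style docstring.
```python
class MessageCounter:
    """Counts messages sent by a single user.

    Attributes:
        user_id: The Matrix ID of the user being tracked.
    """

    def __init__(self, user_id: str) -> None:
        self.user_id = user_id
        self.message_count = 0

    def record_message(self, body: str) -> int:
        """Records one message and returns the running total.

        Args:
            body: The plaintext body of the message.

        Returns:
            The number of messages recorded so far.
        """
        if body:
            self.message_count += 1
        return self.message_count
```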
@@ -120,32 +134,30 @@ Some guidelines follow:
Example:
```yaml
## Frobnication ##
## Frobnication ##
# The frobnicator will ensure that all requests are fully frobnicated.
# To enable it, uncomment the following.
#
#frobnicator_enabled: true
# The frobnicator will ensure that all requests are fully frobnicated.
# To enable it, uncomment the following.
#
#frobnicator_enabled: true
# By default, the frobnicator will frobnicate with the default frobber.
# The following will make it use an alternative frobber.
#
#frobincator_frobber: special_frobber
# By default, the frobnicator will frobnicate with the default frobber.
# The following will make it use an alternative frobber.
#
#frobincator_frobber: special_frobber
# Settings for the frobber
#
frobber:
# frobbing speed. Defaults to 1.
#
#speed: 10
# Settings for the frobber
#
frobber:
# frobbing speed. Defaults to 1.
#
#speed: 10
# frobbing distance. Defaults to 1000.
#
#distance: 100
```
# frobbing distance. Defaults to 1000.
#
#distance: 100
Note that the sample configuration is generated from the synapse code
and is maintained by a script, `scripts-dev/generate_sample_config.sh`.
and is maintained by a script, `scripts-dev/generate_sample_config`.
Making sure that the output from this script matches the desired format
is left as an exercise for the reader!


@@ -99,7 +99,7 @@ construct URIs where users can give their consent.
see if an unauthenticated user is viewing the page. This is typically
wrapped around the form that would be used to actually agree to the document:
```html
```
{% if not public_version %}
<!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
<form method="post" action="consent">


@@ -1,8 +1,4 @@
# Delegation of incoming federation traffic
In the following documentation, we use the term `server_name` to refer to that setting
in your homeserver configuration file. It appears at the ends of user ids, and tells
other homeservers where they can find your server.
# Delegation
By default, other homeservers will expect to be able to reach yours via
your `server_name`, on port 8448. For example, if you set your `server_name`
@@ -16,21 +12,13 @@ to a different server and/or port (e.g. `synapse.example.com:443`).
## .well-known delegation
To use this method, you need to be able to configure the server at
`https://<server_name>` to serve a file at
`https://<server_name>/.well-known/matrix/server`. There are two ways to do this, shown below.
To use this method, you need to be able to alter the
`server_name` 's https server to serve the `/.well-known/matrix/server`
URL. Having an active server (with a valid TLS certificate) serving your
`server_name` domain is out of the scope of this documentation.
Note that the `.well-known` file is hosted on the default port for `https` (port 443).
### External server
For maximum flexibility, you need to configure an external server such as nginx, Apache
or HAProxy to serve the `https://<server_name>/.well-known/matrix/server` file. Setting
up such a server is out of the scope of this documentation, but note that it is often
possible to configure your [reverse proxy](reverse_proxy.md) for this.
The URL `https://<server_name>/.well-known/matrix/server` should be configured
to return a JSON structure containing the key `m.server` like this:
The URL `https://<server_name>/.well-known/matrix/server` should
return a JSON structure containing the key `m.server` like so:
```json
{
@@ -38,9 +26,8 @@ return a JSON structure containing the key `m.server` like this:
}
```
In our example (where we want federation traffic to be routed to
`https://synapse.example.com`, on port 443), this would mean that
`https://example.com/.well-known/matrix/server` should return:
In our example, this would mean that URL `https://example.com/.well-known/matrix/server`
should return:
```json
{
@@ -51,29 +38,16 @@ In our example (where we want federation traffic to be routed to
Note, specifying a port is optional. If no port is specified, then it defaults
to 8448.
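As a quick sanity check of the setup described above, the following sketch (an illustration only, assuming the `requests` library and the example.com values used here) fetches the `.well-known` file and prints where federation traffic will be routed.
```python
# Illustrative check of .well-known delegation for the example above.
import requests

server_name = "example.com"  # placeholder; use your own server_name
resp = requests.get(
    f"https://{server_name}/.well-known/matrix/server", timeout=10
)
resp.raise_for_status()

# If m.server omits a port, federation defaults to port 8448.
delegated = resp.json()["m.server"]
print(f"Federation traffic for {server_name} is delegated to {delegated}")
```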
### Serving a `.well-known/matrix/server` file with Synapse
If you are able to set up your domain so that `https://<server_name>` is routed to
Synapse (i.e., the only change needed is to direct federation traffic to port 443
instead of port 8448), then it is possible to configure Synapse to serve a suitable
`.well-known/matrix/server` file. To do so, add the following to your `homeserver.yaml`
file:
```yaml
serve_server_wellknown: true
```
**Note**: this *only* works if `https://<server_name>` is routed to Synapse, so is
generally not suitable if Synapse is hosted at a subdomain such as
`https://synapse.example.com`.
With .well-known delegation, federating servers will check for a valid TLS
certificate for the delegated hostname (in our example: `synapse.example.com`).
## SRV DNS record delegation
It is also possible to do delegation using a SRV DNS record. However, that is generally
not recommended, as it can be difficult to configure the TLS certificates correctly in
this case, and it offers little advantage over `.well-known` delegation.
It is also possible to do delegation using a SRV DNS record. However, that is
considered an advanced topic since it's a bit complex to set up, and `.well-known`
delegation is already enough in most cases.
However, if you really need it, you can find some documentation on what such a
However, if you really need it, you can find some documentation on how such a
record should look like and how Synapse will use it in [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).
@@ -94,9 +68,27 @@ wouldn't need any delegation set up.
domain `server_name` points to, you will need to let other servers know how to
find it using delegation.
### Should I use a reverse proxy for federation traffic?
### Do you still recommend against using a reverse proxy on the federation port?
Generally, using a reverse proxy for both the federation and client traffic is a good
idea, since it saves handling TLS traffic in Synapse. See
[the reverse proxy documentation](reverse_proxy.md) for information on setting up a
We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
reverse proxy.
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
This is no longer necessary. If you are using a reverse proxy for all of your
TLS traffic, then you can set `no_tls: True` in the Synapse config.
In that case, the only reason Synapse needs the certificate is to populate a legacy
`tls_fingerprints` field in the federation API. This is ignored by Synapse 0.99.0
and later, and the only time pre-0.99 Synapses will check it is when attempting to
fetch the server keys - and generally this is delegated via `matrix.org`, which
is running a modern version of Synapse.
### Do I need the same certificate for the client and federation port?
No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy.


@@ -14,8 +14,8 @@ i.e. when a version reaches End of Life Synapse will withdraw support for that
version in future releases.
Details on the upstream support life cycles for Python and PostgreSQL are
documented at [https://endoflife.date/python](https://endoflife.date/python) and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql).
documented at https://endoflife.date/python and
https://endoflife.date/postgresql.
Context


@@ -8,23 +8,23 @@ easy to run CAS implementation built on top of Django.
1. Create a new virtualenv: `python3 -m venv <your virtualenv>`
2. Activate your virtualenv: `source /path/to/your/virtualenv/bin/activate`
3. Install Django and django-mama-cas:
```sh
```
python -m pip install "django<3" "django-mama-cas==2.4.0"
```
4. Create a Django project in the current directory:
```sh
```
django-admin startproject cas_test .
```
5. Follow the [install directions](https://django-mama-cas.readthedocs.io/en/latest/installation.html#configuring) for django-mama-cas
6. Setup the SQLite database: `python manage.py migrate`
7. Create a user:
```sh
```
python manage.py createsuperuser
```
1. Use whatever you want as the username and password.
2. Leave the other fields blank.
8. Use the built-in Django test server to serve the CAS endpoints on port 8000:
```sh
```
python manage.py runserver
```


@@ -15,14 +15,7 @@ license - in our case, this is almost always Apache Software License v2 (see
# 2. What do I need?
If you are running Windows, the Windows Subsystem for Linux (WSL) is strongly
recommended for development. More information about WSL can be found at
<https://docs.microsoft.com/en-us/windows/wsl/install>. Running Synapse natively
on Windows is not officially supported.
The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://www.python.org/downloads/). Your Python also needs support for [virtual environments](https://docs.python.org/3/library/venv.html). This is usually built-in, but some Linux distributions like Debian and Ubuntu split it out into its own package. Running `sudo apt install python3-venv` should be enough.
Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.
The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
@@ -48,28 +41,24 @@ can find many good git tutorials on the web.
# 4. Install the dependencies
Synapse uses the [poetry](https://python-poetry.org/) project to manage its dependencies
and development environment. Once you have installed Python 3 and added the
source, you should install `poetry`.
Of their installation methods, we recommend
[installing `poetry` using `pipx`](https://python-poetry.org/docs/#installing-with-pipx),
## Under Unix (macOS, Linux, BSD, ...)
```shell
pip install --user pipx
pipx install poetry
```
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.
Next, open a terminal and install dependencies as follows:
Once you have installed Python 3 and added the source, please open a terminal and
setup a *virtualenv*, as follows:
```sh
cd path/where/you/have/cloned/the/repository
poetry install --extras all
python3 -m venv ./env
source ./env/bin/activate
pip install -e ".[all,dev]"
pip install tox
```
This will install the runtime and developer dependencies for the project.
This will install the developer dependencies for the project.
## Under Windows
TBD
# 5. Get in touch.
@@ -126,10 +115,11 @@ The linters look at your code and do two things:
- ensure that your code follows the coding style adopted by the project;
- catch a number of errors in your code.
The linter takes no time at all to run as soon as you've [downloaded the dependencies](#4-install-the-dependencies).
They're pretty fast, don't hesitate!
```sh
poetry run ./scripts-dev/lint.sh
source ./env/bin/activate
./scripts-dev/lint.sh
```
Note that this script *will modify your files* to fix styling errors.
@@ -139,13 +129,15 @@ If you wish to restrict the linters to only the files changed since the last com
(much faster!), you can instead run:
```sh
poetry run ./scripts-dev/lint.sh -d
source ./env/bin/activate
./scripts-dev/lint.sh -d
```
Or if you know exactly which files you wish to lint, you can instead run:
```sh
poetry run ./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
source ./env/bin/activate
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```
## Run the unit tests (Twisted trial).
@@ -154,14 +146,16 @@ The unit tests run parts of Synapse, including your changes, to see if anything
was broken. They are slower than the linters but will typically catch more errors.
```sh
poetry run trial tests
source ./env/bin/activate
trial tests
```
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
```sh
poetry run trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
source ./env/bin/activate
trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
```
If your tests fail, you may wish to look at the logs (the default log level is `ERROR`):
@@ -173,30 +167,9 @@ less _trial_temp/test.log
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:
```sh
SYNAPSE_TEST_LOG_LEVEL=DEBUG poetry run trial tests
SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
```
By default, tests will use an in-memory SQLite database for test data. For additional
help with debugging, one can use an on-disk SQLite database file instead, in order to
review database state during and after running tests. This can be done by setting
the `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable. Doing so will cause the
database state to be stored in a file named `test.db` under the trial process'
working directory. Typically, this ends up being `_trial_temp/test.db`. For example:
```sh
SYNAPSE_TEST_PERSIST_SQLITE_DB=1 poetry run trial tests
```
The database file can then be inspected with:
```sh
sqlite3 _trial_temp/test.db
```
Note that the database file is cleared at the beginning of each test run. Thus it
will always only contain the data generated by the *last run test*. Though generally
when debugging, one is only running a single test anyway.
### Running tests under PostgreSQL
Invoking `trial` as above will use an in-memory SQLite database. This is great for
@@ -206,39 +179,13 @@ This means that we need to run our unit tests against PostgreSQL too. Our CI doe
this automatically for pull requests and release candidates, but it's sometimes
useful to reproduce this locally.
#### Using Docker
The easiest way to do so is to run Postgres via a docker container. In one
terminal:
```shell
docker run --rm -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgress -p 5432:5432 postgres:14
```
If you see an error like
```
docker: Error response from daemon: driver failed programming external connectivity on endpoint nice_ride (b57bbe2e251b70015518d00c9981e8cb8346b5c785250341a6c53e3c899875f1): Error starting userland proxy: listen tcp4 0.0.0.0:5432: bind: address already in use.
```
then something is already bound to port 5432. You're probably already running postgres locally.
Once you have a postgres server running, invoke `trial` in a second terminal:
```shell
SYNAPSE_POSTGRES=1 SYNAPSE_POSTGRES_HOST=127.0.0.1 SYNAPSE_POSTGRES_USER=postgres SYNAPSE_POSTGRES_PASSWORD=mysecretpassword poetry run trial tests
```
#### Using an existing Postgres installation
If you have postgres already installed on your system, you can run `trial` with the
To do so, [configure Postgres](../postgres.md) and run `trial` with the
following environment variables matching your configuration:
- `SYNAPSE_POSTGRES` to anything nonempty
- `SYNAPSE_POSTGRES_HOST` (optional if it's the default: UNIX socket)
- `SYNAPSE_POSTGRES_PORT` (optional if it's the default: 5432)
- `SYNAPSE_POSTGRES_USER` (optional if using a UNIX socket)
- `SYNAPSE_POSTGRES_PASSWORD` (optional if using a UNIX socket)
- `SYNAPSE_POSTGRES_HOST`
- `SYNAPSE_POSTGRES_USER`
- `SYNAPSE_POSTGRES_PASSWORD`
For example:
@@ -250,12 +197,26 @@ export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
trial
```
You don't need to specify the host, user, port or password if your Postgres
server is set to authenticate you over the UNIX socket (i.e. if the `psql` command
works without further arguments).
#### Prebuilt container
Your Postgres account needs to be able to create databases; see the postgres
docs for [`ALTER ROLE`](https://www.postgresql.org/docs/current/sql-alterrole.html).
Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
Docker container to set up PostgreSQL and run our tests for us. To do so, run
```shell
scripts-dev/test_postgresql.sh
```
Any extra arguments to the script will be passed to `tox` and then to `trial`,
so we can run a specific test in this container with e.g.
```shell
scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
```
The container creates a folder in your Synapse checkout called
`.tox-pg-container` and uses this as a tox environment. The output of any
`trial` runs goes into `_trial_temp` in your synapse source directory — the same
as running `trial` directly on your host machine.
## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
@@ -270,14 +231,8 @@ configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
This configuration should generally cover your needs.
- To run with Postgres, supply the `-e POSTGRES=1 -e MULTI_POSTGRES=1` environment flags.
- To run with Synapse in worker mode, supply the `-e WORKERS=1 -e REDIS=1` environment flags (in addition to the Postgres flags).
For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).
@@ -295,13 +250,13 @@ COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:
```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory
```
To run a specific test, you can specify the whole name structure:
```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order
```
@@ -422,8 +377,8 @@ same lightweight approach that the Linux Kernel
[submitting patches process](
https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin),
[Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO ([Developer Certificate of Origin](http://developercertificate.org/)).
This is a simple declaration that you wrote
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix:
```
@@ -480,17 +435,6 @@ Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
### Private Sign off
If you would like to provide your legal name privately to the Matrix.org
Foundation (instead of in a public commit or comment), you can do so
by emailing your legal name and a link to the pull request to
[dco@matrix.org](mailto:dco@matrix.org?subject=Private%20sign%20off).
It helps to include "sign off" or similar in the subject line. You will then
be instructed further.
Once private sign off is complete, doing so for future contributions will not
be required.
# 10. Turn feedback into better code.


@@ -89,67 +89,11 @@ To do so, use `scripts-dev/make_full_schema.sh`. This will produce new
Ensure postgres is installed, then run:
```sh
./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/
```
./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/
NB at the time of writing, this script predates the split into separate `state`/`main`
databases so will require updates to handle that correctly.
## Delta files
Delta files define the steps required to upgrade the database from an earlier version.
They can be written as either a file containing a series of SQL statements, or a Python
module.
Synapse remembers which delta files it has applied to a database (they are stored in the
`applied_schema_deltas` table) and will not re-apply them (even if a given file is
subsequently updated).
Delta files should be placed in a directory named `synapse/storage/schema/<database>/delta/<version>/`.
They are applied in alphanumeric order, so by convention the first two characters
of the filename should be an integer such as `01`, to put the file in the right order.
### SQL delta files
These should be named `*.sql`, or — for changes which should only be applied for a
given database engine — `*.sql.postgres` or `*.sql.sqlite`. For example, a delta which
adds a new column to the `foo` table might be called `01add_bar_to_foo.sql`.
Note that our SQL parser is a bit simple - it understands comments (`--` and `/*...*/`),
but complex statements which require a `;` in the middle of them (such as `CREATE
TRIGGER`) are beyond it and you'll have to use a Python delta file.
### Python delta files
For more flexibility, a delta file can take the form of a python module. These should
be named `*.py`. Note that database-engine-specific modules are not supported here;
instead you can write `if isinstance(database_engine, PostgresEngine)` or similar.
A Python delta module should define either or both of the following functions:
```python
import synapse.config.homeserver
import synapse.storage.engines
import synapse.storage.types
def run_create(
cur: synapse.storage.types.Cursor,
database_engine: synapse.storage.engines.BaseDatabaseEngine,
) -> None:
"""Called whenever an existing or new database is to be upgraded"""
...
def run_upgrade(
cur: synapse.storage.types.Cursor,
database_engine: synapse.storage.engines.BaseDatabaseEngine,
config: synapse.config.homeserver.HomeServerConfig,
) -> None:
"""Called whenever an existing database is to be upgraded."""
...
```
## Boolean columns
Boolean columns require special treatment, since SQLite treats booleans the
@@ -158,9 +102,9 @@ same as integers.
There are three separate aspects to this:
* Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
`synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
the integer value from SQLite to a boolean before writing the value to the
postgres database.
`scripts/synapse_port_db`. This tells the port script to cast the integer
value from SQLite to a boolean before writing the value to the postgres
database.
* Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not


@@ -1,42 +0,0 @@
# Synapse demo setup
**DO NOT USE THESE DEMO SERVERS IN PRODUCTION**
Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).
The demo setup allows running three federation Synapse servers, with server
names `localhost:8480`, `localhost:8481`, and `localhost:8482`.
You can access them via any Matrix client over HTTP at `localhost:8080`,
`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
`localhost:8481`, and `localhost:8482`.
To enable the servers to communicate, self-signed SSL certificates are generated
and the servers are configured in a highly insecure way, including:
* Not checking certificates over federation.
* Not verifying keys.
The servers are configured to store their data under `demo/8080`, `demo/8081`, and
`demo/8082`. This includes configuration, logs, SQLite databases, and media.
Note that when joining a public room on a different homeserver via "#foo:bar.net",
then you are (in the current implementation) joining a room with room_id "foo".
This means that it won't work if your homeserver already has a room with that
name.
## Using the demo scripts
There are three main scripts with straightforward purposes:
* `start.sh` will start the Synapse servers, generating any missing configuration.
* This accepts a single parameter `--no-rate-limit` to "disable" rate limits
(they actually still exist, but are very high).
* `stop.sh` will stop the Synapse servers.
* `clean.sh` will delete the configuration, databases, log files, etc.
To start a completely new set of servers, run:
```sh
./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
```


@@ -1,239 +0,0 @@
# Managing dependencies with Poetry
This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/).
# Background
Synapse uses a variety of third-party Python packages to function as a homeserver.
Some of these are direct dependencies, listed in `pyproject.toml` under the
`[tool.poetry.dependencies]` section. The rest are transitive dependencies (the
things that our direct dependencies themselves depend on, and so on recursively.)
We maintain a locked list of all our dependencies (transitive included) so that
we can track exactly which version of each dependency appears in a given release.
See [here](https://github.com/matrix-org/synapse/issues/11537#issue-1074469665)
for discussion of why we wanted this for Synapse. We chose to use
[`poetry`](https://python-poetry.org/) to manage this locked list; see
[this comment](https://github.com/matrix-org/synapse/issues/11537#issuecomment-1015975819)
for the reasoning.
The locked dependencies get included in our "self-contained" releases: namely,
our docker images and our debian packages. We also use the locked dependencies
in development and our continuous integration.
Separately, our "broad" dependencies—the version ranges specified in
`pyproject.toml`—are included as metadata in our "sdists" and "wheels" [uploaded
to PyPI](https://pypi.org/project/matrix-synapse). Installing from PyPI or from
the Synapse source tree directly will _not_ use the locked dependencies; instead,
they'll pull in the latest version of each package available at install time.
## Example dependency
An example may help. We have a broad dependency on
[`phonenumbers`](https://pypi.org/project/phonenumbers/), as declared in
this snippet from pyproject.toml [as of Synapse 1.57](
https://github.com/matrix-org/synapse/blob/release-v1.57/pyproject.toml#L133
):
```toml
[tool.poetry.dependencies]
# ...
phonenumbers = ">=8.2.0"
```
In our lockfile this is
[pinned]( https://github.com/matrix-org/synapse/blob/dfc7646504cef3e4ff396c36089e1c6f1b1634de/poetry.lock#L679-L685)
to version 8.12.44, even though
[newer versions are available](https://pypi.org/project/phonenumbers/#history).
```toml
[[package]]
name = "phonenumbers"
version = "8.12.44"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
python-versions = "*"
```
The lockfile also includes a
[cryptographic checksum](https://github.com/matrix-org/synapse/blob/release-v1.57/poetry.lock#L2178-L2181)
of the sdists and wheels provided for this version of `phonenumbers`.
```toml
[metadata.files]
# ...
phonenumbers = [
{file = "phonenumbers-8.12.44-py2.py3-none-any.whl", hash = "sha256:cc1299cf37b309ecab6214297663ab86cb3d64ae37fd5b88e904fe7983a874a6"},
{file = "phonenumbers-8.12.44.tar.gz", hash = "sha256:26cfd0257d1704fe2f88caff2caabb70d16a877b1e65b6aae51f9fbbe10aa8ce"},
]
```
We can see this pinned version inside the docker image for that release:
```
$ docker pull matrixdotorg/synapse:v1.57.0
...
$ docker run --entrypoint pip matrixdotorg/synapse:v1.57.0 show phonenumbers
Name: phonenumbers
Version: 8.12.44
Summary: Python version of Google's common library for parsing, formatting, storing and validating international phone numbers.
Home-page: https://github.com/daviddrysdale/python-phonenumbers
Author: David Drysdale
Author-email: dmd@lurklurk.org
License: Apache License 2.0
Location: /usr/local/lib/python3.9/site-packages
Requires:
Required-by: matrix-synapse
```
Whereas the wheel metadata just contains the broad dependencies:
```
$ cd /tmp
$ wget https://files.pythonhosted.org/packages/ca/5e/d722d572cc5b3092402b783d6b7185901b444427633bd8a6b00ea0dd41b7/matrix_synapse-1.57.0rc1-py3-none-any.whl
...
$ unzip -c matrix_synapse-1.57.0rc1-py3-none-any.whl matrix_synapse-1.57.0rc1.dist-info/METADATA | grep phonenumbers
Requires-Dist: phonenumbers (>=8.2.0)
```
# Tooling recommendation: direnv
[`direnv`](https://direnv.net/) is a tool for activating environments in your
shell inside a given directory. Its support for poetry is unofficial (a
community wiki recipe only), but works solidly in our experience. We thoroughly
recommend it for daily use. To use it:
1. [Install `direnv`](https://direnv.net/docs/installation.html) - it's likely
packaged for your system already.
2. Teach direnv about poetry. The [shell config here](https://github.com/direnv/direnv/wiki/Python#poetry)
needs to be added to `~/.config/direnv/direnvrc` (or more generally `$XDG_CONFIG_HOME/direnv/direnvrc`).
3. Mark the synapse checkout as a poetry project: `echo layout poetry > .envrc`.
4. Convince yourself that you trust this `.envrc` configuration and project.
Then formally confirm this to `direnv` by running `direnv allow`.
Then whenever you navigate to the synapse checkout, you should be able to run
e.g. `mypy` instead of `poetry run mypy`; `python` instead of
`poetry run python`; and your shell commands will automatically run in the
context of poetry's venv, without having to run `poetry shell` beforehand.
# How do I...
## ...reset my venv to the locked environment?
```shell
poetry install --extras all --remove-untracked
```
## ...run a command in the `poetry` virtualenv?
Use `poetry run cmd args` when you need the python virtualenv context.
To avoid typing `poetry run` all the time, you can run `poetry shell`
to start a new shell in the poetry virtualenv context. Within `poetry shell`,
`python`, `pip`, `mypy`, `trial`, etc. are all run inside the project virtualenv
and isolated from the rest of the system.
Roughly speaking, the translation from a traditional virtualenv is:
- `env/bin/activate` -> `poetry shell`, and
- `deactivate` -> close the terminal (Ctrl-D, `exit`, etc.)
See also the direnv recommendation above, which makes `poetry run` and
`poetry shell` unnecessary.
## ...inspect the `poetry` virtualenv?
Some suggestions:
```shell
# Current env only
poetry env info
# All envs: this allows you to have e.g. a poetry managed venv for Python 3.7,
# and another for Python 3.10.
poetry env list --full-path
poetry run pip list
```
Note that `poetry show` describes the abstract *lock file* rather than your
on-disk environment. With that said, `poetry show --tree` can sometimes be
useful.
## ...add a new dependency?
Either:
- manually update `pyproject.toml`; then `poetry lock --no-update`; or else
- `poetry add packagename`. See `poetry add --help`; note the `--dev`,
`--extras` and `--optional` flags in particular.
- **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39).
Include the updated `pyproject.toml` and `poetry.lock` files in your commit.
## ...remove a dependency?
This is not done often and is untested, but
```shell
poetry remove packagename
```
ought to do the trick. Alternatively, manually update `pyproject.toml` and
`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
files in your commit.
## ...update the version range for an existing dependency?
Best done by manually editing `pyproject.toml`, then `poetry lock --no-update`.
Include the updated `pyproject.toml` and `poetry.lock` in your commit.
## ...update a dependency in the locked environment?
Use
```shell
poetry update packagename
```
to use the latest version of `packagename` in the locked environment, without
affecting the broad dependencies listed in the wheel.
There doesn't seem to be a way to do this whilst locking a _specific_ version of
`packagename`. We can work around this (crudely) as follows:
```shell
poetry add packagename==1.2.3
# This should update poetry.lock.
# Now undo the changes to pyproject.toml. For example
# git restore pyproject.toml
# Get poetry to recompute the content-hash of pyproject.toml without changing
# the locked package versions.
poetry lock --no-update
```
Either way, include the updated `poetry.lock` file in your commit.
## ...export a `requirements.txt` file?
```shell
poetry export --extras all
```
Be wary of bugs in `poetry export` and `pip install -r requirements.txt`.
Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may
be required.
## ...build a test wheel?
I usually use
```shell
poetry run pip install build && poetry run python -m build
```
because [`build`](https://github.com/pypa/build) is a standardish tool which
doesn't require poetry. (It's what we use in CI too). However, you could try
`poetry build` too.


@@ -1,37 +0,0 @@
# Synapse Release Cycle
Releases of Synapse follow a two week release cycle with new releases usually
occurring on Tuesdays:
* Day 0: Synapse `N - 1` is released.
* Day 7: Synapse `N` release candidate 1 is released.
* Days 7 - 13: Synapse `N` release candidates 2+ are released, if bugs are found.
* Day 14: Synapse `N` is released.
Note that this schedule might be modified depending on the availability of the
Synapse team, e.g. releases may be skipped to avoid holidays.
Release announcements can be found in the
[release category of the Matrix blog](https://matrix.org/blog/category/releases).
## Bugfix releases
If a bug is found after release that is deemed severe enough (by a combination
of the impacted users and the impact on those users) then a bugfix release may
be issued. This may be at any point in the release cycle.
## Security releases
Security fixes will sometimes be backported to the previous version and released
immediately before the next release candidate. An example of this might be:
* Day 0: Synapse N - 1 is released.
* Day 7: Synapse (N - 1).1 is released as Synapse N - 1 + the security fix.
* Day 7: Synapse N release candidate 1 is released (including the security fix).
Depending on the impact and complexity of security fixes, multiple fixes might
be held to be released together.
In some cases, a pre-disclosure of a security release will be issued as a notice
to Synapse operators that there is an upcoming security release. These can be
found in the [security category of the Matrix blog](https://matrix.org/blog/category/security).


@@ -30,73 +30,39 @@ rather than skipping any that arrived late; whereas if you're looking at a
historical section of timeline (i.e. `/messages`), you want to see the best
representation of the state of the room as others were seeing it at the time.
## Outliers
We mark an event as an `outlier` when we haven't figured out the state for the
room at that point in the DAG yet. They are "floating" events that we haven't
yet correlated to the DAG.
Outliers typically arise when we fetch the auth chain or state for a given
event. When that happens, we just grab the events in the state/auth chain,
without calculating the state at those events, or backfilling their
`prev_events`. Since we don't have the state at any events fetched in that
way, we mark them as outliers.
So, typically, we won't have the `prev_events` of an `outlier` in the database,
(though it's entirely possible that we *might* have them for some other
reason). Other things that make outliers different from regular events:
* We don't have state for them, so there should be no entry in
`event_to_state_groups` for an outlier. (In practice this isn't always
the case, though I'm not sure why: see https://github.com/matrix-org/synapse/issues/12201).
* We don't record entries for them in the `event_edges`,
`event_forward_extremeties` or `event_backward_extremities` tables.
Since outliers are not tied into the DAG, they do not normally form part of the
timeline sent down to clients via `/sync` or `/messages`; however there is an
exception:
### Out-of-band membership events
A special case of outlier events are some membership events for federated rooms
that we aren't full members of. For example:
* invites received over federation, before we join the room
* *rejections* for said invites
* knock events for rooms that we would like to join but have not yet joined.
In all the above cases, we don't have the state for the room, which is why they
are treated as outliers. They are a bit special though, in that they are
proactively sent to clients via `/sync`.
## Forward extremity
Most-recent-in-time events in the DAG which are not referenced by any other
events' `prev_events` yet. (In this definition, outliers, rejected events, and
soft-failed events don't count.)
Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.
The forward extremities of a room (or at least, a subset of them, if there are
more than ten) are used as the `prev_events` when the next event is sent.
The forward extremities of a room are used as the `prev_events` when the next event is sent.
The "current state" of a room (ie: the state which would be used if we
generated a new event) is, therefore, the resolution of the room states
at each of the forward extremities.
## Backward extremity
## Backwards extremity
The current marker of where we have backfilled up to and will generally be the
`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
backfilling history.
oldest-in-time events we know of in the DAG.
Note that, unlike forward extremities, we typically don't have any backward
extremity events themselves in the database - or, if we do, they will be "outliers" (see
above). Either way, we don't expect to have the room state at a backward extremity.
This is an event where we haven't fetched all of the `prev_events` for.
Once we have fetched all of its `prev_events`, it's unmarked as a backwards
extremity (although we may have formed new backwards extremities from the prev
events during the backfilling process).
## Outliers
We mark an event as an `outlier` when we haven't figured out the state for the
room at that point in the DAG yet.
We won't *necessarily* have the `prev_events` of an `outlier` in the database,
but it's entirely possible that we *might*. The status of whether we have all of
the `prev_events` is marked as a [backwards extremity](#backwards-extremity).
For example, when we fetch the event auth chain or state for a given event, we
mark all of those claimed auth events as outliers because we haven't done the
state calculation ourself.
When we persist a non-outlier event, if it was previously a backward extremity,
we clear it as a backward extremity and set all of its `prev_events` as the new
backward extremities if they aren't already persisted as non-outliers. This
therefore keeps the backward extremities up-to-date.
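As a rough illustration of the bookkeeping described in the previous paragraph, the sketch below (not Synapse's actual implementation; it uses plain sets in place of the database tables) shows how persisting a non-outlier event updates the backward extremities.
```python
# Illustrative sketch only: backward-extremity bookkeeping with plain sets.
from typing import List, Set

backward_extremities: Set[str] = set()
persisted_non_outliers: Set[str] = set()

def persist_non_outlier_event(event_id: str, prev_events: List[str]) -> None:
    persisted_non_outliers.add(event_id)
    # The event is no longer a backward extremity once we have persisted it.
    backward_extremities.discard(event_id)
    # Any prev_event we do not yet hold as a non-outlier becomes a new
    # backward extremity, marking where backfill should continue from.
    for prev_id in prev_events:
        if prev_id not in persisted_non_outliers:
            backward_extremities.add(prev_id)
```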
## State groups


@@ -15,7 +15,7 @@ To make Synapse (and therefore Element) use it:
sp_config:
allow_unknown_attributes: true # Works around a bug with AVA Hashes: https://github.com/IdentityPython/pysaml2/issues/388
metadata:
local: ["samling.xml"]
local: ["samling.xml"]
```
5. Ensure that your `homeserver.yaml` has a setting for `public_baseurl`:
```yaml


@@ -1,392 +0,0 @@
# Cancellation
Sometimes, requests take a long time to service and clients disconnect
before Synapse produces a response. To avoid wasting resources, Synapse
can cancel request processing for select endpoints marked with the
`@cancellable` decorator.
Synapse makes use of Twisted's `Deferred.cancel()` feature to make
cancellation work. The `@cancellable` decorator does nothing by itself
and merely acts as a flag, signalling to developers and other code alike
that a method can be cancelled.
## Enabling cancellation for an endpoint
1. Check that the endpoint method, and any `async` functions in its call
tree handle cancellation correctly. See
[Handling cancellation correctly](#handling-cancellation-correctly)
for a list of things to look out for.
2. Add the `@cancellable` decorator to the `on_GET/POST/PUT/DELETE`
method. It's not recommended to make non-`GET` methods cancellable,
since cancellation midway through some database updates is less
likely to be handled correctly.
## Mechanics
There are two stages to cancellation: downward propagation of a
`cancel()` call, followed by upwards propagation of a `CancelledError`
out of a blocked `await`.
Both Twisted and asyncio have a cancellation mechanism.
| | Method | Exception | Exception inherits from |
|---------------|---------------------|-----------------------------------------|-------------------------|
| Twisted | `Deferred.cancel()` | `twisted.internet.defer.CancelledError` | `Exception` (!) |
| asyncio | `Task.cancel()` | `asyncio.CancelledError` | `BaseException` |
### Deferred.cancel()
When Synapse starts handling a request, it runs the async method
responsible for handling it using `defer.ensureDeferred`, which returns
a `Deferred`. For example:
```python
def do_something() -> Deferred[None]:
...
@cancellable
async def on_GET() -> Tuple[int, JsonDict]:
d = make_deferred_yieldable(do_something())
await d
return 200, {}
request = defer.ensureDeferred(on_GET())
```
When a client disconnects early, Synapse checks for the presence of the
`@cancellable` decorator on `on_GET`. Since `on_GET` is cancellable,
`Deferred.cancel()` is called on the `Deferred` from
`defer.ensureDeferred`, ie. `request`. Twisted knows which `Deferred`
`request` is waiting on and passes the `cancel()` call on to `d`.
The `Deferred` being waited on, `d`, may have its own handling for
`cancel()` and pass the call on to other `Deferred`s.
Eventually, a `Deferred` handles the `cancel()` call by resolving itself
with a `CancelledError`.
### CancelledError
The `CancelledError` gets raised out of the `await` and bubbles up, as
per normal Python exception handling.
## Handling cancellation correctly
In general, when writing code that might be subject to cancellation, two
things must be considered:
* The effect of `CancelledError`s raised out of `await`s.
* The effect of `Deferred`s being `cancel()`ed.
Examples of code that handles cancellation incorrectly include:
* `try-except` blocks which swallow `CancelledError`s.
* Code that shares the same `Deferred`, which may be cancelled, between
multiple requests.
* Code that starts some processing that's exempt from cancellation, but
uses a logging context from cancellable code. The logging context
will be finished upon cancellation, while the uncancelled processing
is still using it.
Some common patterns are listed below in more detail.
### `async` function calls
Most functions in Synapse are relatively straightforward from a
cancellation standpoint: they don't do anything with `Deferred`s and
purely call and `await` other `async` functions.
An `async` function handles cancellation correctly if its own code
handles cancellation correctly and all the async functions it calls
handle cancellation correctly. For example:
```python
async def do_two_things() -> None:
check_something()
await do_something()
await do_something_else()
```
`do_two_things` handles cancellation correctly if `do_something` and
`do_something_else` handle cancellation correctly.
That is, when checking whether a function handles cancellation
correctly, its implementation and all its `async` function calls need to
be checked, recursively.
As `check_something` is not `async`, it does not need to be checked.
### CancelledErrors
Because Twisted's `CancelledError`s are `Exception`s, it's easy to
accidentally catch and suppress them. Care must be taken to ensure that
`CancelledError`s are allowed to propagate upwards.
<table width="100%">
<tr>
<td width="50%" valign="top">
**Bad**:
```python
try:
await do_something()
except Exception:
# `CancelledError` gets swallowed here.
logger.info(...)
```
</td>
<td width="50%" valign="top">
**Good**:
```python
try:
await do_something()
except CancelledError:
raise
except Exception:
logger.info(...)
```
</td>
</tr>
<tr>
<td width="50%" valign="top">
**OK**:
```python
try:
check_something()
# A `CancelledError` won't ever be raised here.
except Exception:
logger.info(...)
```
</td>
<td width="50%" valign="top">
**Good**:
```python
try:
await do_something()
except ValueError:
logger.info(...)
```
</td>
</tr>
</table>
#### defer.gatherResults
`defer.gatherResults` produces a `Deferred` which:
* broadcasts `cancel()` calls to every `Deferred` being waited on.
* wraps the first exception it sees in a `FirstError`.
Together, this means that `CancelledError`s will be wrapped in
a `FirstError` unless unwrapped. Such `FirstError`s are liable to be
swallowed, so they must be unwrapped.
<table width="100%">
<tr>
<td width="50%" valign="top">
**Bad**:
```python
async def do_something() -> None:
await make_deferred_yieldable(
defer.gatherResults([...], consumeErrors=True)
)
try:
await do_something()
except CancelledError:
raise
except Exception:
# `FirstError(CancelledError)` gets swallowed here.
logger.info(...)
```
</td>
<td width="50%" valign="top">
**Good**:
```python
async def do_something() -> None:
await make_deferred_yieldable(
defer.gatherResults([...], consumeErrors=True)
).addErrback(unwrapFirstError)
try:
await do_something()
except CancelledError:
raise
except Exception:
logger.info(...)
```
</td>
</tr>
</table>
### Creation of `Deferred`s
If a function creates a `Deferred`, the effect of cancelling it must be considered. `Deferred`s that get shared are likely to have unintended behaviour when cancelled.
<table width="100%">
<tr>
<td width="50%" valign="top">
**Bad**:
```python
cache: Dict[str, Deferred[None]] = {}
def wait_for_room(room_id: str) -> Deferred[None]:
deferred = cache.get(room_id)
if deferred is None:
deferred = Deferred()
cache[room_id] = deferred
# `deferred` can have multiple waiters.
# All of them will observe a `CancelledError`
# if any one of them is cancelled.
return make_deferred_yieldable(deferred)
# Request 1
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
# Request 2
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
```
</td>
<td width="50%" valign="top">
**Good**:
```python
cache: Dict[str, Deferred[None]] = {}
def wait_for_room(room_id: str) -> Deferred[None]:
deferred = cache.get(room_id)
if deferred is None:
deferred = Deferred()
cache[room_id] = deferred
# `deferred` will never be cancelled now.
# A `CancelledError` will still come out of
# the `await`.
# `delay_cancellation` may also be used.
return make_deferred_yieldable(stop_cancellation(deferred))
# Request 1
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
# Request 2
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
```
</td>
</tr>
<tr>
<td width="50%" valign="top">
</td>
<td width="50%" valign="top">
**Good**:
```python
cache: Dict[str, List[Deferred[None]]] = {}
def wait_for_room(room_id: str) -> Deferred[None]:
if room_id not in cache:
cache[room_id] = []
# Each request gets its own `Deferred` to wait on.
deferred = Deferred()
cache[room_id].append(deferred)
return make_deferred_yieldable(deferred)
# Request 1
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
# Request 2
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
```
</td>
</table>
### Uncancelled processing
Some `async` functions may kick off some `async` processing which is
intentionally protected from cancellation, by `stop_cancellation` or
other means. If the `async` processing inherits the logcontext of the
request which initiated it, care must be taken to ensure that the
logcontext is not finished before the `async` processing completes.
<table width="100%">
<tr>
<td width="50%" valign="top">
**Bad**:
```python
cache: Optional[ObservableDeferred[None]] = None
async def do_something_else(
to_resolve: Deferred[None]
) -> None:
await ...
logger.info("done!")
to_resolve.callback(None)
async def do_something() -> None:
if not cache:
to_resolve = Deferred()
cache = ObservableDeferred(to_resolve)
# `do_something_else` will never be cancelled and
# can outlive the `request-1` logging context.
run_in_background(do_something_else, to_resolve)
await make_deferred_yieldable(cache.observe())
with LoggingContext("request-1"):
await do_something()
```
</td>
<td width="50%" valign="top">
**Good**:
```python
cache: Optional[ObservableDeferred[None]] = None
async def do_something_else(
to_resolve: Deferred[None]
) -> None:
await ...
logger.info("done!")
to_resolve.callback(None)
async def do_something() -> None:
if not cache:
to_resolve = Deferred()
cache = ObservableDeferred(to_resolve)
run_in_background(do_something_else, to_resolve)
# We'll wait until `do_something_else` is
# done before raising a `CancelledError`.
await make_deferred_yieldable(
delay_cancellation(cache.observe())
)
else:
await make_deferred_yieldable(cache.observe())
with LoggingContext("request-1"):
await do_something()
```
</td>
</tr>
<tr>
<td width="50%">
**OK**:
```python
cache: Optional[ObservableDeferred[None]] = None
async def do_something_else(
to_resolve: Deferred[None]
) -> None:
await ...
logger.info("done!")
to_resolve.callback(None)
async def do_something() -> None:
if not cache:
to_resolve = Deferred()
cache = ObservableDeferred(to_resolve)
# `do_something_else` will get its own independent
# logging context. `request-1` will not count any
# metrics from `do_something_else`.
run_as_background_process(
"do_something_else",
do_something_else,
to_resolve,
)
await make_deferred_yieldable(cache.observe())
with LoggingContext("request-1"):
await do_something()
```
</td>
<td width="50%">
</td>
</tr>
</table>
