Compare commits

4 Commits

Author          | SHA1       | Message                                            | Date
David Robertson | 4d343db081 | Get rid of my home dir, whoops                     | 2021-11-16 16:37:43 +00:00
David Robertson | a1367dcf8c | Require networkx                                   | 2021-11-16 16:34:33 +00:00
David Robertson | 9e361c8550 | Changelog                                          | 2021-11-16 13:52:59 +00:00
David Robertson | 51fec1a534 | Commit hacky script to visualise store inheritance | 2021-11-16 13:51:50 +00:00
                               (Use e.g. with `scripts-dev/storage_inheritance.py DataStore --show`.)
739 changed files with 28268 additions and 54641 deletions
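
The final commit above adds `scripts-dev/storage_inheritance.py` (hence the "Require networkx" commit). The committed script itself is not reproduced in this diff; as a rough, hypothetical sketch of the idea only, a few lines of Python with networkx suffice to walk a class's bases and print the inheritance edges:

# Sketch only: walk a class's inheritance hierarchy with networkx and print
# the edges. The real scripts-dev/storage_inheritance.py may differ.
import networkx as nx

def inheritance_graph(cls: type) -> nx.DiGraph:
    """Build a directed graph with an edge from each class to each of its bases."""
    graph = nx.DiGraph()
    todo = [cls]
    while todo:
        current = todo.pop()
        for base in current.__bases__:
            if base is object:
                continue  # skip the universal root to keep the graph readable
            if not graph.has_edge(current.__name__, base.__name__):
                graph.add_edge(current.__name__, base.__name__)
                todo.append(base)
    return graph

# Demo on a small diamond hierarchy; for Synapse one would import DataStore instead.
class StoreA: pass
class StoreB(StoreA): pass
class StoreC(StoreA): pass
class DemoDataStore(StoreB, StoreC): pass

for child, parent in inheritance_graph(DemoDataStore).edges:
    print(f"{child} -> {parent}")

Per the commit message, the real script is invoked as `scripts-dev/storage_inheritance.py DataStore --show`.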

.ci/scripts/test_export_data_command.sh

@@ -2,24 +2,29 @@
 # Test for the export-data admin command against sqlite and postgres
-# Expects Synapse to have been already installed with `poetry install --extras postgres`.
-# Expects `poetry` to be available on the `PATH`.
 set -xe
 cd "$(dirname "$0")/../.."
+echo "--- Install dependencies"
+# Install dependencies for this test.
+pip install psycopg2
+# Install Synapse itself. This won't update any libraries.
+pip install -e .
 echo "--- Generate the signing key"
 # Generate the server's signing key.
-poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
+python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
 echo "--- Prepare test database"
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
 # Run the export-data command on the sqlite test database
-poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
+python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
 --output-directory /tmp/export_data
 # Test that the output directory exists and contains the rooms directory
@@ -32,14 +37,14 @@ else
 fi
 # Create the PostgreSQL database.
-poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
 # Port the SQLite databse to postgres so we can check command works against postgres
 echo "+++ Port SQLite3 databse to postgres"
-poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 # Run the export-data command on postgres database
-poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
+python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
 --output-directory /tmp/export_data2
 # Test that the output directory exists and contains the rooms directory

.ci/scripts/test_old_deps.sh

@@ -1,81 +1,16 @@
 #!/usr/bin/env bash
-# this script is run by GitHub Actions in a plain `focal` container; it
-# - installs the minimal system requirements, and poetry;
-# - patches the project definition file to refer to old versions only;
-# - creates a venv with these old versions using poetry; and finally
-# - invokes `trial` to run the tests with old deps.
-# Prevent tzdata from asking for user input
-export DEBIAN_FRONTEND=noninteractive
+# this script is run by GitHub Actions in a plain `bionic` container; it installs the
+# minimal requirements for tox and hands over to the py3-old tox environment.
 set -ex
 apt-get update
-apt-get install -y \
-    python3 python3-dev python3-pip python3-venv pipx \
-    libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
+apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
 export LANG="C.UTF-8"
 # Prevent virtualenv from auto-updating pip to an incompatible version
 export VIRTUALENV_NO_DOWNLOAD=1
-# TODO: in the future, we could use an implementation of
-# https://github.com/python-poetry/poetry/issues/3527
-# https://github.com/pypa/pip/issues/8085
-# to select the lowest possible versions, rather than resorting to this sed script.
-# Patch the project definitions in-place:
-# - Replace all lower and tilde bounds with exact bounds
-# - Make the pyopenssl 17.0, which is the oldest version that works with
-#   a `cryptography` compiled against OpenSSL 1.1.
-# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
-# - Omit systemd: we're not logging to journal here.
-# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
-# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
-# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
-# `python = "^3.7"` to `python = "==3.7", which would mean we fail because olddeps
-# runs on 3.8 (#12343).
-sed -i \
-   -e "s/[~>]=/==/g" \
-   -e "/psycopg2/d" \
-   -e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-   -e '/systemd/d' \
-   pyproject.toml
-# Use poetry to do the installation. This ensures that the versions are all mutually
-# compatible (as far the package metadata declares, anyway); pip's package resolver
-# is more lax.
-#
-# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
-# toml file. This means we don't have to ensure compatibility between old deps and
-# dev tools.
-pip install --user toml
-REMOVE_DEV_DEPENDENCIES="
-import toml
-with open('pyproject.toml', 'r') as f:
-    data = toml.loads(f.read())
-del data['tool']['poetry']['dev-dependencies']
-with open('pyproject.toml', 'w') as f:
-    toml.dump(data, f)
-"
-python3 -c "$REMOVE_DEV_DEPENDENCIES"
-pipx install poetry==1.1.12
-~/.local/bin/poetry lock
-echo "::group::Patched pyproject.toml"
-cat pyproject.toml
-echo "::endgroup::"
-echo "::group::Lockfile after patch"
-cat poetry.lock
-echo "::endgroup::"
-~/.local/bin/poetry install -E "all test"
-~/.local/bin/poetry run trial --jobs=2 tests
+exec tox -e py3-old,combine
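
For a concrete sense of the pinning step on the `-` side above: the sed expression `s/[~>]=/==/g` rewrites lower (`>=`) and tilde (`~=`) bounds into exact pins, so the oldest declared versions get installed. A small Python equivalent of that substitution (the dependency lines are invented for illustration, not taken from Synapse's pyproject.toml):

# Illustration of the sed expression s/[~>]=/==/g used above: lower (>=) and
# tilde (~=) bounds become exact pins, so the oldest declared versions install.
import re

lines = [
    'attrs = ">=19.2.0"',    # hypothetical example line
    'frozendict = "~=1.0"',  # hypothetical example line
]
for line in lines:
    print(re.sub(r"[~>]=", "==", line))
# attrs = "==19.2.0"
# frozendict = "==1.0"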

.ci/scripts/test_synapse_port_db.sh

@@ -1,37 +1,41 @@
 #!/usr/bin/env bash
 #
 # Test script for 'synapse_port_db'.
-#   - configures synapse and a postgres server.
+#   - sets up synapse and deps
 #   - runs the port script on a prepopulated test sqlite db
 #   - also runs it against an new sqlite db
-#
-# Expects Synapse to have been already installed with `poetry install --extras postgres`.
-# Expects `poetry` to be available on the `PATH`.
 set -xe
 cd "$(dirname "$0")/../.."
+echo "--- Install dependencies"
+# Install dependencies for this test.
+pip install psycopg2 coverage coverage-enable-subprocess
+# Install Synapse itself. This won't update any libraries.
+pip install -e .
 echo "--- Generate the signing key"
 # Generate the server's signing key.
-poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
+python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
 echo "--- Prepare test database"
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
 # Create the PostgreSQL database.
-poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
 echo "+++ Run synapse_port_db against test database"
-# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
-# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
-poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 # We should be able to run twice against the same database.
 echo "+++ Run synapse_port_db a second time"
-poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
 #####
@@ -42,12 +46,12 @@ echo "--- Prepare empty SQLite database"
 # we do this by deleting the sqlite db, and then doing the same again.
 rm .ci/test_db.db
-poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
 # re-create the PostgreSQL database.
-poetry run .ci/scripts/postgres_exec.py \
+.ci/scripts/postgres_exec.py \
   "DROP DATABASE synapse" \
   "CREATE DATABASE synapse"
 echo "+++ Run synapse_port_db against empty database"
-poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

.dockerignore

@@ -3,13 +3,11 @@
 # things to include
 !docker
+!scripts
 !synapse
-!README.rst
-!pyproject.toml
-!poetry.lock
-# TODO: remove these once we have moved over to using poetry-core in pyproject.toml
 !MANIFEST.in
+!README.rst
 !setup.py
+!synctl
 **/__pycache__

.flake8 (deleted)

@@ -1,11 +0,0 @@
-# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
-# See https://github.com/PyCQA/flake8/issues/234
-[flake8]
-# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
-# for error codes. The ones we ignore are:
-#  W503: line break before binary operator
-#  W504: line break after binary operator
-#  E203: whitespace before ':' (which is contrary to pep8?)
-#  E731: do not assign a lambda expression, use a def
-#  E501: Line too long (black enforces this for us)
-ignore=W503,W504,E203,E731,E501

View File

@@ -8,7 +8,6 @@
 - Use markdown where necessary, mostly for `code blocks`.
 - End with either a period (.) or an exclamation mark (!).
 - Start with a capital letter.
-- Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
 * [ ] Pull request includes a [sign off](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#sign-off)
 * [ ] [Code style](https://matrix-org.github.io/synapse/latest/code_style.html) is correct
   (run the [linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))

View File

@@ -5,7 +5,7 @@ name: Build docker images
 on:
   push:
     tags: ["v*"]
-    branches: [ master, main, develop ]
+    branches: [ master, main ]
   workflow_dispatch:
 permissions:
@@ -34,15 +34,10 @@ jobs:
         username: ${{ secrets.DOCKERHUB_USERNAME }}
         password: ${{ secrets.DOCKERHUB_TOKEN }}
-    # TODO: consider using https://github.com/docker/metadata-action instead of this
-    # custom magic
     - name: Calculate docker image tag
       id: set-tag
       run: |
         case "${GITHUB_REF}" in
-          refs/heads/develop)
-            tag=develop
-            ;;
           refs/heads/master|refs/heads/main)
             tag=latest
             ;;
@@ -55,6 +50,18 @@ jobs:
         esac
         echo "::set-output name=tag::$tag"
+    # for release builds, we want to get the amd64 image out asap, so first
+    # we do an amd64-only build, before following up with a multiarch build.
+    - name: Build and push amd64
+      uses: docker/build-push-action@v2
+      if: "${{ startsWith(github.ref, 'refs/tags/v') }}"
+      with:
+        push: true
+        labels: "gitsha1=${{ github.sha }}"
+        tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
+        file: "docker/Dockerfile"
+        platforms: linux/amd64
     - name: Build and push all platforms
       uses: docker/build-push-action@v2
       with:

View File

@@ -22,7 +22,7 @@ jobs:
     - name: Setup mdbook
       uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
       with:
-        mdbook-version: '0.4.17'
+        mdbook-version: '0.4.9'
     - name: Build the documentation
       # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.

View File

@@ -7,7 +7,7 @@ on:
   #  of things breaking (but only build one set of debs)
   pull_request:
   push:
-    branches: ["develop", "release-*"]
+    branches: ["develop"]
   # we do the full build on tags.
   tags: ["v*"]
@@ -31,7 +31,7 @@ jobs:
         # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
         dists='["debian:sid"]'
         if [[ $GITHUB_REF == refs/tags/* ]]; then
-          dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
+          dists=$(scripts-dev/build_debian_packages --show-dists-json)
         fi
         echo "::set-output name=distros::$dists"
     # map the step outputs to job outputs
@@ -74,7 +74,7 @@ jobs:
       # see https://github.com/docker/build-push-action/issues/252
       # for the cache magic here
       run: |
-        ./src/scripts-dev/build_debian_packages.py \
+        ./src/scripts-dev/build_debian_packages \
           --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
           --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
          --docker-build-arg=--progress=plain \
@@ -91,7 +91,17 @@ jobs:
   build-sdist:
     name: "Build pypi distribution files"
-    uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - run: pip install wheel
+      - run: |
+          python setup.py sdist bdist_wheel
+      - uses: actions/upload-artifact@v2
+        with:
+          name: python-dist
+          path: dist/*
   # if it's a tag, create a release and attach the artifacts to it
   attach-assets:
@@ -112,8 +122,7 @@ jobs:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     with:
       files: |
-        Sdist/*
-        Wheel/*
+        python-dist/*
         debs.tar.xz
       # if it's not already published, keep the release as a draft.
       draft: true

View File

@@ -10,23 +10,16 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  check-sampleconfig:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
-      - run: pip install -e .
-      - run: scripts-dev/generate_sample_config.sh --check
-      - run: scripts-dev/config-lint.sh
   lint:
     runs-on: ubuntu-latest
     strategy:
       matrix:
         toxenv:
+          - "check-sampleconfig"
           - "check_codestyle"
           - "check_isort"
           - "mypy"
+          - "packaging"
     steps:
       - uses: actions/checkout@v2
@@ -50,15 +43,29 @@ jobs:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
       - uses: actions/setup-python@v2
-      - run: "pip install 'towncrier>=18.6.0rc1'"
-      - run: scripts-dev/check-newsfragment.sh
+      - run: pip install tox
+      - run: scripts-dev/check-newsfragment
         env:
           PULL_REQUEST_NUMBER: ${{ github.event.number }}
+  lint-sdist:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+        with:
+          python-version: "3.x"
+      - run: pip install wheel
+      - run: python setup.py sdist bdist_wheel
+      - uses: actions/upload-artifact@v2
+        with:
+          name: Python Distributions
+          path: dist/*
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
-    needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig]
+    needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
     runs-on: ubuntu-latest
     steps:
       - run: "true"
@@ -69,7 +76,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.7", "3.8", "3.9", "3.10"]
+        python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
         database: ["sqlite"]
         toxenv: ["py"]
         include:
@@ -78,9 +85,9 @@ jobs:
             toxenv: "py-noextras"
           # Oldest Python with PostgreSQL
-          - python-version: "3.7"
+          - python-version: "3.6"
             database: "postgres"
-            postgres-version: "10"
+            postgres-version: "9.6"
             toxenv: "py"
           # Newest Python with newest PostgreSQL
@@ -128,19 +135,18 @@ jobs:
           || true
   trial-olddeps:
-    # Note: sqlite only; no postgres
     if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
     needs: linting-done
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
       - name: Test with old deps
-        uses: docker://ubuntu:focal # For old python and sqlite
-        # Note: focal seems to be using 3.8, but the oldest is 3.7?
-        # See https://github.com/matrix-org/synapse/issues/12343
+        uses: docker://ubuntu:bionic # For old python and sqlite
         with:
           workdir: /github/workspace
           entrypoint: .ci/scripts/test_old_deps.sh
+        env:
+          TRIAL_FLAGS: "--jobs=2"
       - name: Dump logs
         # Logs are most useful when the command fails, always include them.
         if: ${{ always() }}
@@ -161,7 +167,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["pypy-3.7"]
+        python-version: ["pypy-3.6"]
     steps:
       - uses: actions/checkout@v2
@@ -207,15 +213,15 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - sytest-tag: focal
-          - sytest-tag: focal
+          - sytest-tag: bionic
+          - sytest-tag: bionic
             postgres: postgres
           - sytest-tag: testing
             postgres: postgres
-          - sytest-tag: focal
+          - sytest-tag: bionic
             postgres: multi-postgres
             workers: workers
@@ -271,10 +277,9 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - run: sudo apt-get -qq install xmlsec1
-      - uses: matrix-org/setup-python-poetry@v1
+      - uses: actions/setup-python@v2
         with:
-          python-version: ${{ matrix.python-version }}
-          extras: "postgres"
+          python-version: "3.9"
       - run: .ci/scripts/test_export_data_command.sh
   portdb:
@@ -286,8 +291,8 @@ jobs:
     strategy:
       matrix:
         include:
-          - python-version: "3.7"
-            postgres-version: "10"
+          - python-version: "3.6"
+            postgres-version: "9.6"
           - python-version: "3.10"
             postgres-version: "14"
@@ -309,39 +314,33 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - run: sudo apt-get -qq install xmlsec1
-      - uses: matrix-org/setup-python-poetry@v1
+      - uses: actions/setup-python@v2
         with:
           python-version: ${{ matrix.python-version }}
-          extras: "postgres"
       - run: .ci/scripts/test_synapse_port_db.sh
   complement:
     if: ${{ !failure() && !cancelled() }}
     needs: linting-done
     runs-on: ubuntu-latest
-    container:
-      # https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile
-      image: matrixdotorg/complement:latest
-      env:
-        CI: true
-      ports:
-        - 8448:8448
-      volumes:
-        - /var/run/docker.sock:/var/run/docker.sock
     steps:
-      # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
-      # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
-      - name: "Set Go Version"
-        run: |
-          # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
-          echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
-          # Add the Go path to the PATH: We need this so we can call gotestfmt
-          echo "~/go/bin" >> $GITHUB_PATH
-      - name: "Install Complement Dependencies"
-        run: |
-          sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
-          go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
       - name: Run actions/checkout@v2 for synapse
         uses: actions/checkout@v2
         with:
           path: synapse
       # Attempt to check out the same branch of Complement as the PR. If it
-      # doesn't exist, fallback to HEAD.
+      # doesn't exist, fallback to master.
       - name: Checkout complement
         shell: bash
         run: |
@@ -354,8 +353,8 @@ jobs:
           #    for pull requests, otherwise GITHUB_REF).
           # 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
           #    (GITHUB_BASE_REF for pull requests).
-          # 3. Use the default complement branch ("HEAD").
-          for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
+          # 3. Use the default complement branch ("master").
+          for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do
             # Skip empty branch names and merge commits.
             if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
               continue
@@ -364,32 +363,55 @@ jobs:
             (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
           done
-      - run: |
-          set -o pipefail
-          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
-        shell: bash
-        name: Run Complement Tests
+      # Build initial Synapse image
+      - run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
+        working-directory: synapse
+      # Build a ready-to-run Synapse image based on the initial image above.
+      # This new image includes a config file, keys for signing and TLS, and
+      # other settings to make it suitable for testing under Complement.
+      - run: docker build -t complement-synapse -f Synapse.Dockerfile .
+        working-directory: complement/dockerfiles
+      # Run Complement
+      - run: go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests/...
+        env:
+          COMPLEMENT_BASE_IMAGE: complement-synapse:latest
+        working-directory: complement
   # a job which marks all the other jobs as complete, thus allowing PRs to be merged.
   tests-done:
     if: ${{ always() }}
     needs:
-      - check-sampleconfig
       - lint
       - lint-crlf
       - lint-newsfile
+      - lint-sdist
       - trial
      - trial-olddeps
      - sytest
-      - export-data
      - portdb
      - complement
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v2
-        with:
-          needs: ${{ toJSON(needs) }}
-          # The newsfile lint may be skipped on non PR builds
-          skippable:
-            lint-newsfile
+      - name: Set build result
+        env:
+          NEEDS_CONTEXT: ${{ toJSON(needs) }}
+        # the `jq` incantation dumps out a series of "<job> <result>" lines.
+        # we set it to an intermediate variable to avoid a pipe, which makes it
+        # hard to set $rc.
+        run: |
+          rc=0
+          results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
+          while read job result ; do
+            # The newsfile lint may be skipped on non PR builds
+            if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then
+              continue
+            fi
+            if [ "$result" != "success" ]; then
+              echo "::set-failed ::Job $job returned $result"
+              rc=1
+            fi
+          done <<< $results
+          exit $rc
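
To see what the jq incantation in the `+` side of that tests-done step emits, here is a rough Python equivalent of `to_entries[] | [.key,.value.result] | join(" ")`, run over a made-up needs context (sample data invented for illustration):

# Illustration of the jq expression used above, with a made-up needs context.
import json

needs_context = json.loads(
    '{"lint": {"result": "success"}, "lint-newsfile": {"result": "skipped"}}'
)
for job, info in needs_context.items():
    print(job, info["result"])
# lint success
# lint-newsfile skipped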

View File

@@ -25,7 +25,7 @@ jobs:
     - run: sudo apt-get -qq install xmlsec1
     - uses: actions/setup-python@v2
       with:
-        python-version: 3.7
+        python-version: 3.6
     - run: .ci/patch_for_twisted_trunk.sh
     - run: pip install tox
     - run: tox -e py

.gitignore

@@ -15,10 +15,6 @@ _trial_temp*/
 .DS_Store
 __pycache__/
-# We do want the poetry lockfile. TODO: is there a good reason for ignoring
-# '*.lock' above? If not, let's nuke it.
-!poetry.lock
 # stuff that is likely to exist when you run a server locally
 /*.db
 /*.log
@@ -34,9 +30,6 @@ __pycache__/
 /media_store/
 /uploads
-# For direnv users
-/.envrc
 # IDEs
 /.idea/
 /.ropeproject/
@@ -57,7 +50,3 @@ __pycache__/
 # docs
 book/
-# complement
-/complement-*
-/master.tar.gz

CHANGES.md

File diff suppressed because it is too large.

MANIFEST.in

@@ -1,3 +1,4 @@
+include synctl
 include LICENSE
 include VERSION
 include *.rst
@@ -16,6 +17,7 @@ recursive-include synapse/storage *.txt
 recursive-include synapse/storage *.md
 recursive-include docs *
+recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
 recursive-include tests *.py
@@ -43,7 +45,6 @@ include book.toml
 include pyproject.toml
 recursive-include changelog.d *
-include .flake8
 prune .circleci
 prune .github
 prune .ci
@@ -51,4 +52,5 @@ prune contrib
 prune debian
 prune demo/etc
 prune docker
+prune snap
 prune stubs

README.rst

@@ -246,7 +246,7 @@ Password reset
 ==============
 Users can reset their password through their client. Alternatively, a server admin
-can reset a users password using the `admin API <docs/admin_api/user_admin_api.md#reset-password>`_
+can reset a users password using the `admin API <docs/admin_api/user_admin_api.rst#reset-password>`_
 or by directly editing the database as shown below.
 First calculate the hash of the new password::
@@ -312,9 +312,6 @@ We recommend using the demo which starts 3 federated instances running on ports
 (to stop, you can use `./demo/stop.sh`)
-See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html)
-for more information.
 If you just want to start a single instance of the app and run it directly::
     # Create the homeserver.yaml config once

New changelog fragments under changelog.d/ (each a new file; a single line unless shown otherwise):

(filename not shown)
+Add a new version of delete room admin API `DELETE /_synapse/admin/v2/rooms/<room_id>` to run it in background. Contributed by @dklimpel.

(filename not shown)
+Allow the admin [Delete Room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api) to block a room without the need to join it.

changelog.d/11230.bugfix
+Fix a long-standing bug wherein display names or avatar URLs containing null bytes cause an internal server error
+when stored in the DB.

(filename not shown)
+Support filtering by relation senders & types per [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440).

changelog.d/11242.misc
+Split out federated PDU retrieval function into a non-cached version.

changelog.d/11247.misc
+Clean up code relating to to-device messages and sending ephemeral events to application services.

changelog.d/11278.misc
+Fix a small typo in the error response when a relation type other than 'm.annotation' is passed to `GET /rooms/{room_id}/aggregations/{event_id}`.

changelog.d/11280.misc
+Drop unused db tables `room_stats_historical` and `user_stats_historical`.

changelog.d/11281.doc
+Suggest users of the Debian packages add configuration to `/etc/matrix-synapse/conf.d/` to prevent, upon upgrade, being asked to choose between their configuration and the maintainer's.

changelog.d/11282.misc
+Require all files in synapse/ and tests/ to pass mypy unless specifically excluded.

changelog.d/11285.misc
+Require all files in synapse/ and tests/ to pass mypy unless specifically excluded.

changelog.d/11286.doc
+Fix typo in the word `available` and fix HTTP method (should be `GET`) for the `username_available` admin API. Contributed by Stanislav Motylkov.

changelog.d/11287.misc
+Add missing type hints to `synapse.app`.

changelog.d/11288.bugfix
+Fix a long-standing bug where uploading extremely thin images (e.g. 1000x1) would fail. Contributed by @Neeeflix.

changelog.d/11292.misc
+Remove unused parameters on `FederationEventHandler._check_event_auth`.

changelog.d/11297.misc
+Add type hints to `synapse._scripts`.

changelog.d/11298.doc
+Add Single Sign-On, SAML and CAS pages to the documentation.

changelog.d/11303.misc
+Fix an issue which prevented the 'remove deleted devices from device_inbox column' background process from running when updating from a recent Synapse version.

changelog.d/11307.misc
+Add type hints to storage classes.

changelog.d/11310.misc
+Add type hints to storage classes.

changelog.d/11311.misc
+Add type hints to storage classes.

changelog.d/11312.misc
+Add type hints to storage classes.

changelog.d/11313.misc
+Add type hints to storage classes.

changelog.d/11314.misc
+Add type hints to storage classes.

changelog.d/11316.misc
+Add type hints to storage classes.

changelog.d/11321.misc
+Add type hints to `synapse.util`.

changelog.d/11322.misc
+Add type hints to storage classes.

changelog.d/11323.misc
+Improve type annotations in Synapse's test suite.

changelog.d/11327.misc
+Test that room alias deletion works as intended.

changelog.d/11332.misc
+Add type hints to storage classes.

(filename not shown)
+Support the stable version of [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778): the `m.login.application_service` login type. Contributed by @tulir.

changelog.d/11339.misc
+Add type hints to storage classes.

changelog.d/11342.misc
+Add type hints to storage classes.

changelog.d/11357.misc
+Add a development script for visualising the storage class inheritance hierarchy.

View File

@@ -14,7 +14,6 @@ services:
     #   failure
     restart: unless-stopped
     # See the readme for a full documentation of the environment settings
-    # NOTE: You must edit homeserver.yaml to use postgres, it defaults to sqlite
     environment:
       - SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
     volumes:

View File

@@ -193,15 +193,12 @@ class TrivialXmppClient:
         time.sleep(7)
         print("SSRC spammer started")
         while self.running:
-            ssrcMsg = (
-                "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
-                % {
-                    "tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
-                    "nick": self.userId,
-                    "assrc": self.ssrcs["audio"],
-                    "vssrc": self.ssrcs["video"],
-                }
-            )
+            ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
+                "tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
+                "nick": self.userId,
+                "assrc": self.ssrcs["audio"],
+                "vssrc": self.ssrcs["video"],
+            }
             res = self.sendIq(ssrcMsg)
             print("reply from ssrc announce: ", res)
             time.sleep(10)

View File

@@ -92,6 +92,22 @@ new PromConsole.Graph({
 })
 </script>
+<h3>Pending calls per tick</h3>
+<div id="reactor_pending_calls"></div>
+<script>
+new PromConsole.Graph({
+  node: document.querySelector("#reactor_pending_calls"),
+  expr: "rate(python_twisted_reactor_pending_calls_sum[30s]) / rate(python_twisted_reactor_pending_calls_count[30s])",
+  name: "[[job]]-[[index]]",
+  min: 0,
+  renderer: "line",
+  height: 150,
+  yAxisFormatter: PromConsole.NumberFormatter.humanize,
+  yHoverFormatter: PromConsole.NumberFormatter.humanize,
+  yTitle: "Pending Calls"
+})
+</script>
 <h1>Storage</h1>
 <h3>Queries</h3>

debian/changelog

@@ -1,195 +1,3 @@
-matrix-synapse-py3 (1.57.1) stable; urgency=medium
-  * New synapse release 1.57.1.
- -- Synapse Packaging team <packages@matrix.org>  Wed, 20 Apr 2022 15:27:21 +0100
-matrix-synapse-py3 (1.57.0) stable; urgency=medium
-  * New synapse release 1.57.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 19 Apr 2022 10:58:42 +0100
-matrix-synapse-py3 (1.57.0~rc1) stable; urgency=medium
-  * New synapse release 1.57.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 12 Apr 2022 13:36:25 +0100
-matrix-synapse-py3 (1.56.0) stable; urgency=medium
-  * New synapse release 1.56.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 05 Apr 2022 12:38:39 +0100
-matrix-synapse-py3 (1.56.0~rc1) stable; urgency=medium
-  * New synapse release 1.56.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Mar 2022 10:40:50 +0100
-matrix-synapse-py3 (1.55.2) stable; urgency=medium
-  * New synapse release 1.55.2.
- -- Synapse Packaging team <packages@matrix.org>  Thu, 24 Mar 2022 19:07:11 +0000
-matrix-synapse-py3 (1.55.1) stable; urgency=medium
-  * New synapse release 1.55.1.
- -- Synapse Packaging team <packages@matrix.org>  Thu, 24 Mar 2022 17:44:23 +0000
-matrix-synapse-py3 (1.55.0) stable; urgency=medium
-  * New synapse release 1.55.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 22 Mar 2022 13:59:26 +0000
-matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium
-  * New synapse release 1.55.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Mar 2022 10:59:31 +0000
-matrix-synapse-py3 (1.54.0) stable; urgency=medium
-  * New synapse release 1.54.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Mar 2022 10:54:52 +0000
-matrix-synapse-py3 (1.54.0~rc1) stable; urgency=medium
-  * New synapse release 1.54.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Wed, 02 Mar 2022 10:43:22 +0000
-matrix-synapse-py3 (1.53.0) stable; urgency=medium
-  * New synapse release 1.53.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 22 Feb 2022 11:32:06 +0000
-matrix-synapse-py3 (1.53.0~rc1) stable; urgency=medium
-  * New synapse release 1.53.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Feb 2022 10:40:50 +0000
-matrix-synapse-py3 (1.52.0) stable; urgency=medium
-  * New synapse release 1.52.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Feb 2022 11:34:54 +0000
-matrix-synapse-py3 (1.52.0~rc1) stable; urgency=medium
-  * New synapse release 1.52.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Feb 2022 11:04:09 +0000
-matrix-synapse-py3 (1.51.0) stable; urgency=medium
-  * New synapse release 1.51.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Jan 2022 11:28:51 +0000
-matrix-synapse-py3 (1.51.0~rc2) stable; urgency=medium
-  * New synapse release 1.51.0~rc2.
- -- Synapse Packaging team <packages@matrix.org>  Mon, 24 Jan 2022 12:25:00 +0000
-matrix-synapse-py3 (1.51.0~rc1) stable; urgency=medium
-  * New synapse release 1.51.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Fri, 21 Jan 2022 10:46:02 +0000
-matrix-synapse-py3 (1.50.2) stable; urgency=medium
-  * New synapse release 1.50.2.
- -- Synapse Packaging team <packages@matrix.org>  Mon, 24 Jan 2022 13:37:11 +0000
-matrix-synapse-py3 (1.50.1) stable; urgency=medium
-  * New synapse release 1.50.1.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jan 2022 16:06:26 +0000
-matrix-synapse-py3 (1.50.0) stable; urgency=medium
-  * New synapse release 1.50.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jan 2022 10:40:38 +0000
-matrix-synapse-py3 (1.50.0~rc2) stable; urgency=medium
-  * New synapse release 1.50.0~rc2.
- -- Synapse Packaging team <packages@matrix.org>  Fri, 14 Jan 2022 11:18:06 +0000
-matrix-synapse-py3 (1.50.0~rc1) stable; urgency=medium
-  * New synapse release 1.50.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Wed, 05 Jan 2022 12:36:17 +0000
-matrix-synapse-py3 (1.49.2) stable; urgency=medium
-  * New synapse release 1.49.2.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 21 Dec 2021 17:31:03 +0000
-matrix-synapse-py3 (1.49.1) stable; urgency=medium
-  * New synapse release 1.49.1.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 21 Dec 2021 11:07:30 +0000
-matrix-synapse-py3 (1.49.0) stable; urgency=medium
-  * New synapse release 1.49.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 14 Dec 2021 12:39:46 +0000
-matrix-synapse-py3 (1.49.0~rc1) stable; urgency=medium
-  * New synapse release 1.49.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 07 Dec 2021 13:52:21 +0000
-matrix-synapse-py3 (1.48.0) stable; urgency=medium
-  * New synapse release 1.48.0.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Nov 2021 11:24:15 +0000
-matrix-synapse-py3 (1.48.0~rc1) stable; urgency=medium
-  * New synapse release 1.48.0~rc1.
- -- Synapse Packaging team <packages@matrix.org>  Thu, 25 Nov 2021 15:56:03 +0000
-matrix-synapse-py3 (1.47.1) stable; urgency=medium
-  * New synapse release 1.47.1.
- -- Synapse Packaging team <packages@matrix.org>  Fri, 19 Nov 2021 13:44:32 +0000
-matrix-synapse-py3 (1.47.0) stable; urgency=medium
-  * New synapse release 1.47.0.
- -- Synapse Packaging team <packages@matrix.org>  Wed, 17 Nov 2021 13:09:43 +0000
-matrix-synapse-py3 (1.47.0~rc3) stable; urgency=medium
-  * New synapse release 1.47.0~rc3.
- -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Nov 2021 14:32:47 +0000
 matrix-synapse-py3 (1.47.0~rc2) stable; urgency=medium
 [ Dan Callahan ]

demo/.gitignore

@@ -1,4 +1,7 @@
-# Ignore all the temporary files from the demo servers.
-8080/
-8081/
-8082/
+*.db
+*.log
+*.log.*
+*.pid
+/media_store.*
+/etc

demo/README (new file)

@@ -0,0 +1,26 @@
+DO NOT USE THESE DEMO SERVERS IN PRODUCTION
+
+Requires you to have done:
+    python setup.py develop
+
+The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
+
+To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs
+and are configured in a highly insecure way. Do not use these configuration files in production.
+
+stop.sh will stop the synapse servers and the webclient.
+
+clean.sh will delete the databases and log files.
+
+To start a completely new set of servers, run:
+
+    ./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
+
+Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db}
+
+Also note that when joining a public room on a differnt HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name.

View File

@@ -4,9 +4,6 @@ set -e
 DIR="$( cd "$( dirname "$0" )" && pwd )"
-# Ensure that the servers are stopped.
-$DIR/stop.sh
 PID_FILE="$DIR/servers.pid"
 if [ -f "$PID_FILE" ]; then

demo/start.sh

@@ -6,6 +6,8 @@ CWD=$(pwd)
 cd "$DIR/.." || exit
+mkdir -p demo/etc
 PYTHONPATH=$(readlink -f "$(pwd)")
 export PYTHONPATH
@@ -19,27 +21,22 @@ for port in 8080 8081 8082; do
     mkdir -p demo/$port
     pushd demo/$port || exit
-    # Generate the configuration for the homeserver at localhost:848x.
+    #rm $DIR/etc/$port.config
     python3 -m synapse.app.homeserver \
         --generate-config \
-        --server-name "localhost:$port" \
-        --config-path "$port.config" \
+        -H "localhost:$https_port" \
+        --config-path "$DIR/etc/$port.config" \
         --report-stats no
-    if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then
-        # Generate TLS keys.
-        openssl req -x509 -newkey rsa:4096 \
-            -keyout "localhost:$port.tls.key" \
-            -out "localhost:$port.tls.crt" \
-            -days 365 -nodes -subj "/O=matrix"
-        # Add customisations to the configuration.
+    if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then
+        # Generate tls keys
+        openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix"
+        # Regenerate configuration
         {
-            printf '\n\n# Customisation made by demo/start.sh\n\n'
+            printf '\n\n# Customisation made by demo/start.sh\n'
             echo "public_baseurl: http://localhost:$port/"
             echo 'enable_registration: true'
-            echo 'enable_registration_without_verification: true'
-            echo ''
             # Warning, this heredoc depends on the interaction of tabs and spaces.
             # Please don't accidentaly bork me with your fancy settings.
@@ -66,34 +63,38 @@ for port in 8080 8081 8082; do
             echo "${listeners}"
-            # Disable TLS for the servers
-            printf '\n\n# Disable TLS for the servers.'
+            # Disable tls for the servers
+            printf '\n\n# Disable tls on the servers.'
             echo '# DO NOT USE IN PRODUCTION'
             echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true'
             echo 'federation_verify_certificates: false'
-            # Set paths for the TLS certificates.
-            echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
-            echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
+            # Set tls paths
+            echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\""
+            echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\""
             # Ignore keys from the trusted keys server
             echo '# Ignore keys from the trusted keys server'
             echo 'trusted_key_servers:'
             echo '  - server_name: "matrix.org"'
             echo '    accept_keys_insecurely: true'
-            echo ''
-            # Allow the servers to communicate over localhost.
-            allow_list=$(cat <<-ALLOW_LIST
-            # Allow the servers to communicate over localhost.
-            ip_range_whitelist:
-               - '127.0.0.1/8'
-               - '::1/128'
-            ALLOW_LIST
+            # Reduce the blacklist
+            blacklist=$(cat <<-BLACK
+            # Set the blacklist so that it doesn't include 127.0.0.1, ::1
+            federation_ip_range_blacklist:
+               - '10.0.0.0/8'
+               - '172.16.0.0/12'
+               - '192.168.0.0/16'
+               - '100.64.0.0/10'
+               - '169.254.0.0/16'
+               - 'fe80::/64'
+               - 'fc00::/7'
+            BLACK
             )
-            echo "${allow_list}"
-        } >> "$port.config"
+            echo "${blacklist}"
+        } >> "$DIR/etc/$port.config"
     fi
     # Check script parameters
@@ -140,18 +141,19 @@ for port in 8080 8081 8082; do
             burst_count: 1000
         RC
         )
-        echo "${ratelimiting}" >> "$port.config"
+        echo "${ratelimiting}" >> "$DIR/etc/$port.config"
     fi
     fi
-    # Always disable reporting of stats if the option is not there.
-    if ! grep -F "report_stats" -q "$port.config" ; then
-        echo "report_stats: false" >> "$port.config"
+    if ! grep -F "full_twisted_stacktraces" -q "$DIR/etc/$port.config"; then
+        echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config"
+    fi
+    if ! grep -F "report_stats" -q "$DIR/etc/$port.config" ; then
+        echo "report_stats: false" >> "$DIR/etc/$port.config"
     fi
-    # Run the homeserver in the background.
     python3 -m synapse.app.homeserver \
-        --config-path "$port.config" \
+        --config-path "$DIR/etc/$port.config" \
         -D \
     popd || exit

View File

@@ -1,78 +1,25 @@
# Dockerfile to build the matrixdotorg/synapse docker images. # Dockerfile to build the matrixdotorg/synapse docker images.
# #
# Note that it uses features which are only available in BuildKit - see
# https://docs.docker.com/go/buildkit/ for more information.
#
# To build the image, run `docker build` command from the root of the # To build the image, run `docker build` command from the root of the
# synapse repository: # synapse repository:
# #
# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile . # docker build -f docker/Dockerfile .
# #
# There is an optional PYTHON_VERSION build argument which sets the # There is an optional PYTHON_VERSION build argument which sets the
# version of python to build against: for example: # version of python to build against: for example:
# #
# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.10 . # docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.6 .
# #
# Irritatingly, there is no blessed guide on how to distribute an application with its ARG PYTHON_VERSION=3.8
# poetry-managed environment in a docker image. We have opted for
# `poetry export | pip install -r /dev/stdin`, but there are known bugs in
# in `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
# In case we get bitten by those bugs in the future, the recommendations here might
# be useful:
# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
ARG PYTHON_VERSION=3.9
### ###
### Stage 0: generate requirements.txt ### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
# Here we use it to set up a cache for apt (and below for pip), to improve
# rebuild speeds on slow connections.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y git \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
# We use a specific commit from poetry's master branch instead of our usual 1.1.12,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
# without it, we generate a requirements.txt with incorrect environment markers,
# which causes necessary packages to be omitted when we `pip install`.
#
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5
WORKDIR /synapse
# Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock README.rst /synapse/
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
###
### Stage 1: builder
### ###
FROM docker.io/python:${PYTHON_VERSION}-slim as builder FROM docker.io/python:${PYTHON_VERSION}-slim as builder
# install the OS build deps # install the OS build deps
RUN \ RUN apt-get update && apt-get install -y \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y \
build-essential \ build-essential \
libffi-dev \ libffi-dev \
libjpeg-dev \ libjpeg-dev \
@@ -86,27 +33,30 @@ RUN \
zlib1g-dev \ zlib1g-dev \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
# Copy just what we need to pip install
COPY scripts /synapse/scripts/
COPY MANIFEST.in README.rst setup.py synctl /synapse/
COPY synapse/__init__.py /synapse/synapse/__init__.py
COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
# To speed up rebuilds, install all of the dependencies before we copy over # To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be # the whole synapse project so that we this layer in the Docker cache can be
# used while you develop on the source # used while you develop on the source
# #
# This is aiming at installing the `[tool.poetry.depdendencies]` from pyproject.toml. # This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
COPY --from=requirements /synapse/requirements.txt /synapse/ RUN pip install --prefix="/install" --no-warn-script-location \
RUN --mount=type=cache,target=/root/.cache/pip \ /synapse[all]
pip install --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt
# Copy over the rest of the synapse source code. # Copy over the rest of the project
COPY synapse /synapse/synapse/ COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
# TODO: once pyproject.toml declares poetry-core as its build system, we'll need to copy
# pyproject.toml here, ditching setup.py and MANIFEST.in.
COPY setup.py MANIFEST.in README.rst /synapse/
# Install the synapse package itself. # Install the synapse package itself and all of its children packages.
#
# This is aiming at installing only the `packages=find_packages(...)` from `setup.py
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
### ###
### Stage 2: runtime ### Stage 1: runtime
### ###
FROM docker.io/python:${PYTHON_VERSION}-slim FROM docker.io/python:${PYTHON_VERSION}-slim
@@ -116,10 +66,7 @@ LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/syna
LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git' LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
LABEL org.opencontainers.image.licenses='Apache-2.0' LABEL org.opencontainers.image.licenses='Apache-2.0'
RUN \ RUN apt-get update && apt-get install -y \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y \
curl \ curl \
gosu \ gosu \
libjpeg62-turbo \ libjpeg62-turbo \
@@ -135,6 +82,8 @@ COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf
VOLUME ["/data"]
EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]


@@ -16,7 +16,7 @@ ARG distro=""
### Stage 0: build a dh-virtualenv
###
# This is only really needed on focal, since other distributions we # This is only really needed on bionic and focal, since other distributions we
# care about have a recent version of dh-virtualenv by default. Unfortunately,
# it looks like focal is going to be with us for a while.
#
@@ -36,8 +36,9 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
wget wget
# fetch and unpack the package
# TODO: Upgrade to 1.2.2 once bionic is dropped (1.2.2 requires debhelper 12; bionic has only 11)
RUN mkdir /dh-virtualenv
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/refs/tags/1.2.2.tar.gz RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
# install its build deps. We do another apt-cache-update here, because we might
@@ -85,12 +86,12 @@ RUN apt-get update -qq -o Acquire::Languages=none \
libpq-dev \
xmlsec1
COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb / COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /
# install dhvirtualenv. Update the apt cache again first, in case we got a
# cached cache from docker the first time.
RUN apt-get update -qq -o Acquire::Languages=none \
&& apt-get install -yq /dh-virtualenv_1.2.2-1_all.deb && apt-get install -yq /dh-virtualenv_1.2~dev-1_all.deb
WORKDIR /synapse/source
ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]

docker/Dockerfile-pgtests Normal file

@@ -0,0 +1,30 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:bionic
# The Sytest image doesn't come with python, so install that
RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
# We need tox to run the tests in run_pg_tests.sh
RUN python3 -m pip install tox
# Initialise the db
RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
# Add a user with our UID and GID so that files get created on the host owned
# by us, not root.
ARG UID
ARG GID
RUN groupadd --gid $GID user
RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
# Ensure we can start postgres by sudo-ing as the postgres user.
RUN apt-get update && apt-get -qq install -y sudo
RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
ADD run_pg_tests.sh /run_pg_tests.sh
# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
# so that we can `docker run` this container and pass arguments to pg_tests.sh
ENTRYPOINT ["/run_pg_tests.sh"]
USER user
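For reference, a sketch of how this image might be built and used (the image tag is arbitrary; the UID/GID build args and the `/src` mount follow the comments above and the expectations of `run_pg_tests.sh`):

```sh
# Build the image, passing our UID/GID so generated files are owned by us
docker build -f docker/Dockerfile-pgtests \
    --build-arg UID=$(id -u) --build-arg GID=$(id -g) \
    -t synapse-pgtests docker/
# Run the tests, mounting the Synapse checkout at /src
docker run --rm -v "$(pwd):/src" synapse-pgtests
```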


@@ -14,10 +14,10 @@ COPY ./docker/conf-workers/* /conf/
# Expose nginx listener port
EXPOSE 8080/tcp
# Volume for user-editable config files, logs etc.
VOLUME ["/data"]
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD /bin/sh /healthcheck.sh


@@ -10,10 +10,10 @@ Note that running Synapse's unit tests from within the docker image is not suppo
## Testing with SQLite and single-process Synapse
> Note that `scripts-dev/complement.sh` is a script that will automatically build
> and run an SQLite-based, single-process instance of Synapse against Complement.
The instructions below will set up Complement testing for a single-process,
SQLite-based Synapse deployment.
Start by building the base Synapse docker image. If you wish to run tests with the latest
@@ -26,22 +26,23 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
This will build an image with the tag `matrixdotorg/synapse`.
Next, build the Synapse image for Complement. Next, build the Synapse image for Complement. You will need a local checkout
of Complement. Change to the root of your Complement checkout and run:
```sh
docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
```
This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
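Per Complement's documentation, handing the image over looks roughly like the following, run from the Complement checkout (the exact test selector is up to you):

```sh
# Hand the image to Complement and run its test suite against it
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v ./tests/...
```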
## Testing with PostgreSQL and single or multi-process Synapse
The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
database backend.
As with the single-process image, build the base Synapse docker image. If you wish to run
@@ -54,7 +55,7 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
This will build an image with the tag `matrixdotorg/synapse`.
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Again, from the root of the repository:
```sh
@@ -63,20 +64,21 @@ docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
This will build an image with the tag `matrixdotorg/synapse-workers`.
It's worth noting at this point that this image is fully functional, and
can be used for testing locally. See instructions for using the container
under
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
below.
Finally, build the Synapse image for Complement, which is based on Finally, build the Synapse image for Complement, which is based on
`matrixdotorg/synapse-workers`. `matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
the root of your Complement checkout and run:
```sh
docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
```
This will build an image with the tag `complement-synapse-workers`, which can be handed to This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
@@ -89,10 +91,10 @@ bundling all necessary components together for a workerised homeserver instance.
This includes any desired Synapse worker processes, an nginx to route traffic accordingly,
a redis for worker communication and a supervisord instance to start up and monitor all
processes. You will need to provide your own postgres container to connect to, and TLS
is not handled by the container.
Once you've built the image using the above instructions, you can run it. Be sure
you've set up a volume according to the [usual Synapse docker instructions](README.md).
Then run something along the lines of:
@@ -110,7 +112,7 @@ docker run -d --name synapse \
matrixdotorg/synapse-workers
```
...substituting `POSTGRES*` variables for those that match a postgres host you have
available (usually a running postgres docker container).
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
@@ -128,11 +130,11 @@ Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no
(leaving only the main process). The container is configured to use redis-based worker
mode.
Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
after the timestamp.
Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:
00, according to the container's clock. Logging for the main process must still be
configured by modifying the homeserver's log config in your Synapse data volume.
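For example, to follow the output of a single worker (a worker name such as `synchrotron1` is an assumption based on the `worker_type` + instance-number naming used by the configuration script):

```sh
# Follow the container's combined logs, filtering for one worker's name
docker logs -f synapse 2>&1 | grep synchrotron1
```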


@@ -68,10 +68,6 @@ The following environment variables are supported in `generate` mode:
directories. If unset, and no user is set via `docker run --user`, defaults
to `991`, `991`.
## Postgres
By default the config will use SQLite. See the [docs on using Postgres](https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md) for more info on how to use Postgres. Until this section is improved [this issue](https://github.com/matrix-org/synapse/issues/8304) may provide useful information.
## Running synapse
Once you have a valid configuration file, you can start synapse as follows:


@@ -1,22 +0,0 @@
# A dockerfile which builds an image suitable for testing Synapse under
# complement.
ARG SYNAPSE_VERSION=latest
FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
ENV SERVER_NAME=localhost
COPY conf/* /conf/
# generate a signing key
RUN generate_signing_key -o /conf/server.signing.key
WORKDIR /data
EXPOSE 8008 8448
ENTRYPOINT ["/conf/start.sh"]
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD curl -fSs http://localhost:8008/health || exit 1


@@ -1 +0,0 @@
Stuff for building the docker image used for testing under complement.


@@ -1,73 +0,0 @@
# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse
# by including a built-in postgres instance, as well as setting up the homeserver so
# that it is ready for testing via Complement.
#
# Instructions for building this image from those it depends on is detailed in this guide:
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
FROM matrixdotorg/synapse-workers
# Download a caddy server to stand in front of nginx and terminate TLS using Complement's
# custom CA.
# We include this near the top of the file in order to cache the result.
RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/caddy_2.3.0_linux_amd64.tar.gz" && \
tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root
# Install postgresql
RUN apt-get update
RUN apt-get install -y postgresql
# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
\"ALTER USER postgres PASSWORD 'somesecret'; \
CREATE DATABASE synapse \
ENCODING 'UTF8' \
LC_COLLATE='C' \
LC_CTYPE='C' \
template=template0;\" | psql" && pg_ctlcluster 13 main stop
# Modify the shared homeserver config with postgres support, certificate setup
# and the disabling of rate-limiting
COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
WORKDIR /data
# Copy the caddy config
COPY conf-workers/caddy.complement.json /root/caddy.json
# Expose caddy's listener ports
EXPOSE 8008 8448
ENTRYPOINT \
# Replace the server name in the caddy config
sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json && \
# Start postgres
pg_ctlcluster 13 main start 2>&1 && \
# Start caddy
/root/caddy start --config /root/caddy.json 2>&1 && \
# Set the server name of the homeserver
SYNAPSE_SERVER_NAME=${SERVER_NAME} \
# No need to report stats here
SYNAPSE_REPORT_STATS=no \
# Set postgres authentication details which will be placed in the homeserver config file
POSTGRES_PASSWORD=somesecret POSTGRES_USER=postgres POSTGRES_HOST=localhost \
# Specify the workers to test with
SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
user_dir, \
media_repository, \
federation_inbound, \
federation_reader, \
federation_sender, \
synchrotron, \
appservice, \
pusher" \
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
/configure_workers_and_start.py
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh


@@ -1,72 +0,0 @@
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":8448"
],
"routes": [
{
"match": [
{
"host": [
"{{ server_name }}"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "localhost:8008"
}
]
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"tls": {
"automation": {
"policies": [
{
"subjects": [
"{{ server_name }}"
],
"issuers": [
{
"module": "internal"
}
],
"on_demand": true
}
]
}
},
"pki": {
"certificate_authorities": {
"local": {
"name": "Complement CA",
"root": {
"certificate": "/complement/ca/ca.crt",
"private_key": "/complement/ca/ca.key"
}
}
}
}
}
}


@@ -1,72 +0,0 @@
## Server ##
report_stats: False
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
## Federation ##
# trust certs signed by Complement's CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
federation_ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
federation_rr_transactions_per_room_per_second: 9999
## Experimental Features ##
experimental_features:
# Enable history backfilling support
msc2716_enabled: true
# Enable spaces support
spaces_enabled: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"


@@ -1,115 +0,0 @@
## Server ##
server_name: SERVER_NAME
log_config: /conf/log_config.yaml
report_stats: False
signing_key_path: /conf/server.signing.key
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
## Listeners ##
tls_certificate_path: /conf/server.tls.crt
tls_private_key_path: /conf/server.tls.key
bcrypt_rounds: 4
registration_shared_secret: complement
listeners:
- port: 8448
bind_addresses: ['::']
type: http
tls: true
resources:
- names: [federation]
- port: 8008
bind_addresses: ['::']
type: http
resources:
- names: [client]
## Database ##
database:
name: "sqlite3"
args:
# We avoid /data, as it is a volume and is not transferred when the container is committed,
# which is a fundamental necessity in complement.
database: "/conf/homeserver.db"
## Federation ##
# trust certs signed by the complement CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
federation_rr_transactions_per_room_per_second: 9999
## API Configuration ##
# A list of application service config files to use
#
app_service_config_files:
AS_REGISTRATION_FILES
## Experimental Features ##
experimental_features:
# Enable spaces support
spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join
msc3706_enabled: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"


@@ -1,24 +0,0 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
# log to stdout, for easier use with 'docker logs'
stream: 'ext://sys.stdout'
root:
level: INFO
handlers: [console]
disable_existing_loggers: false


@@ -1,30 +0,0 @@
#!/bin/sh
set -e
sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml
# Add the application service registration files to the homeserver.yaml config
for filename in /complement/appservice/*.yaml; do
[ -f "$filename" ] || break
as_id=$(basename "$filename" .yaml)
# Insert the path to the registration file and the AS_REGISTRATION_FILES marker after
# so we can add the next application service in the next iteration of this for loop
sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml
done
# Remove the AS_REGISTRATION_FILES entry
sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml
# generate an ssl key and cert for the server, signed by the complement CA
openssl genrsa -out /conf/server.tls.key 2048
openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}"
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt
exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@"


@@ -1,6 +0,0 @@
#!/bin/sh
# This healthcheck script is designed to return OK when every
# host involved returns OK
{%- for healthcheck_url in healthcheck_urls %}
curl -fSs {{ healthcheck_url }} || exit 1
{%- endfor %}


@@ -148,6 +148,14 @@ bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true
# The list of identity servers trusted to verify third party
# identifiers by this server.
#
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
trusted_third_party_id_servers:
- matrix.org
- vector.im
## Metrics ###


@@ -29,7 +29,6 @@
import os
import subprocess
import sys
from typing import Any, Dict, Set
import jinja2
import yaml
@@ -37,7 +36,7 @@ import yaml
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { WORKERS_CONFIG = {
"pusher": { "pusher": {
"app": "synapse.app.pusher", "app": "synapse.app.pusher",
"listener_resources": [], "listener_resources": [],
@@ -49,7 +48,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.user_dir", "app": "synapse.app.user_dir",
"listener_resources": ["client"], "listener_resources": ["client"],
"endpoint_patterns": [ "endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$" "^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$"
],
"shared_extra_conf": {"update_user_directory": False},
"worker_extra_conf": "",
@@ -86,10 +85,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker", "app": "synapse.app.generic_worker",
"listener_resources": ["client"], "listener_resources": ["client"],
"endpoint_patterns": [ "endpoint_patterns": [
"^/_matrix/client/(v2_alpha|r0|v3)/sync$", "^/_matrix/client/(v2_alpha|r0)/sync$",
"^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$", "^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
"^/_matrix/client/(api/v1|r0|v3)/initialSync$", "^/_matrix/client/(api/v1|r0)/initialSync$",
"^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$", "^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -147,11 +146,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker", "app": "synapse.app.generic_worker",
"listener_resources": ["client"], "listener_resources": ["client"],
"endpoint_patterns": [ "endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact", "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send", "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$", "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/", "^/_matrix/client/(api/v1|r0|unstable)/join/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/", "^/_matrix/client/(api/v1|r0|unstable)/profile/",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -159,7 +158,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"frontend_proxy": { "frontend_proxy": {
"app": "synapse.app.frontend_proxy", "app": "synapse.app.frontend_proxy",
"listener_resources": ["client", "replication"], "listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"], "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|unstable)/keys/upload"],
"shared_extra_conf": {}, "shared_extra_conf": {},
"worker_extra_conf": ( "worker_extra_conf": (
"worker_main_http_uri: http://127.0.0.1:%d" "worker_main_http_uri: http://127.0.0.1:%d"
@@ -308,7 +307,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
Args:
environ: _Environ[str]
config_path: The location of the generated Synapse main worker config file. config_path: Where to output the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
"""
@@ -321,8 +320,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# and adding a replication listener.
# First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result to the shared # another listener for replication. Later we'll write out the result.
# config file.
listeners = [
{
"port": 9093,
@@ -357,7 +355,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# worker_type: {1234, 1235, ...}}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams: Dict[str, Set[int]] = {} nginx_upstreams = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
# placed after the proxy_pass directive. The main benefit to representing this data as a
@@ -386,11 +384,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# A counter of worker_type -> int. Used for determining the name for a given
# worker type when generating its config file, as each worker's name is just
# worker_type + instance #
worker_type_counter: Dict[str, int] = {} worker_type_counter = {}
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
# For each worker type specified by the user, create config values
for worker_type in worker_types:
@@ -410,14 +404,12 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# e.g. federation_reader1
worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": str(worker_port), "config_path": config_path} {"name": worker_name, "port": worker_port, "config_path": config_path}
)
# Update the shared config with any worker-type specific options
shared_config.update(worker_config["shared_extra_conf"])
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
if worker_type_total_count > 1:
@@ -482,7 +474,6 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
@@ -519,13 +510,6 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
worker_config=supervisord_config,
)
# healthcheck config
convert(
"/conf/healthcheck.sh.j2",
"/healthcheck.sh",
healthcheck_urls=healthcheck_urls,
)
# Ensure the logging directory exists
log_dir = data_dir + "/logs"
if not os.path.exists(log_dir):

docker/run_pg_tests.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# This script runs the PostgreSQL tests inside a Docker container. It expects
# the relevant source files to be mounted into /src (done automatically by the
# caller script). It will set up the database, run it, and then use the tox
# configuration to run the tests.
set -e
# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres
# Start the database
sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start
# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=./.tox-pg-container -e py36-postgres "$@"


@@ -108,7 +108,7 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
# Hopefully we already have a signing key, but generate one if not.
args = [
sys.executable, "python",
"-m", "-m",
"synapse.app.homeserver", "synapse.app.homeserver",
"--config-path", "--config-path",
@@ -158,7 +158,7 @@ def run_generate_config(environ, ownership):
# generate the main config file, and a signing key.
args = [
sys.executable, "python",
"-m", "-m",
"synapse.app.homeserver", "synapse.app.homeserver",
"--server-name", "--server-name",
@@ -175,7 +175,7 @@ def run_generate_config(environ, ownership):
"--open-private-ports", "--open-private-ports",
] ]
# log("running %s" % (args, )) # log("running %s" % (args, ))
os.execv(sys.executable, args) os.execv("/usr/local/bin/python", args)
def main(args, environ):
@@ -254,12 +254,12 @@ running with 'migrate_config'. See the README for more details.
log("Starting synapse with args " + " ".join(args)) log("Starting synapse with args " + " ".join(args))
args = [sys.executable] + args args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execve("/usr/sbin/gosu", args, environ)
else:
os.execve(sys.executable, args, environ) os.execve("/usr/local/bin/python", args, environ)
if __name__ == "__main__":


@@ -0,0 +1,335 @@
# MSC1711 Certificates FAQ
## Historical Note
This document was originally written to guide server admins through the upgrade
path towards Synapse 1.0. Specifically,
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md)
required that all servers present valid TLS certificates on their federation
API. Admins were encouraged to achieve compliance from version 0.99.0 (released
in February 2019) ahead of version 1.0 (released June 2019) enforcing the
certificate checks.
Much of what follows is now outdated since most admins will have already
upgraded, however it may be of use to those with old installs returning to the
project.
If you are setting up a server from scratch you almost certainly should look at
the [installation guide](setup/installation.md) instead.
## Introduction
The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
supports the r0.1 release of the server to server specification, but is
compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well
as post-r0.1 behaviour, in order to allow for a smooth upgrade across the
federation.
The most important thing to know is that Synapse 1.0.0 will require a valid TLS
certificate on federation endpoints. Self signed certificates will not be
sufficient.
Synapse 0.99.0 makes it easy to configure TLS certificates and will
interoperate with both >= 1.0.0 servers as well as existing servers yet to
upgrade.
**It is critical that all admins upgrade to 0.99.0 and configure a valid TLS
certificate.** Admins will have 1 month to do so, after which 1.0.0 will be
released and those servers without a valid certificate will no longer be able
to federate with >= 1.0.0 servers.
Full details on how to carry out this configuration change are given
[below](#configuring-certificates-for-compatibility-with-synapse-100). A
timeline and some frequently asked questions are also given below.
For more details and context on the release of the r0.1 Server/Server API and
imminent Matrix 1.0 release, you can also see our
[main talk from FOSDEM 2019](https://matrix.org/blog/2019/02/04/matrix-at-fosdem-2019/).
## Contents
* Timeline
* Configuring certificates for compatibility with Synapse 1.0
* FAQ
* Synapse 0.99.0 has just been released, what do I need to do right now?
* How do I upgrade?
* What will happen if I do not set up a valid federation certificate
immediately?
* What will happen if I do nothing at all?
* When do I need a SRV record or .well-known URI?
* Can I still use an SRV record?
* I have created a .well-known URI. Do I still need an SRV record?
* It used to work just fine, why are you breaking everything?
* Can I manage my own certificates rather than having Synapse renew
certificates itself?
* Do you still recommend against using a reverse proxy on the federation port?
* Do I still need to give my TLS certificates to Synapse if I am using a
reverse proxy?
* Do I need the same certificate for the client and federation port?
* How do I tell Synapse to reload my keys/certificates after I replace them?
## Timeline
**5th Feb 2019 - Synapse 0.99.0 is released.**
All server admins are encouraged to upgrade.
0.99.0:
- provides support for ACME to make setting up Let's Encrypt certs easy, as
well as .well-known support.
- does not enforce that a valid CA cert is present on the federation API, but
rather makes it easy to set one up.
- provides support for .well-known
Admins should upgrade and configure a valid CA cert. Homeservers that require a
.well-known entry (see below), should retain their SRV record and use it
alongside their .well-known record.
**10th June 2019 - Synapse 1.0.0 is released**
1.0.0 is scheduled for release on 10th June. In
accordance with the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html)
1.0.0 will enforce certificate validity. This means that any homeserver without a
valid certificate after this point will no longer be able to federate with
1.0.0 servers.
## Configuring certificates for compatibility with Synapse 1.0.0
### If you do not currently have an SRV record
In this case, your `server_name` points to the host where your Synapse is
running. There is no need to create a `.well-known` URI or an SRV record, but
you will need to give Synapse a valid, signed, certificate.
### If you do have an SRV record currently
If you are using an SRV record, your matrix domain (`server_name`) may not
point to the same host that your Synapse is running on (the 'target
domain'). (If it does, you can follow the recommendation above; otherwise, read
on.)
Let's assume that your `server_name` is `example.com`, and your Synapse is
hosted at a target domain of `customer.example.net`. Currently you should have
an SRV record which looks like:
```
_matrix._tcp.example.com. IN SRV 10 5 8000 customer.example.net.
```
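You can confirm what is currently published for your domain with an SRV lookup, for example (assuming `dig` is available):

```sh
dig +short SRV _matrix._tcp.example.com
# 10 5 8000 customer.example.net.
```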
In this situation, you have three choices for how to proceed:
#### Option 1: give Synapse a certificate for your matrix domain
Synapse 1.0 will expect your server to present a TLS certificate for your
`server_name` (`example.com` in the above example). You can achieve this by acquiring a
certificate for the `server_name` yourself (for example, using `certbot`), and giving it
and the key to Synapse via `tls_certificate_path` and `tls_private_key_path`.
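As a sketch, acquiring such a certificate with certbot's standalone mode might look like this (ports 80/443 must be reachable; paths will vary):

```sh
certbot certonly --standalone -d example.com
# then point tls_certificate_path and tls_private_key_path at the files
# certbot writes under /etc/letsencrypt/live/example.com/
```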
#### Option 2: run Synapse behind a reverse proxy
If you have an existing reverse proxy set up with correct TLS certificates for
your domain, you can simply route all traffic through the reverse proxy by
updating the SRV record appropriately (or removing it, if the proxy listens on
8448).
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
reverse proxy.
#### Option 3: add a .well-known file to delegate your matrix traffic
This will allow you to keep Synapse on a separate domain, without having to
give it a certificate for the matrix domain.
You can do this with a `.well-known` file as follows:
1. Keep the SRV record in place - it is needed for backwards compatibility
with Synapse 0.34 and earlier.
2. Give Synapse a certificate corresponding to the target domain
(`customer.example.net` in the above example). You can do this by acquiring a
certificate for the target domain and giving it to Synapse via `tls_certificate_path`
and `tls_private_key_path`.
3. Restart Synapse to ensure the new certificate is loaded.
4. Arrange for a `.well-known` file at
`https://<server_name>/.well-known/matrix/server` with contents:
```json
{"m.server": "<target server name>"}
```
where the target server name is resolved as usual (i.e. SRV lookup, falling
back to talking to port 8448).
In the above example, where synapse is listening on port 8000,
`https://example.com/.well-known/matrix/server` should have `m.server` set to one of:
1. `customer.example.net` ─ with an SRV record on
`_matrix._tcp.customer.example.net` pointing to port 8000, or:
2. `customer.example.net` ─ updating synapse to listen on the default port
8448, or:
3. `customer.example.net:8000` ─ ensuring that if there is a reverse proxy
on `customer.example.net:8000` it correctly handles HTTP requests with
Host header set to `customer.example.net:8000`.
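You can sanity-check the delegation with a plain HTTPS request; for the example above it should return the target:

```sh
curl https://example.com/.well-known/matrix/server
# {"m.server": "customer.example.net:8000"}
```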
## FAQ
### Synapse 0.99.0 has just been released, what do I need to do right now?
Upgrade as soon as you can in preparation for Synapse 1.0.0, and update your
TLS certificates as [above](#configuring-certificates-for-compatibility-with-synapse-100).
### What will happen if I do not set up a valid federation certificate immediately?
Nothing initially, but once 1.0.0 is in the wild it will not be possible to
federate with 1.0.0 servers.
### What will happen if I do nothing at all?
If the admin takes no action at all, and remains on a Synapse < 0.99.0 then the
homeserver will be unable to federate with those who have implemented
.well-known. Then, as above, once the month upgrade window has expired the
homeserver will not be able to federate with any Synapse >= 1.0.0.
### When do I need a SRV record or .well-known URI?
If your homeserver listens on the default federation port (8448), and your
`server_name` points to the host that your homeserver runs on, you do not need an
SRV record or `.well-known/matrix/server` URI.
For instance, if you registered `example.com` and pointed its DNS A record at a
fresh Upcloud VPS or similar, you could install Synapse 0.99 on that host,
giving it a server_name of `example.com`, and it would automatically generate a
valid TLS certificate for you via Let's Encrypt and no SRV record or
`.well-known` URI would be needed.
This is the common case, although you can add an SRV record or
`.well-known/matrix/server` URI for completeness if you wish.
**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it.
In this case, you should see ["If you do have an SRV record
currently"](#if-you-do-have-an-srv-record-currently) above.
### Can I still use an SRV record?
Firstly, if you didn't need an SRV record before (because your server is
listening on port 8448 of your server_name), you certainly don't need one now:
the defaults are still the same.
If you previously had an SRV record, you can keep using it provided you are
able to give Synapse a TLS certificate corresponding to your server name. For
example, suppose you had the following SRV record, which directs matrix traffic
for example.com to matrix.example.com:443:
```
_matrix._tcp.example.com. IN SRV 10 5 443 matrix.example.com
```
In this case, Synapse must be given a certificate for example.com - or be
configured to acquire one from Let's Encrypt.
If you are unable to give Synapse a certificate for your server_name, you will
also need to use a .well-known URI instead. However, see also "I have created a
.well-known URI. Do I still need an SRV record?".
### I have created a .well-known URI. Do I still need an SRV record?
As of Synapse 0.99, Synapse will first check for the existence of a `.well-known`
URI and follow any delegation it suggests. It will only then check for the
existence of an SRV record.
That means that the SRV record will often be redundant. However, you should
remember that there may still be older versions of Synapse in the federation
which do not understand `.well-known` URIs, so if you removed your SRV record you
would no longer be able to federate with them.
It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
earlier will follow the SRV record (and not care about the invalid
certificate). Synapse 0.99 and later will follow the .well-known URI, with the
correct certificate chain.
### It used to work just fine, why are you breaking everything?
We have always wanted Matrix servers to be as easy to set up as possible, and
so back when we started federation in 2014 we didn't want admins to have to go
through the cumbersome process of buying a valid TLS certificate to run a
server. This was before Let's Encrypt came along and made getting a free and
valid TLS certificate straightforward. So instead, we adopted a system based on
[Perspectives](https://en.wikipedia.org/wiki/Convergence_(SSL)): an approach
where you check a set of "notary servers" (in practice, homeservers) to vouch
for the validity of a certificate rather than having it signed by a CA. As long
as enough different notaries agree on the certificate's validity, then it is
trusted.
However, in practice this has never worked properly. Most people only use the
default notary server (matrix.org), leading to inadvertent centralisation which
we want to eliminate. Meanwhile, we never implemented the full consensus
algorithm to query the servers participating in a room to determine consensus
on whether a given certificate is valid. This is fiddly to get right
(especially in face of sybil attacks), and we found ourselves questioning
whether it was worth the effort to finish the work and commit to maintaining a
secure certificate validation system as opposed to focusing on core Matrix
development.
Meanwhile, Let's Encrypt came along in 2016, and put the final nail in the
coffin of the Perspectives project (which was already pretty dead). So, the
Spec Core Team decided that a better approach would be to mandate valid TLS
certificates for federation alongside the rest of the Web. More details can be
found in
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).
This results in a breaking change, which is disruptive, but absolutely critical
for the security model. However, the existence of Let's Encrypt as a trivial
way to replace the old self-signed certificates with valid CA-signed ones helps
smooth things over massively, especially as Synapse can now automate Let's
Encrypt certificate generation if needed.
### Can I manage my own certificates rather than having Synapse renew certificates itself?
Yes, you are welcome to manage your certificates yourself. Synapse will only
attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS cert present for
federation endpoints.
### Do you still recommend against using a reverse proxy on the federation port?
We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
reverse proxy.
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
Practically speaking, this is no longer necessary.
If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True`. In that case, the only reason Synapse needs the certificate is
to populate a legacy 'tls_fingerprints' field in the federation API. This is
ignored by Synapse 0.99.0 and later, and the only time pre-0.99 Synapses will
check it is when attempting to fetch the server keys - and generally this is
delegated via `matrix.org`, which is on 0.99.0.
However, there is a bug in Synapse 0.99.0
[4554](<https://github.com/matrix-org/synapse/issues/4554>) which prevents
Synapse from starting if you do not give it a TLS certificate. To work around
this, you can give it any TLS certificate at all. This will be fixed soon.
### Do I need the same certificate for the client and federation port?
No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.
### How do I tell Synapse to reload my keys/certificates after I replace them?
Synapse will reload the keys and certificates when it receives a SIGHUP - for
example `kill -HUP $(cat homeserver.pid)`. Alternatively, simply restart
Synapse, though this will result in downtime while it restarts.
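For instance, under systemd (the unit name is an assumption; adjust to your installation):

```sh
systemctl kill --signal=SIGHUP matrix-synapse
```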


@@ -50,10 +50,8 @@ build the documentation with:
mdbook build
```
The rendered contents will be outputted to a new `book/` directory at the root of the repository. Please note that The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can
index.html is not built by default, it is created by copying over the file `welcome_and_overview.html` to `index.html` browse the book by opening `book/index.html` in a web browser.
during deployment. Thus, when running `mdbook serve` locally the book will initially show a 404 in place of the index
due to the above. Do not be alarmed!
You can also have mdbook host the docs on a local webserver with hot-reload functionality via:
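```sh
# the standard mdbook hot-reload subcommand (also referenced above)
mdbook serve
```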


@@ -13,6 +13,7 @@
# Upgrading
- [Upgrading between Synapse Versions](upgrade.md)
- [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)
# Usage
- [Federation](federate.md)
@@ -29,7 +30,6 @@
- [SSO Mapping Providers](sso_mapping_providers.md)
- [Password Auth Providers](password_auth_providers.md)
- [JSON Web Tokens](jwt.md)
- [Refresh Tokens](usage/configuration/user_authentication/refresh_tokens.md)
- [Registration Captcha](CAPTCHA_SETUP.md)
- [Application Services](application_services.md)
- [Server Notices](server_notices.md)
@@ -44,8 +44,6 @@
- [Presence router callbacks](modules/presence_router_callbacks.md)
- [Account validity callbacks](modules/account_validity_callbacks.md)
- [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
- [Background update controller callbacks](modules/background_update_controller_callbacks.md)
- [Account data callbacks](modules/account_data_callbacks.md)
- [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
- [Workers](workers.md)
- [Using `synctl` with Workers](synctl_workers.md)
@@ -66,24 +64,16 @@
- [Statistics](admin_api/statistics.md)
- [Users](admin_api/user_admin_api.md)
- [Server Version](admin_api/version_api.md)
- [Federation](usage/administration/admin_api/federation.md)
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)
- [State Groups](usage/administration/state_groups.md)
- [Request log format](usage/administration/request_log.md)
- [Admin FAQ](usage/administration/admin_faq.md)
- [Scripts]()
# Development
- [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md)
- [Release Cycle](development/releases.md)
- [Git Usage](development/git.md)
- [Testing]()
- [Demo scripts](development/demo.md)
- [OpenTracing](opentracing.md)
- [Database Schemas](development/database_schema.md)
- [Experimental features](development/experimental_features.md)
@@ -104,4 +94,3 @@
# Other
- [Dependency Deprecation Policy](deprecation_policy.md)
- [Running Synapse on a Single-Board Computer](other/running_synapse_on_single_board_computers.md)


@@ -4,9 +4,6 @@ This API allows a server administrator to manage the validity of an account. To
use it, you must enable the account validity feature (under
`account_validity`) in Synapse's configuration.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
## Renew account
This API extends the validity of an account by as much time as configured in the


@@ -4,11 +4,11 @@ This API lets a server admin delete a local group. Doing so will kick all
users out of the group so that their clients will correctly handle the group
being deleted.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
POST /_synapse/admin/v1/delete_group/<group_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
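A sketch of an invocation with curl (server address, token and group ID are placeholders):

```sh
curl -X POST --header "Authorization: Bearer <access_token>" \
    "http://localhost:8008/_synapse/admin/v1/delete_group/<group_id>"
```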


@@ -2,13 +2,12 @@
This API returns information about reported events.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
GET /_synapse/admin/v1/event_reports?from=0&limit=10
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
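For example, with curl (server address and token are placeholders):

```sh
curl --header "Authorization: Bearer <access_token>" \
    "http://localhost:8008/_synapse/admin/v1/event_reports?from=0&limit=10"
```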
It returns a JSON body like the following: It returns a JSON body like the following:
@@ -95,6 +94,8 @@ The api is:
```
GET /_synapse/admin/v1/event_reports/<report_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
It returns a JSON body like the following:


@@ -1,13 +1,24 @@
# Contents
- [Querying media](#querying-media)
* [List all media in a room](#list-all-media-in-a-room)
* [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
- [Quarantine media](#quarantine-media)
* [Quarantining media by ID](#quarantining-media-by-id)
* [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id)
* [Quarantining media in a room](#quarantining-media-in-a-room)
* [Quarantining all media of a user](#quarantining-all-media-of-a-user)
* [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
* [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined)
- [Delete local media](#delete-local-media)
* [Delete a specific local media](#delete-a-specific-local-media)
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
* [Delete media uploaded by a user](#delete-media-uploaded-by-a-user)
- [Purge Remote Media API](#purge-remote-media-api)
# Querying media
These APIs allow extracting media information from the homeserver.
Details about the format of the `media_id` and storage of the media in the file system
are documented under [media repository](../media_repository.md).
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
## List all media in a room
This API gets a list of known media in a room.
@@ -17,6 +28,8 @@ The API is:
```
GET /_synapse/admin/v1/room/<room_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
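As an illustrative sketch (the room ID, server address, and token are placeholders):

```sh
# List all known media in a single room.
curl --header "Authorization: Bearer <admin_access_token>" \
    'http://localhost:8008/_synapse/admin/v1/room/!roomid:example.com/media'
```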
The API returns a JSON body like the following:
```json
@@ -304,5 +317,8 @@ The following fields are returned in the JSON response body:
* `deleted`: integer - The number of media items successfully deleted
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
If the user re-requests purged remote media, synapse will re-request the media
from the originating server.

View File

@@ -10,15 +10,15 @@ paginate further back in the room from the point being purged from.
Note that Synapse requires at least one message in each room, so it will never
delete the last message in a room.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
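As a hedged example (the room ID, event ID, and token are placeholders; `delete_local_events` is the optional body parameter described next):

```sh
# Purge history up to a given event, keeping events sent by local users.
curl --header "Authorization: Bearer <admin_access_token>" \
    --request POST \
    --data '{"delete_local_events": false}' \
    'http://localhost:8008/_synapse/admin/v1/purge_history/!roomid:example.com/$eventid'
```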
By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
deleted.)
@@ -57,6 +57,9 @@ It is possible to poll for updates on recent purges with a second API;
GET /_synapse/admin/v1/purge_history_status/<purge_id>
```
Again, you will need to authenticate by providing an `access_token` for a
server admin.
This API returns a JSON body like the following:
```json

View File

@@ -5,9 +5,6 @@ to a room with a given `room_id_or_alias`. You can only modify the membership of
local users. The server administrator must be in the room and have permission to
invite users.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
## Parameters
The following parameters are available:
@@ -26,6 +23,9 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../usage/administration/admin_api).
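A minimal sketch of the request (all values are placeholders):

```sh
# Force-join a local user into a room the admin can invite to.
curl --header "Authorization: Bearer <admin_access_token>" \
    --request POST \
    --data '{"user_id": "@user:example.com"}' \
    'http://localhost:8008/_synapse/admin/v1/join/!roomid:example.com'
```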
Response:
```json

View File

@@ -1,12 +1,23 @@
# Contents
- [List Room API](#list-room-api)
- [Room Details API](#room-details-api)
- [Room Members API](#room-members-api)
- [Room State API](#room-state-api)
- [Delete Room API](#delete-room-api)
* [Version 1 (old version)](#version-1-old-version)
* [Version 2 (new version)](#version-2-new-version)
* [Status of deleting rooms](#status-of-deleting-rooms)
* [Undoing room shutdowns](#undoing-room-shutdowns)
- [Make Room Admin API](#make-room-admin-api)
- [Forward Extremities Admin API](#forward-extremities-admin-api)
- [Event Context API](#event-context-api)
# List Room API
The List Room admin API allows server admins to get a list of rooms on their
server. There are various parameters available that allow for filtering and
sorting the returned list. This API supports pagination.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
**Parameters**
The following query parameters are available:
@@ -375,83 +386,6 @@ A response body like the following is returned:
}
```
# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.
This API can be used to pre-emptively block a room, even if it's unknown to this
homeserver. Users will be prevented from joining a blocked room.
## Block or unblock a room
The API is:
```
PUT /_synapse/admin/v1/rooms/<room_id>/block
```
with a body of:
```json
{
"block": true
}
```
A response body like the following is returned:
```json
{
"block": true
}
```
**Parameters**
The following parameters should be set in the URL:
- `room_id` - The ID of the room.
The following JSON body parameters are available:
- `block` - If `true` the room will be blocked and if `false` the room will be unblocked.
**Response**
The following fields are possible in the JSON response body:
- `block` - A boolean. `true` if the room is blocked, otherwise `false`
## Get block status
The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/block
```
A response body like the following is returned:
```json
{
"block": true,
"user_id": "<user_id>"
}
```
**Parameters**
The following parameters should be set in the URL:
- `room_id` - The ID of the room.
**Response**
The following fields are possible in the JSON response body:
- `block` - A boolean. `true` if the room is blocked, otherwise `false`
- `user_id` - An optional string. If the room is blocked (`block` is `true`), shows
the user who added the room to the blocking list. Otherwise it is not displayed.
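Putting the two endpoints together, a block-then-verify round trip might look like this (all values are placeholders):

```sh
# Block the room, then read back its block status.
curl --header "Authorization: Bearer <admin_access_token>" \
    --request PUT \
    --data '{"block": true}' \
    'http://localhost:8008/_synapse/admin/v1/rooms/!roomid:example.com/block'

curl --header "Authorization: Bearer <admin_access_token>" \
    'http://localhost:8008/_synapse/admin/v1/rooms/!roomid:example.com/block'
```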
# Delete Room API
The Delete Room admin API allows server admins to remove rooms from the server
@@ -481,6 +415,9 @@ several minutes or longer.
The local server will only have the power to move local user and room aliases to
the new room. Users on other servers will be unaffected.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see [Admin API](../usage/administration/admin_api).
## Version 1 (old version)
This version works synchronously. That means you only get the response once the server has

View File

@@ -3,15 +3,15 @@
Returns information about all local media usage of users. Gives the
possibility to filter them by time and user.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
GET /_synapse/admin/v1/statistics/users/media
```
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
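For instance (the server address and token are placeholders):

```sh
# Fetch media usage statistics for all local users.
curl --header "Authorization: Bearer <admin_access_token>" \
    "http://localhost:8008/_synapse/admin/v1/statistics/users/media"
```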
A response body like the following is returned:
```json

View File

@@ -1,8 +1,5 @@
# User Admin API
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
## Query User Account
This API returns information about a specific user account.
@@ -13,12 +10,14 @@ The api is:
GET /_synapse/admin/v2/users/<user_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
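For example (the user ID, server address, and token are placeholders):

```sh
# Query the account details of a single local user.
curl --header "Authorization: Bearer <admin_access_token>" \
    "http://localhost:8008/_synapse/admin/v2/users/@user:example.com"
```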
It returns a JSON body like the following:
```jsonc ```json
{
"name": "@user:example.com", "displayname": "User",
"displayname": "User", // can be null if not set
"threepids": [ "threepids": [
{ {
"medium": "email", "medium": "email",
@@ -33,11 +32,11 @@ It returns a JSON body like the following:
"validated_at": 1586458409743 "validated_at": 1586458409743
} }
], ],
"avatar_url": "<avatar_url>", // can be null if not set "avatar_url": "<avatar_url>",
"is_guest": 0,
"admin": 0, "admin": 0,
"deactivated": 0, "deactivated": 0,
"shadow_banned": 0, "shadow_banned": 0,
"password_hash": "$2b$12$p9B4GkqYdRTPGD",
"creation_ts": 1560432506, "creation_ts": 1560432506,
"appservice_id": null, "appservice_id": null,
"consent_server_notice_sent": null, "consent_server_notice_sent": null,
@@ -104,6 +103,9 @@ with a body of:
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
Returns HTTP status code:
- `201` - When a new user object was created.
- `200` - When a user was modified.
@@ -126,8 +128,7 @@ Body parameters:
[Sample Configuration File](../usage/configuration/homeserver_sample_config.html)
section `sso` and `oidc_providers`.
- `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
in the homeserver configuration. Note that no error is raised if the provided in homeserver configuration.
value is not in the homeserver configuration.
- `external_id` - string, user ID in the external identity provider.
- `avatar_url` - string, optional, must be a
[MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
@@ -154,6 +155,9 @@ By default, the response is ordered by ascending user ID.
GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -273,6 +277,9 @@ GET /_matrix/client/r0/admin/whois/<userId>
See also: [Client Server
API Whois](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
It returns a JSON body like the following:
```json
@@ -327,12 +334,15 @@ with a body of:
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
The erase parameter is optional and defaults to `false`.
An empty body may be passed for backwards compatibility.
The following actions are performed when deactivating a user:
- Try to unbind 3PIDs from the identity server - Try to unpind 3PIDs from the identity server
- Remove all 3PIDs from the homeserver
- Delete all devices and E2EE keys
- Delete all access tokens
@@ -342,11 +352,6 @@ The following actions are performed when deactivating an user:
- Remove the user from the user directory
- Reject all pending invites
- Remove all account validity information related to the user
- Remove the arbitrary data store known as *account data*. For example, this includes:
- list of ignored users;
- push rules;
- secret storage keys; and
- cross-signing keys.
The following additional actions are performed during deactivation if `erase`
is set to `true`:
@@ -360,6 +365,7 @@ The following actions are **NOT** performed. The list may be incomplete.
- Remove mappings of SSO IDs
- [Delete media uploaded](#delete-media-uploaded-by-a-user) by user (including avatar images)
- Delete sent and received messages
- Delete E2E cross-signing keys
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
@@ -383,6 +389,9 @@ with a body of:
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
The parameter `new_password` is required.
The parameter `logout_devices` is optional and defaults to `true`.
@@ -395,6 +404,9 @@ The api is:
GET /_synapse/admin/v1/users/<user_id>/admin
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -422,6 +434,10 @@ with a body of:
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
## List room memberships of a user
Gets a list of all `room_id` that a specific `user_id` is a member of.
@@ -432,6 +448,9 @@ The API is:
GET /_synapse/admin/v1/users/<user_id>/joined_rooms
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -461,90 +480,10 @@ The following fields are returned in the JSON response body:
- `joined_rooms` - An array of `room_id`.
- `total` - Number of rooms.
## Account Data
Gets information about account data for a specific `user_id`.
The API is:
```
GET /_synapse/admin/v1/users/<user_id>/accountdata
```
A response body like the following is returned:
```json
{
"account_data": {
"global": {
"m.secret_storage.key.LmIGHTg5W": {
"algorithm": "m.secret_storage.v1.aes-hmac-sha2",
"iv": "fwjNZatxg==",
"mac": "eWh9kNnLWZUNOgnc="
},
"im.vector.hide_profile": {
"hide_profile": true
},
"org.matrix.preview_urls": {
"disable": false
},
"im.vector.riot.breadcrumb_rooms": {
"rooms": [
"!LxcBDAsDUVAfJDEo:matrix.org",
"!MAhRxqasbItjOqxu:matrix.org"
]
},
"m.accepted_terms": {
"accepted": [
"https://example.org/somewhere/privacy-1.2-en.html",
"https://example.org/somewhere/terms-2.0-en.html"
]
},
"im.vector.setting.breadcrumbs": {
"recent_rooms": [
"!MAhRxqasbItqxuEt:matrix.org",
"!ZtSaPCawyWtxiImy:matrix.org"
]
}
},
"rooms": {
"!GUdfZSHUJibpiVqHYd:matrix.org": {
"m.fully_read": {
"event_id": "$156334540fYIhZ:matrix.org"
}
},
"!tOZwOOiqwCYQkLhV:matrix.org": {
"m.fully_read": {
"event_id": "$xjsIyp4_NaVl2yPvIZs_k1Jl8tsC_Sp23wjqXPno"
}
}
}
}
}
```
**Parameters**
The following parameters should be set in the URL:
- `user_id` - fully qualified: for example, `@user:server.com`.
**Response**
The following fields are returned in the JSON response body:
- `account_data` - A map containing the account data for the user
- `global` - A map containing the global account data for the user
- `rooms` - A map containing the account data per room for the user
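As a short usage sketch (the user ID, server address, and token are placeholders):

```sh
# Dump the global and per-room account data of a user.
curl --header "Authorization: Bearer <admin_access_token>" \
    "http://localhost:8008/_synapse/admin/v1/users/@user:example.com/accountdata"
```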
## User media
### List media uploaded by a user
Gets a list of all local media that a specific `user_id` has created.
These are media that the user has uploaded themselves
([local media](../media_repository.md#local-media)), as well as
[URL preview images](../media_repository.md#url-previews) requested by the user if the
[feature is enabled](../development/url_previews.md).
By default, the response is ordered by descending creation date and ascending media ID.
The newest media is on top. You can change the order with parameters
`order_by` and `dir`.
@@ -555,6 +494,9 @@ The API is:
GET /_synapse/admin/v1/users/<user_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -641,9 +583,7 @@ The following fields are returned in the JSON response body:
Media objects contain the following fields:
- `created_ts` - integer - Timestamp when the content was uploaded in ms.
- `last_access_ts` - integer - Timestamp when the content was last accessed in ms.
- `media_id` - string - The id used to refer to the media. Details about the format - `media_id` - string - The id used to refer to the media.
are documented under
[media repository](../media_repository.md).
- `media_length` - integer - Length of the media in bytes.
- `media_type` - string - The MIME-type of the media.
- `quarantined_by` - string - The user ID that initiated the quarantine request
@@ -671,6 +611,9 @@ The API is:
DELETE /_synapse/admin/v1/users/<user_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -743,6 +686,9 @@ The API is:
GET /_synapse/admin/v2/users/<user_id>/devices
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -808,6 +754,9 @@ POST /_synapse/admin/v2/users/<user_id>/delete_devices
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
An empty JSON dict is returned.
**Parameters**
@@ -829,6 +778,9 @@ The API is:
GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -874,6 +826,9 @@ PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
An empty JSON dict is returned.
**Parameters**
@@ -900,6 +855,9 @@ DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
{}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
An empty JSON dict is returned.
**Parameters**
@@ -918,6 +876,9 @@ The API is:
GET /_synapse/admin/v1/users/<user_id>/pushers
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -987,7 +948,7 @@ The following fields are returned in the JSON response body:
See also the
[Client-Server API Spec on pushers](https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers).
## Controlling whether a user is shadow-banned ## Shadow-banning users
Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
A shadow-banned user receives successful responses to their client-server API requests,
@@ -1000,19 +961,16 @@ or broken behaviour for the client. A shadow-banned user will not receive any
notification and it is generally more appropriate to ban or kick abusive users.
A shadow-banned user will be unable to contact anyone on the server.
To shadow-ban a user the API is: The API is:
```
POST /_synapse/admin/v1/users/<user_id>/shadow_ban
```
To un-shadow-ban a user the API is: To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
``` An empty JSON dict is returned.
DELETE /_synapse/admin/v1/users/<user_id>/shadow_ban
```
An empty JSON dict is returned in both cases.
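A sketch of both calls (the user ID, server address, and token are placeholders):

```sh
# Shadow-ban a user, then lift the shadow-ban again.
curl --header "Authorization: Bearer <admin_access_token>" \
    --request POST \
    "http://localhost:8008/_synapse/admin/v1/users/@user:example.com/shadow_ban"

curl --header "Authorization: Bearer <admin_access_token>" \
    --request DELETE \
    "http://localhost:8008/_synapse/admin/v1/users/@user:example.com/shadow_ban"
```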
**Parameters**
@@ -1034,6 +992,9 @@ The API is:
GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -1073,6 +1034,9 @@ The API is:
POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
A response body like the following is returned:
```json
@@ -1115,6 +1079,9 @@ The API is:
DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
An empty JSON dict is returned.
```json
@@ -1143,5 +1110,7 @@ The API is:
GET /_synapse/admin/v1/username_available?username=$localpart
```
The request and response format is the same as the The request and response format is the same as the [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)
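For example (the server address and token are placeholders):

```sh
# Check whether the localpart "foo" is still available for registration.
curl --header "Authorization: Bearer <admin_access_token>" \
    "http://localhost:8008/_synapse/admin/v1/username_available?username=foo"
```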

View File

@@ -16,6 +16,6 @@ It returns a JSON body like the following:
```json
{
"server_version": "0.99.2rc1 (b=develop, abcdef123)",
"python_version": "3.7.8" "python_version": "3.6.8"
}
```

View File

@@ -7,7 +7,7 @@
## Server to Server Stack
To use the server to server stack, homeservers should only need to To use the server to server stack, home servers should only need to
interact with the Messaging layer.
The server to server side of things is designed into 4 distinct layers:
@@ -23,7 +23,7 @@ Server with a domain specific API.
1. **Messaging Layer**
This is what the rest of the homeserver hits to send messages, join rooms, This is what the rest of the Home Server hits to send messages, join rooms,
etc. It also allows you to register callbacks for when it gets notified by
lower levels that e.g. a new message has been received.
@@ -45,7 +45,7 @@ Server with a domain specific API.
For incoming PDUs, it has to check the PDUs it references to see
if we have missed any. If we have, go and ask someone (another
homeserver) for it. home server) for it.
3. **Transaction Layer**

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1 +0,0 @@
This directory contains changelogs for previous years.

View File

@@ -24,7 +24,7 @@ pip install -e ".[lint,mypy]"
functionality) with:
```sh
black . black . --exclude="\.tox|build|env"
```
- **flake8**
@@ -35,7 +35,7 @@ pip install -e ".[lint,mypy]"
Check all application and test code with:
```sh
flake8 . flake8 synapse tests
```
- **isort**
@@ -46,9 +46,11 @@ pip install -e ".[lint,mypy]"
Auto-fix imports with:
```sh
isort . isort -rc synapse tests
```
`-rc` means to recursively search the given directories.
It's worth noting that modern IDEs and text editors can run these tools
automatically on save. It may be worth looking into whether this
functionality is supported in your editor for a more convenient
@@ -170,6 +172,6 @@ frobber:
```
Note that the sample configuration is generated from the synapse code
and is maintained by a script, `scripts-dev/generate_sample_config.sh`. and is maintained by a script, `scripts-dev/generate_sample_config`.
Making sure that the output from this script matches the desired format
is left as an exercise for the reader!

View File

@@ -14,8 +14,8 @@ i.e. when a version reaches End of Life Synapse will withdraw support for that
version in future releases.
Details on the upstream support life cycles for Python and PostgreSQL are
documented at [https://endoflife.date/python](https://endoflife.date/python) and documented at https://endoflife.date/python and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql). https://endoflife.date/postgresql.
Context

View File

@@ -20,9 +20,7 @@ recommended for development. More information about WSL can be found at
<https://docs.microsoft.com/en-us/windows/wsl/install>. Running Synapse natively
on Windows is not officially supported.
The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://www.python.org/downloads/). Your Python also needs support for [virtual environments](https://docs.python.org/3/library/venv.html). This is usually built-in, but some Linux distributions like Debian and Ubuntu split it out into its own package. Running `sudo apt install python3-venv` should be enough. The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).
Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
@@ -55,7 +53,6 @@ setup a *virtualenv*, as follows:
cd path/where/you/have/cloned/the/repository
python3 -m venv ./env
source ./env/bin/activate
pip install wheel
pip install -e ".[all,dev]"
pip install tox
```
@@ -117,7 +114,7 @@ The linters look at your code and do two things:
- ensure that your code follows the coding style adopted by the project;
- catch a number of errors in your code.
The linter takes no time at all to run as soon as you've [downloaded the dependencies into your python virtual environment](#4-install-the-dependencies). They're pretty fast, don't hesitate!
```sh
source ./env/bin/activate
@@ -172,27 +169,6 @@ To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:
SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
```
By default, tests will use an in-memory SQLite database for test data. For additional
help with debugging, one can use an on-disk SQLite database file instead, in order to
review database state during and after running tests. This can be done by setting
the `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable. Doing so will cause the
database state to be stored in a file named `test.db` under the trial process'
working directory. Typically, this ends up being `_trial_temp/test.db`. For example:
```sh
SYNAPSE_TEST_PERSIST_SQLITE_DB=1 trial tests
```
The database file can then be inspected with:
```sh
sqlite3 _trial_temp/test.db
```
Note that the database file is cleared at the beginning of each test run. Thus it
will always only contain the data generated by the *last run test*. Though generally
when debugging, one is only running a single test anyway.
### Running tests under PostgreSQL
Invoking `trial` as above will use an in-memory SQLite database. This is great for
@@ -206,10 +182,9 @@ To do so, [configure Postgres](../postgres.md) and run `trial` with the
following environment variables matching your configuration:
- `SYNAPSE_POSTGRES` to anything nonempty
- `SYNAPSE_POSTGRES_HOST` (optional if it's the default: UNIX socket) - `SYNAPSE_POSTGRES_HOST`
- `SYNAPSE_POSTGRES_PORT` (optional if it's the default: 5432) - `SYNAPSE_POSTGRES_USER`
- `SYNAPSE_POSTGRES_USER` (optional if using a UNIX socket) - `SYNAPSE_POSTGRES_PASSWORD`
- `SYNAPSE_POSTGRES_PASSWORD` (optional if using a UNIX socket)
For example:
@@ -221,12 +196,26 @@ export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
trial
```
You don't need to specify the host, user, port or password if your Postgres #### Prebuilt container
server is set to authenticate you over the UNIX socket (i.e. if the `psql` command
works without further arguments).
Your Postgres account needs to be able to create databases. Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
Docker container to set up PostgreSQL and run our tests for us. To do so, run
```shell
scripts-dev/test_postgresql.sh
```
Any extra arguments to the script will be passed to `tox` and then to `trial`,
so we can run a specific test in this container with e.g.
```shell
scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
```
The container creates a folder in your Synapse checkout called
`.tox-pg-container` and uses this as a tox environment. The output of any
`trial` runs goes into `_trial_temp` in your synapse source directory — the same
as running `trial` directly on your host machine.
## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
@@ -241,14 +230,8 @@ configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
This configuration should generally cover your needs. This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
- To run with Postgres, supply the `-e POSTGRES=1 -e MULTI_POSTGRES=1` environment flags.
- To run with Synapse in worker mode, supply the `-e WORKERS=1 -e REDIS=1` environment flags (in addition to the Postgres flags).
For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
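Combining those flags, a hypothetical invocation (the paths are illustrative) could look like:

```sh
# Run SyTest against a workerised Synapse with Postgres and Redis.
docker run --rm -it \
    -e POSTGRES=1 -e MULTI_POSTGRES=1 -e WORKERS=1 -e REDIS=1 \
    -v "$(realpath .)":/src:ro \
    -v "$(realpath ../sytest-logs)":/logs \
    matrixdotorg/sytest-synapse:buster
```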
## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).
@@ -451,17 +434,6 @@ Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
### Private Sign off
If you would like to provide your legal name privately to the Matrix.org
Foundation (instead of in a public commit or comment), you can do so
by emailing your legal name and a link to the pull request to
[dco@matrix.org](mailto:dco@matrix.org?subject=Private%20sign%20off).
It helps to include "sign off" or similar in the subject line. You will then
be instructed further.
Once private sign off is complete, doing so for future contributions will not
be required.
# 10. Turn feedback into better code.

View File

@@ -96,60 +96,6 @@ Ensure postgres is installed, then run:
NB at the time of writing, this script predates the split into separate `state`/`main`
databases so will require updates to handle that correctly.
## Delta files
Delta files define the steps required to upgrade the database from an earlier version.
They can be written as either a file containing a series of SQL statements, or a Python
module.
Synapse remembers which delta files it has applied to a database (they are stored in the
`applied_schema_deltas` table) and will not re-apply them (even if a given file is
subsequently updated).
Delta files should be placed in a directory named `synapse/storage/schema/<database>/delta/<version>/`.
They are applied in alphanumeric order, so by convention the first two characters
of the filename should be an integer such as `01`, to put the file in the right order.
### SQL delta files
These should be named `*.sql`, or — for changes which should only be applied for a
given database engine — `*.sql.postgres` or `*.sql.sqlite`. For example, a delta which
adds a new column to the `foo` table might be called `01add_bar_to_foo.sql`.
Note that our SQL parser is a bit simple - it understands comments (`--` and `/*...*/`),
but complex statements which require a `;` in the middle of them (such as `CREATE
TRIGGER`) are beyond it and you'll have to use a Python delta file.
### Python delta files
For more flexibility, a delta file can take the form of a python module. These should
be named `*.py`. Note that database-engine-specific modules are not supported here;
instead you can write `if isinstance(database_engine, PostgresEngine)` or similar.
A Python delta module should define either or both of the following functions:
```python
import synapse.config.homeserver
import synapse.storage.engines
import synapse.storage.types
def run_create(
cur: synapse.storage.types.Cursor,
database_engine: synapse.storage.engines.BaseDatabaseEngine,
) -> None:
"""Called whenever an existing or new database is to be upgraded"""
...
def run_upgrade(
cur: synapse.storage.types.Cursor,
database_engine: synapse.storage.engines.BaseDatabaseEngine,
config: synapse.config.homeserver.HomeServerConfig,
) -> None:
"""Called whenever an existing database is to be upgraded."""
...
```
## Boolean columns
Boolean columns require special treatment, since SQLite treats booleans the
@@ -158,9 +104,9 @@ same as integers.
There are three separate aspects to this:
* Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
`synapse/_scripts/synapse_port_db.py`. This tells the port script to cast `scripts/synapse_port_db`. This tells the port script to cast the integer
the integer value from SQLite to a boolean before writing the value to the value from SQLite to a boolean before writing the value to the postgres
postgres database. database.
* Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not

View File

@@ -1,41 +0,0 @@
# Synapse demo setup
**DO NOT USE THESE DEMO SERVERS IN PRODUCTION**
Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).
The demo setup allows running three federation Synapse servers, with server
names `localhost:8080`, `localhost:8081`, and `localhost:8082`.
You can access them via any Matrix client over HTTP at `localhost:8080`,
`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
`localhost:8481`, and `localhost:8482`.
To enable the servers to communicate, self-signed SSL certificates are generated
and the servers are configured in a highly insecure way, including:
* Not checking certificates over federation.
* Not verifying keys.
The servers are configured to store their data under `demo/8080`, `demo/8081`, and
`demo/8082`. This includes configuration, logs, SQLite databases, and media.
Note that when joining a public room on a different HS via "#foo:bar.net", then
you are (in the current impl) joining a room with room_id "foo". This means that
it won't work if your HS already has a room with that name.
## Using the demo scripts
There are three main scripts with straightforward purposes:
* `start.sh` will start the Synapse servers, generating any missing configuration.
* This accepts a single parameter `--no-rate-limit` to "disable" rate limits
(they actually still exist, but are very high).
* `stop.sh` will stop the Synapse servers.
* `clean.sh` will delete the configuration, databases, log files, etc.
To start a completely new set of servers, run:
```sh
./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
```

Some files were not shown because too many files have changed in this diff