Compare commits

12 Commits

Author SHA1 Message Date
Mathieu Velten
f19f47b44b Add test image for complement with workers 2021-01-19 11:12:35 +01:00
Andrew Morgan
9115c47dc3 Temp: add script for building and running worker-mode synapse in complement 2021-01-07 00:22:27 -05:00
Andrew Morgan
ce591bf75b Remove MoveToComplement.Dockerfile
Also added some debug logging in order to try and figure out why the
main homeserver config file isn't getting generated by start.py
2021-01-07 00:08:29 -05:00
Mathieu Velten
1acb2d9ee1 Remove replication listener from the global template 2020-12-31 17:39:24 +01:00
Mathieu Velten
f73e9db981 Various fixes + force TLS disabled 2020-12-31 15:15:07 +01:00
Mathieu Velten
cbe335d2f0 Add more workers 2020-12-31 00:38:05 +01:00
Mathieu Velten
ee138d87db Move client_max_body_size to server and increase to 100M 2020-12-31 00:21:03 +01:00
Mathieu Velten
dfd5e8079b Add more workers config 2020-12-31 00:16:11 +01:00
Mathieu Velten
80db995e33 Change to more dynamic workers config 2020-12-30 20:07:01 +01:00
Andrew Morgan
fa8bc0ba39 Only expose nginx listening port (8008). Add more worker configs 2020-12-30 13:51:38 +00:00
Andrew Morgan
62ac8b9c0d Get Synapse main and worker process startup working! 2020-12-15 19:15:55 +00:00
Andrew Morgan
422d40e82f major wip 2020-12-14 18:34:58 +00:00
1163 changed files with 19866 additions and 52658 deletions


@@ -3,7 +3,7 @@
# CI's Docker setup at the point where this file is considered.
server_name: "localhost:8800"
signing_key_path: ".buildkite/test.signing.key"
signing_key_path: "/src/.buildkite/test.signing.key"
report_stats: false
@@ -16,4 +16,6 @@ database:
database: synapse
# Suppress the key server warning.
trusted_key_servers: []
trusted_key_servers:
- server_name: "matrix.org"
suppress_key_server_warning: true
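
The flattened diff above drops YAML indentation, so as a sketch, the resulting section of the generated test config would read as follows (assuming `suppress_key_server_warning` is the top-level option of that name from Synapse's sample config):

```yaml
# Suppress the key server warning by trusting matrix.org explicitly
# instead of leaving the list empty.
trusted_key_servers:
  - server_name: "matrix.org"
suppress_key_server_warning: true
```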


@@ -0,0 +1,36 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.storage.engines import create_engine
logger = logging.getLogger("create_postgres_db")
if __name__ == "__main__":
# Create a PostgresEngine.
db_engine = create_engine({"name": "psycopg2", "args": {}})
# Connect to postgres to create the base database.
# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = db_engine.module.connect(
user="postgres", host="postgres", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
cur.execute("CREATE DATABASE synapse;")
cur.close()
db_conn.close()


@@ -1,31 +0,0 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import psycopg2
# a very simple replacment for `psql`, to make up for the lack of the postgres client
# libraries in the synapse docker image.
# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = psycopg2.connect(
user="postgres", host="postgres", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
for c in sys.argv[1:]:
cur.execute(c)


@@ -1,16 +1,13 @@
#!/usr/bin/env bash
#!/bin/bash
# this script is run by buildkite in a plain `bionic` container; it installs the
# minimal requirements for tox and hands over to the py3-old tox environment.
# this script is run by buildkite in a plain `xenial` container; it installs the
# minimal requirements for tox and hands over to the py35-old tox environment.
set -ex
apt-get update
apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
export LANG="C.UTF-8"
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
exec tox -e py3-old,combine
exec tox -e py35-old,combine


@@ -1,10 +1,10 @@
#!/usr/bin/env bash
#!/bin/bash
#
# Test script for 'synapse_port_db'.
# - sets up synapse and deps
# - runs the port script on a prepopulated test sqlite db
# - also runs it against an new sqlite db
# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
# with additional dependencies needed for the test (such as coverage or the PostgreSQL
# driver), update the schema of the test SQLite database and run background updates on it,
# create an empty test database in PostgreSQL, then run the 'synapse_port_db' script to
# test porting the SQLite database to the PostgreSQL database (with coverage).
set -xe
cd `dirname $0`/../..
@@ -22,36 +22,15 @@ echo "--- Generate the signing key"
# Generate the server's signing key.
python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
echo "--- Prepare test database"
echo "--- Prepare the databases"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
# Create the PostgreSQL database.
./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse"
./.buildkite/scripts/create_postgres_db.py
echo "+++ Run synapse_port_db against test database"
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
#####
# Now do the same again, on an empty database.
echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .buildkite/test_db.db
scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
# re-create the PostgreSQL database.
./.buildkite/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
echo "+++ Run synapse_port_db"
# Run the script
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml


@@ -3,7 +3,7 @@
# schema and run background updates on it.
server_name: "localhost:8800"
signing_key_path: ".buildkite/test.signing.key"
signing_key_path: "/src/.buildkite/test.signing.key"
report_stats: false
@@ -13,4 +13,6 @@ database:
database: ".buildkite/test_db.db"
# Suppress the key server warning.
trusted_key_servers: []
trusted_key_servers:
- server_name: "matrix.org"
suppress_key_server_warning: true


@@ -14,7 +14,7 @@ jobs:
platforms: linux/amd64
- docker_build:
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
platforms: linux/amd64,linux/arm64
platforms: linux/amd64,linux/arm/v7,linux/arm64
dockerhubuploadlatest:
docker:
@@ -27,7 +27,7 @@ jobs:
# until all of the platforms are built.
- docker_build:
tag: -t matrixdotorg/synapse:latest
platforms: linux/amd64,linux/arm64
platforms: linux/amd64,linux/arm/v7,linux/arm64
workflows:
build:
@@ -41,7 +41,7 @@ workflows:
- dockerhubuploadlatest:
filters:
branches:
only: [ master, main ]
only: master
commands:
docker_prepare:


@@ -1,8 +0,0 @@
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
# Target Python 3.5 with black (#8664).
aff1eb7c671b0a3813407321d2702ec46c71fa56
# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1


@@ -1,31 +0,0 @@
name: Deploy the documentation
on:
push:
branches:
- develop
workflow_dispatch:
jobs:
pages:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup mdbook
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
with:
mdbook-version: '0.4.9'
- name: Build the documentation
run: mdbook build
- name: Deploy latest documentation
uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
keep_files: true
publish_dir: ./book
destination_dir: ./develop


@@ -1,328 +0,0 @@
name: Tests
on:
push:
branches: ["develop", "release-*"]
pull_request:
jobs:
lint:
runs-on: ubuntu-latest
strategy:
matrix:
toxenv:
- "check-sampleconfig"
- "check_codestyle"
- "check_isort"
- "mypy"
- "packaging"
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install tox
- run: tox -e ${{ matrix.toxenv }}
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
runs-on: ubuntu-latest
steps:
# Note: This and the script can be simplified once we drop Buildkite. See:
# https://github.com/actions/checkout/issues/266#issuecomment-638346893
# https://github.com/actions/checkout/issues/416
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v2
- run: pip install tox
- name: Patch Buildkite-specific test script
run: |
sed -i -e 's/\$BUILDKITE_PULL_REQUEST/${{ github.event.number }}/' \
scripts-dev/check-newsfragment
- run: scripts-dev/check-newsfragment
lint-sdist:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install wheel
- run: python setup.py sdist bdist_wheel
- uses: actions/upload-artifact@v2
with:
name: Python Distributions
path: dist/*
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ always() }} # Run this even if prior jobs were skipped
needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
runs-on: ubuntu-latest
steps:
- run: "true"
trial:
if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.6", "3.7", "3.8", "3.9"]
database: ["sqlite"]
include:
# Newest Python without optional deps
- python-version: "3.9"
toxenv: "py-noextras,combine"
# Oldest Python with PostgreSQL
- python-version: "3.6"
database: "postgres"
postgres-version: "9.6"
# Newest Python with PostgreSQL
- python-version: "3.9"
database: "postgres"
postgres-version: "13"
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- run: pip install tox
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: tox -e py,combine
env:
TRIAL_FLAGS: "--jobs=2"
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
trial-olddeps:
if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Test with old deps
uses: docker://ubuntu:bionic # For old python and sqlite
with:
workdir: /github/workspace
entrypoint: .buildkite/scripts/test_old_deps.sh
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
if: ${{ contains(github.ref, 'pypy') && !failure() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.6"]
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- run: pip install tox
- run: tox -e py,combine
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
if: ${{ !failure() }}
needs: linting-done
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
volumes:
- ${{ github.workspace }}:/src
env:
BUILDKITE_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
strategy:
fail-fast: false
matrix:
include:
- sytest-tag: bionic
- sytest-tag: bionic
postgres: postgres
- sytest-tag: testing
postgres: postgres
- sytest-tag: bionic
postgres: multi-postgres
workers: workers
- sytest-tag: buster
postgres: multi-postgres
workers: workers
- sytest-tag: buster
postgres: postgres
workers: workers
redis: redis
steps:
- uses: actions/checkout@v2
- name: Prepare test blacklist
run: cat sytest-blacklist .buildkite/worker-blacklist > synapse-blacklist-with-workers
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
portdb:
if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
include:
- python-version: "3.6"
postgres-version: "9.6"
- python-version: "3.9"
postgres-version: "13"
services:
postgres:
image: postgres:${{ matrix.postgres-version }}
ports:
- 5432:5432
env:
POSTGRES_PASSWORD: "postgres"
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Patch Buildkite-specific test scripts
run: |
sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py
sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
- run: .buildkite/scripts/test_synapse_port_db.sh
complement:
if: ${{ !failure() }}
needs: linting-done
runs-on: ubuntu-latest
container:
# https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile
image: matrixdotorg/complement:latest
env:
CI: true
ports:
- 8448:8448
volumes:
- /var/run/docker.sock:/var/run/docker.sock
steps:
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
- name: Run actions/checkout@v2 for complement
uses: actions/checkout@v2
with:
repository: "matrix-org/complement"
path: complement
# Build initial Synapse image
- run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
working-directory: synapse
# Build a ready-to-run Synapse image based on the initial image above.
# This new image includes a config file, keys for signing and TLS, and
# other settings to make it suitable for testing under Complement.
- run: docker build -t complement-synapse -f Synapse.Dockerfile .
working-directory: complement/dockerfiles
# Run Complement
- run: go test -v -tags synapse_blacklist ./tests
env:
COMPLEMENT_BASE_IMAGE: complement-synapse:latest
working-directory: complement

.gitignore

@@ -6,19 +6,16 @@
*.egg
*.egg-info
*.lock
*.py[cod]
*.pyc
*.snap
*.tac
_trial_temp/
_trial_temp*/
/out
.DS_Store
__pycache__/
# stuff that is likely to exist when you run a server locally
/*.db
/*.log
/*.log.*
/*.log.config
/*.pid
/.python-version
@@ -46,6 +43,3 @@ __pycache__/
/docs/build/
/htmlcov
/pip-wheel-metadata/
# docs
book/

CHANGES.md

(File diff suppressed because it is too large.)


@@ -1,31 +1,4 @@
Welcome to Synapse
This document aims to get you started with contributing to this repo!
- [1. Who can contribute to Synapse?](#1-who-can-contribute-to-synapse)
- [2. What do I need?](#2-what-do-i-need)
- [3. Get the source.](#3-get-the-source)
- [4. Install the dependencies](#4-install-the-dependencies)
* [Under Unix (macOS, Linux, BSD, ...)](#under-unix-macos-linux-bsd-)
* [Under Windows](#under-windows)
- [5. Get in touch.](#5-get-in-touch)
- [6. Pick an issue.](#6-pick-an-issue)
- [7. Turn coffee and documentation into code and documentation!](#7-turn-coffee-and-documentation-into-code-and-documentation)
- [8. Test, test, test!](#8-test-test-test)
* [Run the linters.](#run-the-linters)
* [Run the unit tests.](#run-the-unit-tests)
* [Run the integration tests.](#run-the-integration-tests)
- [9. Submit your patch.](#9-submit-your-patch)
* [Changelog](#changelog)
+ [How do I know what to call the changelog file before I create the PR?](#how-do-i-know-what-to-call-the-changelog-file-before-i-create-the-pr)
+ [Debian changelog](#debian-changelog)
* [Sign off](#sign-off)
- [10. Turn feedback into better code.](#10-turn-feedback-into-better-code)
- [11. Find a new issue.](#11-find-a-new-issue)
- [Notes for maintainers on merging PRs etc](#notes-for-maintainers-on-merging-prs-etc)
- [Conclusion](#conclusion)
# 1. Who can contribute to Synapse?
# Contributing code to Synapse
Everyone is welcome to contribute code to [matrix.org
projects](https://github.com/matrix-org), provided that they are willing to
@@ -36,179 +9,70 @@ license the code under the same terms as the project's overall 'outbound'
license - in our case, this is almost always Apache Software License v2 (see
[LICENSE](LICENSE)).
# 2. What do I need?
The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
# 3. Get the source.
## How to contribute
The preferred and easiest way to contribute changes is to fork the relevant
project on GitHub, and then [create a pull request](
project on github, and then [create a pull request](
https://help.github.com/articles/using-pull-requests/) to ask us to pull your
changes into our repo.
Please base your changes on the `develop` branch.
Some other points to follow:
```sh
git clone git@github.com:YOUR_GITHUB_USER_NAME/synapse.git
git checkout develop
```
* Please base your changes on the `develop` branch.
If you need help getting started with git, this is beyond the scope of the document, but you
can find many good git tutorials on the web.
* Please follow the [code style requirements](#code-style).
# 4. Install the dependencies
* Please include a [changelog entry](#changelog) with each PR.
## Under Unix (macOS, Linux, BSD, ...)
* Please [sign off](#sign-off) your contribution.
Once you have installed Python 3 and added the source, please open a terminal and
setup a *virtualenv*, as follows:
* Please keep an eye on the pull request for feedback from the [continuous
integration system](#continuous-integration-and-testing) and try to fix any
errors that come up.
```sh
cd path/where/you/have/cloned/the/repository
python3 -m venv ./env
source ./env/bin/activate
pip install -e ".[all,lint,mypy,test]"
pip install tox
```
* If you need to [update your PR](#updating-your-pull-request), just add new
commits to your branch rather than rebasing.
This will install the developer dependencies for the project.
## Under Windows
TBD
# 5. Get in touch.
Join our developer community on Matrix: #synapse-dev:matrix.org !
# 6. Pick an issue.
Fix your favorite problem or perhaps find a [Good First Issue](https://github.com/matrix-org/synapse/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22)
to work on.
# 7. Turn coffee and documentation into code and documentation!
## Code style
Synapse's code style is documented [here](docs/code_style.md). Please follow
it, including the conventions for the [sample configuration
file](docs/code_style.md#configuration-file-format).
There is a growing amount of documentation located in the [docs](docs)
directory. This documentation is intended primarily for sysadmins running their
own Synapse instance, as well as developers interacting externally with
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.
Many of the conventions are enforced by scripts which are run as part of the
[continuous integration system](#continuous-integration-and-testing). To help
check if you have followed the code style, you can run `scripts-dev/lint.sh`
locally. You'll need python 3.6 or later, and to install a number of tools:
If you add new files added to either of these folders, please use [GitHub-Flavoured
Markdown](https://guides.github.com/features/mastering-markdown/).
```
# Install the dependencies
pip install -e ".[lint,mypy]"
Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.
# 8. Test, test, test!
<a name="test-test-test"></a>
While you're developing and before submitting a patch, you'll
want to test your code.
## Run the linters.
The linters look at your code and do two things:
- ensure that your code follows the coding style adopted by the project;
- catch a number of errors in your code.
They're pretty fast, don't hesitate!
```sh
source ./env/bin/activate
# Run the linter script
./scripts-dev/lint.sh
```
Note that this script *will modify your files* to fix styling errors.
Make sure that you have saved all your files.
**Note that the script does not just test/check, but also reformats code, so you
may wish to ensure any new code is committed first**.
If you wish to restrict the linters to only the files changed since the last commit
(much faster!), you can instead run:
By default, this script checks all files and can take some time; if you alter
only certain files, you might wish to specify paths as arguments to reduce the
run-time:
```sh
source ./env/bin/activate
./scripts-dev/lint.sh -d
```
Or if you know exactly which files you wish to lint, you can instead run:
```sh
source ./env/bin/activate
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```
## Run the unit tests.
You can also provide the `-d` option, which will lint the files that have been
changed since the last git commit. This will often be significantly faster than
linting the whole codebase.
The unit tests run parts of Synapse, including your changes, to see if anything
was broken. They are slower than the linters but will typically catch more errors.
```sh
source ./env/bin/activate
trial tests
```
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
```sh
source ./env/bin/activate
trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
```
If your tests fail, you may wish to look at the logs:
```sh
less _trial_temp/test.log
```
## Run the integration tests.
The integration tests are a more comprehensive suite of tests. They
run a full version of Synapse, including your changes, to check if
anything was broken. They are slower than the unit tests but will
typically catch more errors.
The following command will let you run the integration test with the most common
configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:py37
```
This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
# 9. Submit your patch.
Once you're happy with your patch, it's time to prepare a Pull Request.
To prepare a Pull Request, please:
1. verify that [all the tests pass](#test-test-test), including the coding style;
2. [sign off](#sign-off) your contribution;
3. `git push` your commit to your fork of Synapse;
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
5. add a [changelog entry](#changelog) and push it to your Pull Request;
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
Before pushing new changes, ensure they don't produce linting errors. Commit any
files that were corrected.
Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
## Changelog
@@ -292,6 +156,24 @@ directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)
## Documentation
There is a growing amount of documentation located in the [docs](docs)
directory. This documentation is intended primarily for sysadmins running their
own Synapse instance, as well as developers interacting externally with
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.
New files added to both folders should be written in [Github-Flavoured
Markdown](https://guides.github.com/features/mastering-markdown/), and attempts
should be made to migrate existing documents to markdown where possible.
Some documentation also exists in [Synapse's Github
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.
## Sign off
In order to have a concrete record that your contribution is intentional
@@ -358,36 +240,47 @@ Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
## Continuous integration and testing
# 10. Turn feedback into better code.
[Buildkite](https://buildkite.com/matrix-dot-org/synapse) will automatically
run a series of checks and tests against any PR which is opened against the
project; if your change breaks the build, this will be shown in GitHub, with
links to the build results. If your build fails, please try to fix the errors
and update your branch.
Once the Pull Request is opened, you will see a few things:
To run unit tests in a local development environment, you can use:
1. our automated CI (Continuous Integration) pipeline will run (again) the linters, the unit tests, the integration tests and more;
2. one or more of the developers will take a look at your Pull Request and offer feedback.
- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
(requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
(requires Docker). Entirely self-contained, recommended if you don't want to
set up PostgreSQL yourself.
From this point, you should:
Docker images are available for running the integration tests (SyTest) locally,
see the [documentation in the SyTest repo](
https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
information.
1. Look at the results of the CI pipeline.
- If there is any error, fix the error.
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
3. Create a new commit with the changes.
- Please do NOT overwrite the history. New commits make the reviewer's life easier.
- Push this commits to your Pull Request.
4. Back to 1.
## Updating your pull request
Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
If you decide to make changes to your pull request - perhaps to address issues
raised in a review, or to fix problems highlighted by [continuous
integration](#continuous-integration-and-testing) - just add new commits to your
branch, and push to GitHub. The pull request will automatically be updated.
# 11. Find a new issue.
Please **avoid** rebasing your branch, especially once the PR has been
reviewed: doing so makes it very difficult for a reviewer to see what has
changed since a previous review.
By now, you know the drill!
# Notes for maintainers on merging PRs etc
## Notes for maintainers on merging PRs etc
There are some notes for those with commit access to the project on how we
manage git [here](docs/dev/git.md).
# Conclusion
## Conclusion
That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully


@@ -1,45 +1,19 @@
# Installation Instructions
- [Choosing your server name](#choosing-your-server-name)
- [Picking a database engine](#picking-a-database-engine)
- [Installing Synapse](#installing-synapse)
- [Installing from source](#installing-from-source)
- [Platform-Specific Instructions](#platform-specific-instructions)
- [Prebuilt packages](#prebuilt-packages)
- [Setting up Synapse](#setting-up-synapse)
- [TLS certificates](#tls-certificates)
- [Client Well-Known URI](#client-well-known-uri)
- [Email](#email)
- [Registering a user](#registering-a-user)
- [Setting up a TURN server](#setting-up-a-turn-server)
- [URL previews](#url-previews)
- [Troubleshooting Installation](#troubleshooting-installation)
There are 3 steps to follow under **Installation Instructions**.
- [Installation Instructions](#installation-instructions)
- [Choosing your server name](#choosing-your-server-name)
- [Installing Synapse](#installing-synapse)
- [Installing from source](#installing-from-source)
- [Platform-specific prerequisites](#platform-specific-prerequisites)
- [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
- [ArchLinux](#archlinux)
- [CentOS/Fedora](#centosfedora)
- [macOS](#macos)
- [OpenSUSE](#opensuse)
- [OpenBSD](#openbsd)
- [Windows](#windows)
- [Prebuilt packages](#prebuilt-packages)
- [Docker images and Ansible playbooks](#docker-images-and-ansible-playbooks)
- [Debian/Ubuntu](#debianubuntu)
- [Matrix.org packages](#matrixorg-packages)
- [Downstream Debian packages](#downstream-debian-packages)
- [Downstream Ubuntu packages](#downstream-ubuntu-packages)
- [Fedora](#fedora)
- [OpenSUSE](#opensuse-1)
- [SUSE Linux Enterprise Server](#suse-linux-enterprise-server)
- [ArchLinux](#archlinux-1)
- [Void Linux](#void-linux)
- [FreeBSD](#freebsd)
- [OpenBSD](#openbsd-1)
- [NixOS](#nixos)
- [Setting up Synapse](#setting-up-synapse)
- [Using PostgreSQL](#using-postgresql)
- [TLS certificates](#tls-certificates)
- [Client Well-Known URI](#client-well-known-uri)
- [Email](#email)
- [Registering a user](#registering-a-user)
- [Setting up a TURN server](#setting-up-a-turn-server)
- [URL previews](#url-previews)
- [Troubleshooting Installation](#troubleshooting-installation)
## Choosing your server name
# Choosing your server name
It is important to choose the name for your server before you install Synapse,
because it cannot be changed later.
@@ -55,24 +29,46 @@ that your email address is probably `user@example.com` rather than
`user@email.example.com`) - but doing so may require more advanced setup: see
[Setting up Federation](docs/federate.md).
## Installing Synapse
# Picking a database engine
### Installing from source
Synapse offers two database engines:
* [PostgreSQL](https://www.postgresql.org)
* [SQLite](https://sqlite.org/)
Almost all installations should opt to use PostgreSQL. Advantages include:
* significant performance improvements due to the superior threading and
caching model, smarter query optimiser
* allowing the DB to be run on separate hardware
For information on how to install and use PostgreSQL, please see
[docs/postgres.md](docs/postgres.md)
By default Synapse uses SQLite and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.
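
To make the PostgreSQL option concrete, the `database` section of `homeserver.yaml` would look roughly like the sketch below. The option names follow [docs/postgres.md](docs/postgres.md); the credentials are placeholders:

```yaml
database:
  name: psycopg2          # the PostgreSQL driver (SQLite setups use "sqlite3")
  args:
    user: synapse_user    # placeholder credentials
    password: secretpassword
    database: synapse
    host: localhost
    cp_min: 5             # connection-pool bounds
    cp_max: 10
```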
# Installing Synapse
## Installing from source
(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.5.2 or later, up to Python 3.9.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
Synapse is written in Python but some of the libraries it uses are written in
C. So before we can install Synapse itself we need a working C compiler and the
header files for Python C extensions. See [Platform-Specific
Instructions](#platform-specific-instructions) for information on installing
these on various platforms.
To install the Synapse homeserver run:
```sh
```
mkdir -p ~/synapse
virtualenv -p python3 ~/synapse/env
source ~/synapse/env/bin/activate
@@ -89,7 +85,7 @@ prefer.
This Synapse installation can then be later upgraded by using pip again with the
update flag:
```sh
```
source ~/synapse/env/bin/activate
pip install -U matrix-synapse
```
@@ -97,7 +93,7 @@ pip install -U matrix-synapse
Before you can start Synapse, you will need to generate a configuration
file. To do this, run (in your virtualenv, as before):
```sh
```
cd ~/synapse
python -m synapse.app.homeserver \
--server-name my.domain.name \
@@ -115,58 +111,70 @@ wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your homeserver's keys, you may find that other homeserver have the
old key cached. If you update the signing key, you should change the name of the
key in the `<server name>.signing.key` file (the second word) to something
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).
different. See the
[spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
for more information on key management).
To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. `~/synapse`), and:
```sh
```
cd ~/synapse
source env/bin/activate
synctl start
```
#### Platform-specific prerequisites
### Platform-Specific Instructions
Synapse is written in Python but some of the libraries it uses are written in
C. So before we can install Synapse itself we need a working C compiler and the
header files for Python C extensions.
##### Debian/Ubuntu/Raspbian
#### Debian/Ubuntu/Raspbian
Installing prerequisites on Ubuntu or Debian:
```sh
sudo apt install build-essential python3-dev libffi-dev \
```
sudo apt-get install build-essential python3-dev libffi-dev \
python3-pip python3-setuptools sqlite3 \
libssl-dev virtualenv libjpeg-dev libxslt1-dev
```
##### ArchLinux
#### ArchLinux
Installing prerequisites on ArchLinux:
```sh
```
sudo pacman -S base-devel python python-pip \
python-setuptools python-virtualenv sqlite3
```
##### CentOS/Fedora
#### CentOS/Fedora
Installing prerequisites on CentOS or Fedora Linux:
Installing prerequisites on CentOS 8 or Fedora>26:
```sh
```
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
libwebp-devel libxml2-devel libxslt-devel libpq-devel \
python3-virtualenv libffi-devel openssl-devel python3-devel
libwebp-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
sudo dnf groupinstall "Development Tools"
```
##### macOS
Installing prerequisites on CentOS 7 or Fedora<=25:
```
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
sudo yum groupinstall "Development Tools"
```
Note that Synapse does not support versions of SQLite before 3.11, and CentOS 7
uses SQLite 3.7. You may be able to work around this by installing a more
recent SQLite version, but it is recommended that you instead use a Postgres
database: see [docs/postgres.md](docs/postgres.md).
#### macOS
Installing prerequisites on macOS:
```sh
```
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
@@ -176,23 +184,22 @@ brew install pkg-config libffi
On macOS Catalina (10.15) you may need to explicitly install OpenSSL
via brew and inform `pip` about it so that `psycopg2` builds:
```sh
```
brew install openssl@1.1
export LDFLAGS="-L/usr/local/opt/openssl/lib"
export CPPFLAGS="-I/usr/local/opt/openssl/include"
export LDFLAGS=-L/usr/local/Cellar/openssl\@1.1/1.1.1d/lib/
```
##### OpenSUSE
#### OpenSUSE
Installing prerequisites on openSUSE:
```sh
```
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
python-devel libffi-devel libopenssl-devel libjpeg62-devel
```
##### OpenBSD
#### OpenBSD
A port of Synapse is available under `net/synapse`. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
@@ -206,72 +213,73 @@ mounted with `wxallowed` (cf. `mount(8)`).
Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
default OpenBSD installation is mounted with `wxallowed`):
```sh
```
doas mkdir /usr/local/pobj_wxallowed
```
Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
configured in `/etc/mk.conf`:
```sh
```
doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
```
Setting the `WRKOBJDIR` for building python:
```sh
```
echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
```
Building Synapse:
```sh
```
cd /usr/ports/net/synapse
make install
```
##### Windows
#### Windows
If you wish to run or develop Synapse on Windows, the Windows Subsystem For
Linux provides a Linux environment on Windows 10 which is capable of using the
Debian, Fedora, or source installation methods. More information about WSL can
be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
for Windows Server.
### Prebuilt packages
## Prebuilt packages
As an alternative to installing from source, prebuilt packages are available
for a number of platforms.
#### Docker images and Ansible playbooks
### Docker images and Ansible playbooks
There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
There is an offical synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse which can be used with
the docker-compose file available at [contrib/docker](contrib/docker). Further
information on this including configuration options is available in the README
on hub.docker.com.
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
<https://hub.docker.com/r/avhost/docker-matrix/tags/>
https://hub.docker.com/r/avhost/docker-matrix/tags/
Slavi Pantaleev has created an Ansible playbook,
which installs the offical Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, Element, coturn,
ma1sd, SSL support, etc.).
For more details, see
<https://github.com/spantaleev/matrix-docker-ansible-deploy>
https://github.com/spantaleev/matrix-docker-ansible-deploy
#### Debian/Ubuntu
##### Matrix.org packages
### Debian/Ubuntu
#### Matrix.org packages
Matrix.org provides Debian/Ubuntu packages of the latest stable version of
Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
Synapse via https://packages.matrix.org/debian/. They are available for Debian
9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:
```sh
```
sudo apt install -y lsb-release wget apt-transport-https
sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
@@ -291,7 +299,7 @@ The fingerprint of the repository signing key (as shown by `gpg
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
##### Downstream Debian packages
#### Downstream Debian packages
We do not recommend using the packages from the default Debian `buster`
repository at this time, as they are old and suffer from known security
@@ -303,49 +311,49 @@ for information on how to use backports.
If you are using Debian `sid` or testing, Synapse is available in the default
repositories and it should be possible to install it simply with:
```sh
```
sudo apt install matrix-synapse
```
##### Downstream Ubuntu packages
#### Downstream Ubuntu packages
We do not recommend using the packages in the default Ubuntu repository
at this time, as they are old and suffer from known security vulnerabilities.
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
#### Fedora
### Fedora
Synapse is in the Fedora repositories as `matrix-synapse`:
```sh
```
sudo dnf install matrix-synapse
```
Oleg Girko provides Fedora RPMs at
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
https://obs.infoserver.lv/project/monitor/matrix-synapse
#### OpenSUSE
### OpenSUSE
Synapse is in the OpenSUSE repositories as `matrix-synapse`:
```sh
```
sudo zypper install matrix-synapse
```
#### SUSE Linux Enterprise Server
### SUSE Linux Enterprise Server
Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>
https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
#### ArchLinux
### ArchLinux
The quickest way to get up and running with ArchLinux is probably with the community package
<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
the necessary dependencies.
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 ):
```sh
```
sudo pip install --upgrade pip
```
@@ -354,28 +362,28 @@ ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
compile it under the right architecture. (This should not be needed if
installing under virtualenv):
```sh
```
sudo pip uninstall py-bcrypt
sudo pip install py-bcrypt
```
#### Void Linux
### Void Linux
Synapse can be found in the void repositories as 'synapse':
```sh
```
xbps-install -Su
xbps-install -S synapse
```
#### FreeBSD
### FreeBSD
Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`
#### OpenBSD
### OpenBSD
As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
@@ -384,37 +392,20 @@ and mounting it to `/var/synapse` should be taken into consideration.
Installing Synapse:
```sh
```
doas pkg_add synapse
```
#### NixOS
### NixOS
Robin Lambertz has packaged Synapse for NixOS at:
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix
## Setting up Synapse
# Setting up Synapse
Once you have installed synapse as above, you will need to configure it.
### Using PostgreSQL
By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades
performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org)
instead. Advantages include:
- significant performance improvements due to the superior threading and
caching model, smarter query optimiser
- allowing the DB to be run on separate hardware
For information on how to install and use PostgreSQL in Synapse, please see
[docs/postgres.md](docs/postgres.md)
SQLite is only acceptable for testing purposes. SQLite should not be used in
a production server. Synapse will perform poorly when using
SQLite, especially when participating in large rooms.
### TLS certificates
## TLS certificates
The default configuration exposes a single HTTP port on the local
interface: `http://localhost:8008`. It is suitable for local testing,
@@ -428,19 +419,19 @@ The recommended way to do so is to set up a reverse proxy on port
Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:
- First, under the `listeners` section, uncomment the configuration for the
* First, under the `listeners` section, uncomment the configuration for the
TLS-enabled listener. (Remove the hash sign (`#`) at the start of
each line). The relevant lines are like this:
```yaml
- port: 8448
type: http
tls: true
resources:
- names: [client, federation]
```
- port: 8448
type: http
tls: true
resources:
- names: [client, federation]
```
- You will also need to uncomment the `tls_certificate_path` and
* You will also need to uncomment the `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You will need to manage
provisioning of these certificates yourself — Synapse had built-in ACME
support, but the ACMEv1 protocol Synapse implements is deprecated, not
@@ -455,7 +446,7 @@ so, you will need to edit `homeserver.yaml`, as follows:
For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md).
### Client Well-Known URI
## Client Well-Known URI
Setting up the client Well-Known URI is optional but if you set it up, it will
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
@@ -466,7 +457,7 @@ about the actual homeserver URL you are using.
The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
the following format.
```json
```
{
"m.homeserver": {
"base_url": "https://<matrix.example.com>"
@@ -476,7 +467,7 @@ the following format.
It can optionally contain identity server information as well.
```json
```
{
"m.homeserver": {
"base_url": "https://<matrix.example.com>"
@@ -493,8 +484,7 @@ Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
view it.
In nginx this would be something like:
```nginx
```
location /.well-known/matrix/client {
return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
default_type application/json;
@@ -507,11 +497,11 @@ correctly. `public_baseurl` should be set to the URL that clients will use to
connect to your server. This is the same URL you put for the `m.homeserver`
`base_url` above.
```yaml
```
public_baseurl: "https://<matrix.example.com>"
```
### Email
## Email
It is desirable for Synapse to have the capability to send email. This allows
Synapse to send password reset emails, send verifications when an email address
@@ -526,28 +516,18 @@ and `notif_from` fields filled out. You may also need to set `smtp_user`,
If email is not configured, password reset, registration and notifications via
email will be disabled.
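
As a sketch of such a configuration (the field names below follow the prose above and Synapse's sample config; every value is a placeholder):

```yaml
email:
  smtp_host: mail.example.com
  smtp_port: 587
  smtp_user: "synapse"           # only needed if the relay requires authentication
  smtp_pass: "secret-password"
  notif_from: "Your %(app)s homeserver <noreply@example.com>"
```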
### Registering a user
## Registering a user
The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
Alternatively, you can do so from the command line. This can be done as follows:
Alternatively you can do so from the command line if you have installed via pip.
1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
installed via a prebuilt package, `register_new_matrix_user` should already be
on the search path):
```sh
cd ~/synapse
source env/bin/activate
synctl start # if not already running
```
2. Run the following command:
```sh
register_new_matrix_user -c homeserver.yaml http://localhost:8008
```
This can be done as follows:
This will prompt you to add details for the new user, and will then connect to
the running Synapse to create the new user. For example:
```
$ source ~/synapse/env/bin/activate
$ synctl start # if not already running
$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
New user localpart: erikj
Password:
Confirm password:
@@ -562,12 +542,12 @@ value is generated by `--generate-config`), but it should be kept secret, as
anyone with knowledge of it can register users, including admin accounts,
on your server even if `enable_registration` is `false`.
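
The value referred to above is Synapse's `registration_shared_secret` option. A minimal sketch of the relevant settings:

```yaml
# register_new_matrix_user authenticates with this secret, so accounts can be
# created from the command line even while public registration stays disabled.
enable_registration: false
registration_shared_secret: "<value written by --generate-config>"
```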
### Setting up a TURN server
## Setting up a TURN server
For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
### URL previews
## URL previews
Synapse includes support for previewing URLs, which is disabled by default. To
turn it on you must enable the `url_preview_enabled: True` config parameter
@@ -581,14 +561,14 @@ This also requires the optional `lxml` python dependency to be installed. This
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
means `apt-get install libxml2-dev`, or equivalent for your OS.
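
A minimal sketch of the relevant settings. Note that when previews are enabled, Synapse also expects `url_preview_ip_range_blacklist` to be set so that previews of internal addresses are refused; the ranges below are illustrative:

```yaml
url_preview_enabled: true
# Refuse to fetch previews from private/internal address ranges.
url_preview_ip_range_blacklist:
  - '127.0.0.0/8'
  - '10.0.0.0/8'
  - '172.16.0.0/12'
  - '192.168.0.0/16'
```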
### Troubleshooting Installation
# Troubleshooting Installation
`pip` seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.:
```sh
```
pip install twisted
```


@@ -20,10 +20,9 @@ recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include tests *.pem
recursive-include tests *.p8
recursive-include tests *.crt
recursive-include tests *.key
include tests/http/ca.crt
include tests/http/ca.key
include tests/http/server.key
recursive-include synapse/res *
recursive-include synapse/static *.css
@@ -40,7 +39,6 @@ exclude mypy.ini
exclude sytest-blacklist
exclude test_postgresql.sh
include book.toml
include pyproject.toml
recursive-include changelog.d *


@@ -149,45 +149,21 @@ For details on having Synapse manage your federation TLS certificates
automatically, please see `<docs/ACME.md>`_.
Security note
Security Note
=============
Matrix serves raw, user-supplied data in some APIs -- specifically the `content
repository endpoints`_.
Matrix serves raw user generated data in some APIs - specifically the `content
repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid
Whilst we have tried to mitigate against possible XSS attacks (e.g.
https://github.com/matrix-org/synapse/pull/1021) we recommend running
matrix homeservers on a dedicated domain name, to limit any malicious user generated
content served to web browsers a matrix API from being able to attack webapps hosted
on the same domain. This is particularly true of sharing a matrix webclient and
server on the same domain.
Whilst we make a reasonable effort to mitigate against XSS attacks (for
instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
domain hosting other web applications. This especially applies to sharing
the domain with Matrix web clients and other sensitive applications like
webmail. See
https://developer.github.com/changes/2014-04-25-user-content-security for more
information.
.. _CSP: https://github.com/matrix-org/synapse/pull/1021
Ideally, the homeserver should not simply be on a different subdomain, but on
a completely different `registered domain`_ (also known as top-level site or
eTLD+1). This is because `some attacks`_ are still possible as long as the two
applications share the same registered domain.
.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3
.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie
To illustrate this with an example, if your Element Web or other sensitive web
application is hosted on ``A.example1.com``, you should ideally host Synapse on
``example2.com``. Some amount of protection is offered by hosting on
``B.example1.com`` instead, so this is also acceptable in some scenarios.
However, you should *not* host your Synapse on ``A.example1.com``.
Note that all of the above refers exclusively to the domain used in Synapse's
``public_baseurl`` setting. In particular, it has no bearing on the domain
mentioned in MXIDs hosted on that server.
Following this advice ensures that even if an XSS is found in Synapse, the
impact to other applications will be minimal.
See https://github.com/vector-im/riot-web/issues/1977 and
https://developer.github.com/changes/2014-04-25-user-content-security for more details.
Upgrading an existing Synapse
@@ -207,9 +183,8 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
@@ -268,8 +243,6 @@ Then update the ``users`` table in the database::
Synapse Development
===================
Join our developer community on Matrix: `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_
Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Installing from source <INSTALL.md#installing-from-source>`_.
@@ -305,27 +278,6 @@ differ)::
PASSED (skips=15, successes=1322)
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
./demo/start.sh
(to stop, you can use `./demo/stop.sh`)
If you just want to start a single instance of the app and run it directly::
# Create the homeserver.yaml config once
python -m synapse.app.homeserver \
--server-name my.domain.name \
--config-path homeserver.yaml \
--generate-config \
--report-stats=[yes|no]
# Start the app
python -m synapse.app.homeserver --config-path homeserver.yaml
Running the Integration Tests
=============================
@@ -338,15 +290,6 @@ Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `installation instructions
<https://github.com/matrix-org/sytest#installing>`_ for details.
Platform dependencies
=====================
Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the
`<docs/deprecation_policy.md>`_ document for more details.
Troubleshooting
===============
@@ -417,17 +360,12 @@ massive excess of outgoing federation requests (see `discussion
indicate that your server is also issuing far more outgoing federation
requests than can be accounted for by your users' activity, this is a
likely cause. The misbehavior can be worked around by setting
the following in the Synapse config file:
.. code-block:: yaml
presence:
enabled: false
``use_presence: false`` in the Synapse config file.
People can't accept room invitations from me
--------------------------------------------
The typical failure mode here is that you send an invitation to someone
to join a room or direct chat, but when they go to accept it, they get an
error (typically along the lines of "Invalid signature"). They might see
something like the following in their logs::
@@ -5,16 +5,6 @@ Before upgrading check if any special steps are required to upgrade from the
version you currently have installed to the current version of Synapse. The extra
instructions that may be required are listed later in this document.
* Check that your versions of Python and PostgreSQL are still supported.
Synapse follows upstream lifecycles for `Python`_ and `PostgreSQL`_, and
removes support for versions which are no longer maintained.
The website https://endoflife.date also offers convenient summaries.
.. _Python: https://devguide.python.org/devcycle/#end-of-life-branches
.. _PostgreSQL: https://www.postgresql.org/support/versioning/
* If Synapse was installed using `prebuilt packages
<INSTALL.md#prebuilt-packages>`_, you will need to follow the normal process
for upgrading those packages.
@@ -85,246 +75,9 @@ for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
Upgrading to v1.34.0
====================
``room_invite_state_types`` configuration setting
--------------------------------------------------
The ``room_invite_state_types`` configuration setting has been deprecated and
replaced with ``room_prejoin_state``. See the `sample configuration file <https://github.com/matrix-org/synapse/blob/v1.34.0/docs/sample_config.yaml#L1515>`_.
If you have set ``room_invite_state_types`` to the default value you should simply
remove it from your configuration file. The default value used to be:
.. code:: yaml
room_invite_state_types:
- "m.room.join_rules"
- "m.room.canonical_alias"
- "m.room.avatar"
- "m.room.encryption"
- "m.room.name"
If you have customised this value, you should remove ``room_invite_state_types`` and
configure ``room_prejoin_state`` instead.
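For illustration, a customised configuration expressed with the new setting
might look something like the following. This is a sketch based on the
``disable_default_event_types`` and ``additional_event_types`` option names
shown in the v1.34.0 sample configuration file; consult the sample file for
the authoritative syntax.

.. code:: yaml

    room_prejoin_state:
        # Replace, rather than extend, the default set of state events
        # shared with users invited to a room.
        disable_default_event_types: true
        additional_event_types:
            - "m.room.join_rules"
            - "m.room.canonical_alias"
            - "m.room.name"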
Upgrading to v1.33.0
====================
Account Validity HTML templates can now display a user's expiration date
------------------------------------------------------------------------
This may affect you if you have enabled the account validity feature, and have made use of a
custom HTML template specified by the ``account_validity.template_dir`` or ``account_validity.account_renewed_html_path``
Synapse config options.
The template can now accept an ``expiration_ts`` variable, which represents the unix timestamp in milliseconds of
the future date until which the user's account has been renewed. See the
`default template <https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_renewed.html>`_
for an example of usage.
Also note that a new HTML template, ``account_previously_renewed.html``, has been added. This is shown to users
when they attempt to renew their account with a valid renewal token that has already been used before. The default
template contents can be found
`here <https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_previously_renewed.html>`_,
and can also accept an ``expiration_ts`` variable. This template replaces the error message users would previously see
upon attempting to use a valid renewal token more than once.
Upgrading to v1.32.0
====================
Regression causing connected Prometheus instances to become overwhelmed
-----------------------------------------------------------------------
This release introduces `a regression <https://github.com/matrix-org/synapse/issues/9853>`_
that can overwhelm connected Prometheus instances. This issue is not present in
Synapse v1.32.0rc1.
If you have been affected, please downgrade to 1.31.0. You then may need to
remove excess writeahead logs in order for Prometheus to recover. Instructions
for doing so are provided
`here <https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183>`_.
Dropping support for old Python, Postgres and SQLite versions
-------------------------------------------------------------
In line with our `deprecation policy <https://github.com/matrix-org/synapse/blob/release-v1.32.0/docs/deprecation_policy.md>`_,
we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no longer supported upstream.
This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or SQLite 3.22+.
Removal of old List Accounts Admin API
--------------------------------------
The deprecated v1 "list accounts" admin API (``GET /_synapse/admin/v1/users/<user_id>``) has been removed in this version.
The `v2 list accounts API <https://github.com/matrix-org/synapse/blob/master/docs/admin_api/user_admin_api.rst#list-accounts>`_
has been available since Synapse 1.7.0 (2019-12-13), and is accessible under ``GET /_synapse/admin/v2/users``.
The deprecation of the old endpoint was announced with Synapse 1.28.0 (released on 2021-02-25).
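For illustration, listing the first page of accounts via the v2 API might look
like the following (the access token is a placeholder; see the linked
documentation for the full set of query parameters)::

    GET /_synapse/admin/v2/users?from=0&limit=10
    Authorization: Bearer <admin_access_token>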
Application Services must use type ``m.login.application_service`` when registering users
-----------------------------------------------------------------------------------------
In compliance with the
`Application Service spec <https://matrix.org/docs/spec/application_service/r0.1.2#server-admin-style-permissions>`_,
Application Services are now required to use the ``m.login.application_service`` type when registering users via the
``/_matrix/client/r0/register`` endpoint. This behaviour was deprecated in Synapse v1.30.0.
Please ensure your Application Services are up to date.
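As a rough sketch, a compliant registration request now looks something like
the following, where ``_example_bot`` and the application service token are
placeholders::

    POST /_matrix/client/r0/register?access_token=<as_token>

    {
        "type": "m.login.application_service",
        "username": "_example_bot"
    }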
Upgrading to v1.29.0
====================
Requirement for X-Forwarded-Proto header
----------------------------------------
When using Synapse with a reverse proxy (in particular, when using the
`x_forwarded` option on an HTTP listener), Synapse now expects to receive an
`X-Forwarded-Proto` header on incoming HTTP requests. If it is not set, Synapse
will log a warning on each received request.
To avoid the warning, administrators using a reverse proxy should ensure that
the reverse proxy sets `X-Forwarded-Proto` header to `https` or `http` to
indicate the protocol used by the client.
Synapse also requires the `Host` header to be preserved.
See the `reverse proxy documentation <docs/reverse_proxy.md>`_, where the
example configurations have been updated to show how to set these headers.
(Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
sets `X-Forwarded-Proto` by default.)
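As a minimal sketch for nginx (assuming Synapse listens on ``localhost:8008``;
other reverse proxies have equivalent settings), the relevant directives are:

.. code:: nginx

    location /_matrix {
        proxy_pass http://localhost:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $host;
    }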
Upgrading to v1.27.0
====================
Changes to callback URI for OAuth2 / OpenID Connect and SAML2
-------------------------------------------------------------
This version changes the URI used for callbacks from OAuth2 and SAML2 identity providers:
* If your server is configured for single sign-on via an OpenID Connect or OAuth2 identity
provider, you will need to add ``[synapse public baseurl]/_synapse/client/oidc/callback``
to the list of permitted "redirect URIs" at the identity provider.
See `docs/openid.md <docs/openid.md>`_ for more information on setting up OpenID
Connect.
* If your server is configured for single sign-on via a SAML2 identity provider, you will
need to add ``[synapse public baseurl]/_synapse/client/saml2/authn_response`` as a permitted
"ACS location" (also known as "allowed callback URLs") at the identity provider.
The "Issuer" in the "AuthnRequest" to the SAML2 identity provider is also updated to
``[synapse public baseurl]/_synapse/client/saml2/metadata.xml``. If your SAML2 identity
provider uses this property to validate or otherwise identify Synapse, its configuration
will need to be updated to use the new URL. Alternatively you could create a new, separate
"EntityDescriptor" in your SAML2 identity provider with the new URLs and leave the URLs in
the existing "EntityDescriptor" as they were.
Changes to HTML templates
-------------------------
The HTML templates for SSO and email notifications now have `Jinja2's autoescape <https://jinja.palletsprojects.com/en/2.11.x/api/#autoescaping>`_
enabled for files ending in ``.html``, ``.htm``, and ``.xml``. If you have customised
these templates and see issues when viewing them you might need to update them.
It is expected that most configurations will need no changes.
If you have customised the *names* of these templates, it is recommended
to verify they end in ``.html`` to ensure autoescape is enabled (a minimal
sketch of extension-based autoescaping follows the list below).
The above applies to the following templates:
* ``add_threepid.html``
* ``add_threepid_failure.html``
* ``add_threepid_success.html``
* ``notice_expiry.html``
* ``notif_mail.html`` (which, by default, includes ``room.html`` and ``notif.html``)
* ``password_reset.html``
* ``password_reset_confirmation.html``
* ``password_reset_failure.html``
* ``password_reset_success.html``
* ``registration.html``
* ``registration_failure.html``
* ``registration_success.html``
* ``sso_account_deactivated.html``
* ``sso_auth_bad_user.html``
* ``sso_auth_confirm.html``
* ``sso_auth_success.html``
* ``sso_error.html``
* ``sso_login_idp_picker.html``
* ``sso_redirect_confirm.html``
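For reference, extension-based autoescaping can be reproduced with Jinja2's
``select_autoescape`` helper. This is a minimal sketch (with a placeholder
template directory), not Synapse's actual template-loading code:

.. code:: python

    from jinja2 import Environment, FileSystemLoader, select_autoescape

    env = Environment(
        loader=FileSystemLoader("/path/to/custom/templates"),
        # Escape output in any template whose filename ends in
        # .html, .htm or .xml; other extensions are left unescaped.
        autoescape=select_autoescape(enabled_extensions=("html", "htm", "xml")),
    )
    template = env.get_template("notice_expiry.html")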
Upgrading to v1.26.0
====================
Rolling back to v1.25.0 after a failed upgrade
----------------------------------------------
v1.26.0 includes a lot of large changes. If something problematic occurs, you
may want to roll back to a previous version of Synapse. Because v1.26.0 also
includes a new database schema version, reverting that version is also required
alongside the generic rollback instructions mentioned above. In short, to roll
back to v1.25.0 you need to:
1. Stop the server
2. Decrease the schema version in the database:
.. code:: sql
UPDATE schema_version SET version = 58;
3. Delete the ignored users & chain cover data:
.. code:: sql
DROP TABLE IF EXISTS ignored_users;
UPDATE rooms SET has_auth_chain_index = false;
For PostgreSQL run:
.. code:: sql
TRUNCATE event_auth_chain_links;
TRUNCATE event_auth_chains;
For SQLite run:
.. code:: sql
DELETE FROM event_auth_chain_links;
DELETE FROM event_auth_chains;
4. Mark the deltas as not run (so they will re-run on upgrade).
.. code:: sql
DELETE FROM applied_schema_deltas WHERE version = 59 AND file = '59/01ignored_user.py'
DELETE FROM applied_schema_deltas WHERE version = 59 AND file = '59/06chain_cover_index.sql'
5. Downgrade Synapse by following the instructions for your installation method
in the "Rolling back to older versions" section above.
Upgrading to v1.25.0
====================
Last release supporting Python 3.5
----------------------------------
This is the last release of Synapse which guarantees support for Python 3.5,
which passed its upstream End of Life date several months ago.
We will attempt to maintain support through March 2021, but without guarantees.
In the future, Synapse will follow upstream schedules for ending support of
older versions of Python and PostgreSQL. Please upgrade to at least Python 3.6
and PostgreSQL 9.6 as soon as possible.
Blacklisting IP ranges
----------------------
@@ -373,7 +126,7 @@ shown below:
return {"localpart": localpart}
Removal of historical Synapse Admin API
----------------------------------------
Historically, the Synapse Admin API has been accessible under:
@@ -1,39 +0,0 @@
# Documentation for possible options in this file is at
# https://rust-lang.github.io/mdBook/format/config.html
[book]
title = "Synapse"
authors = ["The Matrix.org Foundation C.I.C."]
language = "en"
multilingual = false
# The directory that documentation files are stored in
src = "docs"
[build]
# Prevent markdown pages from being automatically generated when they're
# linked to in SUMMARY.md
create-missing = false
[output.html]
# The URL visitors will be directed to when they try to edit a page
edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}"
# Remove the numbers that appear before each item in the sidebar, as they can
# get quite messy as we nest deeper
no-section-label = true
# The source code URL of the repository
git-repository-url = "https://github.com/matrix-org/synapse"
# The path that the docs are hosted on
site-url = "/synapse/"
# Additional HTML, JS, CSS that's injected into each page of the book.
# More information available in docs/website_files/README.md
additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
changelog.d/8802.doc Normal file
@@ -0,0 +1 @@
Fix the "Event persist rate" section of the included grafana dashboard by adding missing prometheus rules.
changelog.d/8821.bugfix Normal file
@@ -0,0 +1 @@
Apply an IP range blacklist to push and key revocation requests.
changelog.d/8827.bugfix Normal file
@@ -0,0 +1 @@
Fix bug where we might not correctly calculate the current state for rooms with multiple extremities.
changelog.d/8829.removal Normal file
@@ -0,0 +1 @@
Deprecate Shutdown Room and Purge Room Admin APIs.
changelog.d/8837.bugfix Normal file
@@ -0,0 +1 @@
Fix a long standing bug in the register admin endpoint (`/_synapse/admin/v1/register`) when the `mac` field was not provided. The endpoint now properly returns a 400 error. Contributed by @edwargix.
changelog.d/8839.doc Normal file
@@ -0,0 +1 @@
Combine related media admin API docs.
changelog.d/8853.feature Normal file
@@ -0,0 +1 @@
Add optional HTTP authentication to replication endpoints.
changelog.d/8858.bugfix Normal file
@@ -0,0 +1 @@
Fix a long-standing bug on Synapse instances supporting Single-Sign-On, where users would be prompted to enter their password to confirm certain actions, even though they have not set a password.
changelog.d/8861.misc Normal file
@@ -0,0 +1 @@
Remove some unnecessary stubbing from unit tests.
changelog.d/8862.bugfix Normal file
@@ -0,0 +1 @@
Fix a longstanding bug where a 500 error would be returned if the `Content-Length` header was not provided to the upload media resource.
changelog.d/8864.misc Normal file
@@ -0,0 +1 @@
Remove unused `FakeResponse` class from unit tests.
changelog.d/8865.bugfix Normal file
@@ -0,0 +1 @@
Add additional validation to pusher URLs to be compliant with the specification.
changelog.d/8867.bugfix Normal file
@@ -0,0 +1 @@
Fix the error code that is returned when a user tries to register on a homeserver on which new-user registration has been disabled.
changelog.d/8870.bugfix Normal file
@@ -0,0 +1 @@
Apply an IP range blacklist to push and key revocation requests.
changelog.d/8872.bugfix Normal file
@@ -0,0 +1 @@
Fix a bug where `PUT /_synapse/admin/v2/users/<user_id>` failed to create a new user when `avatar_url` is specified. Bug introduced in Synapse v1.9.0.
changelog.d/8873.doc Normal file
@@ -0,0 +1 @@
Fix an error in the documentation for the SAML username mapping provider.
changelog.d/8874.feature Normal file
@@ -0,0 +1 @@
Improve the error messages printed as a result of configuration problems for extension modules.
changelog.d/8879.misc Normal file
@@ -0,0 +1 @@
Pass `room_id` to `get_auth_chain_difference`.
changelog.d/8880.misc Normal file
@@ -0,0 +1 @@
Add type hints to push module.
changelog.d/8881.misc Normal file
@@ -0,0 +1 @@
Simplify logic for handling user-interactive-auth via single-sign-on servers.
changelog.d/8882.misc Normal file
@@ -0,0 +1 @@
Add type hints to push module.
changelog.d/8883.bugfix Normal file
@@ -0,0 +1 @@
Fix a 500 error when attempting to preview an empty HTML file.
changelog.d/8886.feature Normal file
@@ -0,0 +1 @@
Add number of local devices to Room Details Admin API. Contributed by @dklimpel.
changelog.d/8887.feature Normal file
@@ -0,0 +1 @@
Add `X-Robots-Tag` header to stop web crawlers from indexing media.
changelog.d/8891.doc Normal file
@@ -0,0 +1 @@
Clarify comments around template directories in `sample_config.yaml`.
changelog.d/8897.feature Normal file
@@ -0,0 +1 @@
Add support for allowing users to pick their own user ID during a single-sign-on login.
changelog.d/8900.feature Normal file
@@ -0,0 +1 @@
Add support for allowing users to pick their own user ID during a single-sign-on login.
changelog.d/8905.misc Normal file
@@ -0,0 +1 @@
Skip the SAML tests if the requirements (`pysaml2` and `xmlsec1`) aren't available.
changelog.d/8906.misc Normal file
@@ -0,0 +1 @@
Fix multiarch docker image builds.
changelog.d/8909.misc Normal file
@@ -0,0 +1 @@
Don't publish `latest` docker image until all archs are built.
changelog.d/8916.misc Normal file
@@ -0,0 +1 @@
Improve structured logging tests.
changelog.d/8918.bugfix Normal file
@@ -0,0 +1 @@
Fix occasional deadlock when handling SIGHUP.
changelog.d/8921.bugfix Normal file
@@ -0,0 +1 @@
Fix bug where we ratelimited auto joining of rooms on registration (using `auto_join_rooms` config).
@@ -24,7 +24,6 @@ import sys
import time
import urllib
from http import TwistedHttpClient
from typing import Optional
import nacl.encoding
import nacl.signing
@@ -93,7 +92,7 @@ class SynapseCmd(cmd.Cmd):
return self.config["user"].split(":")[1]
def do_config(self, line):
"""Show the config for this client: "config"
""" Show the config for this client: "config"
Edit a key value mapping: "config key value" e.g. "config token 1234"
Config variables:
user: The username to auth with.
@@ -361,7 +360,7 @@ class SynapseCmd(cmd.Cmd):
print(e)
def do_topic(self, line):
""" "topic [set|get] <roomid> [<newtopic>]"
""""topic [set|get] <roomid> [<newtopic>]"
Set the topic for a room: topic set <roomid> <newtopic>
Get the topic for a room: topic get <roomid>
"""
@@ -691,7 +690,7 @@ class SynapseCmd(cmd.Cmd):
self._do_presence_state(2, line)
def _parse(self, line, keys, force_keys=False):
"""Parses the given line.
""" Parses the given line.
Args:
line : The line to parse
@@ -719,10 +718,10 @@ class SynapseCmd(cmd.Cmd):
method,
path,
data=None,
query_params: Optional[dict] = None,
query_params={"access_token": None},
alt_text=None,
):
"""Runs an HTTP request and pretty prints the output.
""" Runs an HTTP request and pretty prints the output.
Args:
method: HTTP method
@@ -730,8 +729,6 @@ class SynapseCmd(cmd.Cmd):
data: Raw JSON data if any
query_params: dict of query parameters to add to the url
"""
query_params = query_params or {"access_token": None}
url = self._url() + path
if "access_token" in query_params:
query_params["access_token"] = self._tok()
@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +16,6 @@
import json
import urllib
from pprint import pformat
from typing import Optional
from twisted.internet import defer, reactor
from twisted.web.client import Agent, readBody
@@ -23,10 +23,11 @@ from twisted.web.http_headers import Headers
class HttpClient:
"""Interface for talking json over http"""
""" Interface for talking json over http
"""
def put_json(self, url, data):
"""Sends the specifed json data using PUT
""" Sends the specifed json data using PUT
Args:
url (str): The URL to PUT data to.
@@ -40,7 +41,7 @@ class HttpClient:
pass
def get_json(self, url, args=None):
"""Gets some json from the given host homeserver and path
""" Gets some json from the given host homeserver and path
Args:
url (str): The URL to GET data from.
@@ -57,7 +58,7 @@ class HttpClient:
class TwistedHttpClient(HttpClient):
"""Wrapper around the twisted HTTP client api.
""" Wrapper around the twisted HTTP client api.
Attributes:
agent (twisted.web.client.Agent): The twisted Agent used to send the
@@ -85,9 +86,9 @@ class TwistedHttpClient(HttpClient):
body = yield readBody(response)
defer.returnValue(json.loads(body))
def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
"""Wrapper of _create_request to issue a PUT request"""
headers_dict = headers_dict or {}
def _create_put_request(self, url, json_data, headers_dict={}):
""" Wrapper of _create_request to issue a PUT request
"""
if "Content-Type" not in headers_dict:
raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
@@ -96,22 +97,15 @@ class TwistedHttpClient(HttpClient):
"PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
)
def _create_get_request(self, url, headers_dict: Optional[dict] = None):
"""Wrapper of _create_request to issue a GET request"""
return self._create_request("GET", url, headers_dict=headers_dict or {})
def _create_get_request(self, url, headers_dict={}):
""" Wrapper of _create_request to issue a GET request
"""
return self._create_request("GET", url, headers_dict=headers_dict)
@defer.inlineCallbacks
def do_request(
self,
method,
url,
data=None,
qparams=None,
jsonreq=True,
headers: Optional[dict] = None,
self, method, url, data=None, qparams=None, jsonreq=True, headers={}
):
headers = headers or {}
if qparams:
url = "%s?%s" % (url, urllib.urlencode(qparams, True))
@@ -132,12 +126,9 @@ class TwistedHttpClient(HttpClient):
defer.returnValue(json.loads(body))
@defer.inlineCallbacks
def _create_request(
self, method, url, producer=None, headers_dict: Optional[dict] = None
):
"""Creates and sends a request to the given url"""
headers_dict = headers_dict or {}
def _create_request(self, method, url, producer=None, headers_dict={}):
""" Creates and sends a request to the given url
"""
headers_dict["User-Agent"] = ["Synapse Cmd Client"]
retries_left = 5
@@ -194,7 +185,8 @@ class _RawProducer:
class _JsonProducer:
"""Used by the twisted http client to create the HTTP body from json"""
""" Used by the twisted http client to create the HTTP body from json
"""
def __init__(self, jsn):
self.data = jsn
@@ -63,7 +63,8 @@ class CursesStdIO:
self.redraw()
def redraw(self):
"""method for redisplaying lines based on internal list of lines"""
""" method for redisplaying lines
based on internal list of lines """
self.stdscr.clear()
self.paintStatus(self.statusText)
@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -55,7 +56,7 @@ def excpetion_errback(failure):
class InputOutput:
"""This is responsible for basic I/O so that a user can interact with
""" This is responsible for basic I/O so that a user can interact with
the example app.
"""
@@ -67,7 +68,8 @@ class InputOutput:
self.server = server
def on_line(self, line):
"""This is where we process commands."""
""" This is where we process commands.
"""
try:
m = re.match(r"^join (\S+)$", line)
@@ -131,7 +133,7 @@ class IOLoggerHandler(logging.Handler):
class Room:
"""Used to store (in memory) the current membership state of a room, and
""" Used to store (in memory) the current membership state of a room, and
which home servers we should send PDUs associated with the room to.
"""
@@ -146,7 +148,8 @@ class Room:
self.have_got_metadata = False
def add_participant(self, participant):
"""Someone has joined the room"""
""" Someone has joined the room
"""
self.participants.add(participant)
self.invited.discard(participant)
@@ -157,13 +160,14 @@ class Room:
self.oldest_server = server
def add_invited(self, invitee):
"""Someone has been invited to the room"""
""" Someone has been invited to the room
"""
self.invited.add(invitee)
self.servers.add(origin_from_ucid(invitee))
class HomeServer(ReplicationHandler):
"""A very basic home server implentation that allows people to join a
""" A very basic home server implentation that allows people to join a
room and then invite other people.
"""
@@ -177,7 +181,8 @@ class HomeServer(ReplicationHandler):
self.output = output
def on_receive_pdu(self, pdu):
"""We just received a PDU"""
""" We just received a PDU
"""
pdu_type = pdu.pdu_type
if pdu_type == "sy.room.message":
@@ -194,20 +199,23 @@ class HomeServer(ReplicationHandler):
)
def _on_message(self, pdu):
"""We received a message"""
""" We received a message
"""
self.output.print_line(
"#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
)
def _on_join(self, context, joinee):
"""Someone has joined a room, either a remote user or a local user"""
""" Someone has joined a room, either a remote user or a local user
"""
room = self._get_or_create_room(context)
room.add_participant(joinee)
self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
def _on_invite(self, origin, context, invitee):
"""Someone has been invited"""
""" Someone has been invited
"""
room = self._get_or_create_room(context)
room.add_invited(invitee)
@@ -220,7 +228,8 @@ class HomeServer(ReplicationHandler):
@defer.inlineCallbacks
def send_message(self, room_name, sender, body):
"""Send a message to a room!"""
""" Send a message to a room!
"""
destinations = yield self.get_servers_for_context(room_name)
try:
@@ -238,7 +247,8 @@ class HomeServer(ReplicationHandler):
@defer.inlineCallbacks
def join_room(self, room_name, sender, joinee):
"""Join a room!"""
""" Join a room!
"""
self._on_join(room_name, joinee)
destinations = yield self.get_servers_for_context(room_name)
@@ -259,7 +269,8 @@ class HomeServer(ReplicationHandler):
@defer.inlineCallbacks
def invite_to_room(self, room_name, sender, invitee):
"""Invite someone to a room!"""
""" Invite someone to a room!
"""
self._on_invite(self.server_name, room_name, invitee)
destinations = yield self.get_servers_for_context(room_name)
File diff suppressed because it is too large
@@ -193,12 +193,15 @@ class TrivialXmppClient:
time.sleep(7)
print("SSRC spammer started")
while self.running:
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
ssrcMsg = (
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
% {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
)
res = self.sendIq(ssrcMsg)
print("reply from ssrc announce: ", res)
time.sleep(10)
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# this script will use the api:
# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
DOMAIN=yourserver.tld
# add this user as admin in your home server:
@@ -1,71 +0,0 @@
[Service]
# The following directives give the synapse service R/W access to:
# - /run/matrix-synapse
# - /var/lib/matrix-synapse
# - /var/log/matrix-synapse
RuntimeDirectory=matrix-synapse
StateDirectory=matrix-synapse
LogsDirectory=matrix-synapse
######################
## Security Sandbox ##
######################
# Make sure that the service has its own unshared tmpfs at /tmp and that it
# cannot see or change any real devices
PrivateTmp=true
PrivateDevices=true
# We give no capabilities to a service by default
CapabilityBoundingSet=
AmbientCapabilities=
# Protect the following from modification:
# - The entire filesystem
# - sysctl settings and loaded kernel modules
# - No modifications allowed to Control Groups
# - Hostname
# - System Clock
ProtectSystem=strict
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true
ProtectClock=true
ProtectHostname=true
# Prevent access to the following:
# - /home directory
# - Kernel logs
ProtectHome=tmpfs
ProtectKernelLogs=true
# Make sure that the process can only see PIDs and process details of itself,
# and the second option disables seeing details of things like system load and
# I/O etc
ProtectProc=invisible
ProcSubset=pid
# While not needed, we set these options explicitly
# - This process has been given access to the host network
# - It can also communicate with any IP Address
PrivateNetwork=false
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
IPAddressAllow=any
# Restrict system calls to a sane bunch
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallFilter=~@privileged @resources @obsolete
# Misc restrictions
# - Since the process is a python process it needs to be able to write and
# execute memory regions, so we set MemoryDenyWriteExecute to false
RestrictSUIDSGID=true
RemoveIPC=true
NoNewPrivileges=true
RestrictRealtime=true
RestrictNamespaces=true
LockPersonality=true
PrivateUsers=true
MemoryDenyWriteExecute=false
@@ -33,13 +33,11 @@ esac
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
# than the 2/3 compatible `virtualenv`.
# Pin pip to 20.3.4 to fix breakage in 21.0 on py3.5 (xenial)
dh_virtualenv \
--install-suffix "matrix-synapse" \
--builtin-venv \
--python "$SNAKE" \
--upgrade-pip-to="20.3.4" \
--upgrade-pip \
--preinstall="lxml" \
--preinstall="mock" \
--extra-pip-arg="--no-cache-dir" \
@@ -50,27 +48,18 @@ PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
case "$DEB_BUILD_OPTIONS" in
*nocheck*)
# Skip running tests if "nocheck" present in $DEB_BUILD_OPTIONS
;;
# we copy the tests to a temporary directory so that we can put them on the
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
tmpdir=`mktemp -d`
trap "rm -r $tmpdir" EXIT
*)
# Copy tests to a temporary directory so that we can put them on the
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
tmpdir=`mktemp -d`
trap "rm -r $tmpdir" EXIT
cp -r tests "$tmpdir"
PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
;;
esac
PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
# build the config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
--config-dir="/etc/matrix-synapse" \
--data-dir="/var/lib/matrix-synapse" |
perl -pe '
@@ -96,7 +85,7 @@ esac
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
# build the log config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
# add a dependency on the right version of python to substvars.
debian/changelog vendored
@@ -1,132 +1,3 @@
matrix-synapse-py3 (1.36.0) stable; urgency=medium
* New synapse release 1.36.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Jun 2021 15:41:53 +0100
matrix-synapse-py3 (1.35.1) stable; urgency=medium
* New synapse release 1.35.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 03 Jun 2021 08:11:29 -0400
matrix-synapse-py3 (1.35.0) stable; urgency=medium
* New synapse release 1.35.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Jun 2021 13:23:35 +0100
matrix-synapse-py3 (1.34.0) stable; urgency=medium
* New synapse release 1.34.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 17 May 2021 11:34:18 +0100
matrix-synapse-py3 (1.33.2) stable; urgency=medium
* New synapse release 1.33.2.
-- Synapse Packaging team <packages@matrix.org> Tue, 11 May 2021 11:17:59 +0100
matrix-synapse-py3 (1.33.1) stable; urgency=medium
* New synapse release 1.33.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 06 May 2021 14:06:33 +0100
matrix-synapse-py3 (1.33.0) stable; urgency=medium
* New synapse release 1.33.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 05 May 2021 14:15:27 +0100
matrix-synapse-py3 (1.32.2) stable; urgency=medium
* New synapse release 1.32.2.
-- Synapse Packaging team <packages@matrix.org> Wed, 22 Apr 2021 12:43:52 +0100
matrix-synapse-py3 (1.32.1) stable; urgency=medium
* New synapse release 1.32.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 21 Apr 2021 14:00:55 +0100
matrix-synapse-py3 (1.32.0) stable; urgency=medium
[ Dan Callahan ]
* Skip tests when DEB_BUILD_OPTIONS contains "nocheck".
[ Synapse Packaging team ]
* New synapse release 1.32.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Apr 2021 14:28:39 +0100
matrix-synapse-py3 (1.31.0) stable; urgency=medium
* New synapse release 1.31.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Apr 2021 13:08:29 +0100
matrix-synapse-py3 (1.30.1) stable; urgency=medium
* New synapse release 1.30.1.
-- Synapse Packaging team <packages@matrix.org> Fri, 26 Mar 2021 12:01:28 +0000
matrix-synapse-py3 (1.30.0) stable; urgency=medium
* New synapse release 1.30.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 22 Mar 2021 13:15:34 +0000
matrix-synapse-py3 (1.29.0) stable; urgency=medium
[ Jonathan de Jong ]
* Remove the python -B flag (don't generate bytecode) in scripts and documentation.
[ Synapse Packaging team ]
* New synapse release 1.29.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 08 Mar 2021 13:51:50 +0000
matrix-synapse-py3 (1.28.0) stable; urgency=medium
* New synapse release 1.28.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 25 Feb 2021 10:21:57 +0000
matrix-synapse-py3 (1.27.0) stable; urgency=medium
[ Dan Callahan ]
* Fix build on Ubuntu 16.04 LTS (Xenial).
[ Synapse Packaging team ]
* New synapse release 1.27.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Feb 2021 13:11:28 +0000
matrix-synapse-py3 (1.26.0) stable; urgency=medium
[ Richard van der Hoff ]
* Remove dependency on `python3-distutils`.
[ Synapse Packaging team ]
* New synapse release 1.26.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 27 Jan 2021 12:43:35 -0500
matrix-synapse-py3 (1.25.0) stable; urgency=medium
[ Dan Callahan ]
* Update dependencies to account for the removal of the transitional
dh-systemd package from Debian Bullseye.
[ Synapse Packaging team ]
* New synapse release 1.25.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 13 Jan 2021 10:14:55 +0000
matrix-synapse-py3 (1.24.0) stable; urgency=medium
* New synapse release 1.24.0.
debian/control vendored
@@ -3,11 +3,9 @@ Section: contrib/python
Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
# TODO: Remove the dependency on dh-systemd after dropping support for Ubuntu xenial
# On all other supported releases, it's merely a transitional package which
# does nothing but depends on debhelper (> 9.20160709)
Build-Depends:
debhelper (>= 9.20160709) | dh-systemd,
debhelper (>= 9),
dh-systemd,
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,
@@ -31,6 +29,7 @@ Pre-Depends: dpkg (>= 1.16.1)
Depends:
adduser,
debconf,
python3-distutils|libpython3-stdlib (<< 3.6),
${misc:Depends},
${shlibs:Depends},
${synapse:pydepends},
debian/synctl.1 vendored
@@ -44,7 +44,7 @@ Configuration file may be generated as follows:
.
.nf
$ python \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
$ python \-B \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
.
.fi
.
debian/synctl.ronn vendored
@@ -41,7 +41,7 @@ process.
Configuration file may be generated as follows:
$ python -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
$ python -B -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
## ENVIRONMENT
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
set -e
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
DIR="$( cd "$( dirname "$0" )" && pwd )"
@@ -96,48 +96,18 @@ for port in 8080 8081 8082; do
# Check script parameters
if [ $# -eq 1 ]; then
if [ $1 = "--no-rate-limit" ]; then
# messages rate limit
echo 'rc_messages_per_second: 1000' >> $DIR/etc/$port.config
echo 'rc_message_burst_count: 1000' >> $DIR/etc/$port.config
# Disable any rate limiting
ratelimiting=$(cat <<-RC
rc_message:
per_second: 1000
burst_count: 1000
rc_registration:
per_second: 1000
burst_count: 1000
rc_login:
address:
per_second: 1000
burst_count: 1000
account:
per_second: 1000
burst_count: 1000
failed_attempts:
per_second: 1000
burst_count: 1000
rc_admin_redaction:
per_second: 1000
burst_count: 1000
rc_joins:
local:
per_second: 1000
burst_count: 1000
remote:
per_second: 1000
burst_count: 1000
rc_3pid_validation:
per_second: 1000
burst_count: 1000
rc_invites:
per_room:
per_second: 1000
burst_count: 1000
per_user:
per_second: 1000
burst_count: 1000
RC
)
echo "${ratelimiting}" >> $DIR/etc/$port.config
# registration rate limit
printf 'rc_registration:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
# login rate limit
echo 'rc_login:' >> $DIR/etc/$port.config
printf ' address:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
printf ' account:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
printf ' failed_attempts:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
fi
fi
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
DIR="$( cd "$( dirname "$0" )" && pwd )"
demo/webserver.py Normal file
@@ -0,0 +1,59 @@
import argparse
import BaseHTTPServer
import os
import SimpleHTTPServer
import cgi, logging

from daemonize import Daemonize


class SimpleHTTPRequestHandlerWithPOST(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Accept all POST requests as file uploads."""

    UPLOAD_PATH = "upload"

    def do_POST(self):
        path = os.path.join(self.UPLOAD_PATH, os.path.basename(self.path))
        length = self.headers["content-length"]
        data = self.rfile.read(int(length))

        with open(path, "wb") as fh:
            fh.write(data)

        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.end_headers()

        # Return the absolute path of the uploaded file
        self.wfile.write('{"url":"/%s"}' % path)


def setup():
    parser = argparse.ArgumentParser()
    parser.add_argument("directory")
    parser.add_argument("-p", "--port", dest="port", type=int, default=8080)
    parser.add_argument("-P", "--pid-file", dest="pid", default="web.pid")
    args = parser.parse_args()

    # Get absolute path to directory to serve, as daemonize changes to '/'
    os.chdir(args.directory)
    dr = os.getcwd()

    httpd = BaseHTTPServer.HTTPServer(("", args.port), SimpleHTTPRequestHandlerWithPOST)

    def run():
        os.chdir(dr)
        httpd.serve_forever()

    daemon = Daemonize(
        app="synapse-webclient", pid=args.pid, action=run, auto_close_fds=False
    )
    daemon.start()


if __name__ == "__main__":
    setup()
@@ -28,32 +28,31 @@ RUN apt-get update && apt-get install -y \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
openssl \
rustc \
zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
&& rm -rf /var/lib/apt/lists/*
# Copy just what we need to pip install
# Build dependencies that are not available as wheels, to speed up rebuilds
RUN pip install --prefix="/install" --no-warn-script-location \
frozendict \
jaeger-client \
opentracing \
# Match the version constraints of Synapse
"prometheus_client>=0.4.0" \
psycopg2 \
pycparser \
pyrsistent \
pyyaml \
simplejson \
threadloop \
thrift
# now install synapse and all of the python deps to /install.
COPY synapse /synapse/synapse/
COPY scripts /synapse/scripts/
COPY MANIFEST.in README.rst setup.py synctl /synapse/
COPY synapse/__init__.py /synapse/synapse/__init__.py
COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source
#
# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
RUN pip install --prefix="/install" --no-warn-script-location \
/synapse[all]
# Copy over the rest of the project
COPY synapse /synapse/synapse/
# Install the synapse package itself and all of its children packages.
#
# This is aiming at installing only the `packages=find_packages(...)` from `setup.py`
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
/synapse[all]
###
### Stage 1: runtime
@@ -61,11 +60,6 @@ RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
FROM docker.io/python:${PYTHON_VERSION}-slim
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
LABEL org.opencontainers.image.licenses='Apache-2.0'
RUN apt-get update && apt-get install -y \
curl \
gosu \
@@ -73,10 +67,7 @@ RUN apt-get update && apt-get install -y \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
libssl-dev \
openssl \
&& rm -rf /var/lib/apt/lists/*
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
@@ -88,5 +79,5 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1
HEALTHCHECK --interval=1m --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1
@@ -27,7 +27,6 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
wget
# fetch and unpack the package
# TODO: Upgrade to 1.2.2 once xenial is dropped
RUN mkdir /dh-virtualenv
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
@@ -51,22 +50,17 @@ FROM ${distro}
ARG distro=""
ENV distro ${distro}
# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
# http://bugs.python.org/issue19846
ENV LANG C.UTF-8
# Install the build dependencies
#
# NB: keep this list in sync with the list of build-deps in debian/control
# TODO: it would be nice to do that automatically.
# TODO: Remove the dh-systemd stanza after dropping support for Ubuntu xenial
# it's a transitional package on all other, more recent releases
RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
debhelper \
devscripts \
dh-systemd \
libsystemd-dev \
lsb-release \
pkg-config \
@@ -76,10 +70,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \
python3-venv \
sqlite3 \
libpq-dev \
xmlsec1 \
&& ( env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
dh-systemd || true )
xmlsec1
COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /
@@ -5,11 +5,10 @@ FROM matrixdotorg/synapse
RUN apt-get update
RUN apt-get install -y supervisor redis nginx
# Remove the default nginx sites
RUN rm /etc/nginx/sites-enabled/default
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
# Copy the worker process and log configuration files
COPY ./docker/worker.yaml.j2 /conf/worker.yaml.j2
# Expose nginx listener port
EXPOSE 8080/tcp
@@ -21,3 +20,5 @@ VOLUME ["/data"]
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
# TODO: Healthcheck? Which worker to ask? Can we ask supervisord?
@@ -0,0 +1,31 @@
# Inherit from the workers Synapse docker image
FROM matrixdotorg/synapse:workers
RUN apt-get update
RUN apt-get install -y postgresql
RUN pg_ctlcluster 11 main start && su postgres -c "echo \
\"ALTER USER postgres PASSWORD 'somesecret'; \
CREATE DATABASE synapse \
ENCODING 'UTF8' \
LC_COLLATE='C' \
LC_CTYPE='C' \
template=template0;\" | psql" && pg_ctlcluster 11 main stop
WORKDIR /root
RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/caddy_2.3.0_linux_amd64.tar.gz" && \
tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz
COPY ./docker/caddy.complement.json /root/caddy.json
EXPOSE 8008 8448
ENTRYPOINT sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json && \
pg_ctlcluster 11 main start > /dev/null && \
/root/caddy start --config /root/caddy.json > /dev/null && \
SYNAPSE_SERVER_NAME=${SERVER_NAME} \
SYNAPSE_REPORT_STATS=no \
POSTGRES_PASSWORD=somesecret POSTGRES_USER=postgres POSTGRES_HOST=localhost \
SYNAPSE_WORKERS=synchrotron \
/configure_workers_and_start.py
@@ -1,140 +0,0 @@
# Running tests against a dockerised Synapse
It's possible to run integration tests against Synapse
using [Complement](https://github.com/matrix-org/complement). Complement is a Matrix Spec
compliance test suite for homeservers, and supports any homeserver docker image configured
to listen on ports 8008/8448. This document contains instructions for building Synapse
docker images that can be run inside Complement for testing purposes.
Note that running Synapse's unit tests from within the docker image is not supported.
## Testing with SQLite and single-process Synapse
> Note that `scripts-dev/complement.sh` is a script that will automatically build
> and run an SQLite-based, single-process of Synapse against Complement.
The instructions below will set up Complement testing for a single-process,
SQLite-based Synapse deployment.
Start by building the base Synapse docker image. If you wish to run tests with the latest
release of Synapse, instead of your current checkout, you can skip this step. From the
root of the repository:
```sh
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
This will build an image with the tag `matrixdotorg/synapse`.
Next, build the Synapse image for Complement. You will need a local checkout
of Complement. Change to the root of your Complement checkout and run:
```sh
docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
```
This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
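For example, a minimal invocation might look like the following (a sketch;
consult the Complement documentation for the authoritative command and the
available flags), run from the root of your Complement checkout:

```sh
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v ./tests/...
```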
## Testing with PostgreSQL and single or multi-process Synapse
The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
database backend.
As with the single-process image, build the base Synapse docker image. If you wish to run
tests with the latest release of Synapse, instead of your current checkout, you can skip
this step. From the root of the repository:
```sh
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
This will build an image with the tag `matrixdotorg/synapse`.
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Again, from the root of the repository:
```sh
docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
```
This will build an image with the tag `matrixdotorg/synapse-workers`.
It's worth noting at this point that this image is fully functional, and
can be used for testing against locally. See instructions for using the container
under
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
below.
Finally, build the Synapse image for Complement, which is based on
`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
the root of your Complement checkout and run:
```sh
docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
```
This will build an image with the tag `matrixdotorg/complement-synapse-workers`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
## Running the Dockerfile-worker image standalone
For manual testing of a multi-process Synapse instance in Docker,
[Dockerfile-workers](Dockerfile-workers) is a Dockerfile that will produce an image
bundling all necessary components together for a workerised homeserver instance.
This includes any desired Synapse worker processes, a nginx to route traffic accordingly,
a redis for worker communication and a supervisord instance to start up and monitor all
processes. You will need to provide your own postgres container to connect to, and TLS
is not handled by the container.
Once you've built the image using the above instructions, you can run it. Be sure
you've set up a volume according to the [usual Synapse docker instructions](README.md).
Then run something along the lines of:
```
docker run -d --name synapse \
--mount type=volume,src=synapse-data,dst=/data \
-p 8008:8008 \
-e SYNAPSE_SERVER_NAME=my.matrix.host \
-e SYNAPSE_REPORT_STATS=no \
-e POSTGRES_HOST=postgres \
-e POSTGRES_USER=postgres \
-e POSTGRES_PASSWORD=somesecret \
-e SYNAPSE_WORKER_TYPES=synchrotron,media_repository,user_dir \
-e SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1 \
matrixdotorg/synapse-workers
```
...substituting `POSTGRES*` variables for those that match a postgres host you have
available (usually a running postgres docker container).
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
use when running the container. All possible worker names are defined by the keys of the
`WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the
Dockerfile makes use of to generate appropriate worker, nginx and supervisord config
files.
Sharding is supported for a subset of workers, in line with the
[worker documentation](../docs/workers.md). To run multiple instances of a given worker
type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
(e.g. `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
(leaving only the main process). The container is configured to use redis-based worker
mode.
Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
after the timestamp.
Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at
00:00, according to the container's clock. Logging for the main process must still be
configured by modifying the homeserver's log config in your Synapse data volume.
@@ -2,28 +2,26 @@
This Docker image will run Synapse as a single process. By default it uses a
sqlite database; for production use you should connect it to a separate
postgres database. The image also does *not* provide a TURN server.
postgres database.
This image should work on all platforms that are supported by Docker upstream.
Note that Docker's WS1-backend Linux Containers on Windows
platform is [experimental](https://github.com/docker/for-win/issues/6470) and
is not supported by this image.
The image also does *not* provide a TURN server.
## Volumes
By default, the image expects a single volume, located at `/data`, that will hold:
By default, the image expects a single volume, located at ``/data``, that will hold:
* configuration files;
* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.
You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, `/data/media` could be stored on a large but low
disposal. For instance, ``/data/media`` could be stored on a large but low
performance hdd storage while other files could be stored on high performance
endpoints.
In order to setup an application service, simply create an `appservices`
In order to setup an application service, simply create an ``appservices``
directory in the data volume and write the application service Yaml
configuration file there. Multiple application services are supported.
@@ -56,8 +54,6 @@ The following environment variables are supported in `generate` mode:
* `SYNAPSE_SERVER_NAME` (mandatory): the server public hostname.
* `SYNAPSE_REPORT_STATS` (mandatory, `yes` or `no`): whether to enable
anonymous statistics reporting.
* `SYNAPSE_HTTP_PORT`: the port Synapse should listen on for http traffic.
Defaults to `8008`.
* `SYNAPSE_CONFIG_DIR`: where additional config files (such as the log config
and event signing key) will be stored. Defaults to `/data`.
* `SYNAPSE_CONFIG_PATH`: path to the file to be generated. Defaults to
@@ -78,8 +74,6 @@ docker run -d --name synapse \
matrixdotorg/synapse:latest
```
(assuming 8008 is the port Synapse is configured to listen on for http traffic.)
You can then check that it has started correctly with:
```
@@ -191,16 +185,6 @@ whilst running the above `docker run` commands.
```
--no-healthcheck
```
## Disabling the healthcheck in docker-compose file
If you wish to disable the healthcheck via docker-compose, append the following to your service configuration.
```
healthcheck:
disable: true
```
## Setting custom healthcheck on docker run
If you wish to point the healthcheck at a different port with docker command, add the following
@@ -212,18 +196,12 @@ If you wish to point the healthcheck at a different port with docker command, ad
## Setting the healthcheck in docker-compose file
You can add the following to set a custom healthcheck in a docker compose file.
You will need docker-compose version >2.1 for this to work.
You will need version >2.1 for this to work.
```
healthcheck:
test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
interval: 15s
timeout: 5s
interval: 1m
timeout: 10s
retries: 3
start_period: 5s
```
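
For reference, the check above just expects an HTTP 200 from the `/health`
endpoint; a rough Python equivalent (the port and timeout are assumptions
matching the example above):

```python
import sys
import urllib.request

# Exit 0 if /health returns HTTP 200 within the timeout, else exit 1.
try:
    with urllib.request.urlopen("http://localhost:8008/health", timeout=5) as resp:
        sys.exit(0 if resp.status == 200 else 1)
except OSError:
    sys.exit(1)
```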
## Using jemalloc
Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse [README](../README.rst).

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# The script to build the Debian package, as ran inside the Docker image.

View File

@@ -0,0 +1,76 @@
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":8448"
],
"routes": [
{
"match": [
{
"host": [
"{{ server_name }}"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "localhost:80"
}
]
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"tls": {
"automation": {
"policies": [
{
"subjects": [
"{{ server_name }}"
],
"issuers": [
{
"module": "internal"
}
],
"on_demand": true
}
]
}
},
"pki": {
"certificate_authorities": {
"local": {
"name": "Complement CA",
"root": {
"certificate": "/ca/ca.crt",
"private_key": "/ca/ca.key"
},
"intermediate": {
"certificate": "/ca/ca.crt",
"private_key": "/ca/ca.key"
}
}
}
}
}
}
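
The `{{ server_name }}` placeholders above indicate this file is a template. As a
hedged illustration (the file name and render step are assumptions, mirroring how
the other templates in this branch are rendered), it could be filled in with jinja2:

```python
import json

import jinja2

# Render the Caddy template for a hypothetical Complement server "hs1".
with open("caddy.json.j2") as f:
    rendered = jinja2.Template(f.read(), autoescape=False).render(server_name="hs1")

config = json.loads(rendered)
print(config["apps"]["http"]["servers"]["srv0"]["listen"])  # [':8448']
```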

View File

@@ -1,27 +0,0 @@
# This file contains the base config for the reverse proxy, as part of ../Dockerfile-workers.
# configure_workers_and_start.py uses and amends this file depending on the workers
# that have been selected.
{{ upstream_directives }}
server {
# Listen on an unoccupied port number
listen 8008;
listen [::]:8008;
server_name localhost;
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 100M;
{{ worker_locations }}
# Send all other traffic to the main process
location ~* ^(\/_matrix|\/_synapse) {
proxy_pass http://localhost:8080;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
}
}

View File

@@ -1,9 +0,0 @@
# This file contains the base for the shared homeserver config file between Synapse workers,
# as part of ./Dockerfile-workers.
# configure_workers_and_start.py uses and amends this file depending on the workers
# that have been selected.
redis:
enabled: true
{{ shared_worker_config }}

View File

@@ -1,41 +0,0 @@
# This file contains the base config for supervisord, as part of ../Dockerfile-workers.
# configure_workers_and_start.py uses and amends this file depending on the workers
# that have been selected.
[supervisord]
nodaemon=true
user=root
[program:nginx]
command=/usr/sbin/nginx -g "daemon off;"
priority=500
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
username=www-data
autorestart=true
[program:redis]
command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
username=redis
autorestart=true
[program:synapse_main]
command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
priority=10
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
# Additional process blocks
{{ worker_config }}

View File

@@ -27,8 +27,7 @@ log_config: "{{ SYNAPSE_LOG_CONFIG }}"
listeners:
{% if not SYNAPSE_NO_TLS %}
-
port: 8448
- port: 8448
bind_addresses: ['::']
type: http
tls: true
@@ -40,13 +39,11 @@ listeners:
compress: false
{% endif %}
# Allow configuring in case we want to reverse proxy 8008
# using another process in the same container
- port: {{ SYNAPSE_HTTP_PORT or 8008 }}
- port: 8008
tls: false
bind_addresses: ['::']
type: http
x_forwarded: false
x_forwarded: true
resources:
- names: [client]
@@ -91,6 +88,7 @@ federation_rc_concurrent: 3
## Files ##
media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}"
max_image_pixels: "32M"
dynamic_thumbnails: false
@@ -175,10 +173,18 @@ report_stats: False
## API Configuration ##
room_invite_state_types:
- "m.room.join_rules"
- "m.room.canonical_alias"
- "m.room.avatar"
- "m.room.name"
{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}
macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
@@ -191,10 +197,12 @@ old_signing_keys: {}
key_refresh_interval: "1d" # 1 Day.
# The trusted servers to download signing keys from.
trusted_key_servers:
- server_name: matrix.org
verify_keys:
"ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
perspectives:
servers:
"matrix.org":
verify_keys:
"ed25519:auto":
key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
password_config:
enabled: true

View File

@@ -2,36 +2,9 @@ version: 1
formatters:
precise:
{% if worker_name %}
format: '%(asctime)s - worker:{{ worker_name }} - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% else %}
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% endif %}
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
handlers:
{% if LOG_FILE_PATH %}
file:
class: logging.handlers.TimedRotatingFileHandler
formatter: precise
filename: {{ LOG_FILE_PATH }}
when: "midnight"
backupCount: 6 # Does not include the current log file.
encoding: utf8
# Default to buffering writes to log file for efficiency. This means that
# there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
# logs will still be flushed immediately.
buffer:
class: logging.handlers.MemoryHandler
target: file
# The capacity is the number of log lines that are buffered before
# being written to disk. Increasing this will lead to better
# performance, at the expense of it taking longer for log lines to
# be written to disk.
capacity: 10
flushLevel: 30 # Flush for WARNING logs as well
{% endif %}
console:
class: logging.StreamHandler
formatter: precise
@@ -44,11 +17,6 @@ loggers:
root:
level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
{% if LOG_FILE_PATH %}
handlers: [console, buffer]
{% else %}
handlers: [console]
{% endif %}
disable_existing_loggers: false
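
The buffering arrangement above (a `MemoryHandler` in front of a
`TimedRotatingFileHandler`) can be reproduced in plain Python; a minimal sketch
with an assumed log path:

```python
import logging
import logging.handlers

# Rotate at midnight, keeping 6 old files, as in the template above.
file_handler = logging.handlers.TimedRotatingFileHandler(
    "/tmp/worker.log", when="midnight", backupCount=6, encoding="utf8"
)
# Buffer up to 10 records; WARNING (level 30) and above flush immediately.
buffered = logging.handlers.MemoryHandler(
    capacity=10, flushLevel=logging.WARNING, target=file_handler
)
logging.getLogger().addHandler(buffered)
logging.getLogger().warning("written to disk straight away")
```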

View File

@@ -1,5 +1,6 @@
#!/usr/bin/env python
# Copyright 2021 The Matrix.org Foundation C.I.C.
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,88 +15,69 @@
# limitations under the License.
# This script reads environment variables and generates a shared Synapse worker,
# nginx and supervisord configs depending on the workers requested.
#
# The environment variables it reads are:
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
# below. Leave empty for no workers, or set to '*' for all possible workers.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
# continue to work if so.
# nginx and supervisord configs depending on the workers requested
import os
import subprocess
import sys
import subprocess
import jinja2
import yaml
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
DEFAULT_LISTENER_RESOURCES = ["client", "federation"]
WORKERS_CONFIG = {
"pusher": {
"app": "synapse.app.pusher",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {"start_pushers": False},
"worker_extra_conf": "",
"shared_extra_conf": "start_pushers: false"
},
"user_dir": {
"app": "synapse.app.user_dir",
"listener_resources": ["client"],
"listener_resources": DEFAULT_LISTENER_RESOURCES,
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$"
],
"shared_extra_conf": {"update_user_directory": False},
"worker_extra_conf": "",
"shared_extra_conf": "update_user_directory: false"
},
"media_repository": {
"app": "synapse.app.media_repository",
"listener_resources": ["media"],
"endpoint_patterns": [
"^/_matrix/media/",
"^/_synapse/admin/v1/purge_media_cache$",
"^/_synapse/admin/v1/room/.*/media.*$",
"^/_synapse/admin/v1/user/.*/media.*$",
"^/_synapse/admin/v1/media/.*$",
"^/_synapse/admin/v1/quarantine_media/.*$",
],
"shared_extra_conf": {"enable_media_repo": False},
"worker_extra_conf": "enable_media_repo: true",
"shared_extra_conf": "enable_media_repo: false"
},
"appservice": {
"app": "synapse.app.appservice",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {"notify_appservices": False},
"worker_extra_conf": "",
"shared_extra_conf": "notify_appservices: false"
},
"federation_sender": {
"app": "synapse.app.federation_sender",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {"send_federation": False},
"worker_extra_conf": "",
"shared_extra_conf": "send_federation: false"
},
"synchrotron": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client"],
"listener_resources": DEFAULT_LISTENER_RESOURCES,
"endpoint_patterns": [
"^/_matrix/client/(v2_alpha|r0)/sync$",
"^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
"^/_matrix/client/(api/v1|r0)/initialSync$",
"^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
"shared_extra_conf": ""
},
"federation_reader": {
"app": "synapse.app.generic_worker",
"listener_resources": ["federation"],
"listener_resources": DEFAULT_LISTENER_RESOURCES,
"endpoint_patterns": [
"^/_matrix/federation/(v1|v2)/event/",
"^/_matrix/federation/(v1|v2)/state/",
@@ -116,188 +98,52 @@ WORKERS_CONFIG = {
"^/_matrix/federation/(v1|v2)/get_groups_publicised$",
"^/_matrix/key/v2/query",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
"shared_extra_conf": ""
},
"federation_inbound": {
"app": "synapse.app.generic_worker",
"listener_resources": ["federation"],
"endpoint_patterns": ["/_matrix/federation/(v1|v2)/send/"],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"event_persister": {
"app": "synapse.app.generic_worker",
"listener_resources": ["replication"],
"endpoint_patterns": [],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"background_worker": {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
# This worker cannot be sharded. Therefore there should only ever be one background
# worker, and it should be named background_worker1
"shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
"worker_extra_conf": "",
},
"event_creator": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client"],
"listener_resources": DEFAULT_LISTENER_RESOURCES,
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact",
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
"^/_matrix/client/(api/v1|r0|unstable)/join/",
"^/_matrix/client/(api/v1|r0|unstable)/profile/",
"/_matrix/federation/(v1|v2)/send/",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"frontend_proxy": {
"app": "synapse.app.frontend_proxy",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|unstable)/keys/upload"],
"shared_extra_conf": {},
"worker_extra_conf": (
"worker_main_http_uri: http://127.0.0.1:%d"
% (MAIN_PROCESS_HTTP_LISTENER_PORT,),
),
"shared_extra_conf": ""
},
}
# Templates for sections that may be inserted multiple times in config files
SUPERVISORD_PROCESS_CONFIG_BLOCK = """
[program:synapse_{name}]
command=/usr/local/bin/python -m {app} \
--config-path="{config_path}" \
--config-path=/conf/workers/shared.yaml \
--config-path=/conf/workers/{name}.yaml
autorestart=unexpected
priority=500
exitcodes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
"""
NGINX_LOCATION_CONFIG_BLOCK = """
location ~* {endpoint} {{
proxy_pass {upstream};
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
}}
"""
NGINX_UPSTREAM_CONFIG_BLOCK = """
upstream {upstream_worker_type} {{
{body}
}}
"""
# Utility functions
def log(txt: str):
"""Log something to the stdout.
Args:
txt: The text to log.
"""
def log(txt):
print(txt)
def error(txt: str):
"""Log something and exit with an error code.
Args:
txt: The text to log in error.
"""
def error(txt):
log(txt)
sys.exit(2)
def convert(src: str, dst: str, **template_vars):
def convert(src, dst, environ):
"""Generate a file from a template
Args:
src: Path to the input file.
dst: Path to write to.
template_vars: The arguments to replace placeholder variables in the template with.
src (str): path to input file
dst (str): path to file to write
environ (dict): environment dictionary, for replacement mappings.
"""
# Read the template file
with open(src) as infile:
template = infile.read()
# Generate a string from the template. We disable autoescape to prevent template
# variables from being escaped.
rendered = jinja2.Template(template, autoescape=False).render(**template_vars)
# Write the generated contents to a file
#
# We use append mode in case the files have already been written to by something else
# (for instance, as part of the instructions in a dockerfile).
with open(dst, "a") as outfile:
# In case the existing file doesn't end with a newline
outfile.write("\n")
rendered = jinja2.Template(template, autoescape=True).render(**environ)
print(rendered)
with open(dst, "w") as outfile:
outfile.write(rendered)
def add_sharding_to_shared_config(
shared_config: dict,
worker_type: str,
worker_name: str,
worker_port: int,
) -> None:
"""Given a dictionary representing a config file shared across all workers,
append sharded worker information to it for the current worker_type instance.
Args:
shared_config: The config dict that all worker instances share (after being converted to YAML)
worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
worker_name: The name of the worker instance.
worker_port: The HTTP replication port that the worker instance is listening on.
"""
# The instance_map config field marks the workers that write to various replication streams
instance_map = shared_config.setdefault("instance_map", {})
# Worker-type specific sharding config
if worker_type == "pusher":
shared_config.setdefault("pusher_instances", []).append(worker_name)
elif worker_type == "federation_sender":
shared_config.setdefault("federation_sender_instances", []).append(worker_name)
elif worker_type == "event_persister":
# Event persisters write to the events stream, so we need to update
# the list of event stream writers
shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
worker_name
)
# Map of stream writer instance names to host/ports combos
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
elif worker_type == "media_repository":
# The first configured media worker will run the media background jobs
shared_config.setdefault("media_instance_running_background_jobs", worker_name)
def generate_base_homeserver_config():
"""Starts Synapse and generates a basic homeserver config, which will later be
modified for worker support.
Raises: CalledProcessError if calling start.py returned a non-zero exit code.
Raises: CalledProcessError if calling start.py return a non-zero exit code.
"""
# start.py already does this for us, so just call that.
# note that this script is copied into the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
@@ -314,79 +160,100 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.
# shared_config is the contents of a Synapse config file that will be shared amongst
# the main Synapse process as well as all workers.
# The contents of a Synapse config file that will be added alongside the generated
# config when running the main Synapse process.
# It is intended mainly for disabling functionality when certain workers are spun up,
# and adding a replication listener.
# and add the replication listener
# First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result.
listeners = [
{
"port": 9093,
"bind_address": "127.0.0.1",
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
# first read the original config file to take listeners config and add the replication one
listeners = [{
"port": 9093,
"bind_address": "127.0.0.1",
"type": "http",
"resources":[{
"names": ["replication"]
}]
}]
with open(config_path) as file_stream:
original_config = yaml.safe_load(file_stream)
original_listeners = original_config.get("listeners")
if original_listeners:
listeners += original_listeners
# The shared homeserver config. The contents of which will be inserted into the
# base shared worker jinja2 template.
#
# This config file will be passed to all workers, included Synapse's main process.
shared_config = {"listeners": listeners}
homeserver_config = yaml.dump({"listeners": listeners})
# The supervisord config. The contents of which will be inserted into the
# base supervisord jinja2 template.
#
# Supervisord will be in charge of running everything, from redis to nginx to Synapse
# and all of its worker processes. Load the config template, which defines a few
# services that are necessary to run.
supervisord_config = ""
homeserver_config += """
redis:
enabled: true
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
# ports of each worker. For example:
# {
#   worker_type: {1234, 1235, ...}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams = {}
# TODO: remove before prod
suppress_key_server_warning: true
"""
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
# placed after the proxy_pass directive. The main benefit to representing this data as a
# dict over a str is that we can easily deduplicate endpoints across multiple instances
# of the same worker.
#
# An nginx site config that will be amended to depending on the workers that are
# spun up. To be placed in /etc/nginx/conf.d.
nginx_locations = {}
# The supervisord config
supervisord_config = """
[supervisord]
nodaemon=true
# Read the desired worker configuration from the environment
worker_types = environ.get("SYNAPSE_WORKER_TYPES")
if worker_types is None:
# No workers, just the main process
[program:nginx]
command=/usr/sbin/nginx -g "daemon off;"
priority=500
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
username=www-data
autorestart=true
[program:synapse_main]
command=/usr/local/bin/python -m synapse.app.homeserver \
--config-path="%s" \
--config-path=/conf/workers/shared.yaml
priority=1
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
""" % (config_path,)
# An nginx site config. Will live in /etc/nginx/conf.d
nginx_config_template_header = """
server {
# Listen on Synapse's default HTTP port number
listen 8080;
listen [::]:8080;
server_name localhost;
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 100M;
"""
nginx_config_body = "" # to modify below
nginx_config_template_end = """
# Send all other traffic to the main process
location ~* ^(\/_matrix|\/_synapse) {
proxy_pass http://localhost:8008;
proxy_set_header X-Forwarded-For $remote_addr;
}
}
"""
# Read desired worker configuration from environment
if "SYNAPSE_WORKERS" not in environ:
worker_types = []
else:
# Split type names by comma
worker_types = environ.get("SYNAPSE_WORKERS")
worker_types = worker_types.split(",")
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
os.mkdir("/conf/workers")
# Start worker ports from this arbitrary port
worker_port = 18009
# A counter of worker_type -> int. Used for determining the name for a given
# worker type when generating its config file, as each worker's name is just
# worker_type + instance #
worker_type_counter = {}
# For each worker type specified by the user, create config values
for worker_type in worker_types:
worker_type = worker_type.strip()
@@ -394,121 +261,70 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
if worker_config:
worker_config = worker_config.copy()
else:
log(worker_type + " is an unknown worker type! It will be ignored")
log(worker_type + " is a wrong worker type ! It will be ignored")
continue
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
# this is not hardcoded because we want to be able to have several workers
# of each type ultimately (not supported for now)
worker_name = worker_type
worker_config.update({"name": worker_name})
# Name workers by their type concatenated with an incrementing number
# e.g. federation_reader1
worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": worker_port, "config_path": config_path}
)
worker_config.update({"port": worker_port})
worker_config.update({"config_path": config_path})
# Update the shared config with any worker-type specific options
shared_config.update(worker_config["shared_extra_conf"])
homeserver_config += worker_config['shared_extra_conf'] + "\n"
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
if worker_type_total_count > 1:
# Update the shared config with sharding-related options if necessary
add_sharding_to_shared_config(
shared_config, worker_type, worker_name, worker_port
)
# Enable the pusher worker in supervisord
supervisord_config += """
[program:synapse_{name}]
command=/usr/local/bin/python -m {app} \
--config-path="{config_path}" \
--config-path=/conf/workers/shared.yaml \
--config-path=/conf/workers/{name}.yaml
autorestart=unexpected
priority=500
exitcodes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0""".format_map(worker_config)
# Enable the worker in supervisord
supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
# Add nginx location blocks for this worker's endpoints (if any are defined)
for pattern in worker_config["endpoint_patterns"]:
# Determine whether we need to load-balance this worker
if worker_type_total_count > 1:
# Create or add to a load-balanced upstream for this worker
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
for pattern in worker_config['endpoint_patterns']:
nginx_config_body += """
location ~* %s {
proxy_pass http://localhost:%s;
proxy_set_header X-Forwarded-For $remote_addr;
}
""" % (pattern, worker_port)
# Upstreams are named after the worker_type
upstream = "http://" + worker_type
else:
upstream = "http://localhost:%d" % (worker_port,)
# Note that this endpoint should proxy to this upstream
nginx_locations[pattern] = upstream
# Write out the worker's logging config file
# Check whether we should write worker logs to disk, in addition to the console
extra_log_template_args = {}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
dir=data_dir, name=worker_name
)
# Render and write the file
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
convert(
"/conf/log.config",
log_config_filepath,
worker_name=worker_name,
**extra_log_template_args,
)
# Then a worker config file
convert(
"/conf/worker.yaml.j2",
"/conf/workers/{name}.yaml".format(name=worker_name),
**worker_config,
worker_log_config_filepath=log_config_filepath,
)
convert("/conf/worker.yaml.j2", "/conf/workers/{name}.yaml".format(name=worker_name), worker_config)
worker_port += 1
# Build the nginx location config blocks
nginx_location_config = ""
for endpoint, upstream in nginx_locations.items():
nginx_location_config += NGINX_LOCATION_CONFIG_BLOCK.format(
endpoint=endpoint,
upstream=upstream,
)
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
body += " server localhost:%d;\n" % (port,)
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
upstream_worker_type=upstream_worker_type,
body=body,
)
# Finally, we'll write out the config files.
# Write out the config files. We use append mode for each in case the
# files may have already been written to by others.
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
"/conf/workers/shared.yaml",
shared_worker_config=yaml.dump(shared_config),
)
print(homeserver_config)
with open("/conf/workers/shared.yaml", "a") as f:
f.write(homeserver_config)
# Nginx config
convert(
"/conf/nginx.conf.j2",
"/etc/nginx/conf.d/matrix-synapse.conf",
worker_locations=nginx_location_config,
upstream_directives=nginx_upstream_config,
)
print()
print(nginx_config_template_header)
print(nginx_config_body)
print(nginx_config_template_end)
with open("/etc/nginx/conf.d/matrix-synapse.conf", "a") as f:
f.write(nginx_config_template_header)
f.write(nginx_config_body)
f.write(nginx_config_template_end)
# Supervisord config
convert(
"/conf/supervisord.conf.j2",
"/etc/supervisor/conf.d/supervisord.conf",
main_config_path=config_path,
worker_config=supervisord_config,
)
print()
print(supervisord_config)
with open("/etc/supervisor/conf.d/supervisord.conf", "a") as f:
f.write(supervisord_config)
# Ensure the logging directory exists
log_dir = data_dir + "/logs"
@@ -521,7 +337,7 @@ def start_supervisord():
Raises: CalledProcessError if calling supervisord returns a non-zero exit code.
"""
subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE)
subprocess.check_output(["/usr/bin/supervisord"])
def main(args, environ):
@@ -538,16 +354,8 @@ def main(args, environ):
log("Generating base homeserver config")
generate_base_homeserver_config()
# This script may be run multiple times (mostly by Complement, see note at top of file).
# Don't re-configure workers in this instance.
mark_filepath = "/conf/workers_have_been_configured"
if not os.path.exists(mark_filepath):
# Always regenerate all other config files
generate_worker_files(environ, config_path, data_dir)
# Mark workers as being configured
with open(mark_filepath, "w") as f:
f.write("")
# Always regenerate all other config files
generate_worker_files(environ, config_path, data_dir)
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# This script runs the PostgreSQL tests inside a Docker container. It expects
# the relevant source files to be mounted into /src (done automatically by the

View File

@@ -3,7 +3,6 @@
import codecs
import glob
import os
import platform
import subprocess
import sys
@@ -135,6 +134,7 @@ def run_generate_config(environ, ownership):
Never returns.
"""
print("running generate config")
for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
if v not in environ:
error("Environment variable '%s' is mandatory in `generate` mode." % (v,))
@@ -150,6 +150,8 @@ def run_generate_config(environ, ownership):
log("Creating log config %s" % (log_config_file,))
convert("/conf/log.config", log_config_file, environ)
print("Generating config at", config_path, "Config dir:", config_dir)
args = [
"python",
"-m",
@@ -178,8 +180,8 @@ def run_generate_config(environ, ownership):
else:
os.execv("/usr/local/bin/python", args)
def main(args, environ):
print("bla")
mode = args[1] if len(args) > 1 else "run"
desired_uid = int(environ.get("UID", "991"))
desired_gid = int(environ.get("GID", "991"))
@@ -214,13 +216,6 @@ def main(args, environ):
if "-m" not in args:
args = ["-m", synapse_worker] + args
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
if os.path.isfile(jemallocpath):
environ["LD_PRELOAD"] = jemallocpath
else:
log("Could not find %s, will not use" % (jemallocpath,))
# if there are no config files passed to synapse, try adding the default file
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
@@ -256,9 +251,9 @@ running with 'migrate_config'. See the README for more details.
args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execve("/usr/sbin/gosu", args, environ)
os.execv("/usr/sbin/gosu", args)
else:
os.execve("/usr/local/bin/python", args, environ)
os.execv("/usr/local/bin/python", args)
if __name__ == "__main__":

View File

@@ -1,8 +1,3 @@
# This is a configuration template for a single worker instance, and is
# used by Dockerfile-workers.
# Values will change depending on which workers are selected when
# running that image.
worker_app: "{{ app }}"
worker_name: "{{ name }}"
@@ -13,14 +8,8 @@ worker_replication_http_port: 9093
worker_listeners:
- type: http
port: {{ port }}
{% if listener_resources %}
resources:
- names:
{%- for resource in listener_resources %}
- {{ resource }}
{%- endfor %}
{% endif %}
worker_log_config: {{ worker_log_config_filepath }}
{{ worker_extra_conf }}

View File

@@ -1,37 +1,31 @@
# Overview
A captcha can be enabled on your homeserver to help prevent bots from registering
accounts. Synapse currently uses Google's reCAPTCHA service which requires API keys
from Google.
Captcha can be enabled for this home server. This file explains how to do that.
The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.
## Getting API keys
## Getting keys
Requires a site/secret key pair from:
<https://developers.google.com/recaptcha/>
Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
## Setting ReCaptcha Keys
The keys are a config option on the home server config. If they are not
visible, you can generate them via `--generate-config`. Set the following value:
1. Create a new site at <https://www.google.com/recaptcha/admin/create>
1. Set the label to anything you want
1. Set the type to reCAPTCHA v2 using the "I'm not a robot" Checkbox option.
This is the only type of captcha that works with Synapse.
1. Add the public hostname for your server, as set in `public_baseurl`
in `homeserver.yaml`, to the list of authorized domains. If you have not set
`public_baseurl`, use `server_name`.
1. Agree to the terms of service and submit.
1. Copy your site key and secret key and add them to your `homeserver.yaml`
configuration file
```
recaptcha_public_key: YOUR_SITE_KEY
recaptcha_private_key: YOUR_SECRET_KEY
```
1. Enable the CAPTCHA for new registrations
```
In addition, you MUST enable captchas via:
enable_registration_captcha: true
```
1. Go to the settings page for the CAPTCHA you just created
1. Uncheck the "Verify the origin of reCAPTCHA solutions" checkbox so that the
captcha can be displayed in any client. If you do not disable this option then you
must specify the domains of every client that is allowed to display the CAPTCHA.
## Configuring IP used for auth
The reCAPTCHA API requires that the IP address of the user who solved the
CAPTCHA is sent. If the client is connecting through a proxy or load balancer,
The ReCaptcha API requires that the IP address of the user who solved the
captcha is sent. If the client is connecting through a proxy or load balancer,
it may be required to use the `X-Forwarded-For` (XFF) header instead of the origin
IP address. This can be configured using the `x_forwarded` directive in the
listeners section of the `homeserver.yaml` configuration file.
listeners section of the homeserver.yaml configuration file.

View File

@@ -1,72 +1,7 @@
# Synapse Documentation
**The documentation is currently hosted [here](https://matrix-org.github.io/synapse).**
Please update any links to point to the new website instead.
This directory contains documentation specific to the `synapse` homeserver.
## About
All matrix-generic documentation now lives in its own project, located at [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc)
This directory currently holds a series of markdown files documenting how to install, use
and develop Synapse, the reference Matrix homeserver. The documentation is readable directly
from this repository, but it is recommended to instead browse through the
[website](https://matrix-org.github.io/synapse) for easier discoverability.
## Adding to the documentation
Most of the documentation currently exists as top-level files; when organising them into
a structured website, these files were kept in place so that existing links would not break.
The rest of the documentation is stored in folders such as `setup`, `usage` and
`development`. **All new documentation files should be placed in structured folders.** For example:
To create a new user-facing documentation page about a new Single Sign-On protocol named
"MyCoolProtocol", one should create a new file with a relevant name, such as "my_cool_protocol.md".
This file might fit into the documentation structure at:
- Usage
- Configuration
- User Authentication
- Single Sign-On
- **My Cool Protocol**
Given that, one would place the new file under
`usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md`.
Note that the structure of the documentation (and thus the left sidebar on the website) is determined
by the list in [SUMMARY.md](SUMMARY.md). The final thing to do when adding a new page is to add a new
line linking to the new documentation file:
```markdown
- [My Cool Protocol](usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md)
```
## Building the documentation
The documentation is built with [mdbook](https://rust-lang.github.io/mdBook/), and the outline of the
documentation is determined by the structure of [SUMMARY.md](SUMMARY.md).
First, [get mdbook](https://github.com/rust-lang/mdBook#installation). Then, **from the root of the repository**,
build the documentation with:
```sh
mdbook build
```
The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can
browse the book by opening `book/index.html` in a web browser.
You can also have mdbook host the docs on a local webserver with hot-reload functionality via:
```sh
mdbook serve
```
The URL at which the docs can be viewed will be logged.
## Configuration and theming
The look and behaviour of the website is configured by the [book.toml](../book.toml) file
at the root of the repository. See
[mdbook's documentation on configuration](https://rust-lang.github.io/mdBook/format/config.html)
for available options.
The site can be themed and additionally extended with extra UI and features. See
[website_files/README.md](website_files/README.md) for details.
(Note: some items here may be moved to [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) at some point in the future.)

View File

@@ -1,87 +0,0 @@
# Summary
# Introduction
- [Welcome and Overview](welcome_and_overview.md)
# Setup
- [Installation](setup/installation.md)
- [Using Postgres](postgres.md)
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
- [Delegation](delegate.md)
# Upgrading
- [Upgrading between Synapse Versions](upgrading/README.md)
- [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)
# Usage
- [Federation](federate.md)
- [Configuration](usage/configuration/README.md)
- [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
- [Logging Sample Config File](usage/configuration/logging_sample_config.md)
- [Structured Logging](structured_logging.md)
- [User Authentication](usage/configuration/user_authentication/README.md)
- [Single-Sign On]()
- [OpenID Connect](openid.md)
- [SAML]()
- [CAS]()
- [SSO Mapping Providers](sso_mapping_providers.md)
- [Password Auth Providers](password_auth_providers.md)
- [JSON Web Tokens](jwt.md)
- [Registration Captcha](CAPTCHA_SETUP.md)
- [Application Services](application_services.md)
- [Server Notices](server_notices.md)
- [Consent Tracking](consent_tracking.md)
- [URL Previews](url_previews.md)
- [User Directory](user_directory.md)
- [Message Retention Policies](message_retention_policies.md)
- [Pluggable Modules]()
- [Third Party Rules]()
- [Spam Checker](spam_checker.md)
- [Presence Router](presence_router_module.md)
- [Media Storage Providers]()
- [Workers](workers.md)
- [Using `synctl` with Workers](synctl_workers.md)
- [Systemd](systemd-with-workers/README.md)
- [Administration](usage/administration/README.md)
- [Admin API](usage/administration/admin_api/README.md)
- [Account Validity](admin_api/account_validity.md)
- [Delete Group](admin_api/delete_group.md)
- [Event Reports](admin_api/event_reports.md)
- [Media](admin_api/media_admin_api.md)
- [Purge History](admin_api/purge_history_api.md)
- [Purge Rooms](admin_api/purge_room.md)
- [Register Users](admin_api/register_api.md)
- [Manipulate Room Membership](admin_api/room_membership.md)
- [Rooms](admin_api/rooms.md)
- [Server Notices](admin_api/server_notices.md)
- [Shutdown Room](admin_api/shutdown_room.md)
- [Statistics](admin_api/statistics.md)
- [Users](admin_api/user_admin_api.md)
- [Server Version](admin_api/version_api.md)
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
- [Scripts]()
# Development
- [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md)
- [Git Usage](dev/git.md)
- [Testing]()
- [OpenTracing](opentracing.md)
- [Synapse Architecture]()
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [TCP Replication](tcp_replication.md)
- [Internal Documentation](development/internal_documentation/README.md)
- [Single Sign-On]()
- [SAML](dev/saml.md)
- [CAS](dev/cas.md)
- [State Resolution]()
- [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
- [Media Repository](media_repository.md)
- [Room and User Statistics](room_and_user_statistics.md)
- [Scripts]()
# Other
- [Dependency Deprecation Policy](deprecation_policy.md)

View File

@@ -1,14 +1,28 @@
Admin APIs
==========
**Note**: The latest documentation can be viewed `here <https://matrix-org.github.io/synapse>`_.
See `docs/README.md <../docs/README.md>`_ for more information.
**Please update links to point to the website instead.** Existing files in this directory
are preserved to maintain historical links, but may be moved in the future.
This directory includes documentation for the various synapse specific admin
APIs available. Updates to the existing Admin API documentation should still
be made to these files, but any new documentation files should instead be placed under
`docs/usage/administration/admin_api <../docs/usage/administration/admin_api>`_.
APIs available.
Authenticating as a server admin
--------------------------------
Many of the API calls in the admin api will require an `access_token` for a
server admin. (Note that a server admin is distinct from a room admin.)
A user can be marked as a server admin by updating the database directly, e.g.:
.. code-block:: sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
A new server admin user can also be created using the
``register_new_matrix_user`` script.
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
Once you have your `access_token`, to include it in a request, the best option is to add the token to a request header:
``curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request>``
For more details, please refer to the complete `matrix spec documentation <https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens>`_.
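
As a sketch in Python (the endpoint shown is the server version API; the token is a placeholder):

.. code-block:: python

    import urllib.request

    req = urllib.request.Request(
        "https://example.com/_synapse/admin/v1/server_version",
        headers={"Authorization": "Bearer <access_token>"},
    )
    with urllib.request.urlopen(req) as resp:
        print(resp.read())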

View File

@@ -1,42 +0,0 @@
# Account validity API
This API allows a server administrator to manage the validity of an account. To
use it, you must enable the account validity feature (under
`account_validity`) in Synapse's configuration.
## Renew account
This API extends the validity of an account by as much time as configured in the
`period` parameter from the `account_validity` configuration.
The API is:
```
POST /_synapse/admin/v1/account_validity/validity
```
with the following body:
```json
{
"user_id": "<user ID for the account to renew>",
"expiration_ts": 0,
"enable_renewal_emails": true
}
```
`expiration_ts` is an optional parameter and overrides the expiration date,
which otherwise defaults to now + validity period.
`enable_renewal_emails` is also an optional parameter and enables/disables
sending renewal emails to the user. Defaults to true.
The API returns with the new expiration date for this account, as a timestamp in
milliseconds since epoch:
```json
{
"expiration_ts": 0
}
```

View File

@@ -0,0 +1,42 @@
Account validity API
====================
This API allows a server administrator to manage the validity of an account. To
use it, you must enable the account validity feature (under
``account_validity``) in Synapse's configuration.
Renew account
-------------
This API extends the validity of an account by as much time as configured in the
``period`` parameter from the ``account_validity`` configuration.
The API is::
POST /_synapse/admin/v1/account_validity/validity
with the following body:
.. code:: json
{
"user_id": "<user ID for the account to renew>",
"expiration_ts": 0,
"enable_renewal_emails": true
}
``expiration_ts`` is an optional parameter and overrides the expiration date,
which otherwise defaults to now + validity period.
``enable_renewal_emails`` is also an optional parameter and enables/disables
sending renewal emails to the user. Defaults to true.
The API returns with the new expiration date for this account, as a timestamp in
milliseconds since epoch:
.. code:: json
{
"expiration_ts": 0
}

View File

@@ -11,4 +11,4 @@ POST /_synapse/admin/v1/delete_group/<group_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../../usage/administration/admin_api).
server admin: see [README.rst](README.rst).

View File

@@ -7,7 +7,7 @@ The api is:
GET /_synapse/admin/v1/event_reports?from=0&limit=10
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../../usage/administration/admin_api).
server admin: see [README.rst](README.rst).
It returns a JSON body like the following:
@@ -75,9 +75,9 @@ The following fields are returned in the JSON response body:
* `name`: string - The name of the room.
* `event_id`: string - The ID of the reported event.
* `user_id`: string - This is the user who reported the event and wrote the reason.
* `reason`: string - Comment made by the `user_id` in this report. May be blank or `null`.
* `reason`: string - Comment made by the `user_id` in this report. May be blank.
* `score`: integer - Content is reported based upon a negative score, where -100 is
"most offensive" and 0 is "inoffensive". May be `null`.
"most offensive" and 0 is "inoffensive".
* `sender`: string - This is the ID of the user who sent the original message/event that
was reported.
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
@@ -95,7 +95,7 @@ The api is:
GET /_synapse/admin/v1/event_reports/<report_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../../usage/administration/admin_api).
server admin: see [README.rst](README.rst).
It returns a JSON body like the following:

View File

@@ -1,24 +1,15 @@
# Contents
- [Querying media](#querying-media)
* [List all media in a room](#list-all-media-in-a-room)
* [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
- [List all media in a room](#list-all-media-in-a-room)
- [Quarantine media](#quarantine-media)
* [Quarantining media by ID](#quarantining-media-by-id)
* [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id)
* [Quarantining media in a room](#quarantining-media-in-a-room)
* [Quarantining all media of a user](#quarantining-all-media-of-a-user)
* [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
* [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined)
- [Delete local media](#delete-local-media)
* [Delete a specific local media](#delete-a-specific-local-media)
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
- [Purge Remote Media API](#purge-remote-media-api)
# Querying media
These APIs allow extracting media information from the homeserver.
## List all media in a room
# List all media in a room
This API gets a list of known media in a room.
However, it only shows media from unencrypted events or rooms.
@@ -28,7 +19,7 @@ The API is:
GET /_synapse/admin/v1/room/<room_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../../usage/administration/admin_api).
server admin: see [README.rst](README.rst).
The API returns a JSON body like the following:
```json
@@ -44,12 +35,6 @@ The API returns a JSON body like the following:
}
```
## List all media uploaded by a user
Listing all media that has been uploaded by a local user can be achieved through
the use of the [List media of a user](user_admin_api.rst#list-media-of-a-user)
Admin API.
# Quarantine media
Quarantining media means that it is marked as inaccessible by users. It applies
@@ -78,27 +63,6 @@ Response:
{}
```
## Remove media from quarantine by ID
This API removes a single piece of local or remote media from quarantine.
Request:
```
POST /_synapse/admin/v1/media/unquarantine/<server_name>/<media_id>
{}
```
Where `server_name` is in the form of `example.org`, and `media_id` is in the
form of `abcdefg12345...`.
Response:
```json
{}
```
## Quarantining media in a room
This API quarantines all local and remote media in a room.
@@ -159,49 +123,6 @@ The following fields are returned in the JSON response body:
* `num_quarantined`: integer - The number of media items successfully quarantined
## Protecting media from being quarantined
This API protects a single piece of local media from being quarantined using the
above APIs. This is useful for sticker packs and other shared media which you do
not want to get quarantined, especially when
[quarantining media in a room](#quarantining-media-in-a-room).
Request:
```
POST /_synapse/admin/v1/media/protect/<media_id>
{}
```
Where `media_id` is in the form of `abcdefg12345...`.
Response:
```json
{}
```
## Unprotecting media from being quarantined
This API reverts the protection of a media.
Request:
```
POST /_synapse/admin/v1/media/unprotect/<media_id>
{}
```
Where `media_id` is in the form of `abcdefg12345...`.
Response:
```json
{}
```
# Delete local media
This API deletes the *local* media from the disk of your own server.
This includes any local thumbnails and copies of media downloaded from
@@ -311,7 +232,7 @@ The following fields are returned in the JSON response body:
* `deleted`: integer - The number of media items successfully deleted
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../../usage/administration/admin_api).
server admin: see [README.rst](README.rst).
If the user re-requests purged remote media, synapse will re-request the media
from the originating server.

View File

@@ -1,4 +1,5 @@
# Purge History API
Purge History API
=================
The purge history API allows server admins to purge historic events from their
database, reclaiming disk space.
@@ -12,12 +13,10 @@ delete the last message in a room.
The API is:
```
POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
```
``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../../usage/administration/admin_api)
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
@@ -25,54 +24,54 @@ deleted.)
Room state data (such as joins, leaves, topic) is always preserved.
To delete local message events as well, set `delete_local_events` in the body:
To delete local message events as well, set ``delete_local_events`` in the body:
```
{
"delete_local_events": true
}
```
.. code:: json
{
"delete_local_events": true
}
The caller must specify the point in the room to purge up to. This can be
specified by including an event_id in the URI, or by setting a
`purge_up_to_event_id` or `purge_up_to_ts` in the request body. If an event
``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
id is given, that event (and others at the same graph depth) will be retained.
If `purge_up_to_ts` is given, it should be a timestamp since the unix epoch,
If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
in milliseconds.
The API starts the purge running, and returns immediately with a JSON body with
a purge id:
```json
{
"purge_id": "<opaque id>"
}
```
.. code:: json
## Purge status query
{
"purge_id": "<opaque id>"
}
Purge status query
------------------
It is possible to poll for updates on recent purges with a second API;
```
GET /_synapse/admin/v1/purge_history_status/<purge_id>
```
``GET /_synapse/admin/v1/purge_history_status/<purge_id>``
Again, you will need to authenticate by providing an `access_token` for a
Again, you will need to authenticate by providing an ``access_token`` for a
server admin.
This API returns a JSON body like the following:
```json
{
"status": "active"
}
```
.. code:: json
The status will be one of `active`, `complete`, or `failed`.
{
"status": "active"
}
## Reclaim disk space (Postgres)
The status will be one of ``active``, ``complete``, or ``failed``.
Reclaim disk space (Postgres)
-----------------------------
To reclaim the disk space and return it to the operating system, you need to run
`VACUUM FULL;` on the database.
<https://www.postgresql.org/docs/current/sql-vacuum.html>
https://www.postgresql.org/docs/current/sql-vacuum.html

View File

@@ -1,73 +0,0 @@
# Shared-Secret Registration
This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
(`registration_shared_secret` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.
To fetch the nonce, you need to request one from the API:
```
> GET /_synapse/admin/v1/register
< {"nonce": "thisisanonce"}
```
Once you have the nonce, you can make a `POST` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and a HMAC digest of the content. Also you can
set the displayname (optional, `username` by default).
As an example:
```
> POST /_synapse/admin/v1/register
> {
"nonce": "thisisanonce",
"username": "pepper_roni",
"displayname": "Pepper Roni",
"password": "pizza",
"admin": true,
"mac": "mac_digest_here"
}
< {
"access_token": "token_here",
"user_id": "@pepper_roni:localhost",
"home_server": "test",
"device_id": "device_id_here"
}
```
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type
each separated by NULs. For an example of generation in Python:
```python
import hmac, hashlib
def generate_mac(nonce, user, password, admin=False, user_type=None):
mac = hmac.new(
key=shared_secret,
digestmod=hashlib.sha1,
)
mac.update(nonce.encode('utf8'))
mac.update(b"\x00")
mac.update(user.encode('utf8'))
mac.update(b"\x00")
mac.update(password.encode('utf8'))
mac.update(b"\x00")
mac.update(b"admin" if admin else b"notadmin")
if user_type:
mac.update(b"\x00")
mac.update(user_type.encode('utf8'))
return mac.hexdigest()
```
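
As a worked example, the whole flow might look like this (a sketch: the homeserver
URL and `shared_secret` are placeholders):

```python
import hashlib
import hmac
import json
import urllib.request

BASE = "http://localhost:8008"  # placeholder homeserver URL
shared_secret = b"<registration_shared_secret>"  # placeholder

# Fetch a one-time nonce.
with urllib.request.urlopen(BASE + "/_synapse/admin/v1/register") as resp:
    nonce = json.load(resp)["nonce"]

# HMAC-SHA1 over nonce, user, password and "admin"/"notadmin", NUL-separated.
mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
mac.update(nonce.encode("utf8") + b"\x00")
mac.update(b"pepper_roni\x00")
mac.update(b"pizza\x00")
mac.update(b"admin")

body = json.dumps({
    "nonce": nonce,
    "username": "pepper_roni",
    "password": "pizza",
    "admin": True,
    "mac": mac.hexdigest(),
}).encode("utf8")

req = urllib.request.Request(
    BASE + "/_synapse/admin/v1/register",
    data=body,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(resp.read())
```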

View File

@@ -0,0 +1,68 @@
Shared-Secret Registration
==========================
This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
(``registration_shared_secret`` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.
To fetch the nonce, you need to request one from the API::
> GET /_synapse/admin/v1/register
< {"nonce": "thisisanonce"}
Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and a HMAC digest of the content. Also you can
set the displayname (optional, ``username`` by default).
As an example::
> POST /_synapse/admin/v1/register
> {
"nonce": "thisisanonce",
"username": "pepper_roni",
"displayname": "Pepper Roni",
"password": "pizza",
"admin": true,
"mac": "mac_digest_here"
}
< {
"access_token": "token_here",
"user_id": "@pepper_roni:localhost",
"home_server": "test",
"device_id": "device_id_here"
}
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type
each separated by NULs. For an example of generation in Python::
import hmac, hashlib
def generate_mac(nonce, user, password, admin=False, user_type=None):
mac = hmac.new(
key=shared_secret,
digestmod=hashlib.sha1,
)
mac.update(nonce.encode('utf8'))
mac.update(b"\x00")
mac.update(user.encode('utf8'))
mac.update(b"\x00")
mac.update(password.encode('utf8'))
mac.update(b"\x00")
mac.update(b"admin" if admin else b"notadmin")
if user_type:
mac.update(b"\x00")
mac.update(user_type.encode('utf8'))
return mac.hexdigest()

View File

@@ -24,7 +24,7 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [Admin API](../../usage/administration/admin_api).
server admin: see [README.rst](README.rst).
Response:

View File

@@ -4,14 +4,10 @@
* [Usage](#usage)
- [Room Details API](#room-details-api)
- [Room Members API](#room-members-api)
- [Room State API](#room-state-api)
- [Delete Room API](#delete-room-api)
* [Parameters](#parameters-1)
* [Response](#response)
* [Undoing room shutdowns](#undoing-room-shutdowns)
- [Make Room Admin API](#make-room-admin-api)
- [Forward Extremities Admin API](#forward-extremities-admin-api)
- [Event Context API](#event-context-api)
# List Room API
@@ -370,36 +366,6 @@ Response:
}
```
# Room State API
The Room State admin API allows server admins to get a list of all state events in a room.
The response includes the following fields:
* `state` - The current state of the room at the time of request.
## Usage
A standard request:
```
GET /_synapse/admin/v1/rooms/<room_id>/state
{}
```
Response:
```json
{
"state": [
{"type": "m.room.create", "state_key": "", "etc": true},
{"type": "m.room.power_levels", "state_key": "", "etc": true},
{"type": "m.room.name", "state_key": "", "etc": true}
]
}
```
# Delete Room API
The Delete Room admin API allows server admins to remove rooms from server
@@ -428,7 +394,7 @@ the new room. Users on other servers will be unaffected.
The API is:
```
DELETE /_synapse/admin/v1/rooms/<room_id>
POST /_synapse/admin/v1/rooms/<room_id>/delete
```
with a body of:
@@ -443,7 +409,7 @@ with a body of:
```
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see [Admin API](../../usage/administration/admin_api).
server admin: see [README.rst](README.rst).
A response body like the following is returned:
@@ -501,7 +467,6 @@ The following fields are returned in the JSON response body:
the old room to the new.
* `new_room_id` - A string representing the room ID of the new room.
## Undoing room shutdowns
*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
@@ -527,199 +492,4 @@ You will have to manually handle, if you so choose, the following:
* Aliases that would have been redirected to the Content Violation room.
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
* Removal of the Content Violation room if desired.
## Deprecated endpoint
The previous, deprecated API will be removed in a future release. It was:
```
POST /_synapse/admin/v1/rooms/<room_id>/delete
```
It behaves in the same way as the current endpoint, differing only in the path and the HTTP method.
# Make Room Admin API
Grants another user the highest power available to a local user who is in the room.
If the user is not in the room, and the room is not publicly joinable, the user will be invited.
By default the server admin (the caller) is granted power, but another user can
optionally be specified, e.g.:
```
POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
{
"user_id": "@foo:example.com"
}
```
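As a minimal sketch of invoking this endpoint from Python (the base URL and
access token are placeholders):

```python
import requests

base_url = "http://localhost:8008"  # placeholder homeserver address
headers = {"Authorization": "Bearer <admin_access_token>"}  # placeholder token

room = "!636q39766251:example.com"
resp = requests.post(
    f"{base_url}/_synapse/admin/v1/rooms/{room}/make_room_admin",
    headers=headers,
    json={"user_id": "@foo:example.com"},
)
resp.raise_for_status()
```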
# Forward Extremities Admin API
Enables querying and deleting forward extremities from rooms. When a lot of forward
extremities accumulate in a room, performance can become degraded. For details, see
[#1760](https://github.com/matrix-org/synapse/issues/1760).
## Check for forward extremities
To check the status of forward extremities for a room:
```
GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
```
A response as follows will be returned:
```json
{
"count": 1,
"results": [
{
"event_id": "$M5SP266vsnxctfwFgFLNceaCo3ujhRtg_NiiHabcdefgh",
"state_group": 439,
"depth": 123,
"received_ts": 1611263016761
}
]
}
```
## Deleting forward extremities
**WARNING**: Please ensure you know what you're doing and have read
the related issue [#1760](https://github.com/matrix-org/synapse/issues/1760).
Under no circumstances should this API be executed as an automated maintenance task!
If a room has lots of forward extremities, the excess can be
deleted as follows:
```
DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
```
A response as follows will be returned, indicating the number of forward extremities
that were deleted.
```json
{
"deleted": 1
}
```
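To illustrate how the two endpoints fit together, here is a hedged sketch of an
interactive check-then-delete session in Python. In keeping with the warning
above, it requires explicit human confirmation; the base URL and token are
placeholders:

```python
import requests

base_url = "http://localhost:8008"  # placeholder homeserver address
headers = {"Authorization": "Bearer <admin_access_token>"}  # placeholder token

room = "!636q39766251:example.com"
url = f"{base_url}/_synapse/admin/v1/rooms/{room}/forward_extremities"

# Check how many forward extremities the room currently has.
count = requests.get(url, headers=headers).json()["count"]
print(f"{count} forward extremities")

# Deliberately require human confirmation; never run this unattended.
if count > 1 and input("Delete extras? [y/N] ").lower() == "y":
    print(requests.delete(url, headers=headers).json())
```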
# Event Context API
This API lets a client find the context of an event. This is designed primarily to investigate abuse reports.
```
GET /_synapse/admin/v1/rooms/<room_id>/context/<event_id>
```
This API mimics [GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-rooms-roomid-context-eventid). Please refer to the link for all details on parameters and response.
Example response:
```json
{
"end": "t29-57_2_0_2",
"events_after": [
{
"content": {
"body": "This is an example text message",
"msgtype": "m.text",
"format": "org.matrix.custom.html",
"formatted_body": "<b>This is an example text message</b>"
},
"type": "m.room.message",
"event_id": "$143273582443PhrSn:example.org",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
}
}
],
"event": {
"content": {
"body": "filename.jpg",
"info": {
"h": 398,
"w": 394,
"mimetype": "image/jpeg",
"size": 31037
},
"url": "mxc://example.org/JWEIFJgwEIhweiWJE",
"msgtype": "m.image"
},
"type": "m.room.message",
"event_id": "$f3h4d129462ha:example.com",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
}
},
"events_before": [
{
"content": {
"body": "something-important.doc",
"filename": "something-important.doc",
"info": {
"mimetype": "application/msword",
"size": 46144
},
"msgtype": "m.file",
"url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe"
},
"type": "m.room.message",
"event_id": "$143273582443PhrSn:example.org",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
}
}
],
"start": "t27-54_2_0_2",
"state": [
{
"content": {
"creator": "@example:example.org",
"room_version": "1",
"m.federate": true,
"predecessor": {
"event_id": "$something:example.org",
"room_id": "!oldroom:example.org"
}
},
"type": "m.room.create",
"event_id": "$143273582443PhrSn:example.org",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
},
"state_key": ""
},
{
"content": {
"membership": "join",
"avatar_url": "mxc://example.org/SEsfnsuifSDFSSEF",
"displayname": "Alice Margatroid"
},
"type": "m.room.member",
"event_id": "$143273582443PhrSn:example.org",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
},
"state_key": "@alice:example.org"
}
]
}
```
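For completeness, a minimal sketch of requesting an event's context from Python
(the base URL, token, and IDs are placeholders; `limit` is assumed to behave as
in the client-server API this endpoint mimics):

```python
import requests

base_url = "http://localhost:8008"  # placeholder homeserver address
headers = {"Authorization": "Bearer <admin_access_token>"}  # placeholder token

room_id = "!636q39766251:example.com"
event_id = "$f3h4d129462ha:example.com"
resp = requests.get(
    f"{base_url}/_synapse/admin/v1/rooms/{room_id}/context/{event_id}",
    headers=headers,
    params={"limit": 10},  # assumed: mirrors the client-server context API
)
ctx = resp.json()
print(len(ctx["events_before"]), "events before,", len(ctx["events_after"]), "events after")
```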

Some files were not shown because too many files have changed in this diff.