
Compare commits


2 Commits

Author         SHA1        Message                                                              Date
Andrew Morgan  9f2434d494  Don't require Content-Length header                                  2020-01-09 17:01:55 +00:00
Andrew Morgan  a6670b7257  Don't 500 on not providing a Content-Length header to media upload  2020-01-09 16:25:56 +00:00
847 changed files with 25988 additions and 55143 deletions

View File

@@ -0,0 +1,22 @@
version: '3.1'
services:
postgres:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
command: -c fsync=off
testenv:
image: python:3.5
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /src
volumes:
- ..:/src

View File

@@ -0,0 +1,22 @@
version: '3.1'
services:
postgres:
image: postgres:11
environment:
POSTGRES_PASSWORD: postgres
command: -c fsync=off
testenv:
image: python:3.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /src
volumes:
- ..:/src

View File

@@ -0,0 +1,22 @@
version: '3.1'
services:
postgres:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
command: -c fsync=off
testenv:
image: python:3.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /src
volumes:
- ..:/src
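
These three compose files (Python 3.5 with Postgres 9.5, Python 3.7 with Postgres 11, and Python 3.7 with Postgres 9.5) each define a disposable Postgres instance plus a Python test container; the `./test_postgresql.sh` helper mentioned later in this diff is the intended entry point. A hedged sketch of driving one such file by hand follows; the compose file name and tox environment are assumptions, as the diff does not show the real file names:

```
# Bring up Postgres and run the PostgreSQL-backed test suite once, then
# tear everything down. File name and tox env are illustrative.
docker-compose -f docker-compose.py35.pg95.yaml run --rm testenv \
    bash -c "pip install tox && tox -e py35-postgres"
docker-compose -f docker-compose.py35.pg95.yaml down
```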

View File

@@ -1,13 +0,0 @@
#!/bin/bash
# this script is run by buildkite in a plain `xenial` container; it installs the
# minimal requirements for tox and hands over to the py35-old tox environment.
set -ex
apt-get update
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox
export LANG="C.UTF-8"
exec tox -e py35-old,combine

View File

@@ -5,6 +5,8 @@ Message history can be paginated
Can re-join room if re-invited
/upgrade creates a new room
The only membership state included in an initial sync is for all the senders in the timeline
Local device key changes get to remote servers
@@ -37,5 +39,3 @@ Server correctly handles incoming m.device_list_update
# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
Can get rooms/{roomId}/members at a given point

View File

@@ -4,16 +4,18 @@ jobs:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} .
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
dockerhubuploadlatest:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest .
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:latest
- run: docker push matrixdotorg/synapse:latest-py3
workflows:
version: 2

View File

@@ -1,5 +0,0 @@
**If you are looking for support** please ask in **#synapse:matrix.org**
(using a matrix.org account if necessary). We do not use GitHub issues for
support.
**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/

View File

@@ -6,11 +6,9 @@ about: Create a report to help us improve
<!--
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**,
please ask in **#synapse:matrix.org** (using a matrix.org account if necessary)
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
You will likely get better support more quickly if you ask in ** #synapse:matrix.org ** ;)
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all

View File

@@ -3,10 +3,6 @@
<!-- Please read CONTRIBUTING.md before submitting your pull request -->
* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog). The entry should:
- Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog)
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#sign-off)
* [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#code-style))

CHANGES.md (1315 changed lines)

File diff suppressed because it is too large.

View File

@@ -1,63 +1,75 @@
# Contributing code to Synapse
# Contributing code to Matrix
Everyone is welcome to contribute code to [matrix.org
projects](https://github.com/matrix-org), provided that they are willing to
license their contributions under the same license as the project itself. We
follow a simple 'inbound=outbound' model for contributions: the act of
submitting an 'inbound' contribution means that the contributor agrees to
license the code under the same terms as the project's overall 'outbound'
license - in our case, this is almost always Apache Software License v2 (see
[LICENSE](LICENSE)).
Everyone is welcome to contribute code to Matrix
(https://github.com/matrix-org), provided that they are willing to license
their contributions under the same license as the project itself. We follow a
simple 'inbound=outbound' model for contributions: the act of submitting an
'inbound' contribution means that the contributor agrees to license the code
under the same terms as the project's overall 'outbound' license - in our
case, this is almost always Apache Software License v2 (see [LICENSE](LICENSE)).
## How to contribute
The preferred and easiest way to contribute changes is to fork the relevant
project on github, and then [create a pull request](
https://help.github.com/articles/using-pull-requests/) to ask us to pull your
changes into our repo.
The preferred and easiest way to contribute changes to Matrix is to fork the
relevant project on github, and then [create a pull request](
https://help.github.com/articles/using-pull-requests/) to ask us to pull
your changes into our repo.
Some other points to follow:
* Please base your changes on the `develop` branch.
* Please follow the [code style requirements](#code-style).
**The single biggest thing you need to know is: please base your changes on
the develop branch - *not* master.**
* Please include a [changelog entry](#changelog) with each PR.
We use the master branch to track the most recent release, so that folks who
blindly clone the repo and automatically check out master get something that
works. Develop is the unstable branch where all the development actually
happens: the workflow is that contributors should fork the develop branch to
make a 'feature' branch for a particular contribution, and then make a pull
request to merge this back into the matrix.org 'official' develop branch. We
use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
* Please [sign off](#sign-off) your contribution.
We use [Buildkite](https://buildkite.com/matrix-dot-org/synapse) for continuous
integration. If your change breaks the build, this will be shown in GitHub, so
please keep an eye on the pull request for feedback.
* Please keep an eye on the pull request for feedback from the [continuous
integration system](#continuous-integration-and-testing) and try to fix any
errors that come up.
To run unit tests in a local development environment, you can use:
* If you need to [update your PR](#updating-your-pull-request), just add new
commits to your branch rather than rebasing.
- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
(requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
(requires Docker). Entirely self-contained, recommended if you don't want to
set up PostgreSQL yourself.
Docker images are available for running the integration tests (SyTest) locally,
see the [documentation in the SyTest repo](
https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
information.
## Code style
Synapse's code style is documented [here](docs/code_style.md). Please follow
it, including the conventions for the [sample configuration
file](docs/code_style.md#configuration-file-format).
All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
[here](docs/code_style.md).
Many of the conventions are enforced by scripts which are run as part of the
[continuous integration system](#continuous-integration-and-testing). To help
check if you have followed the code style, you can run `scripts-dev/lint.sh`
locally. You'll need python 3.6 or later, and to install a number of tools:
To facilitate meeting these criteria you can run `scripts-dev/lint.sh`
locally. Since this runs the tools listed in the above document, you'll need
python 3.6 and to install each tool:
```
# Install the dependencies
pip install -U black flake8 flake8-comprehensions isort
pip install -U black flake8 isort
# Run the linter script
./scripts-dev/lint.sh
```
**Note that the script does not just test/check, but also reformats code, so you
may wish to ensure any new code is committed first**.
By default, this script checks all files and can take some time; if you alter
only certain files, you might wish to specify paths as arguments to reduce the
run-time:
may wish to ensure any new code is committed first**. By default this script
checks all files and can take some time; if you alter only certain files, you
might wish to specify paths as arguments to reduce the run-time:
```
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
@@ -70,6 +82,7 @@ Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
## Changelog
All changes, even minor ones, need a corresponding changelog / newsfragment
@@ -85,55 +98,24 @@ in the format of `PRnumber.type`. The type can be one of the following:
* `removal` (also used for deprecations)
* `misc` (for internal-only changes)
This file will become part of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
release, so the content of the file should be a short description of your
change in the same style as the rest of the changelog. The file can contain Markdown
formatting, and should end with a full stop (.) or an exclamation mark (!) for
The content of the file is your changelog entry, which should be a short
description of your change in the same style as the rest of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md). The file can
contain Markdown formatting, and should end with a full stop ('.') for
consistency.
Adding credits to the changelog is encouraged, we value your
contributions and would like to have you shouted out in the release notes!
For example, a fix in PR #1234 would have its changelog entry in
`changelog.d/1234.bugfix`, and contain content like:
`changelog.d/1234.bugfix`, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix.".
> The security levels of Florbs are now validated when received
> via the `/federation/florb` endpoint. Contributed by Jane Matrix.
If there are multiple pull requests involved in a single bugfix/feature/etc,
then the content for each `changelog.d` file should be the same. Towncrier will
merge the matching files together into a single changelog entry when we come to
release.
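For instance, a sketch of creating the entry for the example above (the PR number is assumed, per the note below):
```
# Create the changelog entry for a hypothetical bugfix PR numbered 1234
cat > changelog.d/1234.bugfix <<'EOF'
The security levels of Florbs are now validated when received
via the `/federation/florb` endpoint. Contributed by Jane Matrix.
EOF
```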
### How do I know what to call the changelog file before I create the PR?
Obviously, you don't know if you should call your newsfile
`1234.bugfix` or `5678.bugfix` until you create the PR, which leads to a
chicken-and-egg problem.
There are two options for solving this:
1. Open the PR without a changelog file, see what number you got, and *then*
add the changelog file to your branch (see [Updating your pull
request](#updating-your-pull-request)), or:
1. Look at the [list of all
issues/PRs](https://github.com/matrix-org/synapse/issues?q=), add one to the
highest number you see, and quickly open the PR before somebody else claims
your number.
[This
script](https://github.com/richvdh/scripts/blob/master/next_github_number.sh)
might be helpful if you find yourself doing this a lot.
Sorry, we know it's a bit fiddly, but it's *really* helpful for us when we come
to put together a release!
### Debian changelog
## Debian changelog
Changes which affect the debian packaging files (in `debian`) are an
exception to the rule that all changes require a `changelog.d` file.
exception.
In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command:
@@ -218,46 +200,6 @@ Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
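For instance, a minimal sketch (name and email illustrative):
```
git config user.name "Jane Matrix"
git config user.email "jane@example.com"
# -s appends "Signed-off-by: Jane Matrix <jane@example.com>" to the message
git commit -s -m "Fix a bug in the media repository."
```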
## Continuous integration and testing
[Buildkite](https://buildkite.com/matrix-dot-org/synapse) will automatically
run a series of checks and tests against any PR which is opened against the
project; if your change breaks the build, this will be shown in GitHub, with
links to the build results. If your build fails, please try to fix the errors
and update your branch.
To run unit tests in a local development environment, you can use:
- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
(requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
(requires Docker). Entirely self-contained, recommended if you don't want to
set up PostgreSQL yourself.
Docker images are available for running the integration tests (SyTest) locally,
see the [documentation in the SyTest repo](
https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
information.
## Updating your pull request
If you decide to make changes to your pull request - perhaps to address issues
raised in a review, or to fix problems highlighted by [continuous
integration](#continuous-integration-and-testing) - just add new commits to your
branch, and push to GitHub. The pull request will automatically be updated.
Please **avoid** rebasing your branch, especially once the PR has been
reviewed: doing so makes it very difficult for a reviewer to see what has
changed since a previous review.
## Notes for maintainers on merging PRs etc
There are some notes for those with commit access to the project on how we
manage git [here](docs/dev/git.md).
## Conclusion
That's it! Matrix is a very open and collaborative project as you might expect

View File

@@ -1,17 +1,15 @@
- [Choosing your server name](#choosing-your-server-name)
- [Picking a database engine](#picking-a-database-engine)
- [Installing Synapse](#installing-synapse)
- [Installing from source](#installing-from-source)
- [Platform-Specific Instructions](#platform-specific-instructions)
- [Troubleshooting Installation](#troubleshooting-installation)
- [Prebuilt packages](#prebuilt-packages)
- [Setting up Synapse](#setting-up-synapse)
- [TLS certificates](#tls-certificates)
- [Client Well-Known URI](#client-well-known-uri)
- [Email](#email)
- [Registering a user](#registering-a-user)
- [Setting up a TURN server](#setting-up-a-turn-server)
- [URL previews](#url-previews)
- [Troubleshooting Installation](#troubleshooting-installation)
# Choosing your server name
@@ -29,25 +27,6 @@ that your email address is probably `user@example.com` rather than
`user@email.example.com`) - but doing so may require more advanced setup: see
[Setting up Federation](docs/federate.md).
# Picking a database engine
Synapse offers two database engines:
* [PostgreSQL](https://www.postgresql.org)
* [SQLite](https://sqlite.org/)
Almost all installations should opt to use PostgreSQL. Advantages include:
* significant performance improvements due to the superior threading and
caching model, smarter query optimiser
* allowing the DB to be run on separate hardware
For information on how to install and use PostgreSQL, please see
[docs/postgres.md](docs/postgres.md)
By default Synapse uses SQLite and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.
# Installing Synapse
## Installing from source
@@ -57,7 +36,7 @@ light workloads.
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.5.2 or later, up to Python 3.8.
- Python 3.5, 3.6, 3.7 or 3.8.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
Synapse is written in Python but some of the libraries it uses are written in
@@ -91,7 +70,7 @@ pip install -U matrix-synapse
```
Before you can start Synapse, you will need to generate a configuration
file. To do this, run (in your virtualenv, as before):
file. To do this, run (in your virtualenv, as before)::
```
cd ~/synapse
@@ -105,24 +84,22 @@ python -m synapse.app.homeserver \
... substituting an appropriate value for `--server-name`.
This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your homeserver to
identify itself to other homeservers, so don't lose or delete them. It would be
also generate a set of keys for you. These keys will allow your Home Server to
identify itself to other Home Servers, so don't lose or delete them. It would be
wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your homeserver's keys, you may find that other homeservers have the
change your Home Server's keys, you may find that other Home Servers have the
old key cached. If you update the signing key, you should change the name of the
key in the `<server name>.signing.key` file (the second word) to something
different. See the
[spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
for more information on key management).
for more information on key management.)
To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. `~/synapse`), and:
run (e.g. `~/synapse`), and::
```
cd ~/synapse
source env/bin/activate
synctl start
```
cd ~/synapse
source env/bin/activate
synctl start
### Platform-Specific Instructions
@@ -133,7 +110,7 @@ Installing prerequisites on Ubuntu or Debian:
```
sudo apt-get install build-essential python3-dev libffi-dev \
python3-pip python3-setuptools sqlite3 \
libssl-dev virtualenv libjpeg-dev libxslt1-dev
libssl-dev python3-virtualenv libjpeg-dev libxslt1-dev
```
#### ArchLinux
@@ -147,29 +124,15 @@ sudo pacman -S base-devel python python-pip \
#### CentOS/Fedora
Installing prerequisites on CentOS 8 or Fedora>26:
```
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
libwebp-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
sudo dnf groupinstall "Development Tools"
```
Installing prerequisites on CentOS 7 or Fedora<=25:
Installing prerequisites on CentOS 7 or Fedora 25:
```
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
python-virtualenv libffi-devel openssl-devel
sudo yum groupinstall "Development Tools"
```
Note that Synapse does not support versions of SQLite before 3.11, and CentOS 7
uses SQLite 3.7. You may be able to work around this by installing a more
recent SQLite version, but it is recommended that you instead use a Postgres
database: see [docs/postgres.md](docs/postgres.md).
#### macOS
Installing prerequisites on macOS:
@@ -201,41 +164,35 @@ sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
#### OpenBSD
A port of Synapse is available under `net/synapse`. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
and mounting it to `/var/synapse` should be taken into consideration.
To be able to build Synapse's dependency on python the `WRKOBJDIR`
(cf. `bsd.port.mk(5)`) for building python, too, needs to be on a filesystem
mounted with `wxallowed` (cf. `mount(8)`).
Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
default OpenBSD installation is mounted with `wxallowed`):
Installing prerequisites on OpenBSD:
```
doas mkdir /usr/local/pobj_wxallowed
doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
libxslt jpeg
```
Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
configured in `/etc/mk.conf`:
There is currently no port for OpenBSD. Additionally, OpenBSD's security
settings require a slightly more difficult installation process.
```
doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
```
XXX: I suspect this is out of date.
Setting the `WRKOBJDIR` for building python:
1. Create a new directory in `/usr/local` called `_synapse`. Also, create a
new user called `_synapse` and set that directory as the new user's home.
This is required because, by default, OpenBSD only allows binaries which need
write and execute permissions on the same memory space to be run from
`/usr/local`.
2. `su` to the new `_synapse` user and change to their home directory.
3. Create a new virtualenv: `virtualenv -p python2.7 ~/.synapse`
4. Source the virtualenv configuration located at
`/usr/local/_synapse/.synapse/bin/activate`. This is done in `ksh` by
using the `.` command, rather than `bash`'s `source`.
5. Optionally, use `pip` to install `lxml`, which Synapse needs to parse
webpages for their titles.
6. Use `pip` to install this repository: `pip install matrix-synapse`
7. Optionally, change `_synapse`'s shell to `/bin/false` to reduce the
chance of a compromised Synapse server being used to take over your box.
```
echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
```
Building Synapse:
```
cd /usr/ports/net/synapse
make install
```
After this, you may proceed with the rest of the install directions.
#### Windows
@@ -246,6 +203,45 @@ be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
for Windows Server.
### Troubleshooting Installation
XXX a bunch of this is no longer relevant.
Synapse requires pip 8 or later, so if your OS provides too old a version you
may need to manually upgrade it::
sudo pip install --upgrade pip
Installing may fail with `Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)`.
You can fix this by manually upgrading pip and virtualenv::
sudo pip install --upgrade virtualenv
You can next rerun `virtualenv -p python3 synapse` to update the virtual env.
Installing may fail during installing virtualenv with `InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.`
You can fix this by manually installing ndg-httpsclient::
pip install --upgrade ndg-httpsclient
Installing may fail with `mock requires setuptools>=17.1. Aborting installation`.
You can fix this by upgrading setuptools::
pip install --upgrade setuptools
If pip crashes mid-installation for reason (e.g. lost terminal), pip may
refuse to run until you remove the temporary installation directory it
created. To reset the installation::
rm -rf /tmp/pip_install_matrix
pip seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.::
pip install twisted
## Prebuilt packages
As an alternative to installing from source, prebuilt packages are available
@@ -255,9 +251,9 @@ for a number of platforms.
There is an official synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse which can be used with
the docker-compose file available at [contrib/docker](contrib/docker). Further
information on this including configuration options is available in the README
on hub.docker.com.
the docker-compose file available at [contrib/docker](contrib/docker). Further information on
this including configuration options is available in the README on
hub.docker.com.
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
@@ -265,8 +261,7 @@ https://hub.docker.com/r/avhost/docker-matrix/tags/
Slavi Pantaleev has created an Ansible playbook,
which installs the official Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, Element, coturn,
ma1sd, SSL support, etc.).
along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
For more details, see
https://github.com/spantaleev/matrix-docker-ansible-deploy
@@ -299,27 +294,22 @@ The fingerprint of the repository signing key (as shown by `gpg
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
#### Downstream Debian packages
#### Downstream Debian/Ubuntu packages
We do not recommend using the packages from the default Debian `buster`
repository at this time, as they are old and suffer from known security
vulnerabilities. You can install the latest version of Synapse from
[our repository](#matrixorg-packages) or from `buster-backports`. Please
see the [Debian documentation](https://backports.debian.org/Instructions/)
for information on how to use backports.
If you are using Debian `sid` or testing, Synapse is available in the default
repositories and it should be possible to install it simply with:
For `buster` and `sid`, Synapse is available in the Debian repositories and
it should be possible to install it with simply:
```
sudo apt install matrix-synapse
sudo apt install matrix-synapse
```
#### Downstream Ubuntu packages
There is also a version of `matrix-synapse` in `stretch-backports`. Please see
the [Debian documentation on
backports](https://backports.debian.org/Instructions/) for information on how
to use them.
We do not recommend using the packages in the default Ubuntu repository
at this time, as they are old and suffer from known security vulnerabilities.
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
We do not recommend using the packages in downstream Ubuntu at this time, as
they are old and suffer from known security vulnerabilities.
### Fedora
@@ -371,30 +361,16 @@ sudo pip install py-bcrypt
Synapse can be found in the void repositories as 'synapse':
```
xbps-install -Su
xbps-install -S synapse
```
xbps-install -Su
xbps-install -S synapse
### FreeBSD
Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`
- Packages: `pkg install py27-matrix-synapse`
### OpenBSD
As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
and mounting it to `/var/synapse` should be taken into consideration.
Installing Synapse:
```
doas pkg_add synapse
```
### NixOS
@@ -407,17 +383,15 @@ Once you have installed synapse as above, you will need to configure it.
## TLS certificates
The default configuration exposes a single HTTP port on the local
interface: `http://localhost:8008`. It is suitable for local testing,
but for any practical use, you will need Synapse's APIs to be served
over HTTPS.
The default configuration exposes a single HTTP port: http://localhost:8008. It
is suitable for local testing, but for any practical use, you will either need
to enable a reverse proxy, or configure Synapse to expose an HTTPS port.
The recommended way to do so is to set up a reverse proxy on port
`8448`. You can find documentation on doing so in
For information on using a reverse proxy, see
[docs/reverse_proxy.md](docs/reverse_proxy.md).
Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:
To configure Synapse to expose an HTTPS port, you will need to edit
`homeserver.yaml`, as follows:
* First, under the `listeners` section, uncomment the configuration for the
TLS-enabled listener. (Remove the hash sign (`#`) at the start of
@@ -430,76 +404,20 @@ so, you will need to edit `homeserver.yaml`, as follows:
resources:
- names: [client, federation]
```
* You will also need to uncomment the `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You will need to manage
provisioning of these certificates yourself — Synapse had built-in ACME
support, but the ACMEv1 protocol Synapse implements is deprecated, not
allowed by LetsEncrypt for new sites, and will break for existing sites in
late 2020. See [ACME.md](docs/ACME.md).
If you are using your own certificate, be sure to use a `.pem` file that
includes the full certificate chain including any intermediate certificates
(for instance, if using certbot, use `fullchain.pem` as your certificate, not
`tls_private_key_path` lines under the `TLS` section. You can either
point these settings at an existing certificate and key, or you can
enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
for having Synapse automatically provision and renew federation
certificates through ACME can be found at [ACME.md](docs/ACME.md). If you
are using your own certificate, be sure to use a `.pem` file that includes
the full certificate chain including any intermediate certificates (for
instance, if using certbot, use `fullchain.pem` as your certificate, not
`cert.pem`).
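A sketch of those two lines once uncommented, pointed at a certbot-style certificate (paths illustrative):
```
tls_certificate_path: "/etc/letsencrypt/live/example.com/fullchain.pem"
tls_private_key_path: "/etc/letsencrypt/live/example.com/privkey.pem"
```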
For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md).
[federate.md](docs/federate.md)
## Client Well-Known URI
Setting up the client Well-Known URI is optional but if you set it up, it will
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
which support well-known lookup to automatically configure the homeserver and
identity server URLs. This is useful so that users don't have to memorize or think
about the actual homeserver URL you are using.
The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
the following format.
```
{
"m.homeserver": {
"base_url": "https://<matrix.example.com>"
}
}
```
It can optionally contain identity server information as well.
```
{
"m.homeserver": {
"base_url": "https://<matrix.example.com>"
},
"m.identity_server": {
"base_url": "https://<identity.example.com>"
}
}
```
To work in browser based clients, the file must be served with the appropriate
Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
`Access-Control-Allow-Origin: *` which would allow all browser based clients to
view it.
In nginx this would be something like:
```
location /.well-known/matrix/client {
return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
}
```
You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
correctly. `public_baseurl` should be set to the URL that clients will use to
connect to your server. This is the same URL you put for the `m.homeserver`
`base_url` above.
```
public_baseurl: "https://<matrix.example.com>"
```
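A quick way to sanity-check the result once it is served (a sketch; substitute your own server name):
```
curl -i https://example.com/.well-known/matrix/client
# Expect a 200 response with Content-Type: application/json, an
# Access-Control-Allow-Origin header, and the JSON document shown above.
```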
## Email
@@ -518,7 +436,7 @@ email will be disabled.
## Registering a user
The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
The easiest way to create a new user is to do so from a client like [Riot](https://riot.im).
Alternatively you can do so from the command line if you have installed via pip.
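The command-line route is the `register_new_matrix_user` script that a pip install provides; a sketch (exact flags may vary between versions):
```
# Prompts for a localpart, a password, and whether to make the account an admin
register_new_matrix_user -c homeserver.yaml http://localhost:8008
```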
@@ -545,7 +463,7 @@ on your server even if `enable_registration` is `false`.
## Setting up a TURN server
For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
## URL previews
@@ -554,24 +472,10 @@ turn it on you must enable the `url_preview_enabled: True` config parameter
and explicitly specify the IP ranges that Synapse is not allowed to spider for
previewing in the `url_preview_ip_range_blacklist` configuration parameter.
This is critical from a security perspective to stop arbitrary Matrix users
spidering 'internal' URLs on your network. At the very least we recommend that
spidering 'internal' URLs on your network. At the very least we recommend that
your loopback and RFC1918 IP addresses are blacklisted.
This also requires the optional `lxml` and `netaddr` python dependencies to be
installed. This in turn requires the `libxml2` library to be available - on
This also requires the optional lxml and netaddr python dependencies to be
installed. This in turn requires the libxml2 library to be available - on
Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for
your OS.
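Put together, a minimal sketch of the relevant `homeserver.yaml` settings (the blacklist entries are illustrative, not exhaustive):
```
url_preview_enabled: true
# Stop the previewer spidering internal hosts: at minimum, loopback and RFC1918
url_preview_ip_range_blacklist:
  - '127.0.0.0/8'
  - '10.0.0.0/8'
  - '172.16.0.0/12'
  - '192.168.0.0/16'
```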
# Troubleshooting Installation
`pip` seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.:
```
pip install twisted
```
If you have any other problems, feel free to ask in
[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org).

View File

@@ -30,24 +30,23 @@ recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js
exclude .codecov.yml
exclude .coveragerc
exclude .dockerignore
exclude .editorconfig
exclude Dockerfile
exclude mypy.ini
exclude sytest-blacklist
exclude .dockerignore
exclude test_postgresql.sh
exclude .editorconfig
exclude sytest-blacklist
include pyproject.toml
recursive-include changelog.d *
prune .buildkite
prune .circleci
prune .codecov.yml
prune .coveragerc
prune .github
prune contrib
prune debian
prune demo/etc
prune docker
prune mypy.ini
prune snap
prune stubs

View File

@@ -1,11 +1,3 @@
================
Synapse |shield|
================
.. |shield| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
:alt: (get support on #synapse:matrix.org)
:target: https://matrix.to/#/#synapse:matrix.org
.. contents::
Introduction
@@ -45,7 +37,7 @@ which handle:
- Eventually-consistent cryptographically secure synchronisation of room
state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
end-to-end encryption
end-to-end encryption[1]
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
@@ -82,15 +74,7 @@ at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
Thanks for using Matrix!
Support
=======
For support installing or managing Synapse, please join |room|_ (from a matrix.org
account if necessary) and ask questions there. We do not use GitHub issues for
support requests, only for bug reports and feature requests.
.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org
[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.
Synapse Installation
@@ -112,11 +96,12 @@ Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see `<INSTALL.md#tls-certificates>`_.
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
An easy way to get started is to login or register via Riot at
https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
(Leave the identity server as the default - see `Identity servers`_.)
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
@@ -133,7 +118,7 @@ it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)
Once ``enable_registration`` is set to ``true``, it is possible to register a
user via a Matrix client.
user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
@@ -179,6 +164,30 @@ versions of synapse.
.. _UPGRADE.rst: UPGRADE.rst
Using PostgreSQL
================
Synapse offers two database engines:
* `SQLite <https://sqlite.org/>`_
* `PostgreSQL <https://www.postgresql.org>`_
By default Synapse uses SQLite and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.
Almost all installations should opt to use PostgreSQL. Advantages include:
* significant performance improvements due to the superior threading and
caching model, smarter query optimiser
* allowing the DB to be run on separate hardware
* allowing basic active/backup high-availability with a "hot spare" synapse
pointing at the same DB master, as well as enabling DB replication in
synapse itself.
For information on how to install and use PostgreSQL, please see
`docs/postgres.md <docs/postgres.md>`_.
.. _reverse-proxy:
Using a reverse proxy with Synapse
@@ -187,7 +196,7 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
`Caddy <https://caddyserver.com/docs/proxy>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
@@ -227,9 +236,10 @@ email address.
Password reset
==============
Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.rst#reset-password>`_
or by directly editing the database as shown below.
If a user has registered an email address to their account using an identity
server, they can request a password-reset token via clients such as Riot.
A manual password reset can be done via direct database access as follows.
First calculate the hash of the new password::
@@ -238,7 +248,7 @@ First calculate the hash of the new password::
Confirm password:
$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Then update the ``users`` table in the database::
Then update the `users` table in the database::
UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
WHERE name='@test:test.com';
@@ -262,7 +272,7 @@ to install using pip and a virtualenv::
virtualenv -p python3 env
source env/bin/activate
python -m pip install --no-use-pep517 -e ".[all]"
python -m pip install --no-use-pep517 -e .[all]
This will run a process of downloading and installing all the needed
dependencies into a virtual env.
@@ -306,9 +316,6 @@ Building internal API documentation::
Troubleshooting
===============
Need help? Join our community support room on Matrix:
`#synapse:matrix.org <https://matrix.to/#/#synapse:matrix.org>`_
Running out of File Handles
---------------------------

View File

@@ -75,181 +75,6 @@ for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
Upgrading to v1.18.0
====================
Docker `-py3` suffix will be removed in future versions
-------------------------------------------------------
From 10th August 2020, we will no longer publish Docker images with the `-py3` tag suffix. The images tagged with the `-py3` suffix have been identical to the non-suffixed tags since release 0.99.0, and the suffix is obsolete.
On 10th August, we will remove the `latest-py3` tag. Existing per-release tags (such as `v1.18.0-py3`) will not be removed, but no new `-py3` tags will be added.
Scripts relying on the `-py3` suffix will need to be updated.
Redis replication is now recommended in lieu of TCP replication
---------------------------------------------------------------
When setting up worker processes, we now recommend the use of a Redis server for replication. **The old direct TCP connection method is deprecated and will be removed in a future release.**
See `docs/workers.md <docs/workers.md>`_ for more details.
Upgrading to v1.14.0
====================
This version includes a database update which is run as part of the upgrade,
and which may take a couple of minutes in the case of a large server. Synapse
will not respond to HTTP requests while this update is taking place.
Upgrading to v1.13.0
====================
Incorrect database migration in old synapse versions
----------------------------------------------------
A bug was introduced in Synapse 1.4.0 which could cause the room directory to
be incomplete or empty if Synapse was upgraded directly from v1.2.1 or
earlier, to versions between v1.4.0 and v1.12.x.
This will *not* be a problem for Synapse installations which were:
* created at v1.4.0 or later,
* upgraded via v1.3.x, or
* upgraded straight from v1.2.1 or earlier to v1.13.0 or later.
If completeness of the room directory is a concern, installations which are
affected can be repaired as follows:
1. Run the following sql from a `psql` or `sqlite3` console:
.. code:: sql
INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
('populate_stats_process_rooms', '{}', 'current_state_events_membership');
INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
('populate_stats_process_users', '{}', 'populate_stats_process_rooms');
2. Restart synapse.
New Single Sign-on HTML Templates
---------------------------------
New templates (``sso_auth_confirm.html``, ``sso_auth_success.html``, and
``sso_account_deactivated.html``) were added to Synapse. If your Synapse is
configured to use SSO and a custom ``sso_redirect_confirm_template_dir``
configuration then these templates will need to be copied from
`synapse/res/templates <synapse/res/templates>`_ into that directory.
Synapse SSO Plugins Method Deprecation
--------------------------------------
Plugins using the ``complete_sso_login`` method of
``synapse.module_api.ModuleApi`` should update to using the async/await
version ``complete_sso_login_async`` which includes additional checks. The
non-async version is considered deprecated.
Rolling back to v1.12.4 after a failed upgrade
----------------------------------------------
v1.13.0 includes a lot of large changes. If something problematic occurs, you
may want to roll-back to a previous version of Synapse. Because v1.13.0 also
includes a new database schema version, reverting that version is also required
alongside the generic rollback instructions mentioned above. In short, to roll
back to v1.12.4 you need to:
1. Stop the server
2. Decrease the schema version in the database:
.. code:: sql
UPDATE schema_version SET version = 57;
3. Downgrade Synapse by following the instructions for your installation method
in the "Rolling back to older versions" section above.
Upgrading to v1.12.0
====================
This version includes a database update which is run as part of the upgrade,
and which may take some time (several hours in the case of a large
server). Synapse will not respond to HTTP requests while this update is taking
place.
This is only likely to be a problem in the case of a server which is
participating in many rooms.
0. As with all upgrades, it is recommended that you have a recent backup of
your database which can be used for recovery in the event of any problems.
1. As an initial check to see if you will be affected, you can try running the
following query from the `psql` or `sqlite3` console. It is safe to run it
while Synapse is still running.
.. code:: sql
SELECT MAX(q.v) FROM (
SELECT (
SELECT ej.json AS v
FROM state_events se INNER JOIN event_json ej USING (event_id)
WHERE se.room_id=rooms.room_id AND se.type='m.room.create' AND se.state_key=''
LIMIT 1
) FROM rooms WHERE rooms.room_version IS NULL
) q;
This query will take about the same amount of time as the upgrade process: ie,
if it takes 5 minutes, then it is likely that Synapse will be unresponsive for
5 minutes during the upgrade.
If you consider an outage of this duration to be acceptable, no further
action is necessary and you can simply start Synapse 1.12.0.
If you would prefer to reduce the downtime, continue with the steps below.
2. The easiest workaround for this issue is to manually
create a new index before upgrading. On PostgreSQL, this can be done as follows:
.. code:: sql
CREATE INDEX CONCURRENTLY tmp_upgrade_1_12_0_index
ON state_events(room_id) WHERE type = 'm.room.create';
The above query may take some time, but is also safe to run while Synapse is
running.
We assume that no SQLite users have databases large enough to be
affected. If you *are* affected, you can run a similar query, omitting the
``CONCURRENTLY`` keyword. Note however that this operation may in itself cause
Synapse to stop running for some time. Synapse admins are reminded that
`SQLite is not recommended for use outside a test
environment <https://github.com/matrix-org/synapse/blob/master/README.rst#using-postgresql>`_.
3. Once the index has been created, the ``SELECT`` query in step 1 above should
complete quickly. It is therefore safe to upgrade to Synapse 1.12.0.
4. Once Synapse 1.12.0 has successfully started and is responding to HTTP
requests, the temporary index can be removed:
.. code:: sql
DROP INDEX tmp_upgrade_1_12_0_index;
Upgrading to v1.10.0
====================
Synapse will now log a warning on start up if used with a PostgreSQL database
that has a non-recommended locale set.
See `docs/postgres.md <docs/postgres.md>`_ for details.
Upgrading to v1.8.0
===================
Specifying a ``log_file`` config option will now cause Synapse to refuse to
start, and should be replaced with the ``log_config`` option. Support for
the ``log_file`` option was removed in v1.3.0 and has since had no effect.
Upgrading to v1.7.0
===================

changelog.d/6563.bugfix (new file)
View File

@@ -0,0 +1 @@
Fix GET request on /_synapse/admin/v2/users endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.

changelog.d/6621.doc (new file)
View File

@@ -0,0 +1 @@
Fix a typo in the configuration example for purge jobs in the sample configuration file.

changelog.d/6624.doc (new file)
View File

@@ -0,0 +1 @@
Add complete documentation of the message retention policies support.

changelog.d/6654.bugfix (new file)
View File

@@ -0,0 +1 @@
Correctly proxy HTTP errors due to API calls to remote group servers.

changelog.d/6656.doc (new file)
View File

@@ -0,0 +1 @@
No more overriding the entire /etc folder of the container in docker-compose.yaml. Contributed by Fabian Meyer.

changelog.d/6657.bugfix (new file)
View File

@@ -0,0 +1 @@
Fix incorrect signing of responses from the key server implementation.

changelog.d/6664.bugfix (new file)
View File

@@ -0,0 +1 @@
Fix media repo admin APIs when using a media worker.

changelog.d/6665.doc (new file)
View File

@@ -0,0 +1 @@
Add complete documentation of the message retention policies support.

View File

@@ -17,6 +17,9 @@
""" Starts a synapse client console. """
from __future__ import print_function
from twisted.internet import reactor, defer, threads
from http import TwistedHttpClient
import argparse
import cmd
import getpass
@@ -25,14 +28,12 @@ import shlex
import sys
import time
import urllib
from http import TwistedHttpClient
import nacl.encoding
import nacl.signing
import urlparse
from signedjson.sign import SignatureVerifyException, verify_signed_json
from twisted.internet import defer, reactor, threads
import nacl.signing
import nacl.encoding
from signedjson.sign import verify_signed_json, SignatureVerifyException
CONFIG_JSON = "cmdclient_config.json"
@@ -492,7 +493,7 @@ class SynapseCmd(cmd.Cmd):
"list messages <roomid> from=END&to=START&limit=3"
"""
args = self._parse(line, ["type", "roomid", "qp"])
if "type" not in args or "roomid" not in args:
if not "type" in args or not "roomid" in args:
print("Must specify type and room ID.")
return
if args["type"] not in ["members", "messages"]:
@@ -507,7 +508,7 @@ class SynapseCmd(cmd.Cmd):
try:
key_value = key_value_str.split("=")
qp[key_value[0]] = key_value[1]
except Exception:
except:
print("Bad query param: %s" % key_value)
return
@@ -584,7 +585,7 @@ class SynapseCmd(cmd.Cmd):
parsed_url = urlparse.urlparse(args["path"])
qp.update(urlparse.parse_qs(parsed_url.query))
args["path"] = parsed_url.path
except Exception:
except:
pass
reactor.callFromThread(
@@ -609,15 +610,13 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_event_stream(self, timeout):
res = yield defer.ensureDeferred(
self.http_client.get_json(
self._url() + "/events",
{
"access_token": self._tok(),
"timeout": str(timeout),
"from": self.event_stream_token,
},
)
res = yield self.http_client.get_json(
self._url() + "/events",
{
"access_token": self._tok(),
"timeout": str(timeout),
"from": self.event_stream_token,
},
)
print(json.dumps(res, indent=4))
@@ -773,10 +772,10 @@ def main(server_url, identity_server_url, username, token, config_path):
syn_cmd.config = json.load(config)
try:
http_client.verbose = "on" == syn_cmd.config["verbose"]
except Exception:
except:
pass
print("Loaded config from %s" % config_path)
except Exception:
except:
pass
# Twisted-specific: Runs the command processor in Twisted's event loop

View File

@@ -14,14 +14,14 @@
# limitations under the License.
from __future__ import print_function
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
from twisted.internet import defer, reactor
from pprint import pformat
import json
import urllib
from pprint import pformat
from twisted.internet import defer, reactor
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
class HttpClient(object):

View File

@@ -15,9 +15,10 @@ services:
restart: unless-stopped
# See the readme for a full documentation of the environment settings
environment:
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
- SYNAPSE_CONFIG_PATH=/etc/homeserver.yaml
volumes:
# You may either store all the files in a local folder
- ./matrix-config/homeserver.yaml:/etc/homeserver.yaml
- ./files:/data
# .. or you may split this between different storage points
# - ./files:/data
@@ -50,14 +51,11 @@ services:
- traefik.http.routers.https-synapse.tls.certResolver=le-ssl
db:
image: docker.io/postgres:12-alpine
image: docker.io/postgres:10-alpine
# Change that password, of course!
environment:
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=changeme
# ensure the database gets created correctly
# https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
# You may store the database tables in a local folder..
- ./schemas:/var/lib/postgresql/data

View File

@@ -28,24 +28,27 @@ Currently assumes the local address is localhost:<port>
"""
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
from synapse.app.homeserver import SynapseHomeServer
# from synapse.logging.utils import log_function
from twisted.internet import reactor, defer
from twisted.python import log
import argparse
import curses.wrapper
import json
import logging
import os
import re
import cursesio
from twisted.internet import defer, reactor
from twisted.python import log
from synapse.app.homeserver import SynapseHomeServer
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
# from synapse.logging.utils import log_function
import curses.wrapper
logger = logging.getLogger("example")
@@ -72,7 +75,7 @@ class InputOutput(object):
"""
try:
m = re.match(r"^join (\S+)$", line)
m = re.match("^join (\S+)$", line)
if m:
# The `sender` wants to join a room.
(room_name,) = m.groups()
@@ -81,7 +84,7 @@ class InputOutput(object):
# self.print_line("OK.")
return
m = re.match(r"^invite (\S+) (\S+)$", line)
m = re.match("^invite (\S+) (\S+)$", line)
if m:
# `sender` wants to invite someone to a room
room_name, invitee = m.groups()
@@ -90,7 +93,7 @@ class InputOutput(object):
# self.print_line("OK.")
return
m = re.match(r"^send (\S+) (.*)$", line)
m = re.match("^send (\S+) (.*)$", line)
if m:
# `sender` wants to message a room
room_name, body = m.groups()
@@ -99,7 +102,7 @@ class InputOutput(object):
# self.print_line("OK.")
return
m = re.match(r"^backfill (\S+)$", line)
m = re.match("^backfill (\S+)$", line)
if m:
# we want to backfill a room
(room_name,) = m.groups()
@@ -198,6 +201,16 @@ class HomeServer(ReplicationHandler):
% (pdu.context, pdu.pdu_type, json.dumps(pdu.content))
)
# def on_state_change(self, pdu):
##self.output.print_line("#%s (state) %s *** %s" %
##(pdu.context, pdu.state_key, pdu.pdu_type)
##)
# if "joinee" in pdu.content:
# self._on_join(pdu.context, pdu.content["joinee"])
# elif "invitee" in pdu.content:
# self._on_invite(pdu.origin, pdu.context, pdu.content["invitee"])
def _on_message(self, pdu):
""" We received a message
"""
@@ -301,7 +314,7 @@ class HomeServer(ReplicationHandler):
return self.replication_layer.backfill(dest, room_name, limit)
def _get_room_remote_servers(self, room_name):
return list(self.joined_rooms.setdefault(room_name).servers)
return [i for i in self.joined_rooms.setdefault(room_name).servers]
def _get_or_create_room(self, room_name):
return self.joined_rooms.setdefault(room_name, Room(room_name))
@@ -321,7 +334,7 @@ def main(stdscr):
user = args.user
server_name = origin_from_ucid(user)
# Set up logging
## Set up logging ##
root_logger = logging.getLogger()
@@ -341,7 +354,7 @@ def main(stdscr):
observer = log.PythonLoggingObserver()
observer.start()
# Set up synapse server
## Set up synapse server
curses_stdio = cursesio.CursesStdIO(stdscr)
input_output = InputOutput(curses_stdio, user)
@@ -355,16 +368,16 @@ def main(stdscr):
input_output.set_home_server(hs)
# Add input_output logger
## Add input_output logger
io_logger = IOLoggerHandler(input_output)
io_logger.setFormatter(formatter)
root_logger.addHandler(io_logger)
# Start!
## Start! ##
try:
port = int(server_name.split(":")[1])
except Exception:
except:
port = 12345
app_hs.get_http_server().start_listening(port)

View File

@@ -1,6 +1,6 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules

File diff suppressed because it is too large

View File

@@ -1,13 +1,5 @@
from __future__ import print_function
import argparse
import cgi
import datetime
import json
import pydot
import urllib2
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,6 +15,15 @@ import urllib2
# limitations under the License.
import sqlite3
import pydot
import cgi
import json
import datetime
import argparse
import urllib2
def make_name(pdu_id, origin):
return "%s@%s" % (pdu_id, origin)
@@ -32,7 +33,7 @@ def make_graph(pdus, room, filename_prefix):
node_map = {}
origins = set()
colors = {"red", "green", "blue", "yellow", "purple"}
colors = set(("red", "green", "blue", "yellow", "purple"))
for pdu in pdus:
origins.add(pdu.get("origin"))
@@ -48,7 +49,7 @@ def make_graph(pdus, room, filename_prefix):
try:
c = colors.pop()
color_map[o] = c
except Exception:
except:
print("Run out of colours!")
color_map[o] = "black"

View File

@@ -13,13 +13,12 @@
# limitations under the License.
import argparse
import cgi
import datetime
import json
import sqlite3
import pydot
import cgi
import json
import datetime
import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
@@ -99,7 +98,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
except:
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node

View File

@@ -1,15 +1,5 @@
from __future__ import print_function
import argparse
import cgi
import datetime
import pydot
import simplejson as json
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -25,6 +15,18 @@ from synapse.util.frozenutils import unfreeze
# limitations under the License.
import pydot
import cgi
import simplejson as json
import datetime
import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
from six import string_types
def make_graph(file_name, room_id, file_prefix, limit):
print("Reading lines")
with open(file_name) as f:
@@ -60,7 +62,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
for key, value in unfreeze(event.get_dict()["content"]).items():
if value is None:
value = "<null>"
elif isinstance(value, str):
elif isinstance(value, string_types):
pass
else:
value = json.dumps(value)
@@ -106,7 +108,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
except:
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node

View File

@@ -12,15 +12,15 @@ npm install jquery jsdom
"""
from __future__ import print_function
import json
import subprocess
import time
import gevent
import grequests
from BeautifulSoup import BeautifulSoup
import json
import urllib
import subprocess
import time
ACCESS_TOKEN = ""
# ACCESS_TOKEN="" #
MATRIXBASE = "https://matrix.org/_matrix/client/api/v1/"
MYUSERNAME = "@davetest:matrix.org"

View File

@@ -1,12 +1,10 @@
#!/usr/bin/env python
from __future__ import print_function
from argparse import ArgumentParser
import json
import requests
import sys
import urllib
from argparse import ArgumentParser
import requests
try:
raw_input

View File

@@ -1,2 +1,150 @@
The documentation for using systemd to manage synapse workers is now part of
the main synapse distribution. See [docs/systemd-with-workers](../../docs/systemd-with-workers).
# Setup Synapse with Workers and Systemd
This is a setup for managing synapse with systemd, including support for
managing workers. It provides a `matrix-synapse` service, as well as a
`matrix-synapse-worker@` service for any workers you require. Additionally, to
group the required services, it sets up a `matrix.target`. You can use this to
automatically start any bot or bridge services. More on this in
[Bots and Bridges](#bots-and-bridges).
See the folder [system](system) for the service and target files.
The folder [workers](workers) contains an example configuration for the
`federation_reader` worker. Pay special attention to the name of the
configuration file. In order to work with the `matrix-synapse-worker@.service`
service, it needs to have the exact same name as the worker app.
This setup expects neither the homeserver nor any workers to fork. Forking is
handled by systemd.
## Setup
1. Adjust your matrix configs. Make sure that the worker config files have the
exact same name as the worker app; see `matrix-synapse-worker@.service` for
why. You can find an example worker config in the [workers](workers) folder. See
below for relevant settings in the `homeserver.yaml`.
2. Copy the `*.service` and `*.target` files in [system](system) to
`/etc/systemd/system`.
3. `systemctl enable matrix-synapse.service`; this adds the homeserver
app to the `matrix.target`.
4. *Optional.* `systemctl enable
matrix-synapse-worker@federation_reader.service`; this adds the federation_reader
app to the `matrix-synapse.service`.
5. *Optional.* Repeat step 4 for any additional workers you require.
6. *Optional.* Add any bots or bridges by enabling them.
7. Start all matrix related services via `systemctl start matrix.target`
8. *Optional.* Enable autostart of all matrix related services on system boot
via `systemctl enable matrix.target`. A consolidated sketch of these steps
follows below.
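Taken together, a minimal sketch of the setup might look like this (assuming a
single `federation_reader` worker and that the commands are run from a checkout
containing the [system](system) folder):
```
# copy the unit and target files into place, then reload systemd
cp system/*.service system/*.target /etc/systemd/system/
systemctl daemon-reload
# enable the homeserver and one worker
systemctl enable matrix-synapse.service
systemctl enable matrix-synapse-worker@federation_reader.service
# start everything matrix related now, and on every boot
systemctl start matrix.target
systemctl enable matrix.target
```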
## Usage
After you have finished the setup, you can use the following commands to manage
your synapse installation:
```
# Start matrix-synapse, all workers and any enabled bots or bridges.
systemctl start matrix.target
# Restart matrix-synapse and all workers (not necessarily restarting bots
# or bridges, see "Bots and Bridges")
systemctl restart matrix-synapse.service
# Stop matrix-synapse and all workers (not necessarily stopping bots
# or bridges, see "Bots and Bridges")
systemctl stop matrix-synapse.service
# Restart a specific worker (e.g. federation_reader); the homeserver is
# unaffected by this.
systemctl restart matrix-synapse-worker@federation_reader.service
# Add a new worker (assuming all configs are setup already)
systemctl enable matrix-synapse-worker@federation_writer.service
systemctl restart matrix-synapse.service
```
## The Configs
Make sure the `worker_app` is set in the `homeserver.yaml` and it does not fork.
```
worker_app: synapse.app.homeserver
daemonize: false
```
None of the workers should fork, as forking is handled by systemd. Hence make
sure this is present in all worker config files.
```
worker_daemonize: false
```
The config files of all workers are expected to be located in
`/etc/matrix-synapse/workers`. If you want to use a different location you have
to edit the provided `*.service` files accordingly.
## Bots and Bridges
Most bots and bridges do not care if the homeserver goes down or is restarted.
Depending on the implementation, though, this may crash them. So look up the
docs or ask the community around the specific bridge or bot you want to run to
make sure you choose the correct setup.
Whichever configuration you choose, after the setup the following command will
enable automatically starting (and potentially restarting) your bot/bridge with
the `matrix.target`.
```
systemctl enable <yourBotOrBridgeName>.service
```
**Note** that, starting from an inactive state, the bots/bridges will only be
started together with synapse if you start the `matrix.target`, not if you start
`matrix-synapse.service`. This is on purpose. Think of `matrix-synapse.service`
as *just* synapse, but `matrix.target` as anything matrix related, including
synapse and any and all enabled bots and bridges.
### Start with synapse but ignore synapse going down
If the bridge can handle shutdowns of the homeserver you'll want to install the
service in the `matrix.target` and optionally add a
`After=matrix-synapse.service` dependency to have the bot/bridge start after
synapse on starting everything.
In this case the service file should look like this.
```
[Unit]
# ...
# Optional, this will only ensure that if you start everything, synapse will
# be started before the bot/bridge will be started.
After=matrix-synapse.service
[Service]
# ...
[Install]
WantedBy=matrix.target
```
### Stop/restart when synapse stops/restarts
If the bridge can't handle shutdowns of the homeserver you'll still want to
install the service in the `matrix.target` but also have to specify the
`After=matrix-synapse.service` *and* `BindsTo=matrix-synapse.service`
dependencies to have the bot/bridge stop/restart with synapse.
In this case the service file should look like this.
```
[Unit]
# ...
# Mandatory
After=matrix-synapse.service
BindsTo=matrix-synapse.service
[Service]
# ...
[Install]
WantedBy=matrix.target
```

View File

@@ -0,0 +1,19 @@
[Unit]
Description=Synapse Matrix Worker
After=matrix-synapse.service
BindsTo=matrix-synapse.service
[Service]
Type=notify
NotifyAccess=main
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.%i --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse-%i
[Install]
WantedBy=matrix-synapse.service

View File

@@ -1,8 +1,5 @@
[Unit]
Description=Synapse master
# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target
Description=Synapse Matrix Homeserver
[Service]
Type=notify
@@ -18,4 +15,4 @@ RestartSec=3
SyslogIdentifier=matrix-synapse
[Install]
WantedBy=matrix-synapse.target
WantedBy=matrix.target

View File

@@ -0,0 +1,7 @@
[Unit]
Description=Contains matrix services like synapse, bridges and bots
After=network.target
AllowIsolate=no
[Install]
WantedBy=multi-user.target

View File

@@ -1,7 +1,7 @@
worker_app: synapse.app.federation_reader
worker_name: federation_reader1
worker_replication_host: 127.0.0.1
worker_replication_port: 9092
worker_replication_http_port: 9093
worker_listeners:
@@ -10,4 +10,5 @@ worker_listeners:
resources:
- names: [federation]
worker_daemonize: false
worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml

View File

@@ -15,9 +15,6 @@
[Unit]
Description=Synapse Matrix homeserver
# If you are using postgresql to persist data, uncomment this line to make sure
# synapse starts after the postgresql service.
# After=postgresql.service
[Service]
Type=notify

View File

@@ -36,6 +36,7 @@ esac
dh_virtualenv \
--install-suffix "matrix-synapse" \
--builtin-venv \
--setuptools \
--python "$SNAKE" \
--upgrade-pip \
--preinstall="lxml" \

165
debian/changelog vendored
View File

@@ -1,169 +1,8 @@
matrix-synapse-py3 (1.19.2) stable; urgency=medium
matrix-synapse-py3 (1.7.3ubuntu1) UNRELEASED; urgency=medium
* New synapse release 1.19.2.
-- Synapse Packaging team <packages@matrix.org> Wed, 16 Sep 2020 12:50:30 +0100
matrix-synapse-py3 (1.19.1) stable; urgency=medium
* New synapse release 1.19.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 27 Aug 2020 10:50:19 +0100
matrix-synapse-py3 (1.19.0) stable; urgency=medium
[ Synapse Packaging team ]
* New synapse release 1.19.0.
[ Aaron Raimist ]
* Fix outdated documentation for SYNAPSE_CACHE_FACTOR
-- Synapse Packaging team <packages@matrix.org> Mon, 17 Aug 2020 14:06:42 +0100
matrix-synapse-py3 (1.18.0) stable; urgency=medium
* New synapse release 1.18.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 30 Jul 2020 10:55:53 +0100
matrix-synapse-py3 (1.17.0) stable; urgency=medium
* New synapse release 1.17.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 13 Jul 2020 10:20:31 +0100
matrix-synapse-py3 (1.16.1) stable; urgency=medium
* New synapse release 1.16.1.
-- Synapse Packaging team <packages@matrix.org> Fri, 10 Jul 2020 12:09:24 +0100
matrix-synapse-py3 (1.17.0rc1) stable; urgency=medium
* New synapse release 1.17.0rc1.
-- Synapse Packaging team <packages@matrix.org> Thu, 09 Jul 2020 16:53:12 +0100
matrix-synapse-py3 (1.16.0) stable; urgency=medium
* New synapse release 1.16.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 08 Jul 2020 11:03:48 +0100
matrix-synapse-py3 (1.15.2) stable; urgency=medium
* New synapse release 1.15.2.
-- Synapse Packaging team <packages@matrix.org> Thu, 02 Jul 2020 10:34:00 -0400
matrix-synapse-py3 (1.15.1) stable; urgency=medium
* New synapse release 1.15.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Jun 2020 10:27:50 +0100
matrix-synapse-py3 (1.15.0) stable; urgency=medium
* New synapse release 1.15.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 11 Jun 2020 13:27:06 +0100
matrix-synapse-py3 (1.14.0) stable; urgency=medium
* New synapse release 1.14.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 28 May 2020 10:37:27 +0000
matrix-synapse-py3 (1.13.0) stable; urgency=medium
[ Patrick Cloke ]
* Add information about .well-known files to Debian installation scripts.
[ Synapse Packaging team ]
* New synapse release 1.13.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 May 2020 09:16:56 -0400
matrix-synapse-py3 (1.12.4) stable; urgency=medium
* New synapse release 1.12.4.
-- Synapse Packaging team <packages@matrix.org> Thu, 23 Apr 2020 10:58:14 -0400
matrix-synapse-py3 (1.12.3) stable; urgency=medium
[ Richard van der Hoff ]
* Update the Debian build scripts to handle the new installation paths
for the support libraries introduced by Pillow 7.1.1.
[ Synapse Packaging team ]
* New synapse release 1.12.3.
-- Synapse Packaging team <packages@matrix.org> Fri, 03 Apr 2020 10:55:03 +0100
matrix-synapse-py3 (1.12.2) stable; urgency=medium
* New synapse release 1.12.2.
-- Synapse Packaging team <packages@matrix.org> Mon, 02 Apr 2020 19:02:17 +0000
matrix-synapse-py3 (1.12.1) stable; urgency=medium
* New synapse release 1.12.1.
-- Synapse Packaging team <packages@matrix.org> Mon, 02 Apr 2020 11:30:47 +0000
matrix-synapse-py3 (1.12.0) stable; urgency=medium
* New synapse release 1.12.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 23 Mar 2020 12:13:03 +0000
matrix-synapse-py3 (1.11.1) stable; urgency=medium
* New synapse release 1.11.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 Mar 2020 15:01:22 +0000
matrix-synapse-py3 (1.11.0) stable; urgency=medium
* New synapse release 1.11.0.
-- Synapse Packaging team <packages@matrix.org> Fri, 21 Feb 2020 08:54:34 +0000
matrix-synapse-py3 (1.10.1) stable; urgency=medium
* New synapse release 1.10.1.
-- Synapse Packaging team <packages@matrix.org> Mon, 17 Feb 2020 16:27:28 +0000
matrix-synapse-py3 (1.10.0) stable; urgency=medium
* New synapse release 1.10.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 12 Feb 2020 12:18:54 +0000
matrix-synapse-py3 (1.9.1) stable; urgency=medium
* New synapse release 1.9.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Jan 2020 13:09:23 +0000
matrix-synapse-py3 (1.9.0) stable; urgency=medium
* New synapse release 1.9.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 23 Jan 2020 12:56:31 +0000
matrix-synapse-py3 (1.8.0) stable; urgency=medium
[ Richard van der Hoff ]
* Automate generation of the default log configuration file.
[ Synapse Packaging team ]
* New synapse release 1.8.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 09 Jan 2020 11:39:27 +0000
-- Richard van der Hoff <richard@matrix.org> Fri, 03 Jan 2020 13:55:38 +0000
matrix-synapse-py3 (1.7.3) stable; urgency=medium

View File

@@ -1,2 +1,2 @@
# Specify environment variables used when running Synapse
# SYNAPSE_CACHE_FACTOR=0.5 (default)
# SYNAPSE_CACHE_FACTOR=1 (default)

View File

@@ -1,14 +1,14 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the matrix-synapse-py3 package.
# This file is distributed under the same license as the matrix-synapse package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: matrix-synapse-py3\n"
"Report-Msgid-Bugs-To: matrix-synapse-py3@packages.debian.org\n"
"POT-Creation-Date: 2020-04-06 16:39-0400\n"
"Project-Id-Version: matrix-synapse\n"
"Report-Msgid-Bugs-To: matrix-synapse@packages.debian.org\n"
"POT-Creation-Date: 2017-02-21 07:51+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -28,10 +28,7 @@ msgstr ""
#: ../templates:1001
msgid ""
"The name that this homeserver will appear as, to clients and other servers "
"via federation. This is normally the public hostname of the server running "
"synapse, but can be different if you set up delegation. Please refer to the "
"delegation documentation in this case: https://github.com/matrix-org/synapse/"
"blob/master/docs/delegate.md."
"via federation. This name should match the SRV record published in DNS."
msgstr ""
#. Type: boolean

33
debian/rules vendored
View File

@@ -15,38 +15,17 @@ override_dh_installinit:
# we don't really want to strip the symbols from our object files.
override_dh_strip:
# dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files
# (executables and shared libs) in the package, and looks for the shared
# libraries that they depend on. It then adds a dependency on the package that
# contains that library to the package.
#
# We make two modifications to that process...
#
override_dh_shlibdeps:
# Firstly, postgres is not a hard dependency for us, so we want to make
# the things that psycopg2 depends on (such as libpq) be
# recommendations rather than hard dependencies. We do so by
# running dpkg-shlibdeps manually on psycopg2's libs.
#
# make the postgres package's dependencies a recommendation
# rather than a hard dependency.
find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \
xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \
-pshlibs1 -dRecommends
# secondly, we exclude PIL's libraries from the process. They are known
# to be self-contained, but they have interdependencies and
# dpkg-shlibdeps doesn't know how to resolve them.
#
# As of Pillow 7.1.0, these libraries are in
# site-packages/Pillow.libs. Previously, they were in
# site-packages/PIL/.libs.
#
# (we also need to exclude psycopg2, of course, since we've already
# dealt with that.)
#
dh_shlibdeps \
-X site-packages/PIL/.libs \
-X site-packages/Pillow.libs \
-X site-packages/psycopg2
# all the other dependencies can be normal 'Depends' requirements,
# except for PIL's, which is self-contained and which confuses
# dpkg-shlibdeps.
dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2
override_dh_virtualenv:
./debian/build_virtualenv

27
debian/synctl.ronn vendored
View File

@@ -46,20 +46,19 @@ Configuration file may be generated as follows:
## ENVIRONMENT
* `SYNAPSE_CACHE_FACTOR`:
Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in the future, but for now the easiest
way to either reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory constrained environments, or increased if performance starts to
degrade.
However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due to
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
Synapse's architecture is quite RAM hungry currently - a lot of
recent room data and metadata is deliberately cached in RAM in
order to speed up common requests. This will be improved in
future, but for now the easiest way to either reduce the RAM usage
(at the risk of slowing things down) is to set the
SYNAPSE_CACHE_FACTOR environment variable. Roughly speaking, a
SYNAPSE_CACHE_FACTOR of 1.0 will max out at around 3-4GB of
resident memory - this is what we currently run the matrix.org
homeserver on. The default setting is currently 0.1, which is probably around
a ~700MB footprint. You can dial it down further to 0.02 if
desired, which targets roughly ~512MB. Conversely you can dial it
up if you need performance for lots of users and have a box with a
lot of RAM.
## COPYRIGHT

6
debian/templates vendored
View File

@@ -2,10 +2,8 @@ Template: matrix-synapse/server-name
Type: string
_Description: Name of the server:
The name that this homeserver will appear as, to clients and other
servers via federation. This is normally the public hostname of the
server running synapse, but can be different if you set up delegation.
Please refer to the delegation documentation in this case:
https://github.com/matrix-org/synapse/blob/master/docs/delegate.md.
servers via federation. This name should match the SRV record
published in DNS.
Template: matrix-synapse/report-stats
Type: boolean

View File

@@ -16,31 +16,34 @@ ARG PYTHON_VERSION=3.7
###
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder
# install the OS build deps
RUN apk add \
build-base \
libffi-dev \
libjpeg-turbo-dev \
libressl-dev \
libxslt-dev \
linux-headers \
postgresql-dev \
zlib-dev
RUN apt-get update && apt-get install -y \
build-essential \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*
# build things which have slow build steps, before we copy synapse, so that
# the layer can be cached.
#
# (we really just care about caching a wheel here, as the "pip install" below
# will install them again.)
# Build dependencies that are not available as wheels, to speed up rebuilds
RUN pip install --prefix="/install" --no-warn-script-location \
frozendict \
jaeger-client \
opentracing \
prometheus-client \
psycopg2 \
pycparser \
pyrsistent \
pyyaml \
simplejson \
threadloop \
thrift
cryptography \
msgpack-python \
pillow \
pynacl
# now install synapse and all of the python deps to /install.
COPY synapse /synapse/synapse/
COPY scripts /synapse/scripts/
COPY MANIFEST.in README.rst setup.py synctl /synapse/
@@ -52,13 +55,19 @@ RUN pip install --prefix="/install" --no-warn-script-location \
### Stage 1: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim
FROM docker.io/python:${PYTHON_VERSION}-alpine3.10
RUN apt-get update && apt-get install -y \
libpq5 \
xmlsec1 \
gosu \
&& rm -rf /var/lib/apt/lists/*
# xmlsec is required for saml support
RUN apk add --no-cache --virtual .runtime_deps \
libffi \
libjpeg-turbo \
libressl \
libxslt \
libpq \
zlib \
su-exec \
tzdata \
xmlsec
COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py

View File

@@ -27,18 +27,15 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
wget
# fetch and unpack the package
RUN mkdir /dh-virtualenv
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
RUN wget -q -O /dh-virtuenv-1.1.tar.gz https://github.com/spotify/dh-virtualenv/archive/1.1.tar.gz
RUN tar xvf /dh-virtuenv-1.1.tar.gz
# install its build deps. We do another apt-cache-update here, because we might
# be using a stale cache from docker build.
RUN apt-get update -qq -o Acquire::Languages=none \
&& cd /dh-virtualenv \
&& env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -y --no-install-recommends"
# install its build deps
RUN cd dh-virtualenv-1.1/ \
&& env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -yqq --no-install-recommends"
# build it
RUN cd /dh-virtualenv && dpkg-buildpackage -us -uc -b
RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
###
### Stage 1
@@ -71,12 +68,12 @@ RUN apt-get update -qq -o Acquire::Languages=none \
sqlite3 \
libpq-dev
COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /
COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
# install dh-virtualenv. Update the apt cache again first, in case we got a
# cached cache from docker the first time.
RUN apt-get update -qq -o Acquire::Languages=none \
&& apt-get install -yq /dh-virtualenv_1.2~dev-1_all.deb
&& apt-get install -yq /dh-virtualenv_1.1-1_all.deb
WORKDIR /synapse/source
ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]

View File

@@ -94,21 +94,6 @@ The following environment variables are supported in run mode:
* `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`.
* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`.
## Generating an (admin) user
After synapse is running, you may wish to create a user via `register_new_matrix_user`.
This requires a `registration_shared_secret` to be set in your config file. Synapse
must be restarted to pick up this change.
You can then call the script:
```
docker exec -it synapse register_new_matrix_user http://localhost:8008 -c /data/homeserver.yaml --help
```
Remember to remove the `registration_shared_secret` and restart if you no longer need it.
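For example, a non-interactive registration might look like the following
sketch, where `-u`/`-p` set the credentials, `-a` makes the user an admin, and
the URL assumes synapse is listening on port 8008:
```
docker exec -it synapse register_new_matrix_user http://localhost:8008 \
    -c /data/homeserver.yaml -u admin -p 'a-strong-password' -a
```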
## TLS support
The default configuration exposes a single HTTP port: http://localhost:8008. It
@@ -125,12 +110,12 @@ argument to `docker run`.
## Legacy dynamic configuration file support
The docker image used to support creating a dynamic configuration file based
on environment variables. This is no longer supported, and an error will be
raised if you try to run synapse without a config file.
For backwards-compatibility only, the docker image supports creating a dynamic
configuration file based on environment variables. This is now deprecated, but
is enabled when the `SYNAPSE_SERVER_NAME` variable is set (and `generate` is
not given).
It is, however, possible to generate a static configuration file based on
the environment variables that were previously used. To do this, run the docker
To migrate from a dynamic configuration file to a static one, run the docker
container once with the environment variables set, and `migrate_config`
command line option. For example:
@@ -142,20 +127,15 @@ docker run -it --rm \
matrixdotorg/synapse:latest migrate_config
```
This will generate the same configuration file as the legacy mode used, and
will store it in `/data/homeserver.yaml`. You can then use it as shown above at
[Running synapse](#running-synapse).
Note that the defaults used in this configuration file may be different to
those when generating a new config file with `generate`: for example, TLS is
enabled by default in this mode. You are encouraged to inspect the generated
configuration file and edit it to ensure it meets your needs.
This will generate the same configuration file as the legacy mode used, but
will store it in `/data/homeserver.yaml` instead of a temporary location. You
can then use it as shown above at [Running synapse](#running-synapse).
## Building the image
If you need to build the image from a Synapse checkout, use the following `docker
build` command from the repo's root:
```
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```

View File

@@ -4,10 +4,16 @@ formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse.storage.SQL:

View File

@@ -120,7 +120,7 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
if ownership is not None:
subprocess.check_output(["chown", "-R", ownership, "/data"])
args = ["gosu", ownership] + args
args = ["su-exec", ownership] + args
subprocess.check_output(args)
@@ -172,8 +172,8 @@ def run_generate_config(environ, ownership):
# make sure that synapse has perms to write to the data dir.
subprocess.check_output(["chown", ownership, data_dir])
args = ["gosu", ownership] + args
os.execv("/usr/sbin/gosu", args)
args = ["su-exec", ownership] + args
os.execv("/sbin/su-exec", args)
else:
os.execv("/usr/local/bin/python", args)
@@ -188,8 +188,13 @@ def main(args, environ):
else:
ownership = "{}:{}".format(desired_uid, desired_gid)
log(
"Container running as UserID %s:%s, ENV (or defaults) requests %s:%s"
% (os.getuid(), os.getgid(), desired_uid, desired_gid)
)
if ownership is None:
log("Will not perform chmod/gosu as UserID already matches request")
log("Will not perform chmod/su-exec as UserID already matches request")
# In generate mode, generate a configuration and missing keys, then exit
if mode == "generate":
@@ -208,36 +213,44 @@ def main(args, environ):
if mode is not None:
error("Unknown execution mode '%s'" % (mode,))
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
if not os.path.exists(config_path):
if "SYNAPSE_SERVER_NAME" in environ:
if "SYNAPSE_SERVER_NAME" in environ:
# backwards-compatibility generate-a-config-on-the-fly mode
if "SYNAPSE_CONFIG_PATH" in environ:
error(
"""\
Config file '%s' does not exist.
The synapse docker image no longer supports generating a config file on-the-fly
based on environment variables. You can migrate to a static config file by
running with 'migrate_config'. See the README for more details.
"""
% (config_path,)
"SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH "
"in `generate` or `migrate_config` mode. To start synapse using a "
"config file, unset the SYNAPSE_SERVER_NAME environment variable."
)
error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
config_path = "/compiled/homeserver.yaml"
log(
"Generating config file '%s' on-the-fly from environment variables.\n"
"Note that this mode is deprecated. You can migrate to a static config\n"
"file by running with 'migrate_config'. See the README for more details."
% (config_path,)
)
generate_config_from_template("/compiled", config_path, environ, ownership)
else:
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
)
if not os.path.exists(config_path):
error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
% (config_path,)
)
log("Starting synapse with config file " + config_path)
args = ["python", "-m", synapse_worker, "--config-path", config_path]
if ownership is not None:
args = ["gosu", ownership] + args
os.execv("/usr/sbin/gosu", args)
args = ["su-exec", ownership] + args
os.execv("/sbin/su-exec", args)
else:
os.execv("/usr/local/bin/python", args)

View File

@@ -1,4 +1,4 @@
# This file is maintained as an up-to-date snapshot of the default
# The config is maintained as an up-to-date snapshot of the default
# homeserver.yaml configuration generated by Synapse.
#
# It is intended to act as a reference for the default configuration,
@@ -10,16 +10,3 @@
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.
# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
# s = second
# m = minute
# h = hour
# d = day
# w = week
# y = year
# For example, setting redaction_retention_period: 5m would remove redacted
# messages from the database after 5 minutes, rather than 5 months.
################################################################################

View File

@@ -1,49 +1,12 @@
# ACME
From version 1.0 (June 2019) onwards, Synapse requires valid TLS
certificates for communication between servers (by default on port
`8448`) in addition to those that are client-facing (port `443`). To
help homeserver admins fulfil this new requirement, Synapse v0.99.0
introduced support for automatically provisioning certificates through
[Let's Encrypt](https://letsencrypt.org/) using the ACME protocol.
## Deprecation of ACME v1
In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430),
Let's Encrypt announced that they were deprecating version 1 of the ACME
protocol, with the plan to disable the use of it for new accounts in
November 2019, for new domains in June 2020, and for existing accounts and
domains in June 2021.
Synapse doesn't currently support version 2 of the ACME protocol, which
means that:
* for existing installs, Synapse's built-in ACME support will continue
to work until June 2021.
* for new installs, this feature will not work at all.
Either way, it is recommended to move from Synapse's ACME support
feature to an external automated tool such as [certbot](https://github.com/certbot/certbot)
(or browse [this list](https://letsencrypt.org/fr/docs/client-options/)
for an alternative ACME client).
It's also recommended to use a reverse proxy for the server-facing
communications (more documentation about this can be found
[here](/docs/reverse_proxy.md)) as well as the client-facing ones and
have it serve the certificates.
In case you can't do that and need Synapse to serve them itself, make
sure to set the `tls_certificate_path` configuration setting to the path
of the certificate (make sure to use the certificate containing the full
certification chain, e.g. `fullchain.pem` if using certbot) and
`tls_private_key_path` to the path of the matching private key. Note
that in this case you will need to restart Synapse after each
certificate renewal so that Synapse stops using the old certificate.
If you still want to use Synapse's built-in ACME support, the rest of
this document explains how to set it up.
## Initial setup
Synapse v1.0 will require valid TLS certificates for communication between
servers (port `8448` by default) in addition to those that are client-facing
(port `443`). If you do not already have a valid certificate for your domain,
the easiest way to get one is with Synapse's new ACME support, which will use
the ACME protocol to provision a certificate automatically. Synapse v0.99.0+
will provision server-to-server certificates automatically for you for free
through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.
In the case that your `server_name` config variable is the same as
the hostname that the client connects to, then the same certificate can be
@@ -69,6 +32,11 @@ If you already have certificates, you will need to back up or delete them
(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
directory), Synapse's ACME implementation will not overwrite them.
You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you
already have a valid certificate for your homeserver's domain, that can be
placed in Synapse's config directory without the need for any ACME setup.
## ACME setup
The main steps for enabling ACME support in short summary are:

View File

@@ -4,21 +4,17 @@ Admin APIs
This directory includes documentation for the various synapse specific admin
APIs available.
Authenticating as a server admin
--------------------------------
Only users that are server admins can use these APIs. A user can be marked as a
server admin by updating the database directly, e.g.:
Many of the API calls in the admin api will require an `access_token` for a
server admin. (Note that a server admin is distinct from a room admin.)
``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``
A user can be marked as a server admin by updating the database directly, e.g.:
Restarting may be required for the changes to register.
.. code-block:: sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
A new server admin user can also be created using the
``register_new_matrix_user`` script.
Using an admin access_token
###########################
Many of the API calls listed in the documentation here will require you to include an admin `access_token`.
Finding your user's `access_token` is client-dependent, but it will usually be shown in the client's settings.
Once you have your `access_token`, to include it in a request, the best option is to add the token to a request header:
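For example, with `curl` (a sketch; the token, the server address and the
endpoint are placeholders):
```
curl --header "Authorization: Bearer <access_token>" \
    'http://localhost:8008/_synapse/admin/<endpoint>'
```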

View File

@@ -4,11 +4,11 @@ This API lets a server admin delete a local group. Doing so will kick all
users out of the group so that their clients will correctly handle the group
being deleted.
The API is:
```
POST /_synapse/admin/v1/delete_group/<group_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
including an `access_token` of a server admin.
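A `curl` sketch of the call (the group ID, token and server address are
placeholders):
```
curl --header "Authorization: Bearer <access_token>" -X POST \
    'http://localhost:8008/_synapse/admin/v1/delete_group/+badgroup:example.com'
```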

View File

@@ -6,10 +6,9 @@ The API is:
```
GET /_synapse/admin/v1/room/<room_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
including an `access_token` of a server admin.
The API returns a JSON body like the following:
It returns a JSON body like the following:
```
{
"local": [
@@ -23,80 +22,19 @@ The API returns a JSON body like the following:
}
```
# Quarantine media
# Quarantine media in a room
This API 'quarantines' all the media in a room.
The API is:
```
POST /_synapse/admin/v1/quarantine_media/<room_id>
{}
```
Quarantining media means that it is marked as inaccessible by users. It applies
to any local media, and any locally-cached copies of remote media.
The media file itself (and any thumbnails) is not deleted from the server.
## Quarantining media by ID
This API quarantines a single piece of local or remote media.
Request:
```
POST /_synapse/admin/v1/media/quarantine/<server_name>/<media_id>
{}
```
Where `server_name` is in the form of `example.org`, and `media_id` is in the
form of `abcdefg12345...`.
Response:
```
{}
```
## Quarantining media in a room
This API quarantines all local and remote media in a room.
Request:
```
POST /_synapse/admin/v1/room/<room_id>/media/quarantine
{}
```
Where `room_id` is in the form of `!roomid12345:example.org`.
Response:
```
{
"num_quarantined": 10 # The number of media items successfully quarantined
}
```
Note that there is a legacy endpoint, `POST
/_synapse/admin/v1/quarantine_media/<room_id>`, that operates in the same way.
However, it is deprecated and may be removed in a future release.
## Quarantining all media of a user
This API quarantines all *local* media that a *local* user has uploaded. That is to say, if
you would like to quarantine media uploaded by a user on a remote homeserver, you should
instead use one of the other APIs.
Request:
```
POST /_synapse/admin/v1/user/<user_id>/media/quarantine
{}
```
Where `user_id` is in the form of `@bob:example.org`.
Response:
```
{
"num_quarantined": 10 # The number of media items successfully quarantined
}
```
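For example, to quarantine all local media uploaded by `@bob:example.org` (a
sketch; the token and server address are placeholders):
```
curl --header "Authorization: Bearer <access_token>" -X POST \
    'http://localhost:8008/_synapse/admin/v1/user/@bob:example.org/media/quarantine' \
    --data '{}'
```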

View File

@@ -8,15 +8,11 @@ Depending on the amount of history being purged a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.
Note that Synapse requires at least one message in each room, so it will never
delete the last message in a room.
The API is:
``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
including an ``access_token`` of a server admin.
By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
@@ -55,10 +51,8 @@ It is possible to poll for updates on recent purges with a second API;
``GET /_synapse/admin/v1/purge_history_status/<purge_id>``
Again, you will need to authenticate by providing an ``access_token`` for a
server admin.
This API returns a JSON body like the following:
(again, with a suitable ``access_token``). This API returns a JSON body like
the following:
.. code:: json

View File

@@ -6,15 +6,12 @@ media.
The API is::
POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
{}
\... which will remove all cached media that was last accessed before
Which will remove all cached media that was last accessed before
``<unix_timestamp_in_ms>``.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
If the user re-requests purged remote media, synapse will re-request the media
from the originating server.
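As a rough sketch, the timestamp can be computed with ``date`` (GNU coreutils
assumed; the token and server address are placeholders)::

    # purge remote media not accessed in the last 30 days
    BEFORE_MS=$(( ($(date +%s) - 30*24*60*60) * 1000 ))
    curl --header "Authorization: Bearer <access_token>" -X POST \
        "http://localhost:8008/_synapse/admin/v1/purge_media_cache?before_ts=${BEFORE_MS}" \
        --data '{}'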

View File

@@ -5,8 +5,6 @@ This API will remove all trace of a room from your database.
All local users must have left the room before it can be removed.
See also: [Delete Room API](rooms.md#delete-room-api)
The API is:
```

View File

@@ -1,35 +0,0 @@
# Edit Room Membership API
This API allows an administrator to join a user account with a given `user_id`
to a room with a given `room_id_or_alias`. You can only modify the membership of
local users. The server administrator must be in the room and have permission to
invite users.
## Parameters
The following parameters are available:
* `user_id` - Fully qualified user: for example, `@user:server.com`.
* `room_id_or_alias` - The room identifier or alias to join: for example,
`!636q39766251:server.com`.
## Usage
```
POST /_synapse/admin/v1/join/<room_id_or_alias>
{
"user_id": "@user:server.com"
}
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
Response:
```
{
"room_id": "!636q39766251:server.com"
}
```
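The same call as a `curl` sketch (the IDs, token and server address are
placeholders):
```
curl --header "Authorization: Bearer <access_token>" -X POST \
    'http://localhost:8008/_synapse/admin/v1/join/!636q39766251:server.com' \
    --data '{"user_id": "@user:server.com"}'
```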

View File

@@ -1,451 +0,0 @@
# List Room API
The List Room admin API allows server admins to get a list of rooms on their
server. There are various parameters available that allow for filtering and
sorting the returned list. This API supports pagination.
## Parameters
The following query parameters are available:
* `from` - Offset in the returned list. Defaults to `0`.
* `limit` - Maximum amount of rooms to return. Defaults to `100`.
* `order_by` - The method in which to sort the returned list of rooms. Valid values are:
- `alphabetical` - Same as `name`. This is deprecated.
- `size` - Same as `joined_members`. This is deprecated.
- `name` - Rooms are ordered alphabetically by room name. This is the default.
- `canonical_alias` - Rooms are ordered alphabetically by main alias address of the room.
- `joined_members` - Rooms are ordered by the number of members. Largest to smallest.
- `joined_local_members` - Rooms are ordered by the number of local members. Largest to smallest.
- `version` - Rooms are ordered by room version. Largest to smallest.
- `creator` - Rooms are ordered alphabetically by creator of the room.
- `encryption` - Rooms are ordered alphabetically by the end-to-end encryption algorithm.
- `federatable` - Rooms are ordered by whether the room is federatable.
- `public` - Rooms are ordered by visibility in room list.
- `join_rules` - Rooms are ordered alphabetically by join rules of the room.
- `guest_access` - Rooms are ordered alphabetically by guest access option of the room.
- `history_visibility` - Rooms are ordered alphabetically by visibility of history of the room.
- `state_events` - Rooms are ordered by number of state events. Largest to smallest.
* `dir` - Direction of room order. Either `f` for forwards or `b` for backwards. Setting
this value to `b` will reverse the above sort order. Defaults to `f`.
* `search_term` - Filter rooms by their room name. Search term can be contained in any
part of the room name. Defaults to no filtering.
The following fields are possible in the JSON response body:
* `rooms` - An array of objects, each containing information about a room.
- Room objects contain the following fields:
- `room_id` - The ID of the room.
- `name` - The name of the room.
- `canonical_alias` - The canonical (main) alias address of the room.
- `joined_members` - How many users are currently in the room.
- `joined_local_members` - How many local users are currently in the room.
- `version` - The version of the room as a string.
- `creator` - The `user_id` of the room creator.
- `encryption` - Algorithm of end-to-end encryption of messages. Is `null` if encryption is not active.
- `federatable` - Whether users on other servers can join this room.
- `public` - Whether the room is visible in room directory.
- `join_rules` - The type of rules used for users wishing to join this room. One of: ["public", "knock", "invite", "private"].
- `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"].
- `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
- `state_events` - Total number of state_events of a room. Complexity of the room.
* `offset` - The current pagination offset in rooms. This parameter should be
used instead of `next_token` for room offset as `next_token` is
not intended to be parsed.
* `total_rooms` - The total number of rooms this query can return. Using this
and `offset`, you have enough information to know the current
progression through the list.
* `next_batch` - If this field is present, we know that there are potentially
more rooms on the server that did not all fit into this response.
We can use `next_batch` to get the "next page" of results. To do
so, simply repeat your request, setting the `from` parameter to
the value of `next_batch`.
* `prev_batch` - If this field is present, it is possible to paginate backwards.
Use `prev_batch` for the `from` value in the next request to
get the "previous page" of results.
## Usage
A standard request with no filtering:
```
GET /_synapse/admin/v1/rooms
{}
```
Response:
```
{
"rooms": [
{
"room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
"name": "Matrix HQ",
"canonical_alias": "#matrix:matrix.org",
"joined_members": 8326,
"joined_local_members": 2,
"version": "1",
"creator": "@foo:matrix.org",
"encryption": null,
"federatable": true,
"public": true,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534
},
... (8 hidden items) ...
{
"room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
"name": "This Week In Matrix (TWIM)",
"canonical_alias": "#twim:matrix.org",
"joined_members": 314,
"joined_local_members": 20,
"version": "4",
"creator": "@foo:matrix.org",
"encryption": "m.megolm.v1.aes-sha2",
"federatable": true,
"public": false,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8345
}
],
"offset": 0,
"total_rooms": 10
}
```
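For instance, a `curl` sketch combining several of the query parameters above
(the token and server address are placeholders):
```
curl --header "Authorization: Bearer <access_token>" \
    'http://localhost:8008/_synapse/admin/v1/rooms?search_term=TWIM&order_by=joined_members&dir=b'
```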
Filtering by room name:
```
GET /_synapse/admin/v1/rooms?search_term=TWIM
{}
```
Response:
```
{
"rooms": [
{
"room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
"name": "This Week In Matrix (TWIM)",
"canonical_alias": "#twim:matrix.org",
"joined_members": 314,
"joined_local_members": 20,
"version": "4",
"creator": "@foo:matrix.org",
"encryption": "m.megolm.v1.aes-sha2",
"federatable": true,
"public": false,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8
}
],
"offset": 0,
"total_rooms": 1
}
```
Paginating through a list of rooms:
```
GET /_synapse/admin/v1/rooms?order_by=size
{}
```
Response:
```
{
"rooms": [
{
"room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
"name": "Matrix HQ",
"canonical_alias": "#matrix:matrix.org",
"joined_members": 8326,
"joined_local_members": 2,
"version": "1",
"creator": "@foo:matrix.org",
"encryption": null,
"federatable": true,
"public": true,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534
},
... (98 hidden items) ...
{
"room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
"name": "This Week In Matrix (TWIM)",
"canonical_alias": "#twim:matrix.org",
"joined_members": 314,
"joined_local_members": 20,
"version": "4",
"creator": "@foo:matrix.org",
"encryption": "m.megolm.v1.aes-sha2",
"federatable": true,
"public": false,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8345
}
],
"offset": 0,
"total_rooms": 150
"next_token": 100
}
```
The presence of the `next_token` parameter tells us that there are more rooms
than returned in this request, and we need to make another request to get them.
To get the next batch of room results, we repeat our request, setting the `from`
parameter to the value of `next_token`.
```
GET /_synapse/admin/v1/rooms?order_by=size&from=100
{}
```
Response:
```
{
"rooms": [
{
"room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
"name": "Music Theory",
"canonical_alias": "#musictheory:matrix.org",
"joined_members": 127
"joined_local_members": 2,
"version": "1",
"creator": "@foo:matrix.org",
"encryption": null,
"federatable": true,
"public": true,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534
},
... (48 hidden items) ...
{
"room_id": "!twcBhHVdZlQWuuxBhN:termina.org.uk",
"name": "weechat-matrix",
"canonical_alias": "#weechat-matrix:termina.org.uk",
"joined_members": 137
"joined_local_members": 20,
"version": "4",
"creator": "@foo:termina.org.uk",
"encryption": null,
"federatable": true,
"public": true,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8345
}
],
"offset": 100,
"prev_batch": 0,
"total_rooms": 150
}
```
Once the `next_token` parameter is no longer present, we know we've reached the
end of the list.
# DRAFT: Room Details API
The Room Details admin API allows server admins to get all details of a room.
This API is still a draft and details might change!
The following fields are possible in the JSON response body:
* `room_id` - The ID of the room.
* `name` - The name of the room.
* `canonical_alias` - The canonical (main) alias address of the room.
* `joined_members` - How many users are currently in the room.
* `joined_local_members` - How many local users are currently in the room.
* `version` - The version of the room as a string.
* `creator` - The `user_id` of the room creator.
* `encryption` - Algorithm of end-to-end encryption of messages. Is `null` if encryption is not active.
* `federatable` - Whether users on other servers can join this room.
* `public` - Whether the room is visible in room directory.
* `join_rules` - The type of rules used for users wishing to join this room. One of: ["public", "knock", "invite", "private"].
* `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"].
* `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
* `state_events` - Total number of state_events of a room. Complexity of the room.
## Usage
A standard request:
```
GET /_synapse/admin/v1/rooms/<room_id>
{}
```
Response:
```
{
"room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
"name": "Music Theory",
"canonical_alias": "#musictheory:matrix.org",
"joined_members": 127
"joined_local_members": 2,
"version": "1",
"creator": "@foo:matrix.org",
"encryption": null,
"federatable": true,
"public": true,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534
}
```
# Room Members API
The Room Members admin API allows server admins to get a list of all members of a room.
The response includes the following fields:
* `members` - A list of all the members that are present in the room, represented by their ids.
* `total` - Total number of members in the room.
## Usage
A standard request:
```
GET /_synapse/admin/v1/rooms/<room_id>/members
{}
```
Response:
```
{
"members": [
"@foo:matrix.org",
"@bar:matrix.org",
"@foobar:matrix.org
],
"total": 3
}
```
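As a `curl` sketch, using `jq` to pull out the member count (the room ID, token
and server address are placeholders):
```
curl -s --header "Authorization: Bearer <access_token>" \
    'http://localhost:8008/_synapse/admin/v1/rooms/!636q39766251:server.com/members' \
    | jq '.total'
```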
# Delete Room API
The Delete Room admin API allows server admins to remove rooms from the server
and block these rooms.
It is a combination and improvement of "[Shutdown room](shutdown_room.md)"
and "[Purge room](purge_room.md)" API.
Shuts down a room. If `new_room_user_id` is set, all local users and room
aliases are automatically moved to a new room; otherwise, local users simply
leave the room without any further information.
The new room will be created with the user specified by the `new_room_user_id` parameter
as room administrator and will contain a message explaining what happened. Users invited
to the new room will have power level `-10` by default, and thus be unable to speak.
If `block` is `true`, it prevents new joins to the old room.
If `purge` is `true` (the default), all traces of the old room will be removed
from your database after all local users have left. If you do not want this to
happen, set `purge` to `false`.
Depending on the amount of history being purged a call to the API may take
several minutes or longer.
The local server will only have the power to move local users and room aliases to
the new room. Users on other servers will be unaffected.
The API is:
```
POST /_synapse/admin/v1/rooms/<room_id>/delete
```
with a body of:
```json
{
"new_room_user_id": "@someuser:example.com",
"room_name": "Content Violation Notification",
"message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service.",
"block": true,
"purge": true
}
```
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see [README.rst](README.rst).
A response body like the following is returned:
```json
{
"kicked_users": [
"@foobar:example.com"
],
"failed_to_kick_users": [],
"local_aliases": [
"#badroom:example.com",
"#evilsaloon:example.com"
],
"new_room_id": "!newroomid:example.com"
}
```
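Put together as a `curl` sketch (all IDs, the token and the server address are
placeholders):
```
curl --header "Authorization: Bearer <access_token>" -X POST \
    'http://localhost:8008/_synapse/admin/v1/rooms/!badroom:example.com/delete' \
    --data '{"new_room_user_id": "@someuser:example.com", "block": true, "purge": true}'
```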
## Parameters
The following parameters should be set in the URL:
* `room_id` - The ID of the room.
The following JSON body parameters are available:
* `new_room_user_id` - Optional. If set, a new room will be created with this user ID
as the creator and admin, and all users in the old room will be moved into that
room. If not set, no new room will be created and the users will just be removed
from the old room. The user ID must be on the local server, but does not necessarily
have to belong to a registered user.
* `room_name` - Optional. A string representing the name of the room that new users will be
invited to. Defaults to `Content Violation Notification`.
* `message` - Optional. A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly convey why the
original room was shut down. Defaults to `Sharing illegal content on this server
is not permitted and rooms in violation will be blocked.`
* `block` - Optional. If set to `true`, this room will be added to a blocking list, preventing
future attempts to join the room. Defaults to `false`.
* `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
Defaults to `true`.
The JSON body must not be empty; at a minimum it must be `{}`.
## Response
The following fields are returned in the JSON response body:
* `kicked_users` - An array of users (`user_id`) that were kicked.
* `failed_to_kick_users` - An array of users (`user_id`) that were not kicked.
* `local_aliases` - An array of strings representing the local aliases that were migrated from
the old room to the new.
* `new_room_id` - A string representing the room ID of the new room.

View File

@@ -10,8 +10,6 @@ disallow any further invites or joins.
The local server will only have the power to move local users and room aliases to
the new room. Users on other servers will be unaffected.
See also: [Delete Room API](rooms.md#delete-room-api)
## API
You will need to authenticate with an access token for an admin user.
@@ -33,7 +31,7 @@ You will need to authenticate with an access token for an admin user.
* `message` - Optional. A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly convey why the
original room was shut down.
If not specified, the default value of `room_name` is "Content Violation
Notification". The default value of `message` is "Sharing illegal content on
this server is not permitted and rooms in violation will be blocked."
@@ -72,30 +70,3 @@ Response:
"new_room_id": "!newroomid:example.com",
},
```
## Undoing room shutdowns
*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
the structure can and does change without notice.
First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
to recover at all:
* If the room was invite-only, your users will need to be re-invited.
* If the room no longer has any members at all, it'll be impossible to rejoin.
* The first user to rejoin will have to do so via an alias on a different server.
With all that being said, if you still want to try and recover the room:
1. For safety reasons, shut down Synapse.
2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
* As a precaution, it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;` (see the example after this list).
* The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
3. Restart Synapse.
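As a concrete sketch of step 2, assuming the Synapse database is named
`synapse` (adjust to your installation):

```bash
# Delete the blocking row inside a single transaction. To verify that
# exactly one row is affected before committing, run the statements one
# at a time in an interactive psql session instead.
psql synapse -c "BEGIN; DELETE FROM blocked_rooms WHERE room_id = '!example:example.org'; COMMIT;"
```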
You will have to manually handle, if you so choose, the following:
* Aliases that would have been redirected to the Content Violation room.
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
* Removal of the Content Violation room if desired.
View File
@@ -1,104 +1,3 @@
.. contents::
Query User Account
==================
This API returns information about a specific user account.
The api is::
GET /_synapse/admin/v2/users/<user_id>
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
It returns a JSON body like the following:
.. code:: json
{
"displayname": "User",
"threepids": [
{
"medium": "email",
"address": "<user_mail_1>"
},
{
"medium": "email",
"address": "<user_mail_2>"
}
],
"avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false
}
URL parameters:
- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.
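For example, fetching these account details might look like the following
sketch (the homeserver address and ``$ACCESS_TOKEN`` value are illustrative
assumptions):

.. code:: bash

   # Query the account details for @user:server.com.
   curl -H "Authorization: Bearer $ACCESS_TOKEN" \
       "http://localhost:8008/_synapse/admin/v2/users/@user:server.com"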
Create or modify Account
========================
This API allows an administrator to create or modify a user account with a
specific ``user_id``.
This api is::
PUT /_synapse/admin/v2/users/<user_id>
with a body of:
.. code:: json
{
"password": "user_password",
"displayname": "User",
"threepids": [
{
"medium": "email",
"address": "<user_mail_1>"
},
{
"medium": "email",
"address": "<user_mail_2>"
}
],
"avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false
}
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
URL parameters:
- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.
Body parameters:
- ``password``, optional. If provided, the user's password is updated and all
devices are logged out.
- ``displayname``, optional, defaults to the value of ``user_id``.
- ``threepids``, optional, allows setting the third-party IDs (email, msisdn)
belonging to a user.
- ``avatar_url``, optional, must be a
`MXC URI <https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris>`_.
- ``admin``, optional, defaults to ``false``.
- ``deactivated``, optional. If unspecified, deactivation state will be left
unchanged on existing accounts and set to ``false`` for new accounts.
If the user already exists then optional parameters default to the current value.
In order to re-activate an account ``deactivated`` must be set to ``false``. If
users do not log in via single-sign-on, a new ``password`` must be provided.
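As an illustrative sketch (homeserver address and token are assumptions), a
minimal request to create a new user could look like:

.. code:: bash

   # Create @user:server.com with a password and display name.
   curl -X PUT \
       -H "Authorization: Bearer $ACCESS_TOKEN" \
       -H "Content-Type: application/json" \
       -d '{"password": "user_password", "displayname": "User"}' \
       "http://localhost:8008/_synapse/admin/v2/users/@user:server.com"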
List Accounts
=============
@@ -108,27 +7,17 @@ The api is::
GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see `README.rst <README.rst>`_.
The parameter ``from`` is optional but used for pagination, denoting the
offset in the returned results. This should be treated as an opaque value and
not explicitly set to anything other than the return value of ``next_token``
from a previous call.
The parameter ``limit`` is optional but is used for pagination, denoting the
maximum number of items to return in this call. Defaults to ``100``.
The parameter ``user_id`` is optional and filters to only users with user IDs
that contain this value.
The parameter ``guests`` is optional and if ``false`` will **exclude** guest users.
Defaults to ``true`` to include guest users.
The parameter ``deactivated`` is optional and if ``true`` will **include** deactivated users.
Defaults to ``false`` to exclude deactivated users.
A JSON body is returned with the following shape:
including an ``access_token`` of a server admin.
The parameters ``from`` and ``limit`` are required only for pagination.
By default, a ``limit`` of 100 is used.
The parameter ``user_id`` can be used to select only users with user ids that
contain this value.
The parameter ``guests=false`` can be used to exclude guest users,
default is to include guest users.
The parameter ``deactivated=true`` can be used to include deactivated users,
default is to exclude deactivated users.
If the endpoint does not return a ``next_token`` then there are no more users left.
It returns a JSON body like the following:
.. code:: json
@@ -140,41 +29,30 @@ A JSON body is returned with the following shape:
"is_guest": 0,
"admin": 0,
"user_type": null,
"deactivated": 0,
"displayname": "<User One>",
"avatar_url": null
"deactivated": 0
}, {
"name": "<user_id2>",
"password_hash": "<password_hash2>",
"is_guest": 0,
"admin": 1,
"user_type": null,
"deactivated": 0,
"displayname": "<User Two>",
"avatar_url": "<avatar_url>"
"deactivated": 0
}
],
"next_token": "100",
"total": 200
"next_token": "100"
}
To paginate, check for ``next_token`` and if present, call the endpoint again
with ``from`` set to the value of ``next_token``. This will return a new page.
If the endpoint does not return a ``next_token`` then there are no more users
to paginate through.
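A sketch of paginating through all users (same illustrative assumptions as
above):

.. code:: bash

   # First page of up to 100 users.
   curl -H "Authorization: Bearer $ACCESS_TOKEN" \
       "http://localhost:8008/_synapse/admin/v2/users?from=0&limit=100"
   # If the response contains "next_token": "100", fetch the next page:
   curl -H "Authorization: Bearer $ACCESS_TOKEN" \
       "http://localhost:8008/_synapse/admin/v2/users?from=100&limit=100"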
Query Account
=============
Query current sessions for a user
=================================
This API returns information about the active sessions for a specific user.
This API returns information about a specific user account.
The api is::
GET /_synapse/admin/v1/whois/<user_id>
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
including an ``access_token`` of a server admin.
It returns a JSON body like the following:
@@ -227,10 +105,9 @@ with a body of:
"erase": true
}
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
including an ``access_token`` of a server admin.
The erase parameter is optional and defaults to ``false``.
The erase parameter is optional and defaults to 'false'.
An empty body may be passed for backwards compatibility.
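For example (assuming the deactivation endpoint is
``/_synapse/admin/v1/deactivate/<user_id>``, with an illustrative homeserver
address and token):

.. code:: bash

   # Deactivate @user:server.com and erase their account data.
   curl -X POST \
       -H "Authorization: Bearer $ACCESS_TOKEN" \
       -H "Content-Type: application/json" \
       -d '{"erase": true}' \
       "http://localhost:8008/_synapse/admin/v1/deactivate/@user:server.com"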
@@ -248,15 +125,11 @@ with a body of:
.. code:: json
{
"new_password": "<secret>",
"logout_devices": true,
"new_password": "<secret>"
}
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
including an ``access_token`` of a server admin.
The parameter ``new_password`` is required.
The parameter ``logout_devices`` is optional and defaults to ``true``.
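As a sketch (assuming the reset endpoint is
``/_synapse/admin/v1/reset_password/<user_id>``, with an illustrative
homeserver address and token):

.. code:: bash

   # Reset the password for @user:server.com, logging out all devices.
   curl -X POST \
       -H "Authorization: Bearer $ACCESS_TOKEN" \
       -H "Content-Type: application/json" \
       -d '{"new_password": "<secret>", "logout_devices": true}' \
       "http://localhost:8008/_synapse/admin/v1/reset_password/@user:server.com"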
Get whether a user is a server administrator or not
===================================================
@@ -266,8 +139,7 @@ The api is::
GET /_synapse/admin/v1/users/<user_id>/admin
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
including an ``access_token`` of a server admin.
A response body like the following is returned:
@@ -295,191 +167,4 @@ with a body of:
"admin": true
}
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
User devices
============
List all devices
----------------
Gets information about all devices for a specific ``user_id``.
The API is::
GET /_synapse/admin/v2/users/<user_id>/devices
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
A response body like the following is returned:
.. code:: json
{
"devices": [
{
"device_id": "QBUAZIFURK",
"display_name": "android",
"last_seen_ip": "1.2.3.4",
"last_seen_ts": 1474491775024,
"user_id": "<user_id>"
},
{
"device_id": "AUIECTSRND",
"display_name": "ios",
"last_seen_ip": "1.2.3.5",
"last_seen_ts": 1474491775025,
"user_id": "<user_id>"
}
]
}
**Parameters**
The following parameters should be set in the URL:
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
**Response**
The following fields are returned in the JSON response body:
- ``devices`` - An array of objects, each containing information about a device.
Device objects contain the following fields:
- ``device_id`` - Identifier of device.
- ``display_name`` - Display name set by the user for this device.
Absent if no name has been set.
- ``last_seen_ip`` - The IP address where this device was last seen.
(May be a few minutes out of date, for efficiency reasons).
- ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
  device was last seen. (May be a few minutes out of date, for efficiency reasons).
- ``user_id`` - Owner of device.
Delete multiple devices
-----------------------
Deletes the given devices for a specific ``user_id``, and invalidates
any access token associated with them.
The API is::
POST /_synapse/admin/v2/users/<user_id>/delete_devices
{
"devices": [
"QBUAZIFURK",
"AUIECTSRND"
],
}
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
An empty JSON dict is returned.
**Parameters**
The following parameters should be set in the URL:
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
The following fields are required in the JSON request body:
- ``devices`` - The list of device IDs to delete.
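For example (homeserver address and token are illustrative assumptions):

.. code:: bash

   # Delete two devices belonging to @user:server.com.
   curl -X POST \
       -H "Authorization: Bearer $ACCESS_TOKEN" \
       -H "Content-Type: application/json" \
       -d '{"devices": ["QBUAZIFURK", "AUIECTSRND"]}' \
       "http://localhost:8008/_synapse/admin/v2/users/@user:server.com/delete_devices"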
Show a device
---------------
Gets information on a single device, by ``device_id`` for a specific ``user_id``.
The API is::
GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
A response body like the following is returned:
.. code:: json
{
"device_id": "<device_id>",
"display_name": "android",
"last_seen_ip": "1.2.3.4",
"last_seen_ts": 1474491775024,
"user_id": "<user_id>"
}
**Parameters**
The following parameters should be set in the URL:
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to retrieve.
**Response**
The following fields are returned in the JSON response body:
- ``device_id`` - Identifier of device.
- ``display_name`` - Display name set by the user for this device.
Absent if no name has been set.
- ``last_seen_ip`` - The IP address where this device was last seen.
(May be a few minutes out of date, for efficiency reasons).
- ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
  device was last seen. (May be a few minutes out of date, for efficiency reasons).
- ``user_id`` - Owner of device.
Update a device
---------------
Updates the metadata on the given ``device_id`` for a specific ``user_id``.
The API is::
PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>
{
"display_name": "My other phone"
}
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
An empty JSON dict is returned.
**Parameters**
The following parameters should be set in the URL:
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to update.
The following fields are required in the JSON request body:
- ``display_name`` - The new display name for this device. If not given,
the display name is unchanged.
Delete a device
---------------
Deletes the given ``device_id`` for a specific ``user_id``,
and invalidates any access token associated with it.
The API is::
DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
{}
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
An empty JSON dict is returned.
**Parameters**
The following parameters should be set in the URL:
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to delete.
including an ``access_token`` of a server admin.
View File
@@ -23,13 +23,9 @@ namespaces:
users: # List of users we're interested in
- exclusive: <bool>
regex: <regex>
group_id: <group>
- ...
aliases: [] # List of aliases we're interested in
rooms: [] # List of room ids we're interested in
```
`exclusive`: If enabled, only this application service is allowed to register users in its namespace(s).
`group_id`: All users of this application service are dynamically joined to this group. This is useful for e.g. user organisation or flairs.
See the [spec](https://matrix.org/docs/spec/application_service/unstable.html) for further details on how application services work.
View File
@@ -30,7 +30,7 @@ The necessary tools are detailed below.
Install `flake8` with:
pip install --upgrade flake8 flake8-comprehensions
pip install --upgrade flake8
Check all application and test code with:
View File
@@ -1,94 +0,0 @@
# Delegation
By default, other homeservers will expect to be able to reach yours via
your `server_name`, on port 8448. For example, if you set your `server_name`
to `example.com` (so that your user names look like `@user:example.com`),
other servers will try to connect to yours at `https://example.com:8448/`.
Delegation is a Matrix feature allowing a homeserver admin to retain a
`server_name` of `example.com` so that user IDs, room aliases, etc continue
to look like `*:example.com`, whilst having federation traffic routed
to a different server and/or port (e.g. `synapse.example.com:443`).
## .well-known delegation
To use this method, you need to be able to alter the
`server_name` 's https server to serve the `/.well-known/matrix/server`
URL. Having an active server (with a valid TLS certificate) serving your
`server_name` domain is out of the scope of this documentation.
The URL `https://<server_name>/.well-known/matrix/server` should
return a JSON structure containing the key `m.server` like so:
```json
{
"m.server": "<synapse.server.name>[:<yourport>]"
}
```
In our example, this would mean that URL `https://example.com/.well-known/matrix/server`
should return:
```json
{
"m.server": "synapse.example.com:443"
}
```
Note, specifying a port is optional. If no port is specified, then it defaults
to 8448.
With .well-known delegation, federating servers will check for a valid TLS
certificate for the delegated hostname (in our example: `synapse.example.com`).
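You can check what federating servers will see with a plain HTTPS request,
for example:

```bash
# Fetch the delegation response that other homeservers will look up.
curl https://example.com/.well-known/matrix/server
# Expected output in our example:
# {"m.server": "synapse.example.com:443"}
```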
## SRV DNS record delegation
It is also possible to do delegation using an SRV DNS record. However, that is
considered an advanced topic since it's a bit complex to set up, and `.well-known`
delegation is already enough in most cases.
If you really need it, you can find documentation on what such a
record should look like and how Synapse will use it in [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).
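If you do set one up, you can verify the record with `dig`:

```bash
# Look up the federation SRV record for example.com.
dig -t srv _matrix._tcp.example.com
```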
## Delegation FAQ
### When do I need delegation?
If your homeserver's APIs are accessible on the default federation port (8448)
and the domain your `server_name` points to, you do not need any delegation.
For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host, giving it a `server_name`
of `example.com`, and once a reverse proxy has been set up to proxy all requests
sent to the port `8448` and serve TLS certificates for `example.com`, you
wouldn't need any delegation set up.
**However**, if your homeserver's APIs aren't accessible on port 8448 and on the
domain `server_name` points to, you will need to let other servers know how to
find it using delegation.
### Do you still recommend against using a reverse proxy on the federation port?
We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
This is no longer necessary. If you are using a reverse proxy for all of your
TLS traffic, then you can set `no_tls: True` in the Synapse config.
In that case, the only reason Synapse needs the certificate is to populate a legacy
`tls_fingerprints` field in the federation API. This is ignored by Synapse 0.99.0
and later, and the only time pre-0.99 Synapses will check it is when attempting to
fetch the server keys - and generally this is delegated via `matrix.org`, which
is running a modern version of Synapse.
### Do I need the same certificate for the client and federation port?
No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy.
View File
@@ -1,64 +0,0 @@
# How to test CAS as a developer without a server
The [django-mama-cas](https://github.com/jbittel/django-mama-cas) project is an
easy-to-run CAS implementation built on top of Django.
## Prerequisites
1. Create a new virtualenv: `python3 -m venv <your virtualenv>`
2. Activate your virtualenv: `source /path/to/your/virtualenv/bin/activate`
3. Install Django and django-mama-cas:
```
python -m pip install "django<3" "django-mama-cas==2.4.0"
```
4. Create a Django project in the current directory:
```
django-admin startproject cas_test .
```
5. Follow the [install directions](https://django-mama-cas.readthedocs.io/en/latest/installation.html#configuring) for django-mama-cas
6. Set up the SQLite database: `python manage.py migrate`
7. Create a user:
```
python manage.py createsuperuser
```
1. Use whatever you want as the username and password.
2. Leave the other fields blank.
8. Use the built-in Django test server to serve the CAS endpoints on port 8000:
```
python manage.py runserver
```
You should now have a Django project configured to serve CAS authentication with
a single user created.
## Configure Synapse (and Riot) to use CAS
1. Modify your `homeserver.yaml` to enable CAS and point it to your locally
running Django test server:
```yaml
cas_config:
enabled: true
server_url: "http://localhost:8000"
service_url: "http://localhost:8081"
#displayname_attribute: name
#required_attributes:
# name: value
```
2. Restart Synapse.
Note that the above configuration assumes the homeserver is running on port 8081
and that the CAS server is on port 8000, both on localhost.
## Testing the configuration
Then in Riot:
1. Visit the login page with a Riot pointing at your homeserver.
2. Click the Single Sign-On button.
3. Login using the credentials created with `createsuperuser`.
4. You should be logged in.
If you want to repeat this process you'll need to manually logout first:
1. http://localhost:8000/admin/
2. Click "logout" in the top right.
View File
@@ -1,148 +0,0 @@
Some notes on how we use git
============================
On keeping the commit history clean
-----------------------------------
In an ideal world, our git commit history would be a linear progression of
commits each of which contains a single change building on what came
before. Here, by way of an arbitrary example, is the top of `git log --graph
b2dba0607`:
<img src="git/clean.png" alt="clean git graph" width="500px">
Note how the commit comment explains clearly what is changing and why. Also
note the *absence* of merge commits, as well as the absence of commits called
things like (to pick a few culprits):
[“pep8”](https://github.com/matrix-org/synapse/commit/84691da6c), [“fix broken
test”](https://github.com/matrix-org/synapse/commit/474810d9d),
[“oops”](https://github.com/matrix-org/synapse/commit/c9d72e457),
[“typo”](https://github.com/matrix-org/synapse/commit/836358823), or [“Who's
the president?”](https://github.com/matrix-org/synapse/commit/707374d5d).
There are a number of reasons why keeping a clean commit history is a good
thing:
* From time to time, after a change lands, it turns out to be necessary to
revert it, or to backport it to a release branch. Those operations are
*much* easier when the change is contained in a single commit.
* Similarly, it's much easier to answer questions like “is the fix for
`/publicRooms` on the release branch?” if that change consists of a single
commit.
* Likewise: “what has changed on this branch in the last week?” is much
clearer without merges and “pep8” commits everywhere.
* Sometimes we need to figure out where a bug got introduced, or some
behaviour changed. One way of doing that is with `git bisect`: pick an
arbitrary commit between the known good point and the known bad point, and
see how the code behaves. However, that strategy fails if the commit you
chose is the middle of someone's epic branch in which they broke the world
before putting it back together again.
One counterargument is that it is sometimes useful to see how a PR evolved as
it went through review cycles. This is true, but that information is always
available via the GitHub UI (or via the little-known [refs/pull
namespace](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/checking-out-pull-requests-locally)).
Of course, in reality, things are more complicated than that. We have release
branches as well as `develop` and `master`, and we deliberately merge changes
between them. Bugs often slip through and have to be fixed later. That's all
fine: this is not a cast-iron rule which must be obeyed, but an ideal to aim
towards.
Merges, squashes, rebases: wtf?
-------------------------------
Ok, so that's what we'd like to achieve. How do we achieve it?
The TL;DR is: when you come to merge a pull request, you *probably* want to
“squash and merge”:
![squash and merge](git/squash.png).
(This applies whether you are merging your own PR, or that of another
contributor.)
“Squash and merge”<sup id="a1">[1](#f1)</sup> takes all of the changes in the
PR, and bundles them into a single commit. GitHub gives you the opportunity to
edit the commit message before you confirm, and normally you should do so,
because the default will be useless (again: `* woops typo` is not a useful
thing to keep in the historical record).
The main problem with this approach comes when you have a series of pull
requests which build on top of one another: as soon as you squash-merge the
first PR, you'll end up with a stack of conflicts to resolve in all of the
others. In general, it's best to avoid this situation in the first place by
trying not to have multiple related PRs in flight at the same time. Still,
sometimes that's not possible and doing a regular merge is the lesser evil.
Another occasion in which a regular merge makes more sense is a PR where you've
deliberately created a series of commits each of which makes sense in its own
right. For example: [a PR which gradually propagates a refactoring operation
through the codebase](https://github.com/matrix-org/synapse/pull/6837), or [a
PR which is the culmination of several other
PRs](https://github.com/matrix-org/synapse/pull/5987). In this case the ability
to figure out when a particular change/bug was introduced could be very useful.
Ultimately: **this is not a hard-and-fast-rule**. If in doubt, ask yourself “do
each of the commits I am about to merge make sense in their own right”, but
remember that we're just doing our best to balance “keeping the commit history
clean” with other factors.
Git branching model
-------------------
A [lot](https://nvie.com/posts/a-successful-git-branching-model/)
[of](http://scottchacon.com/2011/08/31/github-flow.html)
[words](https://www.endoflineblog.com/gitflow-considered-harmful) have been
written in the past about git branching models (no really, [a
lot](https://martinfowler.com/articles/branching-patterns.html)). I tend to
think the whole thing is overblown. Fundamentally, it's not that
complicated. Here's how we do it.
Let's start with a picture:
![branching model](git/branches.jpg)
It looks complicated, but it's really not. There's one basic rule: *anyone* is
free to merge from *any* more-stable branch to *any* less-stable branch at
*any* time<sup id="a2">[2](#f2)</sup>. (The principle behind this is that if a
change is good enough for the more-stable branch, then it's also good enough to
put in a less-stable branch.)
Meanwhile, merging (or squashing, as per the above) from a less-stable to a
more-stable branch is a deliberate action in which you want to publish a change
or a set of changes to (some subset of) the world: for example, this happens
when a PR is landed, or as part of our release process.
So, what counts as a more- or less-stable branch? A little reflection will show
that our active branches are ordered thus, from more-stable to less-stable:
* `master` (tracks our last release).
* `release-vX.Y.Z` (the branch where we prepare the next release)<sup
id="a3">[3](#f3)</sup>.
* PR branches which are targeting the release.
* `develop` (our "mainline" branch containing our bleeding-edge).
* regular PR branches.
The corollary is: if you have a bugfix that needs to land in both
`release-vX.Y.Z` *and* `develop`, then you should base your PR on
`release-vX.Y.Z`, get it merged there, and then merge from `release-vX.Y.Z` to
`develop`. (If a fix lands in `develop` and we later need it in a
release-branch, we can of course cherry-pick it, but landing it in the release
branch first helps reduce the chance of annoying conflicts.)
---
<b id="f1">[1]</b>: “Squash and merge” is GitHub's term for this
operation. Given that there is no merge involved, I'm not convinced it's the
most intuitive name. [^](#a1)
<b id="f2">[2]</b>: Well, anyone with commit access.[^](#a2)
<b id="f3">[3]</b>: Very, very occasionally (I think this has happened once in
the history of Synapse), we've had two releases in flight at once. Obviously,
`release-v1.2.3` is more-stable than `release-v1.3.0`. [^](#a3)

Binary file not shown (before: 70 KiB)
Binary file not shown (before: 108 KiB)
Binary file not shown (before: 29 KiB)
View File
metadata:
local: ["samling.xml"]
```
5. Ensure that your `homeserver.yaml` has a setting for `public_baseurl`:
```yaml
public_baseurl: http://localhost:8080/
```
6. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure
5. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure
the dependencies are installed and ready to go.
7. Restart Synapse.
6. Restart Synapse.
Then in Riot:
View File
@@ -1,41 +1,163 @@
Setting up federation
Setting up Federation
=====================
Federation is the process by which users on different servers can participate
in the same room. For this to work, those other servers must be able to contact
yours to send messages.
The `server_name` configured in the Synapse configuration file (often
`homeserver.yaml`) defines how resources (users, rooms, etc.) will be
identified (eg: `@user:example.com`, `#room:example.com`). By default,
it is also the domain that other servers will use to try to reach your
server (via port 8448). This is easy to set up and will work provided
you set the `server_name` to match your machine's public DNS hostname.
For this default configuration to work, you will need to listen for TLS
connections on port 8448. The preferred way to do that is by using a
reverse proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions
on how to correctly set one up.
In some cases you might not want to run Synapse on the machine that has
the `server_name` as its public DNS hostname, or you might want federation
traffic to use a different port than 8448. For example, you might want to
have your user names look like `@user:example.com`, but you want to run
Synapse on `synapse.example.com` on port 443. This can be done using
delegation, which allows an admin to control where federation traffic should
be sent. See [delegate.md](delegate.md) for instructions on how to set this up.
The ``server_name`` configured in the Synapse configuration file (often
``homeserver.yaml``) defines how resources (users, rooms, etc.) will be
identified (eg: ``@user:example.com``, ``#room:example.com``). By
default, it is also the domain that other servers will use to
try to reach your server (via port 8448). This is easy to set
up and will work provided you set the ``server_name`` to match your
machine's public DNS hostname, and provide Synapse with a TLS certificate
which is valid for your ``server_name``.
Once federation has been configured, you should be able to join a room over
federation. A good place to start is `#synapse:matrix.org` - a room for
federation. A good place to start is ``#synapse:matrix.org`` - a room for
Synapse admins.
## Delegation
For a more flexible configuration, you can have ``server_name``
resources (eg: ``@user:example.com``) served by a different host and
port (eg: ``synapse.example.com:443``). There are two ways to do this:
- adding a ``/.well-known/matrix/server`` URL served on ``https://example.com``.
- adding a DNS ``SRV`` record in the DNS zone of domain
``example.com``.
Without configuring delegation, Matrix federation will
expect to find your server via ``example.com:8448``. The following methods
allow you retain a `server_name` of `example.com` so that your user IDs, room
aliases, etc continue to look like `*:example.com`, whilst having your
federation traffic routed to a different server.
### .well-known delegation
To use this method, you need to be able to alter the
``server_name`` 's https server to serve the ``/.well-known/matrix/server``
URL. Having an active server (with a valid TLS certificate) serving your
``server_name`` domain is out of the scope of this documentation.
The URL ``https://<server_name>/.well-known/matrix/server`` should
return a JSON structure containing the key ``m.server`` like so:
{
"m.server": "<synapse.server.name>[:<yourport>]"
}
In our example, this would mean that URL ``https://example.com/.well-known/matrix/server``
should return:
{
"m.server": "synapse.example.com:443"
}
Note, specifying a port is optional. If a port is not specified an SRV lookup
is performed, as described below. If the target of the
delegation does not have an SRV record, then the port defaults to 8448.
Most installations will not need to configure .well-known. However, it can be
useful in cases where the admin is hosting on behalf of someone else and
therefore cannot gain access to the necessary certificate. With .well-known,
federation servers will check for a valid TLS certificate for the delegated
hostname (in our example: ``synapse.example.com``).
### DNS SRV delegation
To use this delegation method, you need to have write access to your
``server_name`` 's domain zone DNS records (in our example it would be
``example.com`` DNS zone).
This method requires the target server to provide a
valid TLS certificate for the original ``server_name``.
You need to add a SRV record in your ``server_name`` 's DNS zone with
this format:
_matrix._tcp.<yourdomain.com> <ttl> IN SRV <priority> <weight> <port> <synapse.server.name>
In our example, we would need to add this SRV record in the
``example.com`` DNS zone:
_matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
Once done and set up, you can check the DNS record with ``dig -t srv
_matrix._tcp.<server_name>``. In our example, we would expect this:
$ dig -t srv _matrix._tcp.example.com
_matrix._tcp.example.com. 3600 IN SRV 10 0 443 synapse.example.com.
Note that the target of an SRV record cannot be an alias (CNAME record): it has to point
directly to the server hosting the Synapse instance.
### Delegation FAQ
#### When do I need a SRV record or .well-known URI?
If your homeserver listens on the default federation port (8448), and your
`server_name` points to the host that your homeserver runs on, you do not need an SRV
record or `.well-known/matrix/server` URI.
For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host,
giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled,
it would automatically generate a valid TLS certificate for you via Let's Encrypt
and no SRV record or .well-known URI would be needed.
**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it. The way to do this is via .well-known or an
SRV record.
#### I have created a .well-known URI. Do I also need an SRV record?
No. You can use either `.well-known` delegation or use an SRV record for delegation. You
do not need to use both to delegate to the same location.
#### Can I manage my own certificates rather than having Synapse renew certificates itself?
Yes, you are welcome to manage your certificates yourself. Synapse will only
attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS certificate present for
federation endpoints.
#### Do you still recommend against using a reverse proxy on the federation port?
We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.
#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
Practically speaking, this is no longer necessary.
If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True` in the Synapse config. In that case, the only reason Synapse
needs the certificate is to populate a legacy `tls_fingerprints` field in the
federation API. This is ignored by Synapse 0.99.0 and later, and the only time
pre-0.99 Synapses will check it is when attempting to fetch the server keys -
and generally this is delegated via `matrix.org`, which will be running a modern
version of Synapse.
#### Do I need the same certificate for the client and federation port?
No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.
## Troubleshooting
You can use the [federation tester](https://matrix.org/federationtester)
to check if your homeserver is configured correctly. Alternatively try the
[JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
Note that you'll have to modify this URL to replace `DOMAIN` with your
`server_name`. Hitting the API directly provides extra detail.
You can use the [federation tester](
<https://matrix.org/federationtester>) to check if your homeserver is
configured correctly. Alternatively try the [JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
Note that you'll have to modify this URL to replace ``DOMAIN`` with your
``server_name``. Hitting the API directly provides extra detail.
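For example, you can query the federation tester's JSON API directly from the
command line:

```bash
# Replace example.com with your own server_name.
curl "https://matrix.org/federationtester/api/report?server_name=example.com"
```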
The typical failure mode for federation is that when the server tries to join
a room, it is rejected with "401: Unauthorized". Generally this means that other
@@ -47,8 +169,8 @@ you invite them to. This can be caused by an incorrectly-configured reverse
proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions on how to correctly
configure a reverse proxy.
## Running a demo federation of Synapses
## Running a Demo Federation of Synapses
If you want to get up and running quickly with a trio of homeservers in a
private federation, there is a script in the `demo` directory. This is mainly
private federation, there is a script in the ``demo`` directory. This is mainly
useful just for development purposes. See [demo/README](<../demo/README>).
View File
@@ -1,97 +0,0 @@
# JWT Login Type
Synapse comes with a non-standard login type to support
[JSON Web Tokens](https://en.wikipedia.org/wiki/JSON_Web_Token). In general the
documentation for
[the login endpoint](https://matrix.org/docs/spec/client_server/r0.6.1#login)
is still valid (and the mechanism works similarly to the
[token based login](https://matrix.org/docs/spec/client_server/r0.6.1#token-based)).
To log in using a JSON Web Token, clients should submit a `/login` request as
follows:
```json
{
"type": "org.matrix.login.jwt",
"token": "<jwt>"
}
```
Note that the login type of `m.login.jwt` is supported, but is deprecated. This
will be removed in a future version of Synapse.
The `token` field should include the JSON web token with the following claims:
* The `sub` (subject) claim is required and should encode the local part of the
user ID.
* The expiration time (`exp`), not before time (`nbf`), and issued at (`iat`)
claims are optional, but validated if present.
* The issuer (`iss`) claim is optional, but required and validated if configured.
* The audience (`aud`) claim is optional, but required and validated if configured.
Providing the audience claim when not configured will cause validation to fail.
In the case that the token is not valid, the homeserver must respond with
`403 Forbidden` and an error code of `M_FORBIDDEN`.
As with other login types, there are additional fields (e.g. `device_id` and
`initial_device_display_name`) which can be included in the above request.
## Preparing Synapse
The JSON Web Token integration in Synapse uses the
[`PyJWT`](https://pypi.org/project/pyjwt/) library, which must be installed
as follows:
* The relevant libraries are included in the Docker images and Debian packages
provided by `matrix.org` so no further action is needed.
* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
install synapse[pyjwt]` to install the necessary dependencies.
* For other installation mechanisms, see the documentation provided by the
maintainer.
To enable the JSON web token integration, you should then add an `jwt_config` section
to your configuration file (or uncomment the `enabled: true` line in the
existing section). See [sample_config.yaml](./sample_config.yaml) for some
sample settings.
## How to test JWT as a developer
Although JSON Web Tokens are typically generated from an external server, the
examples below use [PyJWT](https://pyjwt.readthedocs.io/en/latest/) directly.
1. Configure Synapse with JWT logins, note that this example uses a pre-shared
secret and an algorithm of HS256:
```yaml
jwt_config:
enabled: true
secret: "my-secret-token"
algorithm: "HS256"
```
2. Generate a JSON web token:
```bash
$ pyjwt --key=my-secret-token --alg=HS256 encode sub=test-user
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0LXVzZXIifQ.Ag71GT8v01UO3w80aqRPTeuVPBIBZkYhNTJJ-_-zQIc
```
3. Query for the login types and ensure `org.matrix.login.jwt` is there:
```bash
curl http://localhost:8080/_matrix/client/r0/login
```
4. Log in using the generated JSON web token from above:
```bash
$ curl http://localhost:8082/_matrix/client/r0/login -X POST \
--data '{"type":"org.matrix.login.jwt","token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0LXVzZXIifQ.Ag71GT8v01UO3w80aqRPTeuVPBIBZkYhNTJJ-_-zQIc"}'
{
"access_token": "<access token>",
"device_id": "ACBDEFGHI",
"home_server": "localhost:8080",
"user_id": "@test-user:localhost:8480"
}
```
You should now be able to use the returned access token to query the client API.

View File

@@ -29,13 +29,14 @@ from synapse.logging import context # omitted from future snippets
def handle_request(request_id):
request_context = context.LoggingContext()
calling_context = context.set_current_context(request_context)
calling_context = context.LoggingContext.current_context()
context.LoggingContext.set_current_context(request_context)
try:
request_context.request = request_id
do_request_handling()
logger.debug("finished")
finally:
context.set_current_context(calling_context)
context.LoggingContext.set_current_context(calling_context)
def do_request_handling():
logger.debug("phew") # this will be logged against request_id
View File
@@ -42,10 +42,6 @@ purged according to its room's policy, then the receiving server will
process and store that event until it's picked up by the next purge job,
though it will always hide it from clients.
Synapse requires at least one message in each room, so it will never
delete the last message in a room. It will, however, hide it from
clients.
## Server configuration
View File
@@ -27,7 +27,7 @@
different thread to Synapse. This can make it more resilient to
heavy load meaning metrics cannot be retrieved, and can be exposed
to just internal networks easier. The served metrics are available
over HTTP only, and will be available at `/_synapse/metrics`.
over HTTP only, and will be available at `/`.
Add a new listener to homeserver.yaml:
@@ -60,31 +60,6 @@
1. Restart Prometheus.
## Monitoring workers
To monitor a Synapse installation using
[workers](https://github.com/matrix-org/synapse/blob/master/docs/workers.md),
every worker needs to be monitored independently, in addition to
the main homeserver process. This is because workers don't send
their metrics to the main homeserver process, but expose them
directly (if they are configured to do so).
To allow collecting metrics from a worker, you need to add a
`metrics` listener to its configuration, by adding the following
under `worker_listeners`:
```yaml
- type: metrics
bind_address: ''
port: 9101
```
The `bind_address` and `port` parameters should be set so that
the resulting listener can be reached by Prometheus, and they
don't clash with an existing worker.
With this example, the worker's metrics would then be available
on `http://127.0.0.1:9101`.
## Renaming of metrics & deprecation of old names in 1.2
Synapse 1.2 updates the Prometheus metrics to match the naming
View File
@@ -1,250 +0,0 @@
# Configuring Synapse to authenticate against an OpenID Connect provider
Synapse can be configured to use an OpenID Connect Provider (OP) for
authentication, instead of its own local password database.
Any OP should work with Synapse, as long as it supports the authorization code
flow. There are a few options for that:
- start a local OP. Synapse has been tested with [Hydra][hydra] and
[Dex][dex-idp]. Note that for an OP to work, it should be served under a
secure (HTTPS) origin. A certificate signed with a self-signed, locally
trusted CA should work. In that case, start Synapse with a `SSL_CERT_FILE`
environment variable set to the path of the CA.
- set up a SaaS OP, like [Google][google-idp], [Auth0][auth0] or
[Okta][okta]. Synapse has been tested with Auth0 and Google.
It may also be possible to use other OAuth2 providers which provide the
[authorization code grant type](https://tools.ietf.org/html/rfc6749#section-4.1),
such as [Github][github-idp].
[google-idp]: https://developers.google.com/identity/protocols/oauth2/openid-connect
[auth0]: https://auth0.com/
[okta]: https://www.okta.com/
[dex-idp]: https://github.com/dexidp/dex
[keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols
[hydra]: https://www.ory.sh/docs/hydra/
[github-idp]: https://developer.github.com/apps/building-oauth-apps/authorizing-oauth-apps
## Preparing Synapse
The OpenID integration in Synapse uses the
[`authlib`](https://pypi.org/project/Authlib/) library, which must be installed
as follows:
* The relevant libraries are included in the Docker images and Debian packages
provided by `matrix.org` so no further action is needed.
* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
install synapse[oidc]` to install the necessary dependencies.
* For other installation mechanisms, see the documentation provided by the
maintainer.
To enable the OpenID integration, you should then add an `oidc_config` section
to your configuration file (or uncomment the `enabled: true` line in the
existing section). See [sample_config.yaml](./sample_config.yaml) for some
sample settings, as well as the text below for example configurations for
specific providers.
## Sample configs
Here are a few configs for providers that should work with Synapse.
### [Dex][dex-idp]
[Dex][dex-idp] is a simple, open-source, certified OpenID Connect Provider.
Although it is designed to help build a full-blown provider with an
external database, it can be configured with static passwords in a config file.
Follow the [Getting Started
guide](https://github.com/dexidp/dex/blob/master/Documentation/getting-started.md)
to install Dex.
Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:
```yaml
staticClients:
- id: synapse
secret: secret
redirectURIs:
- '[synapse public baseurl]/_synapse/oidc/callback'
name: 'Synapse'
```
Run with `dex serve examples/config-dev.yaml`.
Synapse config:
```yaml
oidc_config:
enabled: true
skip_verification: true # This is needed as Dex is served on an insecure endpoint
issuer: "http://127.0.0.1:5556/dex"
client_id: "synapse"
client_secret: "secret"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.name }}"
display_name_template: "{{ user.name|capitalize }}"
```
### [Keycloak][keycloak-idp]
[Keycloak][keycloak-idp] is an open-source IdP maintained by Red Hat.
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
2. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Client Protocol | `openid-connect` |
3. Click `Save`
4. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Enabled | `On` |
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/oidc/callback` |
5. Click `Save`
6. On the Credentials tab, update the fields:
| Field | Value |
|-------|-------|
| Client Authenticator | `Client ID and Secret` |
7. Click `Regenerate Secret`
8. Copy Secret
```yaml
oidc_config:
enabled: true
issuer: "https://127.0.0.1:8443/auth/realms/{realm_name}"
client_id: "synapse"
client_secret: "copy secret generated from above"
scopes: ["openid", "profile"]
```
### [Auth0][auth0]
1. Create a regular web application for Synapse
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/oidc/callback`
3. Add a rule to add the `preferred_username` claim.
<details>
<summary>Code sample</summary>
```js
function addPersistenceAttribute(user, context, callback) {
user.user_metadata = user.user_metadata || {};
user.user_metadata.preferred_username = user.user_metadata.preferred_username || user.user_id;
context.idToken.preferred_username = user.user_metadata.preferred_username;
auth0.users.updateUserMetadata(user.user_id, user.user_metadata)
.then(function(){
callback(null, user, context);
})
.catch(function(err){
callback(err);
});
}
```
</details>
Synapse config:
```yaml
oidc_config:
enabled: true
issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### GitHub
GitHub is a bit special as it is not an OpenID Connect compliant provider, but
just a regular OAuth2 provider.
The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
1. Create a new OAuth application: https://github.com/settings/applications/new.
2. Set the callback URL to `[synapse public baseurl]/_synapse/oidc/callback`.
Synapse config:
```yaml
oidc_config:
enabled: true
discover: false
issuer: "https://github.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
authorization_endpoint: "https://github.com/login/oauth/authorize"
token_endpoint: "https://github.com/login/oauth/access_token"
userinfo_endpoint: "https://api.github.com/user"
scopes: ["read:user"]
user_mapping_provider:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.name }}"
```
### [Google][google-idp]
1. Set up a project in the Google API Console (see
https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup).
2. Add an "OAuth Client ID" for a Web Application under "Credentials".
3. Copy the Client ID and Client Secret, and add the following to your synapse config:
```yaml
oidc_config:
enabled: true
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/oidc/callback`.
### Twitch
1. Set up a developer account on [Twitch](https://dev.twitch.tv/)
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/oidc/callback`
Synapse config:
```yaml
oidc_config:
enabled: true
issuer: "https://id.twitch.tv/oauth2/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
user_mapping_provider:
config:
localpart_template: '{{ user.preferred_username }}'
display_name_template: '{{ user.name }}'
```
View File
@@ -9,113 +9,108 @@ into Synapse, and provides a number of methods by which it can integrate
with the authentication system.
This document serves as a reference for those looking to implement their
own password auth providers. Additionally, here is a list of known
password auth provider module implementations:
* [matrix-synapse-ldap3](https://github.com/matrix-org/matrix-synapse-ldap3/)
* [matrix-synapse-shared-secret-auth](https://github.com/devture/matrix-synapse-shared-secret-auth)
own password auth providers.
## Required methods
Password auth provider classes must provide the following methods:
* `parse_config(config)`
This method is passed the `config` object for this module from the
homeserver configuration file.
*class* `SomeProvider.parse_config`(*config*)
It should perform any appropriate sanity checks on the provided
configuration, and return an object which is then passed into
> This method is passed the `config` object for this module from the
> homeserver configuration file.
>
> It should perform any appropriate sanity checks on the provided
> configuration, and return an object which is then passed into
> `__init__`.
This method should have the `@staticmethod` decorator.
*class* `SomeProvider`(*config*, *account_handler*)
* `__init__(self, config, account_handler)`
The constructor is passed the config object returned by
`parse_config`, and a `synapse.module_api.ModuleApi` object which
allows the password provider to check if accounts exist and/or create
new ones.
> The constructor is passed the config object returned by
> `parse_config`, and a `synapse.module_api.ModuleApi` object which
> allows the password provider to check if accounts exist and/or create
> new ones.
## Optional methods
Password auth provider classes may optionally provide the following methods:
Password auth provider classes may optionally provide the following
methods.
* `get_db_schema_files(self)`
*class* `SomeProvider.get_db_schema_files`()
This method, if implemented, should return an Iterable of
`(name, stream)` pairs of database schema files. Each file is applied
in turn at initialisation, and a record is then made in the database
so that it is not re-applied on the next start.
> This method, if implemented, should return an Iterable of
> `(name, stream)` pairs of database schema files. Each file is applied
> in turn at initialisation, and a record is then made in the database
> so that it is not re-applied on the next start.
* `get_supported_login_types(self)`
`someprovider.get_supported_login_types`()
This method, if implemented, should return a `dict` mapping from a
login type identifier (such as `m.login.password`) to an iterable
giving the fields which must be provided by the user in the submission
to [the `/login` API](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login).
These fields are passed in the `login_dict` dictionary to `check_auth`.
> This method, if implemented, should return a `dict` mapping from a
> login type identifier (such as `m.login.password`) to an iterable
> giving the fields which must be provided by the user in the submission
> to the `/login` api. These fields are passed in the `login_dict`
> dictionary to `check_auth`.
>
> For example, if a password auth provider wants to implement a custom
> login type of `com.example.custom_login`, where the client is expected
> to pass the fields `secret1` and `secret2`, the provider should
> implement this method and return the following dict:
>
> {"com.example.custom_login": ("secret1", "secret2")}
For example, if a password auth provider wants to implement a custom
login type of `com.example.custom_login`, where the client is expected
to pass the fields `secret1` and `secret2`, the provider should
implement this method and return the following dict:
`someprovider.check_auth`(*username*, *login_type*, *login_dict*)
```python
{"com.example.custom_login": ("secret1", "secret2")}
```
> This method is the one that does the real work. If implemented, it
> will be called for each login attempt where the login type matches one
> of the keys returned by `get_supported_login_types`.
>
> It is passed the (possibly unqualified) `user` provided by the client,
> the login type, and a dictionary of login secrets passed by the
> client.
>
> The method should return a Twisted `Deferred` object, which resolves
> to the canonical `@localpart:domain` user id if authentication is
> successful, and `None` if not.
>
> Alternatively, the `Deferred` can resolve to a `(str, func)` tuple, in
> which case the second field is a callback which will be called with
> the result from the `/login` call (including `access_token`,
> `device_id`, etc.)
* `check_auth(self, username, login_type, login_dict)`
`someprovider.check_3pid_auth`(*medium*, *address*, *password*)
This method does the real work. If implemented, it
will be called for each login attempt where the login type matches one
of the keys returned by `get_supported_login_types`.
> This method, if implemented, is called when a user attempts to
> register or log in with a third party identifier, such as email. It is
> passed the medium (ex. "email"), an address (ex.
> "<jdoe@example.com>") and the user's password.
>
> The method should return a Twisted `Deferred` object, which resolves
> to a `str` containing the user's (canonical) User ID if
> authentication was successful, and `None` if not.
>
> As with `check_auth`, the `Deferred` may alternatively resolve to a
> `(user_id, callback)` tuple.
It is passed the (possibly unqualified) `user` field provided by the client,
the login type, and a dictionary of login secrets passed by the
client.
`someprovider.check_password`(*user_id*, *password*)
The method should return an `Awaitable` object, which resolves
to the canonical `@localpart:domain` user ID if authentication is
successful, and `None` if not.
> This method provides a simpler interface than
> `get_supported_login_types` and `check_auth` for password auth
> providers that just want to provide a mechanism for validating
> `m.login.password` logins.
>
> Iif implemented, it will be called to check logins with an
> `m.login.password` login type. It is passed a qualified
> `@localpart:domain` user id, and the password provided by the user.
>
> The method should return a Twisted `Deferred` object, which resolves
> to `True` if authentication is successful, and `False` if not.
Alternatively, the `Awaitable` can resolve to a `(str, func)` tuple, in
which case the second field is a callback which will be called with
the result from the `/login` call (including `access_token`,
`device_id`, etc.)
`someprovider.on_logged_out`(*user_id*, *device_id*, *access_token*)
* `check_3pid_auth(self, medium, address, password)`
This method, if implemented, is called when a user attempts to
register or log in with a third party identifier, such as email. It is
passed the medium (ex. "email"), an address (ex.
"<jdoe@example.com>") and the user's password.
The method should return an `Awaitable` object, which resolves
to a `str` containing the user's (canonical) User id if
authentication was successful, and `None` if not.
As with `check_auth`, the `Awaitable` may alternatively resolve to a
`(user_id, callback)` tuple.
* `check_password(self, user_id, password)`
This method provides a simpler interface than
`get_supported_login_types` and `check_auth` for password auth
providers that just want to provide a mechanism for validating
`m.login.password` logins.
If implemented, it will be called to check logins with an
`m.login.password` login type. It is passed a qualified
`@localpart:domain` user id, and the password provided by the user.
The method should return an `Awaitable` object, which resolves
to `True` if authentication is successful, and `False` if not.
* `on_logged_out(self, user_id, device_id, access_token)`
This method, if implemented, is called when a user logs out. It is
passed the qualified user ID, the ID of the deactivated device (if
any: access tokens are occasionally created without an associated
device ID), and the (now deactivated) access token.
It may return an `Awaitable` object; the logout request will
wait for the `Awaitable` to complete, but the result is ignored.
> This method, if implemented, is called when a user logs out. It is
> passed the qualified user ID, the ID of the deactivated device (if
> any: access tokens are occasionally created without an associated
> device ID), and the (now deactivated) access token.
>
> It may return a Twisted `Deferred` object; the logout request will
> wait for the deferred to complete but the result is ignored.
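Pulling these methods together, here is a minimal provider sketch. It is
illustrative only: the class name and the hard-coded secrets are invented, and
the `(config, account_handler)` constructor arguments are assumed from the
provider interface described earlier; the Twisted `Deferred`s returned here are
awaitable, so either return style above is satisfied.

```python
from twisted.internet import defer


class ExamplePasswordProvider:
    """Illustrative sketch, not a drop-in module."""

    def __init__(self, config, account_handler):
        self.config = config
        self.account_handler = account_handler

    def get_supported_login_types(self):
        return {"com.example.custom_login": ("secret1", "secret2")}

    def check_auth(self, username, login_type, login_dict):
        # Compare the submitted secrets against fixed values; a real
        # provider would consult an external service here.
        if login_dict.get("secret1") == "s1" and login_dict.get("secret2") == "s2":
            return defer.succeed("@%s:example.com" % username)
        return defer.succeed(None)

    def on_logged_out(self, user_id, device_id, access_token):
        pass
```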

View File

@@ -32,7 +32,7 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate
su - postgres
# Or, if your system uses sudo to get administrative rights
sudo -u postgres bash
Then, create a user `synapse_user` with:

    createuser --pwprompt synapse_user
@@ -61,50 +61,7 @@ Note that the PostgreSQL database *must* have the correct encoding set
You may need to enable password authentication so `synapse_user` can
connect to the database. See
<https://www.postgresql.org/docs/current/auth-pg-hba-conf.html>.
If you get an error along the lines of `FATAL: Ident authentication failed for
user "synapse_user"`, you may need to use an authentication method other than
`ident`:
* If the `synapse_user` user has a password, add the password to the `database:`
section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:
```
host synapse synapse_user ::1/128 md5 # or `scram-sha-256` instead of `md5` if you use that
```
* If the `synapse_user` user does not have a password, then a password doesn't
have to be added to `homeserver.yaml`. But the following does need to be added
to `pg_hba.conf`:
```
host synapse synapse_user ::1/128 trust
```
Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
new line, it is inserted before:
```
host all all ::1/128 ident
```
### Fixing incorrect `COLLATE` or `CTYPE`
Synapse will refuse to set up a new database if it has the wrong values of
`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
different locales can cause issues if the locale library is updated from
underneath the database, or if a different version of the locale is used on any
replicas.
The safest way to fix the issue is to take a dump and recreate the database with
the correct `COLLATE` and `CTYPE` parameters (as shown above). It is also possible to change the
parameters on a live database and run a `REINDEX` on the entire database,
however extreme care must be taken to avoid database corruption.
Note that the above may fail with an error about duplicate rows if corruption
has already occurred, and such duplicate rows will need to be manually removed.
## Tuning Postgres
@@ -130,41 +87,19 @@ of free memory the database host has available.
When you are ready to start using PostgreSQL, edit the `database`
section in your config file to match the following lines:
```yaml
database:
  name: psycopg2
  args:
    user: <user>
    password: <pass>
    database: <db>
    host: <host>
    cp_min: 5
    cp_max: 10
```
All keys and values in `args` are passed to the `psycopg2.connect(..)`
function, except keys beginning with `cp_`, which are consumed by the
twisted adbapi connection pool. See the [libpq
documentation](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
for a list of options which can be passed.
You should consider tuning the `args.keepalives_*` options if there is any danger of
the connection between your homeserver and database dropping, otherwise Synapse
may block for an extended period while it waits for a response from the
database server. Example values might be:
```yaml
# seconds of inactivity after which TCP should send a keepalive message to the server
keepalives_idle: 10
# the number of seconds after which a TCP keepalive message that is not
# acknowledged by the server should be retransmitted
keepalives_interval: 10
# the number of TCP keepalives that can be lost before the client's connection
# to the server is considered dead
keepalives_count: 3
```
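As a rough illustration of that split (assumed behaviour for the sake of the
example, not Synapse's actual code), keys beginning `cp_` would be peeled off
for the connection pool and everything else handed to `psycopg2.connect()`:

```python
import psycopg2

args = {
    "user": "synapse_user",
    "password": "secretpassword",
    "database": "synapse",
    "host": "localhost",
    "cp_min": 5,
    "cp_max": 10,
    "keepalives_idle": 10,  # passed straight through to libpq
}

# cp_* keys configure the twisted adbapi pool; everything else is a
# libpq/psycopg2 connection parameter.
pool_opts = {k: v for k, v in args.items() if k.startswith("cp_")}
db_opts = {k: v for k, v in args.items() if not k.startswith("cp_")}

conn = psycopg2.connect(**db_opts)
```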
## Porting from SQLite
@@ -188,9 +123,6 @@ to do step 2.
It is safe to at any time kill the port script and restart it.
Note that the database may take up significantly more (25% - 100% more)
space on disk after porting to Postgres.
### Using the port script
Firstly, shut down the currently running synapse server and copy its

View File

@@ -3,13 +3,13 @@
It is recommended to put a reverse proxy such as
[nginx](https://nginx.org/en/docs/http/ngx_http_proxy_module.html),
[Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy) or
[HAProxy](https://www.haproxy.org/) in front of Synapse. One advantage
of doing so is that it means that you can expose the default https port
(443) to Matrix clients without needing to run Synapse with root
privileges.
**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
the requested URI in any way (for example, by decoding `%xx` escapes).
Beware that Apache *will* canonicalise URIs unless you specify
`nocanon`.
@@ -18,117 +18,99 @@ When setting up a reverse proxy, remember that Matrix clients and other
Matrix servers do not necessarily need to connect to your server via the
same server name or port. Indeed, clients will use port 443 by default,
whereas servers default to port 8448. Where these are different, we
refer to the 'client port' and the 'federation port'. See [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
for more details of the algorithm used for federation connections, and
[delegate.md](<delegate.md>) for instructions on setting up delegation.
Let's assume that we expect clients to connect to our server at
`https://matrix.example.com`, and other servers to connect at
`https://example.com:8448`. The following sections detail the configuration of
the reverse proxy and the homeserver.
## Reverse-proxy configuration examples

**NOTE**: You only need one of these.
### nginx
```
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    # For the federation port
    listen 8448 ssl default_server;
    listen [::]:8448 ssl default_server;

    server_name matrix.example.com;

    location /_matrix {
        proxy_pass http://localhost:8008;
        proxy_set_header X-Forwarded-For $remote_addr;

        # Nginx by default only allows file uploads up to 1M in size
        # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
        client_max_body_size 10M;
    }
}
```
**NOTE**: Do not add a path after the port in `proxy_pass`, otherwise nginx will
canonicalise/normalise the URI.
### Caddy 1

```
matrix.example.com {
  proxy /_matrix http://localhost:8008 {
    transparent
  }
}

example.com:8448 {
  proxy / http://localhost:8008 {
    transparent
  }
}
```
### Caddy 2
```
matrix.example.com {
reverse_proxy /_matrix/* http://localhost:8008
}
example.com:8448 {
reverse_proxy http://localhost:8008
}
```
### Apache
```
<VirtualHost *:443>
    SSLEngine on
    ServerName matrix.example.com;

    AllowEncodedSlashes NoDecode
    ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
    ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
</VirtualHost>

<VirtualHost *:8448>
    SSLEngine on
    ServerName example.com;

    AllowEncodedSlashes NoDecode
    ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
    ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
</VirtualHost>
```

**NOTE**: ensure the `nocanon` options are included.
### HAProxy
```
frontend https
  bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1

  # Matrix client traffic
  acl matrix-host hdr(host) -i matrix.example.com
  acl matrix-path path_beg /_matrix

  use_backend matrix if matrix-host matrix-path

frontend matrix-federation
  bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
  default_backend matrix

backend matrix
  server matrix 127.0.0.1:8008
```
## Homeserver Configuration
@@ -139,10 +121,3 @@ client IP addresses are recorded correctly.
Having done so, you can then use `https://matrix.example.com` (instead
of `https://matrix.example.com:8448`) as the "Custom server" when
connecting to Synapse from a client.
## Health check endpoint
Synapse exposes a health check endpoint for use by reverse proxies.
Each configured HTTP listener has a `/health` endpoint which always returns
200 OK (and doesn't get logged).
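For example, a minimal probe (assuming a listener on the default port 8008):

```python
import urllib.request

# Expect HTTP 200 from any configured listener's /health endpoint.
with urllib.request.urlopen("http://localhost:8008/health") as resp:
    print(resp.status)
```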

View File

@@ -0,0 +1,77 @@
# SAML Mapping Providers
A SAML mapping provider is a Python class (loaded via a Python module) that
works out how to map attributes of a SAML response object to Matrix-specific
user attributes. Details such as user ID localpart, displayname, and even avatar
URLs are all things that can be mapped from talking to an SSO service.
As an example, a SSO service may return the email address
"john.smith@example.com" for a user, whereas Synapse will need to figure out how
to turn that into a displayname when creating a Matrix user for this individual.
It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
variations. As each Synapse configuration may want something different, this is
where SAML mapping providers come into play.
## Enabling Providers
External mapping providers are provided to Synapse in the form of an external
Python module. Retrieve this module from [PyPi](https://pypi.org) or elsewhere,
then tell Synapse where to look for the handler class by editing the
`saml2_config.user_mapping_provider.module` config option.
`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
comment these options out and use those specified by the module instead.
## Building a Custom Mapping Provider
A custom mapping provider must specify the following methods (a minimal
skeleton implementing them is sketched after this list):
* `__init__(self, parsed_config)`
- Arguments:
- `parsed_config` - A configuration object that is the return value of the
`parse_config` method. You should set any configuration options needed by
the module here.
* `saml_response_to_user_attributes(self, saml_response, failures)`
- Arguments:
- `saml_response` - A `saml2.response.AuthnResponse` object to extract user
information from.
- `failures` - An `int` that represents the number of times the returned
mxid localpart mapping has failed. This should be used
to create a deduplicated mxid localpart which should be
returned instead. For example, if this method returns
`john.doe` as the value of `mxid_localpart` in the returned
dict, and that is already taken on the homeserver, this
method will be called again with the same parameters but
with failures=1. The method should then return a different
`mxid_localpart` value, such as `john.doe1`.
- This method must return a dictionary, which will then be used by Synapse
to build a new user. The following keys are allowed:
* `mxid_localpart` - Required. The mxid localpart of the new user.
* `displayname` - The displayname of the new user. If not provided, will default to
the value of `mxid_localpart`.
* `parse_config(config)`
- This method should have the `@staticmethod` decoration.
- Arguments:
- `config` - A `dict` representing the parsed content of the
`saml2_config.user_mapping_provider.config` homeserver config option.
Runs on homeserver startup. Providers should extract any option values
they need here.
- Whatever is returned will be passed back to the user mapping provider module's
`__init__` method during construction.
* `get_saml_attributes(config)`
- This method should have the `@staticmethod` decoration.
- Arguments:
- `config` - An object resulting from a call to `parse_config`.
- Returns a tuple of two sets. The first set equates to the SAML auth
response attributes that are required for the module to function, whereas
the second set consists of those attributes which can be used if available,
but are not necessary.
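A minimal skeleton implementing these methods might look as follows. It is a
sketch, not a drop-in module: the class name is invented, and reading
attributes via pysaml2's `ava` dictionary is an assumption about the response
object.

```python
class MySamlMappingProvider:
    def __init__(self, parsed_config):
        self.config = parsed_config

    @staticmethod
    def parse_config(config):
        # No options needed for this sketch; validate and return as-is.
        return config

    @staticmethod
    def get_saml_attributes(config):
        # Require "uid"; use "displayName" if the IdP supplies it.
        return {"uid"}, {"displayName"}

    def saml_response_to_user_attributes(self, saml_response, failures):
        # `ava` is pysaml2's attribute-value dict (an assumption here).
        localpart = saml_response.ava["uid"][0].lower()
        if failures:
            localpart += str(failures)  # deduplicate on collision
        displayname = saml_response.ava.get("displayName", [None])[0]
        return {"mxid_localpart": localpart, "displayname": displayname}
```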
## Synapse's Default Provider
Synapse has a built-in SAML mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).

File diff suppressed because it is too large

View File

@@ -11,33 +11,24 @@ formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
  context:
    (): synapse.logging.context.LoggingContextFilter
    request: ""

handlers:
  file:
    class: logging.handlers.TimedRotatingFileHandler
    formatter: precise
    filename: /var/log/matrix-synapse/homeserver.log
    when: midnight
    backupCount: 3  # Does not include the current log file.
    filters: [context]
    encoding: utf8

  # Default to buffering writes to log file for efficiency. This means that
  # there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
  # logs will still be flushed immediately.
  buffer:
    class: logging.handlers.MemoryHandler
    target: file
    # The capacity is the number of log lines that are buffered before
    # being written to disk. Increasing this will lead to better
    # performance, at the expense of it taking longer for log lines to
    # be written to disk.
    capacity: 10
    flushLevel: 30  # Flush for WARNING logs as well

  # A handler that writes logs to stderr. Unused by default, but can be used
  # instead of "buffer" and "file" in the logger handlers.
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]
loggers:
synapse.storage.SQL:
@@ -45,23 +36,8 @@ loggers:
    # information such as access tokens.
    level: INFO

  twisted:
    # We send the twisted logging directly to the file handler,
    # to work around https://github.com/matrix-org/synapse/issues/3471
    # when using "buffer" logger. Use "console" to log to stderr instead.
    handlers: [file]
    propagate: false

root:
  level: INFO

  # Write logs to the `buffer` handler, which will buffer them together in memory,
  # then write them to a file.
  #
  # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
  # also need to update the configuration for the `twisted` logger above, in
  # this case.)
  #
  handlers: [buffer]

disable_existing_loggers: false

View File

@@ -1,93 +0,0 @@
# Handling spam in Synapse
Synapse has support to customize spam checking behavior. It can plug into a
variety of events and affect how they are presented to users on your homeserver.
The spam checking behavior is implemented as a Python class, which must be
able to be imported by the running Synapse.
## Python spam checker class
The Python class is instantiated with two objects:
* Any configuration (see below).
* An instance of `synapse.spam_checker_api.SpamCheckerApi`.
It then implements methods which return a boolean to alter behavior in Synapse.
There's a generic method for checking every event (`check_event_for_spam`), as
well as some specific methods:
* `user_may_invite`
* `user_may_create_room`
* `user_may_create_room_alias`
* `user_may_publish_room`
The details of each of these methods (as well as their inputs and outputs)
are documented in the `synapse.events.spamcheck.SpamChecker` class.
The `SpamCheckerApi` class provides a way for the custom spam checker class to
call back into the homeserver internals. It currently implements the following
methods:
* `get_state_events_in_room`
### Example
```python
class ExampleSpamChecker:
    def __init__(self, config, api):
        self.config = config
        self.api = api

    def check_event_for_spam(self, foo):
        return False  # allow all events

    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
        return True  # allow all invites

    def user_may_create_room(self, userid):
        return True  # allow all room creations

    def user_may_create_room_alias(self, userid, room_alias):
        return True  # allow all room aliases

    def user_may_publish_room(self, userid, room_id):
        return True  # allow publishing of all rooms

    def check_username_for_spam(self, user_profile):
        return False  # allow all usernames
```
## Configuration
Modify the `spam_checker` section of your `homeserver.yaml` in the following
manner:
Create a list entry with the keys `module` and `config`.
* `module` should point to the fully qualified Python class that implements your
custom logic, e.g. `my_module.ExampleSpamChecker`.
* `config` is a dictionary that gets passed to the spam checker class.
### Example
This section might look like:
```yaml
spam_checker:
  - module: my_module.ExampleSpamChecker
    config:
      # Enable or disable a specific option in ExampleSpamChecker.
      my_custom_option: true
```
More spam checkers can be added in tandem by appending more items to the list. An
action is blocked when at least one of the configured spam checkers flags it.
## Examples
The [Mjolnir](https://github.com/matrix-org/mjolnir) project is a full fledged
example using the Synapse spam checking API, including a bot for dynamic
configuration.

View File

@@ -1,148 +0,0 @@
# SSO Mapping Providers
A mapping provider is a Python class (loaded via a Python module) that
works out how to map attributes of a SSO response to Matrix-specific
user attributes. Details such as user ID localpart, displayname, and even avatar
URLs are all things that can be mapped from talking to a SSO service.
As an example, a SSO service may return the email address
"john.smith@example.com" for a user, whereas Synapse will need to figure out how
to turn that into a displayname when creating a Matrix user for this individual.
It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
variations. As each Synapse configuration may want something different, this is
where SSO mapping providers come into play.
SSO mapping providers are currently supported for OpenID and SAML SSO
configurations. Please see the details below for how to implement your own.
External mapping providers are provided to Synapse in the form of an external
Python module. You can retrieve this module from [PyPi](https://pypi.org) or elsewhere,
but it must be importable via Synapse (e.g. it must be in the same virtualenv
as Synapse). The Synapse config is then modified to point to the mapping provider
(and optionally provide additional configuration for it).
## OpenID Mapping Providers
The OpenID mapping provider can be customized by editing the
`oidc_config.user_mapping_provider.module` config option.
`oidc_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
comment these options out and use those specified by the module instead.
### Building a Custom OpenID Mapping Provider
A custom mapping provider must specify the following methods (a minimal
skeleton follows the list):
* `__init__(self, parsed_config)`
- Arguments:
- `parsed_config` - A configuration object that is the return value of the
`parse_config` method. You should set any configuration options needed by
the module here.
* `parse_config(config)`
- This method should have the `@staticmethod` decoration.
- Arguments:
- `config` - A `dict` representing the parsed content of the
`oidc_config.user_mapping_provider.config` homeserver config option.
Runs on homeserver startup. Providers should extract and validate
any option values they need here.
- Whatever is returned will be passed back to the user mapping provider module's
`__init__` method during construction.
* `get_remote_user_id(self, userinfo)`
- Arguments:
- `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
information from.
- This method must return a string, which is the unique identifier for the
user. Commonly the `sub` claim of the response.
* `map_user_attributes(self, userinfo, token)`
- This method should be async.
- Arguments:
- `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
information from.
- `token` - A dictionary which includes information necessary to make
further requests to the OpenID provider.
- Returns a dictionary with two keys:
- localpart: A required string, used to generate the Matrix ID.
- displayname: An optional string, the display name for the user.
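As a sketch under the same caveats (invented class name; `preferred_username`
and `name` are standard OIDC claims assumed to be present in the response):

```python
class MyOidcMappingProvider:
    def __init__(self, parsed_config):
        self.config = parsed_config

    @staticmethod
    def parse_config(config):
        return config

    def get_remote_user_id(self, userinfo):
        # UserInfo behaves like a dict of claims; "sub" is the stable ID.
        return userinfo["sub"]

    async def map_user_attributes(self, userinfo, token):
        return {
            "localpart": userinfo["preferred_username"],
            "displayname": userinfo.get("name"),
        }
```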
### Default OpenID Mapping Provider
Synapse has a built-in OpenID mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`](../synapse/handlers/oidc_handler.py).
## SAML Mapping Providers
The SAML mapping provider can be customized by editing the
`saml2_config.user_mapping_provider.module` config option.
`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
comment these options out and use those specified by the module instead.
### Building a Custom SAML Mapping Provider
A custom mapping provider must specify the following methods:
* `__init__(self, parsed_config)`
- Arguments:
- `parsed_config` - A configuration object that is the return value of the
`parse_config` method. You should set any configuration options needed by
the module here.
* `parse_config(config)`
- This method should have the `@staticmethod` decoration.
- Arguments:
- `config` - A `dict` representing the parsed content of the
`saml_config.user_mapping_provider.config` homeserver config option.
Runs on homeserver startup. Providers should extract and validate
any option values they need here.
- Whatever is returned will be passed back to the user mapping provider module's
`__init__` method during construction.
* `get_saml_attributes(config)`
- This method should have the `@staticmethod` decoration.
- Arguments:
- `config` - An object resulting from a call to `parse_config`.
- Returns a tuple of two sets. The first set equates to the SAML auth
response attributes that are required for the module to function, whereas
the second set consists of those attributes which can be used if available,
but are not necessary.
* `get_remote_user_id(self, saml_response, client_redirect_url)`
- Arguments:
- `saml_response` - A `saml2.response.AuthnResponse` object to extract user
information from.
- `client_redirect_url` - A string, the URL that the client will be
redirected to.
- This method must return a string, which is the unique identifier for the
user. Commonly the `uid` claim of the response.
* `saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)`
- Arguments:
- `saml_response` - A `saml2.response.AuthnResponse` object to extract user
information from.
- `failures` - An `int` that represents the number of times the returned
mxid localpart mapping has failed. This should be used
to create a deduplicated mxid localpart which should be
returned instead. For example, if this method returns
`john.doe` as the value of `mxid_localpart` in the returned
dict, and that is already taken on the homeserver, this
method will be called again with the same parameters but
with failures=1. The method should then return a different
`mxid_localpart` value, such as `john.doe1`.
- `client_redirect_url` - A string, the URL that the client will be
redirected to.
- This method must return a dictionary, which will then be used by Synapse
to build a new user. The following keys are allowed:
* `mxid_localpart` - Required. The mxid localpart of the new user.
* `displayname` - The displayname of the new user. If not provided, will default to
the value of `mxid_localpart`.
* `emails` - A list of emails for the new user. If not provided, will
default to an empty list.
### Default SAML Mapping Provider
Synapse has a built-in SAML mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).

View File

@@ -1,32 +0,0 @@
### Using synctl with workers
If you want to use `synctl` to manage your synapse processes, you will need to
create an additional configuration file for the main synapse process. That
configuration should look like this:
```yaml
worker_app: synapse.app.homeserver
```
Additionally, each worker app must be configured with the name of a "pid file",
to which it will write its process ID when it starts. For example, for a
synchrotron, you might write:
```yaml
worker_pid_file: /home/matrix/synapse/worker1.pid
```
Finally, to actually run your worker-based synapse, you must pass synctl the `-a`
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.:
    synctl -a $CONFIG/workers start
Currently one should always restart all workers when restarting or upgrading
synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications.
To manipulate a specific worker, you pass the -w option to synctl:
    synctl -w $CONFIG/workers/worker1.yaml restart

View File

@@ -1,67 +0,0 @@
# Setting up Synapse with Workers and Systemd
This is a setup for managing synapse with systemd, including support for
managing workers. It provides a `matrix-synapse` service for the master, as
well as a `matrix-synapse-worker@` service template for any workers you
require. Additionally, to group the required services, it sets up a
`matrix-synapse.target`.
See the folder [system](system) for the systemd unit files.
The folder [workers](workers) contains an example configuration for the
`federation_reader` worker.
## Synapse configuration files
See [workers.md](../workers.md) for information on how to set up the
configuration files and reverse-proxy correctly. You can find an example worker
config in the [workers](workers) folder.
Systemd manages daemonization itself, so ensure that none of the configuration
files set either `daemonize` or `worker_daemonize`.
The config files of all workers are expected to be located in
`/etc/matrix-synapse/workers`. If you want to use a different location, edit
the provided `*.service` files accordingly.
There is no need for a separate configuration file for the master process.
## Set up
1. Adjust synapse configuration files as above.
1. Copy the `*.service` and `*.target` files in [system](system) to
`/etc/systemd/system`.
1. Run `systemctl daemon-reload` to tell systemd to load the new unit files.
1. Run `systemctl enable matrix-synapse.service`. This will configure the
synapse master process to be started as part of the `matrix-synapse.target`
target.
1. For each worker process to be enabled, run `systemctl enable
matrix-synapse-worker@<worker_name>.service`. For each `<worker_name>`, there
should be a corresponding configuration file
`/etc/matrix-synapse/workers/<worker_name>.yaml`.
1. Start all the synapse processes with `systemctl start matrix-synapse.target`.
1. Tell systemd to start synapse on boot with `systemctl enable matrix-synapse.target`.
## Usage
Once the services are correctly set up, you can use the following commands
to manage your synapse installation:
```sh
# Restart Synapse master and all workers
systemctl restart matrix-synapse.target
# Stop Synapse and all workers
systemctl stop matrix-synapse.target
# Restart the master alone
systemctl start matrix-synapse.service
# Restart a specific worker (eg. federation_reader); the master is
# unaffected by this.
systemctl restart matrix-synapse-worker@federation_reader.service
# Add a new worker (assuming all configs are set up already)
systemctl enable matrix-synapse-worker@federation_writer.service
systemctl restart matrix-synapse.target
```

View File

@@ -1,20 +0,0 @@
[Unit]
Description=Synapse %i
AssertPathExists=/etc/matrix-synapse/workers/%i.yaml
# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target
[Service]
Type=notify
NotifyAccess=main
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.generic_worker --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse-%i
[Install]
WantedBy=matrix-synapse.target

View File

@@ -1,6 +0,0 @@
[Unit]
Description=Synapse parent target
After=network.target
[Install]
WantedBy=multi-user.target

View File

@@ -14,18 +14,16 @@ example flow would be (where '>' indicates master to worker and
'<' worker to master flows):
    > SERVER example.com
    < REPLICATE
    > POSITION events master 53
    > RDATA events master 54 ["$foo1:bar.com", ...]
    > RDATA events master 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its
identity with the `SERVER` command, followed by the client requesting to
replicate and the server responding with the position of all streams. The
server then periodically sends `RDATA` commands which have the format
`RDATA <stream_name> <instance_name> <token> <row>`, where the format of
`<row>` is defined by the individual streams. The `<instance_name>` is the
name of the Synapse process that generated the data (usually "master").
Error reporting happens by either the client or server sending an ERROR
command, and usually the connection will be closed.
Since the protocol is a simple line-based one, it's possible to manually
connect to the server using a tool like netcat. A few things should be
noted when manually using the protocol:
- The federation stream is only available if federation sending has
been disabled on the main process.
- The server will only time connections out that have sent a `PING`
@@ -54,7 +55,7 @@ The basic structure of the protocol is line based, where the initial
word of each line specifies the command. The rest of the line is parsed
based on the command. For example, the RDATA command is defined as:
    RDATA <stream_name> <instance_name> <token> <row_json>
(Note that <row_json> may contain spaces, but cannot contain
newlines.)
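For illustration, a line with this format could be parsed as follows (a
sketch, not Synapse's actual parser):

```python
import json

def parse_rdata(line):
    # Split into at most 5 fields; row_json may itself contain spaces.
    cmd, stream_name, instance_name, token, row_json = line.split(" ", 4)
    assert cmd == "RDATA"
    return stream_name, instance_name, token, json.loads(row_json)

print(parse_rdata('RDATA events master 54 ["$foo1:bar.com", null]'))
```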
@@ -90,7 +91,9 @@ The client:
- Sends a `NAME` command, allowing the server to associate a human
friendly name with the connection. This is optional.
- Sends a `PING` as above
- Sends a `REPLICATE` to get the current position of all streams
  (a toy client sketch follows this list).
- On receipt of a `SERVER` command, checks that the server name
matches the expected server name.
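A toy client performing this handshake against a replication listener
(illustrative only; assumes a plain-text replication listener on port 9092, as
configured in your `listeners` section):

```python
import socket

sock = socket.create_connection(("localhost", 9092))
conn = sock.makefile("rw", newline="\n")
conn.write("NAME toy-client\n")
conn.write("PING 1490197665618\n")
conn.write("REPLICATE\n")
conn.flush()
print(conn.readline().strip())  # expect: SERVER <server name>
```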
@@ -137,12 +140,14 @@ the wire:
    > PING 1490197665618
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE
    > POSITION events master 1
    > POSITION backfill master 1
    > POSITION caches master 1
    > RDATA caches master 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events master 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
@@ -153,10 +158,10 @@ position without needing to send data with the `RDATA` command.
An example of a batched set of `RDATA` is:
    > RDATA caches master batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches master batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches master batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches master 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]
In this case the client shouldn't advance their caches token until it
sees the last `RDATA`.
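A sketch of that rule on the client side (hypothetical handler names, not
Synapse's actual code):

```python
pending_rows = []
stream_position = None

def handle_row(row):
    print("invalidate:", row)  # stand-in for real cache invalidation

def on_caches_rdata(token, row):
    # Rows tagged "batch" share the token of the next numeric RDATA;
    # only advance the stream position once that final row arrives.
    global stream_position
    pending_rows.append(row)
    if token == "batch":
        return
    for r in pending_rows:
        handle_row(r)
    pending_rows.clear()
    stream_position = int(token)
```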
@@ -176,14 +181,9 @@ client (C):
#### POSITION (S)
On receipt of a POSITION command clients should check if they have missed any
updates, and if so then fetch them out of band. Sent in response to a
REPLICATE command (but can happen at any time).
The POSITION command includes the source of the stream. Currently all streams
are written by a single process (usually "master"). If fetching missing
updates via HTTP API, rather than via the DB, then processes should make the
request to the appropriate process.
#### ERROR (S, C)
@@ -199,17 +199,24 @@ client (C):
#### REPLICATE (C)
Asks the server for the current position of all streams.
#### USER_SYNC (C)
A user has started or stopped syncing on this process.
#### CLEAR_USER_SYNC (C)
The server should clear all associated user sync data from the worker.
This is used when a worker is shutting down.
#### FEDERATION_ACK (C)
@@ -219,9 +226,13 @@ Asks the server for the current position of all streams.
Inform the server a pusher should be removed
#### REMOTE_SERVER_UP (S, C)

Inform other processes that a remote server may have come back online.
#### SYNC (S, C)
Used exclusively in tests
See `synapse/replication/tcp/commands.py` for a detailed description and
the format of each command.
@@ -237,12 +248,7 @@ Each individual cache invalidation results in a row being sent down
replication, which includes the cache name (the name of the function)
and the key to invalidate. For example:
    > RDATA caches master 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]

Alternatively, an entire cache can be invalidated by sending down a `null`
instead of the key. For example:

    > RDATA caches master 550953772 ["get_user_by_id", null, 1550574873252]
However, there are times when a number of caches need to be invalidated
at the same time with the same key. To reduce traffic we batch those

View File

@@ -11,14 +11,7 @@ TURN server.
The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API) and integrate it with synapse.
## Requirements
For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.
Hosting TURN behind a NAT (even with appropriate port forwarding) is known to cause issues
and to often not work.
## `coturn` setup
### Initial installation
@@ -26,13 +19,7 @@ The TURN daemon `coturn` is available from a variety of sources such as native p
#### Debian installation
Just install the debian package:
```sh
apt install coturn
```
This will install and start a systemd service called `coturn`.
#### Source installation
@@ -69,52 +56,38 @@ This will install and start a systemd service called `coturn`.
1. Consider your security settings. TURN lets users request a relay which will
connect to arbitrary IP addresses and ports. The following configuration is
suggested as a minimum starting point:
        # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
        no-tcp-relay

        # don't let the relay ever try to connect to private IP address ranges within your network (if any)
        # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
        denied-peer-ip=10.0.0.0-10.255.255.255
        denied-peer-ip=192.168.0.0-192.168.255.255
        denied-peer-ip=172.16.0.0-172.31.255.255

        # special case the turn server itself so that client->TURN->TURN->client flows work
        allowed-peer-ip=10.0.0.1

        # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
        user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
        total-quota=1200
1. Also consider supporting TLS/DTLS. To do this, add the following settings
to `turnserver.conf`:
        # TLS certificates, including intermediate certs.
        # For Let's Encrypt certificates, use `fullchain.pem` here.
        cert=/path/to/fullchain.pem

        # TLS private key file
        pkey=/path/to/privkey.pem
Ideally coturn should refuse to relay traffic which isn't SRTP; see
<https://github.com/matrix-org/synapse/issues/2009>
1. Ensure your firewall allows traffic into the TURN server on the ports
you've configured it to listen on (By default: 3478 and 5349 for the TURN(s)
traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
for the UDP relay.)
1. (Re)start the turn server:
* If you used the Debian package (or have set up a systemd unit yourself):
```sh
systemctl restart coturn
```
* If you installed from source:
```sh
bin/turnserver -o
```
## Synapse setup
Your home server configuration file needs the following extra keys:
@@ -140,20 +113,13 @@ Your home server configuration file needs the following extra keys:
As an example, here is the relevant section of the config file for matrix.org:
    turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
    turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
    turn_user_lifetime: 86400000
    turn_allow_guests: True
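For reference, this is roughly how TURN REST API credentials are derived from
the shared secret (per the convention coturn implements; the user ID and
expiry handling below are illustrative, not Synapse's exact code):

```python
import base64
import hashlib
import hmac
import time

secret = b"n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
# Credentials valid until this timestamp (seconds); 86400000 ms above.
expiry = int(time.time()) + 86400
username = "%d:@alice:example.com" % expiry
password = base64.b64encode(
    hmac.new(secret, username.encode("ascii"), hashlib.sha1).digest()
).decode("ascii")
print(username, password)
```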
After updating the homeserver configuration, you must restart synapse:
* If you use synctl:
```sh
cd /where/you/run/synapse
./synctl restart
```
* If you use systemd:
```
systemctl restart synapse.service
```
...and your homeserver now supports VoIP relaying!

View File

who are present in a publicly viewable room on the server.
The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
solution to fix it is to execute the SQL [here](../synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql)
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.

View File

@@ -1,183 +1,163 @@
# Scaling synapse via workers
For small instances it is recommended to run Synapse in the default monolith
mode. For larger instances where performance is a concern it can be helpful to
split out functionality into multiple separate python processes. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.

Synapse's worker support is under active development and subject to change as
we attempt to rapidly scale ever larger Synapse instances. However we are
documenting it here to help admins needing a highly scalable Synapse instance
similar to the one running `matrix.org`.

All processes continue to share the same database instance, and as such,
workers only work with PostgreSQL-based Synapse deployments. SQLite should only
be used for demo purposes and any admin considering workers should already be
running PostgreSQL.
## Main process/worker communication
The processes communicate with each other via a Synapse-specific protocol called
'replication' (analogous to MySQL- or Postgres-style database replication) which
feeds streams of newly written data between processes so they can be kept in
sync with the database state.
When configured to do so, Synapse uses a
[Redis pub/sub channel](https://redis.io/topics/pubsub) to send the replication
stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.
Redis support was added in v1.13.0 with it becoming the recommended method in
v1.18.0. It replaced the old direct TCP connections (which is deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
cpu saving on the main process and will be a prerequisite for upcoming
performance improvements.
See the [Architectural diagram](#architectural-diagram) section at the end for
a visualisation of what this looks like.
## Setting up workers
A Redis server is required to manage the communication between the processes.
The Redis server should be installed following the normal procedure for your
distribution (e.g. `apt install redis-server` on Debian). It is safe to use an
existing Redis deployment if you have one.
Once installed, check that Redis is running and accessible from the host running
Synapse, for example by executing `echo PING | nc -q1 localhost 6379` and seeing
a response of `+PONG`.
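The same check from Python, assuming the `redis` client package is installed:

```python
import redis

# Raises if Redis is unreachable; returns True on a healthy server.
assert redis.Redis(host="localhost", port=6379).ping()
```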
The appropriate dependencies must also be installed for Synapse. If using a
virtualenv, these can be installed with:
```sh
pip install matrix-synapse[redis]
```
Note that these dependencies are included when synapse is installed with `pip
install matrix-synapse[all]`. They are also included in the debian packages from
`matrix.org` and in the docker images at
https://hub.docker.com/r/matrixdotorg/synapse/.
## Configuration
To make effective use of the workers, you will need to configure an HTTP
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
the correct worker, or to the main synapse instance. See
[reverse_proxy.md](reverse_proxy.md) for information on setting up a reverse
proxy.
When using workers, each worker process has its own configuration file which
contains settings specific to that worker, such as the HTTP listener that it
provides (if any), logging configuration, etc.
Normally, the worker processes are configured to read from a shared
configuration file as well as the worker-specific configuration files. This
makes it easier to keep common configuration settings synchronised across all
the processes.
The main process is somewhat special in this respect: it does not normally
need its own configuration file and can take all of its configuration from the
shared configuration file.
### Shared configuration
Normally, only a couple of changes are needed to make an existing configuration
file suitable for use with workers. First, you need to enable an "HTTP replication
listener" for the main process; and secondly, you need to enable redis-based
replication. For example:
```yaml
# extend the existing `listeners` section. This defines the ports that the
# main process will listen on.
listeners:
  # The HTTP replication port
  - port: 9093
    bind_address: '127.0.0.1'
    type: http
    resources:
      - names: [replication]

redis:
  enabled: true
```
See the sample config for the full documentation of each option.
Under **no circumstances** should the replication listener be exposed to the
public internet; it has no authentication and is unencrypted.
### Worker configuration
In the config file for each worker, you must specify the type of worker
application (`worker_app`), and you should specify a unique name for the worker
(`worker_name`). The currently available worker applications are listed below.
You must also specify the HTTP replication endpoint that it should talk to on
the main synapse process. `worker_replication_host` should specify the host of
the main synapse and `worker_replication_http_port` should point to the HTTP
replication port. If the worker will handle HTTP requests then the
`worker_listeners` option should be set with an `http` listener, in the same way
as the `listeners` option in the shared config.
For example:
```yaml
worker_app: synapse.app.generic_worker
worker_name: worker1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8083
    resources:
      - names:
          - client
          - federation

worker_log_config: /home/matrix/synapse/config/worker1_log_config.yaml
```
...is a full configuration for a generic worker instance, which will expose a
plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.
`/sync`, which are listed below.
Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).
### Running Synapse with workers
Finally, you need to start your worker processes. This can be done with either
`synctl` or your distribution's preferred service manager such as `systemd`. We
recommend the use of `systemd` where available: for information on setting up
`systemd` to start synapse workers, see
[systemd-with-workers](systemd-with-workers). To use `synctl`, see
[synctl_workers.md](synctl_workers.md).
## Available worker applications
### `synapse.app.generic_worker`
### `synapse.app.pusher`
This worker can handle API requests matching the following regular
expressions:
Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set `start_pushers: False` in the
shared configuration file to stop the main synapse sending these notifications.
Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.synchrotron`
The synchrotron handles `sync` requests from clients. In particular, it can
handle REST endpoints matching the following regular expressions:
# Sync requests
^/_matrix/client/(v2_alpha|r0)/sync$
^/_matrix/client/(api/v1|v2_alpha|r0)/events$
^/_matrix/client/(api/v1|r0)/initialSync$
^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$
# Federation requests
^/_matrix/federation/v1/event/
^/_matrix/federation/v1/state/
^/_matrix/federation/v1/state_ids/
^/_matrix/federation/v1/make_join/
^/_matrix/federation/v1/make_leave/
^/_matrix/federation/v1/send_join/
^/_matrix/federation/v2/send_join/
^/_matrix/federation/v1/send_leave/
^/_matrix/federation/v2/send_leave/
^/_matrix/federation/v1/invite/
^/_matrix/federation/v2/invite/
^/_matrix/federation/v1/query_auth/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/user/devices/
^/_matrix/federation/v1/get_groups_publicised$
^/_matrix/key/v2/query
# Inbound federation transaction request
^/_matrix/federation/v1/send/
# Client API requests
^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
^/_matrix/client/(api/v1|r0|unstable)/keys/query$
^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
^/_matrix/client/versions$
^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
# Registration/login requests
^/_matrix/client/(api/v1|r0|unstable)/login$
^/_matrix/client/(r0|unstable)/register$
^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$
# Event sending requests
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
^/_matrix/client/(api/v1|r0|unstable)/join/
^/_matrix/client/(api/v1|r0|unstable)/profile/
Additionally, the following REST endpoints can be handled for GET requests:
^/_matrix/federation/v1/groups/
Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to
ensure that the purge history admin API is not used while pagination requests
for the room are in flight:
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$
Note that an HTTP listener with `client` and `federation` resources must be
configured in the `worker_listeners` option in the worker config.
#### Load balancing
It is possible to run multiple instances of this worker app, with incoming requests
being load-balanced between them by the reverse-proxy. However, different endpoints
have different characteristics and so admins
may wish to run multiple groups of workers handling different endpoints so that
load balancing can be done in different ways.
For `/sync` and `/initialSync` requests it will be more efficient if all
requests from a particular user are routed to a single instance. Extracting a
user ID from the access token or `Authorization` header is currently left as an
exercise for the reader. Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
`/initialSync`): requests without a `since` parameter are "initial syncs",
which happen when a user logs in on a new device and can be *very* resource
intensive, so isolating these requests will stop them from interfering with
other users' ongoing syncs.
Federation and client requests can be balanced via simple round robin.
The inbound federation transaction request `^/_matrix/federation/v1/send/`
should be balanced by source IP so that transactions from the same remote server
go to the same process.
Registration/login requests can be handled separately purely to help ensure that
unexpected load doesn't affect new logins and sign ups.
Finally, event sending requests can be balanced by the room ID in the URI (or
the full URI, or even just round robin); the room ID is the path component after
`/rooms/`. If there is a large bridge connected that is sending or may send lots
of events, then a dedicated set of workers can be provisioned to limit the
effects of bursts of events from that bridge on events sent by normal users.
#### Stream writers
Additionally, there is *experimental* support for moving writing of specific
streams (such as events) off of the main process to a particular worker. (This
is only supported with Redis-based replication.)
Currently supported streams are `events` and `typing`.
To enable this, the worker must have an HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. For example,
to move event persistence off to a dedicated worker, the shared configuration
would include:
```yaml
instance_map:
event_persister1:
host: localhost
port: 8034
stream_writers:
events: event_persister1
```
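
The `event_persister1` worker then needs its own configuration file with a
matching replication listener. The following is a minimal sketch only; the
file paths are illustrative, and the port must match the `instance_map` entry
above:

```yaml
worker_app: synapse.app.generic_worker
worker_name: event_persister1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

# The replication listener that other processes use to talk to this worker;
# the port must match the `instance_map` entry in the shared config.
worker_listeners:
 - type: http
   port: 8034
   resources:
     - names:
       - replication

worker_log_config: /home/matrix/synapse/config/event_persister1_log_config.yaml
```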
### `synapse.app.pusher`
Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set `start_pushers: False` in the
shared configuration file to stop the main synapse sending push notifications.
Note this worker cannot be load-balanced: only one instance should be active.
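
For reference, a pusher worker's own config file can be very small. The
following is a sketch only, with illustrative file paths:

```yaml
worker_app: synapse.app.pusher

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

# No `worker_listeners` are needed, as this worker serves no REST endpoints.
worker_log_config: /home/matrix/synapse/config/pusher_log_config.yaml
```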
### `synapse.app.appservice`
Handles sending output traffic to Application Services. Doesn't handle any
REST endpoints itself, but you should set `notify_appservices: False` in the
shared configuration file to stop the main synapse sending appservice notifications.
Note this worker cannot be load-balanced: only one instance should be active.
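
An appservice worker's config file looks much the same; again a sketch with
illustrative paths (remember the `notify_appservices: False` shared-config
change described above):

```yaml
worker_app: synapse.app.appservice

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

# No `worker_listeners` are needed, as this worker serves no REST endpoints.
worker_log_config: /home/matrix/synapse/config/appservice_log_config.yaml
```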
### `synapse.app.federation_sender`
Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set `send_federation: False` in the
shared configuration file to stop the main synapse sending this traffic.
If running multiple federation senders, you must list each
instance in the `federation_sender_instances` option by its `worker_name`.
All instances must be stopped and started when adding or removing instances.
For example:
```yaml
federation_sender_instances:
- federation_sender1
- federation_sender2
```
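
Each sender's own config file is then minimal; a sketch for the first
instance, with illustrative file paths:

```yaml
worker_app: synapse.app.federation_sender
worker_name: federation_sender1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

# No `worker_listeners` are needed, as this worker serves no REST endpoints.
worker_log_config: /home/matrix/synapse/config/federation_sender1_log_config.yaml
```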
### `synapse.app.media_repository`
Handles the media repository. It can handle all endpoints starting with:

/_matrix/media/

... and the following regular expressions matching media-specific administration APIs:
^/_synapse/admin/v1/purge_media_cache$
^/_synapse/admin/v1/room/.*/media.*$
^/_synapse/admin/v1/user/.*/media.*$
^/_synapse/admin/v1/media/.*$
^/_synapse/admin/v1/quarantine_media/.*$
You should also set `enable_media_repo: False` in the shared configuration
file, and configure the worker's HTTP listener to expose the `media` resource.
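For example, the listener block might look like the following sketch, where
the port number is illustrative:

```yaml
worker_listeners:
 - type: http
   port: 8085
   resources:
     - names:
       - media
```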
Note that if you run multiple media repositories, they must be on the same
server, and you must configure a single instance to run the background tasks,
e.g.:
```yaml
media_instance_running_background_jobs: "media-repository-1"
```
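
Putting the shared-configuration changes for this worker together, with an
illustrative instance name:

```yaml
# Stop the main process serving the media repo itself...
enable_media_repo: False
# ...and nominate one media worker to run the background tasks.
media_instance_running_background_jobs: "media-repository-1"
```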
### `synapse.app.client_reader`
Handles client API endpoints. It can handle REST endpoints matching the
following regular expressions:
^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
^/_matrix/client/(api/v1|r0|unstable)/login$
^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
^/_matrix/client/(api/v1|r0|unstable)/keys/query$
^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
^/_matrix/client/versions$
^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
Additionally, the following REST endpoints can be handled for GET requests:
^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance:
^/_matrix/client/(r0|unstable)/register$
Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to
ensure that the purge history admin API is not used while pagination requests
for the room are in flight:
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$
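
A minimal `client_reader` worker configuration might look like the following
sketch; the port and file paths are illustrative:

```yaml
worker_app: synapse.app.client_reader

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
 - type: http
   port: 8084
   resources:
     - names:
       - client

worker_log_config: /home/matrix/synapse/config/client_reader_log_config.yaml
```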
### `synapse.app.user_dir`
Handles searches in the user directory. It can handle REST endpoints matching
the following regular expressions:
^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
When using this worker you must also set `update_user_directory: False` in the
shared configuration file to stop the main synapse running background
jobs related to updating the user directory.
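
The shared-configuration change is a one-liner; a sketch:

```yaml
# Stop the main process running user-directory background jobs;
# the user_dir worker takes over.
update_user_directory: False
```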
### `synapse.app.frontend_proxy`
Proxies some frequently-requested client endpoints to add caching and remove
load from the main synapse. To forward requests on to the main process, the
`worker_main_http_uri` option must be set in the worker config file. For
example:
worker_main_http_uri: http://127.0.0.1:8008
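
A fuller sketch of the worker config, assuming the main process listens for
HTTP on port 8008 as above; the worker's own port and the file paths are
illustrative:

```yaml
worker_app: synapse.app.frontend_proxy

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

# Where to proxy requests that must be handled by the main process.
worker_main_http_uri: http://127.0.0.1:8008

worker_listeners:
 - type: http
   port: 8086
   resources:
     - names:
       - client

worker_log_config: /home/matrix/synapse/config/frontend_proxy_log_config.yaml
```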
### `synapse.app.event_creator`

Handles some event creation. It can handle REST endpoints matching:

^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
^/_matrix/client/(api/v1|r0|unstable)/join/
^/_matrix/client/(api/v1|r0|unstable)/profile/

It will create events locally and then send them on to the main synapse
instance to be persisted and handled.

### Historical apps

*Note:* Historically there were more apps; they have since been amalgamated into
a single `synapse.app.generic_worker` app. The remaining apps are those that do
specific processing unrelated to requests, e.g. the `pusher` that handles
sending out push notifications for new events. The intention is for all of
these to be folded into the `generic_worker` app too, with config defining
which processes handle the various kinds of processing, such as push
notifications.
## Migration from old config
There are two main independent changes that have been made: introducing Redis
support and merging apps into `synapse.app.generic_worker`. Both these changes
are backwards compatible, so no changes to the config are required; however,
server admins are encouraged to plan to migrate to Redis, as the old style
direct TCP replication config is deprecated.
To migrate to Redis add the `redis` config as above, and optionally remove the
TCP `replication` listener from master and `worker_replication_port` from worker
config.
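
For reference, a minimal `redis` block in the shared configuration might look
like this sketch; the host and port shown are the common Redis defaults, not
values taken from this document:

```yaml
redis:
  enabled: true
  host: localhost
  port: 6379
  # password: <secret>  # only if your Redis instance requires authentication
```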
To migrate apps to use `synapse.app.generic_worker`, simply update the
`worker_app` option in the worker configs and wherever the workers are started
(e.g. in systemd service files; this is not required for synctl).
## Architectural diagram
The following shows an example setup using Redis and a reverse proxy:
```
Clients & Federation
|
v
+-----------+
| |
| Reverse |
| Proxy |
| |
+-----------+
| | |
| | | HTTP requests
+-------------------+ | +-----------+
| +---+ |
| | |
v v v
+--------------+ +--------------+ +--------------+ +--------------+
| Main | | Generic | | Generic | | Event |
| Process | | Worker 1 | | Worker 2 | | Persister |
+--------------+ +--------------+ +--------------+ +--------------+
^ ^ | ^ | | ^ | ^ ^
| | | | | | | | | |
| | | | | HTTP | | | | |
| +----------+<--|---|---------+ | | | |
| | +-------------|-->+----------+ |
| | | |
| | | |
v v v v
====================================================================
Redis pub/sub channel
```

View File

@@ -7,9 +7,6 @@ show_error_codes = True
show_traceback = True
mypy_path = stubs
[mypy-pymacaroons.*]
ignore_missing_imports = True
[mypy-zope]
ignore_missing_imports = True
@@ -66,21 +63,3 @@ ignore_missing_imports = True
[mypy-sentry_sdk]
ignore_missing_imports = True
[mypy-PIL.*]
ignore_missing_imports = True
[mypy-lxml]
ignore_missing_imports = True
[mypy-jwt.*]
ignore_missing_imports = True
[mypy-authlib.*]
ignore_missing_imports = True
[mypy-rust_python_jaeger_reporter.*]
ignore_missing_imports = True
[mypy-nacl.*]
ignore_missing_imports = True

View File

@@ -24,7 +24,9 @@ DISTS = (
"debian:sid",
"ubuntu:xenial",
"ubuntu:bionic",
"ubuntu:focal",
"ubuntu:cosmic",
"ubuntu:disco",
"ubuntu:eoan",
)
DESC = '''\

View File

@@ -3,60 +3,37 @@
# A script which checks that an appropriate news file has been added on this
# branch.
echo -e "+++ \033[32mChecking newsfragment\033[m"
set -e
# make sure that origin/develop is up to date
git remote set-branches --add origin develop
git fetch -q origin develop
pr="$BUILDKITE_PULL_REQUEST"
# if there are changes in the debian directory, check that the debian changelog
# has been updated
if ! git diff --quiet FETCH_HEAD... -- debian; then
if git diff --quiet FETCH_HEAD... -- debian/changelog; then
echo "Updates to debian directory, but no update to the changelog." >&2
echo "!! Please see the contributing guide for help writing your changelog entry:" >&2
echo "https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#debian-changelog" >&2
exit 1
fi
fi
# if there are changes *outside* the debian directory, check that the
# newsfragments have been updated.
if ! git diff --name-only FETCH_HEAD... | grep -qv '^debian/'; then
exit 0
fi
# Print a link to the contributing guide if the user makes a mistake
CONTRIBUTING_GUIDE_TEXT="!! Please see the contributing guide for help writing your changelog entry:
https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog"
# If check-newsfragment returns a non-zero exit code, print the contributing guide and exit
tox -qe check-newsfragment || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1)
echo
echo "--------------------------"
echo
matched=0
# check that any modified newsfiles on this branch end with a full stop.
for f in `git diff --name-only FETCH_HEAD... -- changelog.d`; do
lastchar=`tr -d '\n' < $f | tail -c 1`
if [ $lastchar != '.' -a $lastchar != '!' ]; then
echo -e "\e[31mERROR: newsfragment $f does not end with a '.' or '!'\e[39m" >&2
echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2
exit 1
fi
# see if this newsfile corresponds to the right PR
[[ -n "$pr" && "$f" == changelog.d/"$pr".* ]] && matched=1
done
if [[ -n "$pr" && "$matched" -eq 0 ]]; then
echo -e "\e[31mERROR: Did not find a news fragment with the right number: expected changelog.d/$pr.*.\e[39m" >&2
echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2
exit 1
fi

View File

@@ -1,34 +0,0 @@
#!/bin/bash
#
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script checks that line terminators in all repository files (excluding
# those in the .git directory) feature unix line terminators.
#
# Usage:
#
# ./check_line_terminators.sh
#
# The script will emit exit code 1 if any files that do not use unix line
# terminators are found, 0 otherwise.
# cd to the root of the repository
cd `dirname $0`/..
# Find and print files with non-unix line terminators
if find . -path './.git/*' -prune -o -type f -print0 | xargs -0 grep -I -l $'\r$'; then
echo -e '\e[31mERROR: found files with CRLF line endings. See above.\e[39m'
exit 1
fi

View File

@@ -2,9 +2,9 @@ import argparse
import json
import logging
import sys
import urllib2

import dns.resolver
from signedjson.key import decode_verify_key_bytes, write_signing_keys
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64

Some files were not shown because too many files have changed in this diff